query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Creating fake profiles for a given number of people using namedtuples | def init_profiles_using_namedtuple(no_profiles: int):
profiles = []
Profile = namedtuple('Profile', fake.profile().keys())
for _ in range(no_profiles):
profiles.append(Profile(**fake.profile()))
return profiles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enumerate_person(hf=0.5, age=(18, 60), n=100):\n for i in range(n):\n hfi = random.random() <= hf\n agei = random.randint(*age)\n namei = first_names[hfi]\n yield dict(gender=(1 if hfi else 0), age=agei, name=namei, idc=uuid.uuid4())",
"def create_members(N):\n for _ in range(N):\n name = fake.name()\n phone = fake.phone_number()\n email = fake.email()\n address = fake.address()\n Member.objects.create(\n name=name,phone=phone,\n email=email,address=address\n )",
"def get_stats_on_profiles_using_named_tuple(profiles: list) -> \"Stats\":\n\n \n if type(profiles) != list:\n raise TypeError(\"A list needs to be provided as an argument\")\n\n if not len(profiles):\n raise ValueError(\"An empty list is provided\")\n\n Profile = namedtuple(\"Profile\", (\"job\", \"company\", \"ssn\", \"residence\", \"current_location\", \n \"blood_group\", \"website\", \"username\", \"name\", \"sex\", \"address\", \"mail\", \"birthdate\"))\n \n \n Stats = namedtuple(\"Stats\", (\"largest_blood_type\", \"mean_current_location\", \"oldest_person_age\", \"average_age\"))\n\n Stats.__doc__ = \"Represents the various statistics on the complete profile list\"\n Stats.largest_blood_type.__doc__ = \"This is the Blood Group of the maximum profiles in the dataset\"\n Stats.mean_current_location.__doc__ = \"This is the Mean of all the locations in the dataset\"\n Stats.oldest_person_age.__doc__ = \"The age of the oldest person in the dataset\"\n Stats.average_age.__doc__ = \"The average age of all the people in the dataset\"\n\n try:\n profiles_tuple = [Profile(**p) for p in profiles]\n except:\n raise ValueError(\"The valid list of fake profiles need to be sent.\")\n\n return Stats(\n largest_blood_type = Counter(p.blood_group for p in profiles_tuple).most_common(1)[0][0],\n mean_current_location = (sum(p.current_location[0] for p in profiles_tuple)/len(profiles_tuple),\n sum(p.current_location[1] for p in profiles_tuple)/len(profiles_tuple)),\n oldest_person_age = (date.today() - max(profiles_tuple, key = lambda item: (date.today() - item.birthdate).days).birthdate).days,\n average_age = sum((date.today() - p.birthdate).days for p in profiles_tuple)/len(profiles_tuple)\n )",
"def test_more_profiles(self):\n\n for x in range(0, 10):\n User.objects.create_user(\n username=\"\".join((\"koalabear\", str(x))),\n email=\"\".join((\"[email protected]\", str(x))),\n password=\"\".join((\"secret\", str(x)))\n )\n\n c = Client()\n response = c.get(reverse('profiles:index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 10)",
"def test_populate_spawning_profile_list():\n\n tester = TestClass()\n spawning_profiles = tester.populate_spawning_profile_list()\n\n assert spawning_profiles\n\n assert len(spawning_profiles) == 2\n\n assert spawning_profiles[1].get_spawning_profile_name() == 'testSpawnName'\n\n assert spawning_profiles[0].get_spawning_profile_name() == 'Default'",
"def build_profile(first,last,**userInfo):\r\n #empty dictionary to hold the user's profile.\r\n profile={}\r\n profile['firstName']=first\r\n profile['lastName']=last\r\n\r\n \"\"\"loop though the additional key-value pairs in the dictionary userInfo and add each pair to the profile dictionary.\"\"\"\r\n for key, value in userInfo.items():\r\n profile[key]=value\r\n return profile",
"def create_multiple_people(sqla, n):\n person_schema = PersonSchema()\n new_people = []\n for i in range(n):\n valid_person = person_schema.load(person_object_factory())\n new_people.append(Person(**valid_person))\n sqla.add_all(new_people)\n sqla.commit()",
"def createFamilies(namesList, surnamesList):\n familiesList = []\n surnameIndex = 0\n for _ in range(0, NUMBER_OF_FAMILY):\n # Choose a size for the family\n numberOfMembers = randint(1, MAX_NUMBER_OF_FAMILY_MEMBER)\n # Family will contain the name in pos 0 and the surname in pos 1\n familyEl = [None] * numberOfMembers\n casualFamily = False\n for j in range(0, len(familyEl)):\n familyEl[j] = [None] * PersonAttribute.numberOfAttribute()\n # Append a random name\n name = str(namesList[randint(0, len(names) - 1)])\n familyEl[j][int(PersonAttribute.NAME)] = name\n # Append the next surname\n surname = str(surnamesList[surnameIndex])\n familyEl[j][int(PersonAttribute.SURNAME)] = surname\n # Append a random age\n if j == 0:\n age = randint(18, 99)\n else:\n age = randint(1, 99)\n familyEl[j][int(PersonAttribute.AGE)] = age\n # Append the mail\n mail = name.lower() + \".\" + surname.lower() + str(age) + \"@immunoPoli.it\"\n\n familyEl[j][int(PersonAttribute.MAIL)] = mail\n # Append the phone number\n number = 0\n for i in range(0, PHONE_NUMBER_LENGTH):\n number += randint(0, 9) * 10 ** i\n familyEl[j][int(PersonAttribute.NUMBER)] = number\n # Append the app attribute\n if random() < PROBABILITY_TO_HAVE_APP:\n app = \"True\"\n else:\n app = \"False\"\n familyEl[j][int(PersonAttribute.APP)] = app\n\n # In every family there will be at least 2 surnames\n # In case of friends living together there is a probability of 30% to have more than 2 surnames in a family\n if j == 0 and randint(0, 100) < 30: # Family of not familiar\n casualFamily = True\n if j == 0 or (numberOfMembers > 2 and casualFamily):\n surnameIndex += 1\n if surnameIndex >= len(surnames):\n surnameIndex = 0\n familiesList.append(familyEl)\n surnameIndex += 1\n if surnameIndex >= len(surnames):\n surnameIndex = 0\n return familiesList",
"def populate(N=5):\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]",
"def setup_people(access_control_list):\n all_users = set()\n for users in access_control_list.values():\n all_users.update({(user[\"email\"], user[\"name\"]) for user in users})\n\n with factories.single_commit():\n for email, name in all_users:\n factories.PersonFactory(email=email, name=name)",
"def getHouseholdProfiles(\n n_persons,\n weather_data,\n weatherID,\n seeds=[0],\n ignore_weather=True,\n mean_load=True,\n cores=mp.cpu_count() - 1,\n):\n\n # get the potential profile names\n filenames = {}\n for seed in seeds:\n profile_ID = \"Profile\" + \"_occ\" + str(int(n_persons)) + \"_seed\" + str(seed)\n if not ignore_weather:\n profile_ID = profile_ID + \"_wea\" + str(weatherID)\n\n if mean_load:\n profile_ID = profile_ID + \"_mean\"\n\n filenames[seed] = os.path.join(\n tsib.data.PATH, \"results\", \"occupantprofiles\", profile_ID + \".csv\"\n )\n\n # check how many profiles do not exist#\n not_existing_profiles = {}\n for seed in seeds:\n if not os.path.isfile(filenames[seed]):\n not_existing_profiles[seed] = filenames[seed]\n\n # info about runtime\n if cores < 1:\n warnings.warn('Recognized cores are less than one. The code will behave as the number is one.')\n cores = 1\n\n _runtime = np.floor(float(len(not_existing_profiles))/cores)\n _log_str = str(len(not_existing_profiles)) + \" household profiles need to get calculated. \\n\"\n _log_str += \"With \" + str(cores) + \" threads, the estimated runtime is \" + str(_runtime) + \" minutes.\"\n logging.info(_log_str)\n\n # run in parallel all profiles\n if len(not_existing_profiles) > 1:\n new_profiles = simHouseholdsParallel(\n int(n_persons),\n 2010,\n len(not_existing_profiles),\n singleProfiles=True,\n weather_data=weather_data,\n get_hot_water=True,\n resample_mean=mean_load,\n cores=cores,\n )\n # if single profile just create one profile and avoid multiprocessing\n elif len(not_existing_profiles) > 0:\n one_profile = simSingleHousehold(\n int(n_persons),\n 2010,\n weather_data=weather_data,\n get_hot_water=True,\n resample_mean=mean_load,\n )\n new_profiles = [one_profile]\n\n # write results to csv files\n for i, seed in enumerate(not_existing_profiles):\n new_profiles[i].to_csv(not_existing_profiles[seed])\n\n # load all profiles\n profiles = []\n for seed in seeds:\n profile = pd.read_csv(filenames[seed], index_col=0)\n # TODO get a proper indexing in tsorb based on the weather data\n profile.index = weather_data.index\n\n profiles.append(profile)\n\n return profiles",
"def create_users(N):\n for _ in range(N):\n name = fake.name()\n phone = fake.phone_number()\n email = fake.email()\n role = random.choice([\"shepherd\",\"admin\"])\n password = fake.user_name\n User.objects.create(\n name=name,phone=phone,\n email=email,role=role,\n password=password\n )",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\r\n # Build a dict with the required keys.\r\n profile = {'first': first, 'last': last}\r\n # Add any other keys and values.\r\n for key, value in user_info.items():\r\n profile[key] = value\r\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)",
"def create_n_defenders(n, rs_nb, hp_proportion, hp_unit_cost=0, offset=0, name=\"\"):\n defenders = []\n for i in range(offset,n+offset):\n if(name != \"\"):\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost, name=name)\n else:\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost)\n defenders.append(d)\n return defenders",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def fake_friend_data():\n\n for _ in range(0, 35):\n user_id = random.randint(1, 8)\n name = fake.name()\n phone = fake.phone_number()\n\n print (str(user_id) + '|' + name + '|' + phone)",
"def build_profile(first,last, **user_info): #creates dictionary called user_info to pass info\n profile = {} #empty dictionary named \"profile\" to store a users info\n profile['first name'] = first #stores 'first' parameter under key 'first name'\n profile['last name'] = last #stores 'last' parameter under key 'last name'\n for key, value in user_info.items(): #loops through key-value pairs in user_info\n profile[key] = value #adds them to the 'profile' dictionary\n return profile #returns that completed dictionary to call line",
"def user_gen(usernames_number = 1):\n for i in range(usernames_number):\n name1 = random.choice(words).title()\n name2 = random.choice(words).title()\n str_number = str(random.randint(1, 100)) \n print(f\"{name1}{name2}{str_number}\")",
"def test_creation_profile_1():\n assert tuple_NT[0][0] == LIST_dict[0]['name'], \"Name is not getting stored properly\"",
"def populate_community():\n\t\tfor i in range(1,11):\n\t\t\tSimulation.community[\"person\"+str(i)] = Person(\"person\"+str(i))",
"def fill_repo_with_random_persons(self, n=10, id_lb=1, id_ub=100):\r\n random_ids, random_names, random_phone_numbers = self.generate_random_persons(n, id_lb, id_ub)\r\n for id_, name, phone_num in zip(random_ids, random_names, random_phone_numbers):\r\n self.add_person(id_, ' '.join(name), phone_num)",
"def create_fake_users(count, no_echo):\n users = User.create_fake_users(count=count)\n if not no_echo:\n for user in users:\n print(f'{user[0]}: {user[1]}')",
"def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info",
"def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info"
]
| [
"0.6443193",
"0.63009214",
"0.6153241",
"0.6137819",
"0.6063342",
"0.6063036",
"0.60477436",
"0.60470337",
"0.60437965",
"0.60285556",
"0.6014141",
"0.59978324",
"0.5987455",
"0.5987455",
"0.59811175",
"0.593664",
"0.593664",
"0.5933317",
"0.59086925",
"0.59086925",
"0.59086925",
"0.5889015",
"0.58483",
"0.5804192",
"0.5802574",
"0.57890385",
"0.57856715",
"0.57833755",
"0.5759924",
"0.5759924"
]
| 0.7333659 | 0 |
This function finds the oldest person from the given profiles and calculates the duration. The minimum birthdate and time are returned. | def oldest_person_nt(all_profile_nt: namedtuple) -> float:
"""Param: all_profile_nt: Named tuple containing all profiles"""
value = min(all_profile_nt, key=lambda v: v[-1])
date_today = datetime.date.today()
age = (date_today - value.birthdate).days
return int(age/365) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def oldest():\n def get_age(person_list):\n return person_list['age']\n return sorted(PEOPLE_LIST, key = get_age, reverse=True)",
"def take_min(self):\n return self.get_first()",
"def min(self):\n\n return time_stat(self, stat=\"min\")",
"def oldest():\n # fill it out\n newlist = sorted(PEOPLE_LIST, key=itemgetter('age'), reverse=True)\n return newlist",
"def oldest_person_dc(all_profile_dict: dict) -> float:\n \"\"\"Param:all_profile_dc: dictionary containing all profiles\"\"\"\n value = min(all_profile_dict.values(), key=lambda v: v['birthdate'])\n date_today = datetime.date.today()\n age = (date_today - value['birthdate']).days\n return int(age/365)",
"def minStartTime(listAct):\n maxAct = max(listAct, key=lambda activity: activity.seq + activity.duration)\n return maxAct.seq + maxAct.duration",
"def find_min(self):\n return self.min",
"def find_min(self):\n return self.min",
"def reservetime_min(self):\n return self._get_time_info([\"Reserve_Time_M\", \"reserveTimeMinute\"])",
"def min_earned(table):\n money_earned = employees_earning(table)\n temp = 0\n min_earned_employee = [0]\n for employee in money_earned:\n if temp == 0:\n temp = money_earned[employee]\n if money_earned[employee] <= temp:\n temp = money_earned[employee]\n if money_earned[employee] > 0:\n min_earned_employee[0] = (str(employee) + \": \" + str(money_earned[employee]))\n elif money_earned[employee] == 0:\n min_earned_employee.append(str(employee) + \": \" + str(money_earned[employee]))\n if len(min_earned_employee) > 1:\n min_earned_employee.pop(0)\n return min_earned_employee",
"def min(self):\n return self.get_first()",
"def smallest_ellapsed(login):\n df = login\n df[\"Time\"] = pd.to_datetime(df[\"Time\"])\n\n\n return df.groupby(\"Login Id\").agg(lambda group: group.diff().min()).dropna()[\"Time\"].to_frame()",
"def oldest(self):\n # Your implementation here",
"def py2_miller_min_left(start_date=BITE_CREATED_DT):\n\n td = (PY2_DEATH_DT - start_date)\n return round(((td.days*24 + td.seconds/3600)/1022), 2)",
"def MinLifetime(self):\r\n\t\treturn self._get_attribute('minLifetime')",
"def starting_date(cls, player):\r\n\r\n\t\treturn cls.RESULTDAILY[player][0]",
"def _get_minimum(self):\n return self._minimum",
"def call(roster):\n value_list = roster.values()\n min_value = min(value_list)\n\n name = []\n for key,value in roster.items():\n if value == min_value:\n name.append(key)\n return random.choice(names)",
"def _findSmallestMoney(self):\n\t\tsmallest = 99999999 # Just some high number\n\t\tfor x in self.playerList:\n\t\t\tif x != None:\n\t\t\t\tif x.money < smallest:\n\t\t\t\t\tsmallest = x.money\n\t\t\t\t\t\n\t\treturn smallest",
"def min(self):\n return min(self)",
"def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime",
"def oldest_ow_instance(ow_launch_data):\n log.info(\"oldest_ow_instance( %s )\", ow_launch_data)\n sorted_ow_launch_data = sorted(ow_launch_data.items(), key=lambda x: x[1])\n log.info(\"sorted_ow_launch_data = %s\", sorted_ow_launch_data)\n oldest_ow_instance = sorted_ow_launch_data[0]\n ow_instance_id, launch_time = oldest_ow_instance\n log.info(\"ow_instance_id = %s, ow_launch_data = %s\",\n ow_instance_id, ow_launch_data)\n print(\"Oldest OW instance ==> {}\".format(ow_instance_id))\n log.info(\"Oldest OW instance ==> %s\", ow_instance_id)\n return ow_instance_id",
"def _get_min_expense(self):\n pass",
"def _get_earliest_start(self, valid_list):\n\n return min([item.coords[\"time\"].values[0] for item in valid_list])",
"def min_time(self):\n return self._ll_tree_sequence.get_min_time()",
"def py2_miller_min_left():\r\n left = PY2_RETIRED_DT - BITE_CREATED_DT\r\n left_earth_mins = round(left.total_seconds()/60,2)\r\n\r\n left_miller_hours = left_earth_mins/3679200\r\n left_miller_mins = round(left_miller_hours * 60,2) #assume that 1 miller hour is 60 miller mins (same as earth)\r\n\r\n return left_miller_mins",
"def get_oldest_dog(*args):\n\toldest_dog = args[0]\n\tfor d in args:\n\t\tif(d.age > oldest_dog.age):\n\t\t\toldest_dog = d\n\treturn oldest_dog",
"def get_min(self):\n return self.serie.min()",
"def findbestminute(dataset):\n guard = \"\"\n max_min = 0\n max_min_times = 0\n for item in dataset:\n sleep = item[1]\n maxi = sleep.most_common(1)\n if maxi[0][1] > max_min_times:\n guard, max_min, max_min_times = item[0], maxi[0][0], maxi[0][1]\n return (guard, max_min, max_min_times)",
"def _ensure_min(self):\n\t\tif self.min is None: return\n\t\twhile len(self.members) < self.min:\n\t\t\tself.create()"
]
| [
"0.6221359",
"0.5758762",
"0.57277995",
"0.57165074",
"0.57137835",
"0.5579486",
"0.5531427",
"0.5531427",
"0.5455574",
"0.5450134",
"0.5420511",
"0.5401952",
"0.5370815",
"0.5282806",
"0.51883173",
"0.5179302",
"0.5165424",
"0.5159719",
"0.5157662",
"0.515641",
"0.5151814",
"0.51311404",
"0.5130226",
"0.5130022",
"0.51274765",
"0.5118046",
"0.5117664",
"0.5116436",
"0.5110701",
"0.5110317"
]
| 0.6448769 | 0 |
This function uses the mode function defined in the statistics library to find the most frequently occurring blood group in the list. The list is generated using a lambda function and passed to the mode function as a parameter. The code is then timed, and the result and the elapsed time are sent back. | def max_bloodgroup_nt(all_profile_nt: namedtuple) -> tuple:
"""Param:all_profile_nt: Named tuple containing all profiles"""
blood_group = mode(list(map(lambda v: v[5], all_profile_nt)))
return blood_group | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findMode(list):\n # Use Python's Counter function on the list\n values = Counter(list)\n # Returns the highest occurring item\n return values.most_common(1)[0][0]",
"def mode(lst):\n cnt = Counter(lst)\n return cnt.most_common(1)[0][0]",
"def lmode(inlist):\r\n\r\n scores = pstats.unique(inlist)\r\n scores.sort()\r\n freq = []\r\n for item in scores:\r\n freq.append(inlist.count(item))\r\n maxfreq = max(freq)\r\n mode = []\r\n stillmore = 1\r\n while stillmore:\r\n try:\r\n indx = freq.index(maxfreq)\r\n mode.append(scores[indx])\r\n del freq[indx]\r\n del scores[indx]\r\n except ValueError:\r\n stillmore=0\r\n return maxfreq, mode",
"def mode():\n\n # assumption: if more than 1 mode is found, return list of modes instead of single item\n\n # use dictionary to track occurance for each int w/ key rep int, val rep occurance count\n countdict = {}\n\n for item in inlist:\n # to process each int, check if int already exists in dict as a key\n if item in countdict: \n # int already exists - increment the associated count (occurance count)\n countdict[item] = countdict[item]+1\n else: \n # int does not exist - make new entry in dict for first occurance of new key\n countdict[item] = 1\n \n # call values method to return a list of val in dict\n countlist = countdict.values()\n\n maxcount = max(countlist)\n\n modelist = []\n # itering though the dict keys looking for a key w/ a val that matches max count\n for item in countdict:\n # when found such a key, place that key in the mode list \n if countdict[item] == maxcount:\n # key/s assoc w/ count, appended to list of modes\n modelist.append(item)\n\n # check num of modes in collection if there's only one mode - output single item\n if len(modelist) <= 1:\n # for single mode - output single mode\n for item in modelist:\n return item\n else: \n # more than 1 mode in collection - output list of modes\n return modelist",
"def mode(lst):\n # Create the dictionnary\n counter = dict()\n for e in lst:\n counter[e] = counter.get(e, 0) + 1\n # Find the most frequent value\n max_value = 0\n max_count = 0\n for n, count in counter.items():\n if count > max_count:\n max_count = count\n max_value = n\n return max_value",
"def mode(lyst):\n # Create a set of one occurance of the numbers\n nums = set(lyst)\n return_value = 0\n top_count = 0\n \n # Iterate over nums and count the occurance of each number in lyst\n for num in nums:\n if lyst.count(num) > top_count:\n return_value = num \n \n return return_value",
"def get_mode(lst):\n freq = {}\n for i in lst:\n if i in freq:\n freq[i] += 1\n else:\n freq[i] = 1\n\n return max(freq, key=lambda f: freq[f])",
"def calc_mode(nums):\n c = Counter(nums)\n nums_freq = c.most_common()\n max_count = nums_freq[0][1]\n\n modes = []\n for num in nums_freq:\n if num[1] == max_count:\n modes.append(num[0])\n\n return modes",
"def maximum_F_score(groundtruth, eval_boxes, match_mode=\"ellipse\"):\n if match_mode==\"ellipse\":\n matcher = lambda gt,other: gt.ellipse_matches(other)\n elif match_mode==\"iou\":\n matcher = lambda gt,other: gt.iou_score(other) >= 0.5\n all_confidences = {box.confidence\n for boxes in eval_boxes.values()\n for box in boxes}\n if len(all_confidences) > 50:\n all_confidences = set(np.random.choice(list(all_confidences), 50, replace=False))\n if all_confidences:\n return max([\n F1_score(**calc_TP_FP(groundtruth, eval_boxes, matcher, confidence))\n for confidence in all_confidences\n ])\n else:\n return 0",
"def max_bloodgroup_dc(all_profile_dict: dict) -> tuple:\n \"\"\"Param:all_profile_dc: dictionary containing all profiles\"\"\"\n value = mode(\n list(map(lambda v: v['blood_group'], all_profile_dict.values())))\n return value",
"def mode(self):\r\n\t\t_set\t= set(self.sample)\r\n\t\t_list\t= [self.sample.count(i) for i in _set]\r\n\t\treturn list(_set)[_list.index(max(_list))]",
"def mode(nums):\n dict_my = {num: nums.count(num) for num in nums}\n print(dict_my, 'my dict')\n max_value = max(dict_my.values())\n print(max_value, 'max value')\n # now we need to see at which index the highest value is at\n\n for (num, freq) in dict_my.items():\n if freq == max_value:\n print('found number with max freq', num)\n return num",
"def find_mode(lst):\n\n num_counts = {}\n\n for num in lst:\n num_counts[num] = num_counts.get(num, 0) + 1\n\n v = sorted(num_counts.values())\n v.reverse()\n mode_count = v[0]\n\n mode = []\n\n for num in num_counts:\n if num_counts[num] == mode_count:\n mode.append(num)\n return mode",
"def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]",
"def get_mode(numlist):\n count = np.bincount(numlist)\n return np.argmax(count)",
"def get_mode(x):\n mode, count = Counter(x).most_common(1)[0]\n return mode",
"def mode(x: List[float]) -> List[float]:\n counts = Counter(x)\n max_count = max(counts.values())\n return [x_i for (x_i, count) in counts.items() if count == max_count]",
"def findMode(arr):\n \n #*********************************************************************\n #@param : an array of n elements in read from prompt *\n #@return : the value that appears the most in the tree, it frequency *\n # and also the number of unique numbers *\n #*********************************************************************\n \n tree = bst.BST()\n modalVal = 0\n maxFreq = 0\n uniques = 0\n for k in arr:\n node = tree.search(k)\n if node == None:\n tree.insert(k,1)\n uniques+=1\n else:\n if node.key == k:\n node.value+=1\n if node.value > maxFreq:\n maxFreq = node.value\n modalVal = k\n return (modalVal, maxFreq, uniques)",
"def mode(points):\r\n\t\tcount = {}\r\n\t\tmax = None\r\n\t\tfor item in points:\r\n\t\t\tif item in count:\r\n\t\t\t\tcount[item] = count[item] + 1\r\n\t\t\telse:\r\n\t\t\t\tcount[item] = 1\r\n\t\t\tif max is None:\r\n\t\t\t\tmax = (item, 1)\r\n\t\t\telse:\r\n\t\t\t\tif count[item] > max[1]:\r\n\t\t\t\t\tmax = (item, count[item])\r\n\t\treturn max[0]",
"def f1max_score(pred, mode, namespace, ftype=1):\n results = run(pred, mode, namespace, ftype)\n return max(results[:,1])",
"def find_modes(self, ntarget, ltarget):\n\n ages = []\n freqs = []\n for model in self.models:\n freq = model.find_mode(ntarget, ltarget)\n if (math.isnan(freq)): continue\n freqs.append(freq)\n ages.append(model.glb[iage])\n return ages, freqs",
"def mode_statistic(data, percentiles=range(10,91,10)):\n from scipy.interpolate import UnivariateSpline, LSQUnivariateSpline\n \n so = np.argsort(data)\n order = np.arange(len(data))\n #spl = UnivariateSpline(data[so], order)\n \n knots = np.percentile(data, percentiles)\n dx = np.diff(knots)\n mask = (data[so] >= knots[0]-dx[0]) & (data[so] <= knots[-1]+dx[-1])\n spl = LSQUnivariateSpline(data[so][mask], order[mask], knots, ext='zeros')\n \n mask = (data[so] >= knots[0]) & (data[so] <= knots[-1])\n ix = (spl(data[so], nu=1, ext='zeros')*mask).argmax()\n mode = data[so][ix]\n return mode",
"def get_mostFrequent(self, n=5):\r\n pass",
"def get_mostFrequent(self, n=5):\r\n pass",
"def get_max_score(location_list, grid, shape):",
"def ensemble(all_predict, size):\n mode_pred = np.zeros(shape=(size,1))\n for i in range(np.shape(all_predict)[1]):\n pred= mode(all_predict[:,i])\n # break ties randomly\n if pred[1] == 1:\n pred_val = random.randrange(2)\n else:\n pred_val = pred[0]\n mode_pred[i,0] = pred_val\n # return most common prediction\n return mode_pred",
"def getmost(val_list):\n # Problem: In GC datasets is the purpose a string-list\n if val_list[0][0] == \"[\":\n val_list = [elem for val in val_list for elem in eval(val)]\n # remove unkowns\n val_list = [elem for elem in val_list if elem != \"unknown\"]\n if len(val_list) == 0:\n return \"unknown\"\n # Select the most prevalent purpose\n uni, counts = np.unique(val_list, return_counts=True)\n return uni[np.argmax(counts)]",
"def mode(self):\n mode = max(self.data, key=self.data.count)\n return mode",
"def get_max(criterion, max_card, elements):\n if max_card <= 0:\n raise ValueError(\n \"max_card: \" + str(max_card) + \"\\n\" +\n \"The maximum cardinal cannot be null nor negative!\"\n )\n\n maxima = []\n currentMax = 0\n for e in elements:\n if 0 < e.cardinal <= max_card:\n if len(maxima) == 0:\n currentMax = criterion(e)\n maxima.append((e, currentMax))\n else:\n newCandidate = criterion(e)\n if newCandidate == currentMax:\n maxima.append((e, currentMax))\n elif newCandidate > currentMax:\n maxima = []\n currentMax = newCandidate\n maxima.append((e, currentMax))\n return maxima",
"def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)"
]
| [
"0.6360939",
"0.57988197",
"0.5796576",
"0.5660203",
"0.5659024",
"0.56108975",
"0.5607258",
"0.5543546",
"0.5522401",
"0.54757273",
"0.5412688",
"0.5288786",
"0.52810484",
"0.5259109",
"0.52543527",
"0.51720995",
"0.507658",
"0.50760496",
"0.5062678",
"0.50548244",
"0.5053744",
"0.50534534",
"0.5051898",
"0.5051898",
"0.49908882",
"0.49853164",
"0.497433",
"0.496644",
"0.49571103",
"0.49555242"
]
| 0.6243897 | 1 |
To create a fake stock data set for an imaginary stock exchange of the top 100 companies (name, symbol, open, high, close). | def stock_market(no_profiles: int) -> tuple:
all_companies = []
Stocks = namedtuple("Stocks", 'name symbol open high close company_weight')
MkValue_ = random.uniform(1000, 50000, 100)
wts_ = random.uniform(0, 1, 100)
wts_ = wts_/sum(wts_)
for _ in range(100):
name = fake.company()
open_ = round(MkValue_[_]*wts_[_],2)
close = round(open_ * random.uniform(0.7, 1.15), 2)
high = round(open_ * random.uniform(0.85, 1.15), 2)
if high < open_:
high = open_
if high < close:
high = close
all_companies.append(
Stocks(name=name, symbol=symbol(name), open=open_, high=round(high, 2), close=round(close, 2), company_weight=round(wts_[_], 4)))
stock_index = round(
sum(x.open * x.company_weight for x in all_companies), 4)
highest_for_day = round(
sum(x.high * x.company_weight for x in all_companies), 2)
lowest_close_for_day = round(
sum(x.close * x.company_weight for x in all_companies), 2)
# print(f"\n------------------------------------Top 100 listed companies on Fake Stock Exchange------------------------------------")
# [print(x) for x in sorted(all_companies, key=lambda x:x.symbol)]
# print(f"\n--------------Main details on {date.today()}--------------")
# print(f"\nStart of the day: {stock_index}")
# print(f"Highest for the day: {highest_for_day}")
# print(f"Lowest close for the day: {lowest_close_for_day}")
return sorted(all_companies, key=lambda x: x.symbol), stock_index, highest_for_day, lowest_close_for_day | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_52_week_high_low_for_stocks(stocks):\n print(\"Fetching stock quotes.\")\n # Build a full list of symbols\n symbols = []\n for key in stocks.keys():\n symbols.append(key)\n\n num_of_batches = int(len(symbols)/BATCH_SIZE) + 1\n\n all_stocks_df = pandas.DataFrame()\n\n #all_stocks_df = pandas.DataFrame()\n\n # Get quotes for all the stocks in batches\n for i in range(0, num_of_batches):\n print(\"Fetching quotes in batch: \" + str(i+1) + \"/\" + str(num_of_batches))\n start = i*BATCH_SIZE\n end = start + BATCH_SIZE\n batch_symbols = symbols[start: end]\n batch_symbols_query = '+'.join(batch_symbols)\n request_url = YAHOO_FINANCE_API + \"?\" + YAHOO_FINANCE_SYMBOL_PARAM + \"=\" + batch_symbols_query +\\\n \"&\" + YAHOO_FINANCE_FORMAT_PARAM + \"=\" + YAHOO_FINANCE_SYMBOL_PARAM + YAHOO_FINANCE_52_ASK_PRICE +\\\n YAHOO_FINANCE_BID_PRICE + YAHOO_FINANCE_52_CLOSE_PRICE + YAHOO_FINANCE_52_WEEK_LOW +\\\n YAHOO_FINANCE_52_WEEK_HIGH + YAHOO_FINANCE_52_LOW_CHANGE +\\\n YAHOO_FINANCE_52_HIGH_CHANGE + YAHOO_FINANCE_DIV_YIELD\n r = requests.get(request_url)\n\n # Read the returned CSV as a pandas table\n # Returned format is NAME,ASK,BID,52-wLow,52-wHigh\n df = pandas.read_table(StringIO(r.text), header=None, sep=',')\n all_stocks_df = all_stocks_df.append(df, ignore_index=True)\n\n # Delay to slow down things\n time.sleep(1)\n\n\n # Assign columns\n print(\"Stock quotes have been fetched. Beginning analysis...\")\n all_stocks_df.columns=['symbol', 'ask', 'bid', 'close', '52w-low', '52w-high', '52w-low-change', '52w-high-change', 'div-iteryield']\n\n # Add the percent change columns\n all_stocks_df['52w-%-low-change'] = all_stocks_df['52w-low-change']/all_stocks_df['52w-low']*100\n all_stocks_df['52w-%-high-change'] = all_stocks_df['52w-high-change'] / all_stocks_df['52w-high'] * 100\n\n # Add the names and sectors\n all_stocks_df['name'] = \"\"\n all_stocks_df['sector'] = \"\"\n for index, row in all_stocks_df.iterrows():\n all_stocks_df.loc[index, 'name'] = stocks[row['symbol']][0]\n all_stocks_df.loc[index, 'sector'] = stocks[row['symbol']][1]\n\n\n # Process the received quotes\n sorted_values = all_stocks_df.sort_values('52w-%-low-change')\n\n # Done\n print(\"Analysis completed.\")\n return sorted_values",
"def reqData(self):\r\n #self.reqGlobalCancel()\r\n #self.add_historical(\"Stock('TSLA', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('IBM', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('MSFT', 'SMART', 'USD')\")\r\n self.add_historical(\"Stock('FB', 'SMART', 'USD')\")",
"def get_stock_data(symbol):\n # Set current dates\n start = date(date.today().year, 1, 1) # first of current year\n end = date.today() # today\n\n # Get yahoo Yahoo data\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n\n # Rename columns\n data.columns = [\"Highest price (USD)\",\n \"Lowest price (USD)\",\n \"Opening price (USD)\",\n \"Closing price (USD)\",\n \"Volume\",\n \"Adjusted closing price (USD)\"]\n\n return data",
"def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. volume': 'Volume'})\n\n return df_full",
"def getStockData():\n pass",
"def test_find_stock_items(self):\n pass",
"def get_stock(symbol, start, end):\n df = pdr.DataReader(symbol, 'yahoo', start, end)\n df = df.sort_index(axis=0)\n return df",
"def YahooFinancials_Data(Ticker=[],Start='',End ='',Frequency ='daily'):\n\n\n \n import pandas as pd\n from yahoofinancials import YahooFinancials\n import datetime as dt \n \n Ticker = Ticker or input(\"Enter Tcikers separated by',': \").split(',')\n Start = Start or input(\"Enter Start Date separated by '-': \") or (dt.date.today()-\n dt.timedelta(1825)).strftime(\"%Y-%m-%d\")\n End = End or input(\"Enter End Date separated by '-': \") or (dt.date.today()).strftime(\"%Y-%m-%d\")\n Frequency = Frequency or input(\"Enter Frequency like 'daily','weekly': \") or 'daily'\n \n data = pd.DataFrame()\n for i in range(len(Ticker)):\n try:\n yahoo_financials = YahooFinancials(Ticker[i])\n Json_obj = yahoo_financials.get_historical_price_data(Start, End, Frequency)\n Ohlv = Json_obj[Ticker[i]]['prices']\n temp = pd.DataFrame(Ohlv)[[\"formatted_date\",\"adjclose\"]]\n temp.set_index(\"formatted_date\", inplace = True)\n temp = temp[~temp.index.duplicated(keep = 'first')]\n data[Ticker[i]] = temp['adjclose']\n \n except:\n print(f\"Unable to get the Data for: {Ticker[i]}\")\n continue\n \n return data",
"def getStock(symbol, start, end):\n df = pd.io.data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df",
"def stock():\n stock=stock_data('AAPL',start(2019,12,1))\n return stock",
"def __init__(self, stocks=None, start_date='FiveYear', end_date='Today', features=None, verbose=False, capital=0):\n\n # Set default features\n if type(features) is not list:\n features = [features]\n\n if features is None:\n features = []\n\n if DataTypes.ALL in features:\n features = DataTypes.ALL\n\n # set variables for a stock universe\n self.verbose = verbose\n self.stocks = stocks\n self.features = features\n self.stock_data = {}\n end_date = datetime.datetime.today() if end_date == 'Today' \\\n else datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n self.end_date = end_date\n\n start_date = end_date - datetime.timedelta(365 * 5 + 1) if start_date == 'FiveYear' \\\n else datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n self.start_date = start_date\n\n self.date = start_date # initial date that the stock universe is on\n\n # create a list of dates in the YYYY-MM-DD format\n self.str_dates = []\n self.dates = []\n\n self.starting_capital = capital\n self.cash = []\n\n self.collect_all_stock_data()\n self.unique_data = {}\n self.shuffled_data_reset()\n # TODO add ability to order stocks and build a profile having total percent returns as well as capital\n # TODO have ability to select types of data to get fundementals, trends, stock twits anal,\n # TODO ad meter, past prices and volumes, twitter reddit and press releases",
"def getStock(symbol, start, end):\n df = data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df",
"def add_stock(self, symbol):\n verbose_message(\"Adding \" + symbol + \"...\")\n if symbol not in self.stocks:\n self.stocks += [symbol]\n\n data = StockData()\n\n data.name = StockDataCollection.get_stock_name(symbol)\n data.symbol = symbol\n data.market = StockDataCollection.get_market_data(symbol,\n str(self.start_date)[:USEFUL_TIMESTAMP_CHARS],\n str(self.end_date)[:USEFUL_TIMESTAMP_CHARS])\n\n # create a list of dates in the YYYY-MM-DD format\n data.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(data.market.index)]\n data.dates = data.market.index\n\n for i in data.dates:\n if i not in self.dates:\n self.dates += [i]\n self.dates.sort()\n self.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(self.dates)]\n\n for collection_function in self.features:\n collection_function(data)\n\n data.position = []\n for _ in data.dates:\n data.position += [0]\n if type(self.cash) is not pd.DataFrame:\n self.cash += [self.starting_capital]\n\n data.position = pd.DataFrame({\"Position\": data.position}).set_index(data.dates)\n if type(self.cash) is not pd.DataFrame:\n self.cash = pd.DataFrame({\"cash\": self.cash}).set_index(data.dates)\n debug_message(data)\n self.shuffled_data_reset()\n self.stock_data[symbol] = data",
"def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()",
"def get_stock_data(company, start_date_inc, stop_date_inc):\n\n api_key = 'Bo9P_cJnmf5EsQPp1Bdp'\n desired_cols = 'date,close'\n\n# ticker = 'FB'\n# start_date_inc = '20170801'\n# end_date_inc = '20170831'\n\n # format and send the request\n payload = {\n 'date.gte': start_date_inc,\n 'date.lte': stop_date_inc,\n 'ticker': company,\n 'qopts.columns': desired_cols,\n 'api_key': api_key\n }\n meta_url = r'https://www.quandl.com/api/v3/datatables/WIKI/PRICES'\n r = requests.get(meta_url, params=payload)\n\n # convert to a pandas dataframe\n df = pd.DataFrame(r.json()['datatable']['data'])\n if not df.empty:\n df.columns = ['date', 'price']\n df['date'] = pd.to_datetime(df['date'])\n\n return df",
"def test_low_stockprice_high_interest(self):\n stock_prices = np.array([[5, 4, 4, 2],\n [5, 3, 3, 3],\n [5, 4, 2, 2],\n [5, 3, 3, 1]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)",
"def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks",
"async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock",
"def compile_data():\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n metasp = pd.DataFrame()\r\n for count, ticker in enumerate(tickers):\r\n df = pd.read_csv('sp500_data\\{}.csv'.format(ticker))\r\n df.set_index('Date', inplace=True)\r\n df.rename(columns={'Adj Close': ticker}, inplace=True)\r\n df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], 1, inplace=True)\r\n if metasp.empty:\r\n metasp = df\r\n else:\r\n metasp = metasp.join(df, how = 'outer')\r\n if count % 10 == 0:\r\n print(count)\r\n metasp.to_csv('sp500_meta.csv')",
"def get_historic_data(end_date = datetime.now(), \n start_date = datetime.now() + timedelta(-365),\n ticker=[],\n close_only=True):\n #checks if the parameters provided through \"ticker\" is not an empty list\n #if it is, the function won't go forward after this point. returns explanatory message.\n if ticker == []:\n return \"Empty list of tickers\"\n \n #if a string is provided as \"ticker\" parameter, then it splits the string by \n #spaces and store the outcome in a list.\n elif type(ticker) is str:\n ticker = ticker.split(\" \")\n \n iex_token = os.getenv(\"IEX_TOKEN\")#not necessary anymore.\n if type(iex_token) == str: print(\"IEX Key found successfully ...getting data\")\n else: return \"Error: IEX Key NOT found\"\n \n \n #Gets historical data with the parameters provided.\n #Gets only \"close\" and \"volume\" value for efficiency.\n prices = get_historical_data(ticker, start_date, end_date,\n output_format='pandas', \n token=iex_token, \n close_only=close_only\n )\n \n #If only one ticker is provided, then it adds another indexing level to the column\n #with the ticker. This is done for two reasons: 1) To visualize the ticker downloaded \n #as a confirmation that I am working with correct data. 2) To mimic the format of the\n #dataframe obtained when getting 2 or more tickers data (2-level column indexing).\n if len(ticker) == 1:\n new_columns = pd.MultiIndex.from_product([ [ticker[0]],prices.columns ] )\n prices.columns = new_columns\n \n return prices",
"def _process_stocks(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'stock'))\n logger.info(\"building labels for stocks\")\n\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (stock_id, dbxref_id, organism_id, name, uniquename,\n description, type_id, is_obsolete) = line\n# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670\n\n stock_num = stock_id\n stock_id = 'FlyBase:'+uniquename\n self.idhash['stock'][stock_num] = stock_id\n stock_label = description\n\n organism_key = organism_id\n taxon = self.idhash['organism'][organism_key]\n\n # from what i can tell, the dbxrefs are just more FBst,\n # so no added information vs uniquename\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode \\\n and int(stock_num) not in self.test_keys['strain']:\n continue\n\n # tax_label = self.label_hash[taxon] # unused\n # add the tax in case it hasn't been already\n model.addClassToGraph(taxon)\n model.addIndividualToGraph(stock_id, stock_label, taxon)\n if is_obsolete == 't':\n model.addDeprecatedIndividual(stock_id)\n\n return",
"def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df",
"def init_stock():\n return {\"five\":0, \"one\": 0, \"quarter\": 25, \"dime\": 25, \"nickel\":25}",
"def _get_financials_by_chunk(self, args):\n (istart, iend) = args\n comp_index = self.components.index\n # download financials\n browser=webdriver.Chrome()\n for sym in comp_index[istart:iend]:\n print('Chunk %s-%s: downloading financial data for %s' %(comp_index[istart], comp_index[iend], sym))\n stock = Symbol(sym)\n if 'Exchange' in self.components.columns:\n exch = self.components['Exchange'][sym]\n if type(exch) == pd.Series:\n # unexpected duplicates, e.g. AMOV\n exch = exch.iloc[0]\n if type(exch) == str:\n stock.exch = exch\n stock.get_financials(browser=browser)\n stock.save_financial_data()\n browser.quit()\n return",
"def download_stock_price_hist(\n\ttickers = [ 'AAPL' ],\n\tprice_column = 'Adj Close',\t\t\t\t\t\t\t\t# assume it's the Adjusted Close price that are interested\n\tstart = datetime.date( 2009, 12, 31 ),\t\t\t\t# assume start is guaranteed to be a weekday\n\tend = datetime.date( 2015, 12, 31 ),\n\tcsv_file = \"stock_price_test.csv\",\n):\n\t# Check validity of inputs\n\tif len( tickers ) <= 0:\n\t\tprint \"Tickers must not be empty\";\n\t\treturn False;\n\tif start > end:\n\t\tprint \"Start date \" + start.isoformat() + \" can't be later than End date \" + end.isoformat();\n\n\tdf = pd.DataFrame();\t\t\t# data frame to return\n\tfor _i in range( len(tickers) ):\n\t\tticker = tickers[_i];\n\t\tprint \"Index\" + str(_i) + \"\\t\" + \"Ticker: \" + ticker;\n\n\t\tstart_str = start.isoformat();\n\t\tend_str = end.isoformat();\n\t\thist = ystockquote.get_historical_prices( ticker, start_str, end_str );\t# dictionary with date string as the key\n\n\t\t# Get time series of stock prices (Don't sort before forming the Series!!!)\n\t\tdate_index = [];\n\t\tprice_data = [];\n\t\tfor key, val in hist.iteritems():\n\t\t\tdate_index.append( datetime.datetime.strptime( key, \"%Y-%m-%d\" ).date() );\n\t\t\tprice_data.append( float( val[ price_column ] ) )\n\n\t\tif min( date_index ) > start:\t\t\t\t\t\t\t\t# Pass if the no stock price is available on Start\n\t\t\tcontinue;\n\t\tstock_ts = pd.Series( price_data, date_index );\n\t\tstock_ts = stock_ts.sort_index();\n\n\t\t# Add current stock TS to the DataFrame\n\t\tdf[ticker] = stock_ts;\n\t\n\tdf.to_csv( csv_file, index_label='Date' );\n\treturn True;",
"def test_interest_vs_stockprice(self):\n stock_prices = np.array([[5, 10, 20, 40]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)",
"def get_crypto_daily_price(cryptotickers = [], allData=False,limit = 90):\n api_key = os.getenv(\"CC_API\")\n ticker_list = cryptotickers\n crypto_df = pd.DataFrame()\n\n for ticker in ticker_list:\n #if allData is true, then it gets all the data available. If not, select data according to limit.\n if allData:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&allData=true&api_key={api_key}\"\n else:\n url = f\"https://min-api.cryptocompare.com/data/v2/histoday?fsym={ticker}&tsym=USD&limit={limit}&api_key={api_key}\"\n \n raw_data = read_json(url)\n #print(json.dumps(raw_data, indent=5))\n df = pd.DataFrame(raw_data['Data']['Data'])\n df['time'] = pd.to_datetime(df['time'],unit='s')\n df.set_index(df['time'], inplace=True)\n df['close'] = df['close'].astype(float)\n crypto_df[ticker] = df['close']\n \n #\n new_columns = pd.MultiIndex.from_product([ crypto_df.columns, [\"close\"] ])\n crypto_df.columns = new_columns\n\n return crypto_df",
"def setupStockTable(self):\n # Get the date\n # NOTE: This is probably un\n date = datetime.date()\n dateStr = date.month() + \"/\" + date.day() + \"/\" + date.year()\n\n stocks = (\"INTC\", \"AAPL\", \"GOOG\", \"YHOO\", \"SYK\", \"VZ\")\n\n for stock in stocks:\n stockObj = self.securityFactory(stock)\n stockObj.queryAPI()\n\n self.stockDB.query(\"INSERT INTO basic_info (ticker, price, daily_change, company, year_high, year_low, \\\n daily_percent, date, streak) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\", (stockObj.target, stockObj.curr, \\\n stockObj.daily_change, stockObj.company,\\\n stockObj.year_high, stockObj.year_low,\\\n stockObj.daily_percent, dateStr, 0))",
"def gather_stock_data(tickers, save=True):\n prices = pd.DataFrame()\n ts = TimeSeries(key='EY2QBMV6MD9FX9CP', output_format='pandas')\n\n for ticker in tickers:\n successful_grab = False\n ticker_daily_adj = None\n\n while successful_grab is not True:\n try:\n ticker_daily_adj = ts.get_daily_adjusted(ticker, outputsize='full')[0]\n successful_grab = True\n except ValueError:\n print('Waiting for API to let me in')\n time.sleep(10)\n\n ticker_daily_adj.loc[:, '0. ticker'] = ticker\n ticker_daily_adj = ticker_daily_adj[sorted(ticker_daily_adj.columns)]\n\n prices = pd.concat([prices, ticker_daily_adj])\n\n prices.sort_index(inplace=True)\n prices.reset_index(inplace=True)\n prices['date'] = pd.to_datetime(prices['date'])\n if save:\n prices.to_csv('stockdata.csv', index=True)\n\n return prices",
"def get_all_binance_modified(symbol, kline_size, save=True, client=Client()):\n\n filename = 'history/%s-%s-data.csv' % (symbol, kline_size)\n if os.path.isfile(filename):\n data_df = pd.read_csv(filename)\n else:\n data_df = pd.DataFrame()\n oldest_point, newest_point = minutes_of_new_data(symbol, kline_size, data_df, source=\"binance\", client=client)\n oldest_point = datetime.strptime('23 Sep 2021', '%d %b %Y')\n delta_min = (newest_point - oldest_point).total_seconds() / 60\n available_data = math.ceil(delta_min / binsizes[kline_size])\n print(oldest_point)\n if oldest_point == datetime.strptime('1 Jan 2017', '%d %b %Y'):\n print('Downloading all available %s data for %s. Be patient..!' % (kline_size, symbol))\n else:\n print('Downloading %d minutes of new data available for %s, i.e. %d instances of %s data.' % (\n delta_min, symbol, available_data, kline_size))\n klines = client.get_historical_klines(symbol, kline_size, oldest_point.strftime(\"%d %b %Y %H:%M:%S\"),\n newest_point.strftime(\"%d %b %Y %H:%M:%S\"))\n data = pd.DataFrame(klines,\n columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av',\n 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])\n data['timestamp'] = pd.to_datetime(data['timestamp'], unit='ms')\n if len(data_df) > 0:\n temp_df = pd.DataFrame(data)\n data_df = data_df.append(temp_df)\n else:\n data_df = data\n data_df.set_index('timestamp', inplace=True)\n data_df = data_df[~data_df.index.duplicated(keep='last')]\n if save and os.path.exists('./history'): data_df.to_csv(filename)\n print('All caught up..!')\n data_df.index = pd.to_datetime(data_df.index, utc=True)\n data_df = data_df[~data_df.index.duplicated(keep='last')]\n return data_df.astype(float)"
]
| [
"0.6489456",
"0.6323425",
"0.63060725",
"0.62956864",
"0.6255761",
"0.62412673",
"0.6200349",
"0.6188368",
"0.61862606",
"0.61424375",
"0.61337084",
"0.6123032",
"0.61126554",
"0.6108505",
"0.61013997",
"0.6084537",
"0.6067216",
"0.6050892",
"0.6036497",
"0.60350937",
"0.6013006",
"0.6007195",
"0.60034233",
"0.597014",
"0.59671205",
"0.59637684",
"0.5959647",
"0.59565717",
"0.59514993",
"0.5947171"
]
| 0.7113026 | 0 |
Explains the model with LimeExplainer | def explain_model_with_lime(
model,
data_to_explain=None,
train_data=None,
total_data=None,
examples_to_explain: Union[int, float, list] = 0,
) -> "LimeExplainer":
if total_data is None:
train_x = train_data
test_x = data_to_explain
test_y = None
else:
assert total_data is not None
train_x, _ = model.training_data(data=total_data)
test_x, test_y = model.test_data(data=total_data)
features = model.input_features
lime_exp_path = maybe_make_path(os.path.join(model.path, "explainability", "lime"))
test_x, index = choose_examples(test_x, examples_to_explain, test_y)
mode = model.mode
verbosity = model.verbosity
if model.lookback > 1:
explainer = "RecurrentTabularExplainer"
else:
explainer = "LimeTabularExplainer"
model, _, _, _ = convert_ai4water_model(model)
if mode == "classification":
return
explainer = LimeExplainer(model,
data=test_x,
train_data=train_x,
path=lime_exp_path,
feature_names=features,
explainer=explainer,
mode=mode,
verbosity=verbosity,
show=False
)
for i in range(explainer.data.shape[0]):
explainer.explain_example(i, name=f"lime_exp_for_{index[i]}")
return explainer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def explainer(model, text):\r\n from lime.lime_text import LimeTextExplainer\r\n\r\n model = Explainer(model)\r\n\r\n explainer = LimeTextExplainer(\r\n split_expression=lambda x: x.split(),\r\n bow=False,\r\n class_names=[\"positive probability\"]\r\n )\r\n\r\n exp = explainer.explain_instance(\r\n text,\r\n num_features=20,\r\n top_labels=1,\r\n classifier_fn=model.predict,\r\n num_samples=5000\r\n )\r\n return exp",
"def _explain_model(self):\n raise NotImplementedError",
"def get_model_explanations(model, train_data, test_data, background_samples=10, nsamples='auto', num_features=50):\n explainer = shap.KernelExplainer(model.predict, shap.kmeans(train_data, background_samples), link='identity')\n explanations = explainer.shap_values(test_data, nsamples=nsamples, l1_reg='num_features({:})'.format(num_features))\n\n return explanations",
"def explain_model(model, train_data, test_data, samples):\n model_name = type(model).__name__\n random.seed(13)\n samples_to_explain = samples\n if model_name not in [\"RandomForestClassifier\", \"XGBClassifier\"]:\n explainer = shap.KernelExplainer(model.predict_proba, train_data[:50], link=\"identity\")\n shap_values = explainer.shap_values(train_data[:50], nsamples=200, l1_reg=\"num_features(100)\")\n\n else:\n explainer = shap.TreeExplainer(model, data=shap.sample(train_data, samples_to_explain),\n feature_perturbation='interventional')\n shap_values = explainer.shap_values(shap.sample(train_data, samples_to_explain), check_additivity=False)\n\n fig = shap.summary_plot(shap_values, test_data, max_display=5, show=False)\n return fig",
"def lime_explanation(self, instance_ind, num_features=10, explainer_type='tabular', class_names=None):\n explainer = LimeExplainer(self.x_train, self.model, explainer_type=explainer_type, class_names=class_names)\n return explainer.show_lime_instance_explanation(instance_ind, num_features)",
"def explain_model(\n model,\n data_to_expalin=None,\n train_data=None,\n total_data=None,\n features_to_explain: Union[str, list] = None,\n examples_to_explain: Union[int, float, list] = 0,\n explainer=None,\n layer: Union[str, int] = None,\n method: str = \"both\"\n):\n data = {'data_to_explain': data_to_expalin,\n 'train_data': train_data,\n 'total_data': total_data}\n\n if method == 'both':\n\n exp1 = _explain_with_lime(model=model, examples_to_explain=examples_to_explain, **data)\n\n exp2 = _explain_with_shap(model,\n features_to_explain=features_to_explain,\n examples_to_explain=examples_to_explain,\n explainer=explainer, layer=layer, **data)\n explainer = (exp1, exp2)\n\n elif method == 'shap' and shap:\n explainer = _explain_with_shap(model,\n features_to_explain=features_to_explain,\n examples_to_explain=examples_to_explain,\n explainer=explainer, layer=layer, **data)\n\n elif method == 'lime' and lime:\n explainer = _explain_with_lime(model=model, examples_to_explain=examples_to_explain, **data)\n\n else:\n ValueError(f\"unrecognized method {method}\")\n\n return explainer",
"def get_age_corrected_model_explanations(model, train_data, train_age, test_data, age=12, background_samples=10, nsamples='auto', num_features=50):\n ranked_samples = np.argsort(abs(train_age.values - age))[:background_samples]\n explainer = shap.KernelExplainer(model.predict, train_data[ranked_samples,:], link='identity')\n explanations = explainer.shap_values(test_data, nsamples=nsamples, l1_reg='num_features({:})'.format(num_features), silent=True)\n\n return explanations",
"def explain(self, idx):\r\n np.random.seed(1)\r\n exp = self.explainer.explain_instance(self.data[idx], self.model.predict, threshold=0.95)\r\n res = {'Prediction': self.model.predict(self.data[idx].reshape(1, -1))[0],\r\n 'Anchor': (' AND '.join(exp.names())), 'Precision': exp.precision(), 'Coverage': exp.coverage()}\r\n return res",
"def __explain_model(dataset, round_id, pipe_model, model, feature_names):\n try:\n exp = eli5.explain_weights(model, feature_names=list(feature_names))\n with open(get_dataset_folder(dataset.dataset_id) + '/predict/eli5_model_%s.html' % round_id, 'w') as f:\n f.write(eli5.format_as_html(exp))\n except:\n return",
"def explanation(self, instance):\r\n \"\"\"\r\n Args:\r\n instance: [numpy.array or sparse matrix] instance on which \r\n to explain the model prediction\r\n \r\n Returns:\r\n A tuple (explanation_set[0:self.max_explained], number_active_elements, \r\n number_explanations, minimum_size_explanation, time_elapsed, where:\r\n \r\n explanation_set: explanation(s) ranked from high to low change in predicted score or probability.\r\n The number of explanations shown depends on the argument max_explained.\r\n \r\n number_active_elements: number of active elements of the instance of interest.\r\n \r\n number_explanations: number of explanations found by algorithm.\r\n \r\n minimum_size_explanation: number of features in the smallest explanation.\r\n \r\n time_elapsed: number of seconds passed to generate explanation(s).\r\n \r\n explanations_score_change: change in predicted score/probability when removing\r\n the features in the explanation, ranked from high to low change.\r\n \"\"\"\r\n \r\n# *** INITIALIZATION ***\r\n \r\n time_max=0\r\n tic=time.time()\r\n instance=lil_matrix(instance)\r\n iteration=0\r\n nb_explanations=0\r\n minimum_size_explanation=np.nan\r\n explanations=[]\r\n explanations_sets=[]\r\n explanations_score_change=[]\r\n \r\n class_index = np.argmax(self.classifier_fn_multiclass(instance))\r\n score_predicted = self.classifier_fn_multiclass(instance)[class_index] \r\n #a tuple of predicted scores of one vs rest\r\n #get predicted score for the class that is predicted\r\n \r\n indices_active_elements=np.nonzero(instance)[1]\r\n number_active_elements=len(indices_active_elements)\r\n indices_active_elements=indices_active_elements.reshape((number_active_elements,1))\r\n threshold=-1\r\n stop=0\r\n expanded_combis=[]\r\n \r\n #use orderedset() \r\n combinations_to_expand=[]\r\n for features in indices_active_elements:\r\n combinations_to_expand.append(OrderedSet(features))\r\n #in the first iteration, the new combinations to explore\r\n #whether it are explanations are the combinations_to_expand\r\n new_combinations=combinations_to_expand.copy() \r\n \r\n #indices of active features are the feature set to explore\r\n feature_set=[]\r\n for features in indices_active_elements:\r\n feature_set.append(frozenset(features))\r\n \r\n time_max += (time.time()-tic)\r\n \r\n print('Initialization complete.')\r\n print('\\n Elapsed time %d \\n' %(time.time()-tic))\r\n\r\n while (iteration < self.max_iter) and (nb_explanations < self.max_explained) and (len(combinations_to_expand)!=0) and (len(new_combinations)!=0) and (time_max<(self.time_maximum)): \r\n \r\n time_extra=time.time()\r\n \r\n iteration+=1\r\n print('\\n Iteration %d \\n' %iteration)\r\n \r\n new_combinations_to_expand=[]\r\n scores_new_combinations_to_expand=[]\r\n for combination in new_combinations: #verify each set in new_combinations if it is an explanation or not\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination: \r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] != np.max(self.classifier_fn_multiclass(perturbed_instance))): #if class_index has no longer the top predicted score, an explanation is found.\r\n explanations.append(combination)\r\n explanations_sets.append(set(combination))\r\n explanations_score_change.append(score_predicted - score_new)\r\n nb_explanations+=1\r\n else:\r\n new_combinations_to_expand.append(combination)\r\n 
scores_new_combinations_to_expand.append(score_new)\r\n \r\n if (len(new_combinations[0]) == number_active_elements): \r\n stop=1\r\n else:\r\n stop=0 \r\n \r\n if (self.BB==True): #branch-and-bound\r\n if (len(explanations)!=0):\r\n lengths=[]\r\n for explanation in explanations:\r\n lengths.append(len(explanation))\r\n lengths=np.array(lengths)\r\n max_length=lengths.min() \r\n else: \r\n max_length=number_active_elements \r\n else: \r\n max_length=number_active_elements\r\n \r\n if (len(scores_new_combinations_to_expand) != 0):\r\n index_combi_max = np.argmax(score_predicted - scores_new_combinations_to_expand) #best-first combination or feature is chosen.\r\n new_score = scores_new_combinations_to_expand[index_combi_max]\r\n difference = score_predicted - new_score\r\n if difference[0] >= threshold:\r\n expand = 1\r\n else:\r\n expand = 0\r\n else:\r\n expand = 0\r\n\r\n if ((len(new_combinations[0]) < max_length) and (expand == 1) and (stop==0) and (nb_explanations < self.max_explained) and (len(new_combinations[0]) < self.max_features)): \r\n \r\n print('length of new_combinations is %d features.' %len(new_combinations[0]))\r\n print('new combinations can be expanded')\r\n \r\n comb=new_combinations_to_expand[index_combi_max]\r\n func=fn_1(comb, expanded_combis, feature_set, combinations_to_expand, explanations_sets)\r\n new_combinations=func[0]\r\n combinations_to_expand=func[1]\r\n expanded_combis=func[2]\r\n \r\n #Calculate new threshold\r\n scores_combinations_to_expand=[]\r\n for combination in combinations_to_expand:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] == np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand.append(score_new)\r\n \r\n index_combi_max = np.argmax(score_predicted - scores_combinations_to_expand)\r\n new_score = scores_combinations_to_expand[index_combi_max]\r\n threshold = score_predicted - new_score\r\n \r\n time_extra2=time.time()\r\n time_max+=(time_extra2-time_extra)\r\n print('\\n Elapsed time %d \\n' %time_max)\r\n size_COMBIS=len(combinations_to_expand)\r\n print('\\n size combis to expand %d \\n' %size_COMBIS)\r\n \r\n else:\r\n \r\n print('length of new_combinations is %d features.' 
%len(new_combinations[0]))\r\n print('new combination cannot be expanded')\r\n \r\n combinations=[]\r\n for combination in combinations_to_expand:\r\n if ((len(combination) < number_active_elements) and (len(combination) < (max_length)) and (len(combination) < self.max_features)):\r\n combinations.append(combination)\r\n \r\n if (len(combinations) == 0) or (nb_explanations >= self.max_explained) or (len(combinations_to_expand) == len(new_combinations)):\r\n new_combinations=[]\r\n \r\n elif (len(combinations) != 0):\r\n \r\n new_combinations=[]\r\n it=0\r\n indices=[]\r\n new_score=0\r\n combinations_to_expand_copy = combinations.copy()\r\n \r\n scores_combinations_to_expand2=[]\r\n for combination in combinations_to_expand_copy:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] != np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand2.append(2 * score_predicted)\r\n else:\r\n scores_combinations_to_expand2.append(score_new)\r\n \r\n while ((len(new_combinations) == 0) and (it<len(scores_combinations_to_expand2)) and ((time_max+(time.time() - time_extra))<self.time_maximum)):\r\n \r\n print('while loop %d' %it)\r\n \r\n if (it!=0):\r\n for index in indices:\r\n scores_combinations_to_expand2[index]= 2 * score_predicted\r\n #to make sure this index is never chosen again\r\n \r\n index_combi_max=np.argmax(score_predicted - scores_combinations_to_expand2) #best-first combi\r\n indices.append(index_combi_max)\r\n \r\n comb=combinations_to_expand_copy[index_combi_max]\r\n func=fn_1(comb, expanded_combis, feature_set, combinations_to_expand_copy, explanations_sets)\r\n new_combinations=func[0]\r\n combinations_to_expand=func[1]\r\n expanded_combis=func[2]\r\n \r\n #Calculate new threshold\r\n scores_combinations_to_expand=[]\r\n for combination in combinations_to_expand:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] == np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand.append(score_new) \r\n \r\n if (len(scores_combinations_to_expand)!=0): \r\n index_combi_max=np.argmax(score_predicted - scores_combinations_to_expand) #best-first combi\r\n new_score=scores_combinations_to_expand[index_combi_max]\r\n threshold=score_predicted - new_score\r\n it+=1 \r\n print('length of new_combinations is %d features.' %len(new_combinations))\r\n print('score_predicted minus new_score is %f.' 
%(score_predicted - new_score))\r\n \r\n time_max += (time.time()-time_extra)\r\n print('\\n Elapsed time %d \\n' %time_max)\r\n print('\\n size combis to expand %d \\n' %len(combinations_to_expand))\r\n\r\n print(\"iterations are done\") \r\n explanation_set=[]\r\n explanation_feature_names=[]\r\n for i in range(len(explanations)):\r\n explanation_feature_names=[]\r\n for features in explanations[i]:\r\n explanation_feature_names.append(self.feature_names[features])\r\n explanation_set.append(explanation_feature_names)\r\n \r\n if (len(explanations)!=0):\r\n lengths_explanation=[]\r\n for explanation in explanations:\r\n l=len(explanation)\r\n lengths_explanation.append(l)\r\n minimum_size_explanation=np.min(lengths_explanation)\r\n \r\n number_explanations=len(explanations)\r\n #show explanation in explanation set which is minimum in size and highest score change (delta)\r\n if (np.size(explanations_score_change)>1):\r\n inds=np.argsort(explanations_score_change, axis=0)\r\n inds = np.fliplr([inds])[0]\r\n inds_2=[]\r\n for i in range(np.size(inds)):\r\n inds_2.append(inds[i][0])\r\n explanation_set_adjusted=[]\r\n for i in range(np.size(inds)):\r\n j=inds_2[i]\r\n explanation_set_adjusted.append(explanation_set[j])\r\n explanations_score_change_adjusted=[]\r\n for i in range(np.size(inds)):\r\n j=inds_2[i]\r\n explanations_score_change_adjusted.append(explanations_score_change[j])\r\n explanation_set=explanation_set_adjusted\r\n explanations_score_change=explanations_score_change_adjusted\r\n \r\n toc=time.time()\r\n time_elapsed=toc-tic\r\n print('\\n Elapsed time %d \\n' %time_elapsed)\r\n\r\n return (explanation_set[0:self.max_explained], number_active_elements, number_explanations, minimum_size_explanation, time_elapsed, explanations_score_change[0:self.max_explained], iteration)",
"def lime_outline(model, img_path = None, img = None, explanation = None):\n \n from skimage.segmentation import mark_boundaries\n \n if (img_path is None) and (img is None):\n print('One of \"img_path\" or \"img\" is required')\n return None, None\n elif img_path is not None:\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n \n img = cv2.resize(img, (model.input.shape[1], model.input.shape[2]))\n img = img / 255\n img = np.expand_dims(img, axis=0)\n \n if explanation is None:\n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(img[0].astype('double'), \n model.predict, \n top_labels=3, \n hide_color=0, \n num_samples=1000)\n\n temp_1, mask_1 = explanation.get_image_and_mask(explanation.top_labels[0], \n positive_only=False, \n num_features=1,\n hide_rest=False)\n \n plt.imshow(mark_boundaries(temp_1, mask_1))\n plt.axis('off')\n \n return explanation",
"def __init__(self, *argv, **kwargs):\n super(LinearExplainer, self).__init__(*argv, **kwargs)\n\n self.explainer = shap.LinearExplainer(*argv, **kwargs)",
"def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")",
"def visualize_model():\n dictionary_vis = gensim.corpora.Dictionary.load('dictionary.gensim')\n corpus_vis = pickle.load(open('corpus.pkl', 'rb'))\n lda = gensim.models.ldamodel.LdaModel.load('model5.gensim')\n lda_display = pyLDAvis.gensim.prepare(lda, corpus_vis, dictionary_vis,\n sort_topics=False)\n pyLDAvis.display(lda_display)",
"def test_functional_model_deep(self):\n model, inputs = get_dl_fmodel_for_multi_inputs()\n expl = ShapExplainer(model, data=inputs,\n save=False,\n show=False,\n train_data=inputs,\n explainer=\"DeepExplainer\")\n sv = expl.shap_values\n assert isinstance(sv, list)\n return",
"def lime_explanation_as_df(self, instance_ind=None, instance_interval=None,\n explainer_type='tabular', class_names=None, num_features=10):\n explainer = LimeExplainer(self.x_train, self.model, explainer_type=explainer_type, class_names=class_names)\n return explainer.get_explanation_as_df(instance_ind=instance_ind, instance_interval=instance_interval,\n num_features=num_features)",
"def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, 
heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)",
"def explain(self):\n \n self.logger.verbose = False \n dbpath, config = self._start() \n \n if config.explain not in [\"specific\", \"general\"]:\n return \"--explain must be 'general' or 'specific'\"\n config.obo = check_file(config.obo, dbpath, \"obo\")\n \n # allow user to pass several model/reference pairs\n models = config.model.split(\",\")\n references = config.reference.split(\",\") \n M = len(models)\n \n if len(references) != M:\n raise Exception(\"incompatible number of models and references\")\n \n # use the packet to load information from the db, refset and models\n packet = prep_compute_packets(self.config, \n references=references, \n models=models,\n partition_size=M)[0]\n packet.prep()\n refset = packet.general_refset \n if config.explain == \"specific\":\n refset = packet.specific_refset\n refset.learn_obo(MinimalObo(config.obo))\n\n allresults = [None]*M\n for i, (modelid, refid) in enumerate(zip(models, references)):\n data = packet.models[modelid]\n result = refset.inference_chain(data, refid, verbose=True,\n fp_penalty=config.fp_penalty)\n allresults[i] = result.to_json(nodata=config.explain_nodata) \n \n return \"[\"+(\",\".join(allresults))+\"]\";",
"def explain(self):",
"def review_model(model): \n \n diagnose_model(model)\n \n plot_param_coef(model)\n \n plot_p_values(model)\n \n return",
"def run_article_experiment(model, hparams,\n mode=tf.contrib.learn.ModeKeys.TRAIN,\n validation_size=10,\n input_feature='text',\n max_input_sequence_length=Article.max_text+2,\n target_feature='short_description',\n max_target_sequence_length=Article.max_short_description+2,\n dataset_dir='records/medium',\n model_dir='model',\n seed=0,\n restore=True,\n just_evaluate_me=True):\n eval_every = 100\n\n graph = tf.get_default_graph()\n sess = tf.Session(graph=graph)\n run_config = tf.contrib.learn.RunConfig(model_dir=model_dir)\n all_files = os.listdir(dataset_dir)\n record_list = []\n for fn in all_files:\n if fn.endswith('.txt'):\n continue\n record_list.append(os.path.join(dataset_dir, fn))\n train_input_fn, eval_input_fn, vocab_sizes, lookup_table, input_lookup_table = create_article_dataset(record_list, dataset_dir, sess,\n validation_size=validation_size,\n eval_every=eval_every,\n input_feature=input_feature,\n target_feature=target_feature,\n max_input_sequence_length=max_input_sequence_length,\n max_target_sequence_length=max_target_sequence_length,\n hparams=hparams)\n\n train_features, train_labels = train_input_fn()\n # eval_features, eval_labels = eval_input_fn()\n prediction_op, loss_op, train_op = model(train_features, train_labels, graph,\n mode=mode,\n vocab_sizes=vocab_sizes,\n hparams=hparams,\n seed=seed)\n\n # actual inference.\n input_strings = input_lookup_table.lookup(train_features['input'])\n pred_strings = lookup_table.lookup(tf.cast(prediction_op.sample_id, dtype=tf.int64))\n label_strings = lookup_table.lookup(train_labels['target'])\n\n # inputs_text = tf.summary.text('inputs', input_strings)\n # predictions_text = tf.summary.text('predictions', pred_strings)\n # targets_text = tf.summary.text('targets', label_strings)\n loss_summary = tf.summary.scalar('loss', loss_op)\n # merged_text = tf.summary.merge([inputs_text, predictions_text, targets_text])\n\n\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n writer = tf.summary.FileWriter(os.path.join(model_dir, 'log'), sess.graph)\n if restore:\n saver.restore(sess, os.path.join(model_dir, 'model_restored.ckpt'))\n\n if just_evaluate_me:\n while True:\n try:\n batch_train_features, batch_train_labels = sess.run((train_features, train_labels))\n inputs_, preds_, labels_ = sess.run([input_strings, pred_strings, label_strings], feed_dict={\n train_features['input']: batch_train_features['input'],\n train_features['input_sequence_length']: batch_train_features['input_sequence_length'],\n train_labels['target']: batch_train_labels['target'],\n train_labels['target_sequence_length']: batch_train_labels['target_sequence_length'],\n })\n for inputs, pred, label in zip(inputs_, preds_, labels_):\n print('Input')\n print('='*30)\n print(' '.join(list(map(lambda s: s.decode('utf-8'), inputs))))\n print('Actual')\n print('='*30)\n print(' '.join(list(map(lambda s: s.decode('utf-8'), label))))\n print('Prediction')\n print('='*30)\n print(' '.join(list(map(lambda s: s.decode('utf-8'), pred))))\n print('\\n'*2)\n except tf.errors.OutOfRangeError:\n break\n sess.close()\n\n else:\n epoch = 0\n save_every = 100\n while True:\n try:\n batch_train_features, batch_train_labels = sess.run((train_features, train_labels))\n if batch_train_features['input'].shape[0] != hparams.batch_size:\n continue\n if batch_train_labels['target'].shape[0] != hparams.batch_size:\n continue\n if batch_train_labels['target_sequence_length'].shape[0] != hparams.batch_size:\n continue\n if 
batch_train_features['input_sequence_length'].shape[0] != hparams.batch_size:\n continue\n\n summ_loss, _, loss = sess.run([loss_summary, train_op, loss_op], feed_dict={\n train_features['input']: batch_train_features['input'],\n train_features['input_sequence_length']: batch_train_features['input_sequence_length'],\n train_labels['target']: batch_train_labels['target'],\n train_labels['target_sequence_length']: batch_train_labels['target_sequence_length'],\n })\n\n writer.add_summary(summ_loss, epoch)\n\n print('loss at epoch {}: {}'.format(epoch, loss))\n\n if (epoch + 1) % save_every == 0:\n saver.save(sess, os.path.join(model_dir, 'model_restored.ckpt'))\n\n # if (epoch + 1) % eval_every == 0:\n # features, labels = sess.run((eval_features, eval_labels))\n #\n # if features['input'].shape[0] != hparams.batch_size:\n # continue\n # if labels['target'].shape[0] != hparams.batch_size:\n # continue\n # if labels['target_sequence_length'].shape[0] != hparams.batch_size:\n # continue\n # if features['input_sequence_length'].shape[0] != hparams.batch_size:\n # continue\n #\n # text_summary, _, _ = sess.run([merged_text, pred_strings, label_strings], feed_dict={\n # eval_features['input']: features['input'],\n # eval_features['input_sequence_length']: features['input_sequence_length'],\n # eval_labels['target']: labels['target'],\n # eval_labels['target_sequence_length']: labels['target_sequence_length'],\n # })\n # writer.add_summary(text_summary, epoch)\n\n epoch += 1\n except tf.errors.OutOfRangeError:\n break\n sess.close()",
"def visualize():\n model.eval()\n with torch.no_grad():\n alpha = model.mu_q_alpha\n beta = model.get_beta(alpha) \n \n print('\\n')\n print('#'*100)\n print('Visualize topics...') \n \n topics_words = []\n for k in range(args.num_topics):\n gamma = beta[k, :]\n top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1]) \n topic_words = [vocab[a] for a in top_words]\n topics_words.append(' '.join(topic_words))\n print('Topic {} .. ===> {}'.format(k, topic_words)) \n\n print('\\n')\n print('Visualize word embeddings ...')\n # queries = ['economic', 'assembly', 'security', 'management', 'debt', 'rights', 'africa']\n # queries = ['economic', 'assembly', 'security', 'management', 'rights', 'africa']\n queries = ['border', 'vaccines', 'coronaviruses', 'masks']\n queries = set(queries).intersection(vocab)\n try:\n embeddings = model.rho.weight # Vocab_size x E\n except:\n embeddings = model.rho # Vocab_size x E\n # neighbors = []\n for word in queries:\n print('word: {} .. neighbors: {}'.format(\n word, nearest_neighbors(word, embeddings, vocab, args.num_words)))\n print('#'*100)",
"def analyze_model(folder, verbose=True):\n hyps = get_hyps(folder)\n table = get_analysis_table(folder, hyps=hyps)\n\n model,metrics = read_model(folder,ret_metrics=True)\n get_analysis_figs(folder, metrics)\n\n train_acc, train_loss = metrics['acc'][-1], metrics['loss'][-1]\n table['train_acc'] = [train_acc]\n table['train_loss'] = [train_loss]\n val_acc, val_loss = metrics['val_acc'][-1], metrics['val_loss'][-1]\n table['val_acc'] = [val_acc]\n table['val_loss'] = [val_loss]\n if verbose:\n print(\"ValAcc: {:05e}, ValLoss: {:05e}\".format(val_acc, val_loss))\n return pd.DataFrame(table)",
"def __init__(self, *argv, **kwargs):\n super(DeepExplainer, self).__init__(*argv, **kwargs)\n\n self.explainer = shap.DeepExplainer(*argv, **kwargs)",
"def explain(self, X_e, Y_e, objective='SPARSITY', n_explanations=1, max_features=999999, max_runtime=60): \r\n \r\n if self.data_type==\"BINARY\":\r\n X_e = complement_binary_dataframe(X_e) \r\n else:\r\n X_e = complement_continuous_dataframe(X_e) \r\n \r\n assert(len(np.setdiff1d(Y_e, [0,1]))==0), 'currently supports binary datasets'\r\n assert(len(X_e)==len(Y_e)), 'Mismatch between the number of observations and predictions'\r\n assert(type(X_e)==pd.DataFrame), 'Expected dataframe for X_e'\r\n assert(set(self.features) == set(X_e.columns.values))\r\n assert(objective in ['SPARSITY','SUPPORT']), 'Unknown objective passed to explain_local'\r\n assert(n_explanations>=1), 'n_explanations should be >= 1'\r\n assert(max_features>=1), 'max_features should be >= 1'\r\n \r\n res = []\r\n for i in range(len(X_e)):\r\n df_i = self.__explain_local__(X_e.iloc[i], Y_e[i], objective, n_explanations, max_features, max_runtime)\r\n df_i.insert(0, \"#Observation\",i)\r\n df_i.insert(1, \"#Explanation\",df_i.index.values)\r\n #df_i.index = map(lambda x:\"Obs. #%s, Exp. #%s:\"%(str(i),str(x)),df_i.index.values)\r\n res.append(df_i)\r\n return(pd.concat(res).reset_index(drop=True))",
"def learn(self):\n pass",
"def learn(self):\n pass",
"def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()",
"def __init__(self, the_model, lrp_exponent=1, beta=.5, epsilon=1e-6,\n method=\"e-rule\"):\n super(InnvestigateModel, self).__init__()\n self.model = the_model\n self.device = torch.device(\"cpu\", 0)\n self.prediction = None\n self.r_values_per_layer = None\n self.only_max_score = None\n # Initialize the 'Relevance Propagator' with the chosen rule.\n # This will be used to back-propagate the relevance values\n # through the layers in the innvestigate method.\n self.inverter = RelevancePropagator(lrp_exponent=lrp_exponent,\n beta=beta, method=method, epsilon=epsilon,\n device=self.device)\n\n # Parsing the individual model layers\n self.register_hooks(self.model)\n if method == \"b-rule\" and float(beta) in (-1., 0):\n which = \"positive\" if beta == -1 else \"negative\"\n which_opp = \"negative\" if beta == -1 else \"positive\"\n print(\"WARNING: With the chosen beta value, \"\n \"only \" + which + \" contributions \"\n \"will be taken into account.\\nHence, \"\n \"if in any layer only \" + which_opp +\n \" contributions exist, the \"\n \"overall relevance will not be conserved.\\n\")",
"def get_model( ):\n\n return Lasso(alpha = 1e-3, fit_intercept = True, precompute = True, max_iter = 1e4)"
]
| [
"0.7617358",
"0.6783141",
"0.66918886",
"0.66185474",
"0.6507089",
"0.6430941",
"0.6364863",
"0.62004024",
"0.61559814",
"0.5953485",
"0.5935031",
"0.5910146",
"0.585681",
"0.5826685",
"0.5823301",
"0.581807",
"0.5754989",
"0.5713097",
"0.5707046",
"0.57027256",
"0.5647099",
"0.56466496",
"0.5646009",
"0.5644067",
"0.5643759",
"0.56258863",
"0.56258863",
"0.55988663",
"0.5521682",
"0.5507198"
]
| 0.69931304 | 1 |
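
The row that ends here pairs its query with an ai4water-style LIME wrapper (the document fragment above) and with other LIME/SHAP snippets as negatives. Purely as a point of reference, here is a minimal sketch of what such a wrapper boils down to, using the lime package directly on an invented scikit-learn model and toy data; none of the names or numbers below come from the dataset row.

# Minimal sketch: explaining one prediction of a toy regressor with LIME.
# Model, data, and feature names are illustrative, not taken from the row above.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from lime.lime_tabular import LimeTabularExplainer

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))                      # toy training data
y = X[:, 0] * 2.0 + X[:, 1] - 0.5 * X[:, 2] + rng.normal(scale=0.1, size=200)

model = RandomForestRegressor(n_estimators=50, random_state=0).fit(X, y)

explainer = LimeTabularExplainer(
    training_data=X,
    feature_names=["f0", "f1", "f2", "f3"],
    mode="regression",
)

# Explain a single row; model.predict is the black-box prediction function.
exp = explainer.explain_instance(X[0], model.predict, num_features=4)
print(exp.as_list())                               # (feature, weight) pairs

Each explain_instance call perturbs the single row, fits a local linear surrogate on the perturbed predictions, and returns per-feature weights, which is roughly what the wrapper's explain_example loop above does once per row of explainer.data.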
Performs Non-Max Suppression on the given bboxes | def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
    # bboxes is expected as a list of boxes of the form
    # [class_pred, prob_score, x1, y1, x2, y2]
    assert type(bboxes) == list

    # Drop boxes below the confidence threshold, then sort by score (highest first)
    bboxes = [box for box in bboxes if box[1] > threshold]
    bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
    bboxes_after_nms = []

    while bboxes:
        chosen_box = bboxes.pop(0)
        bbox_temp = bboxes.copy()
        bboxes = []
        # Keep a box only if it has a different class than the chosen box
        # or does not overlap it by more than iou_threshold
        for box in bbox_temp:
            if box[0] != chosen_box[0] or intersection_over_union(
                torch.tensor(chosen_box[2:]),
                torch.tensor(box[2:]),
                box_format=box_format,
            ) < iou_threshold:
                bboxes.append(box)
        bboxes_after_nms.append(chosen_box)

    return bboxes_after_nms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iou_suppression(cnt_box, yolo_box, max_threshold, min_threshold):\n all_boxes = []\n pre_bboxes = yolo_box\n bboxes = cnt_box\n for i in range(len(pre_bboxes)):\n max_flag = 0\n min_flag = 0\n for j in range(len(bboxes)):\n\n (pre_x1, pre_y1) = (pre_bboxes[i][0], pre_bboxes[i][1])\n (pre_x2, pre_y2) = (pre_bboxes[i][2], pre_bboxes[i][3])\n (cur_x1, cur_y1) = (bboxes[j][0], bboxes[j][1])\n (cur_x2, cur_y2) = (bboxes[j][2], bboxes[j][3])\n origin_w = pre_x2 - pre_x1\n origin_h = pre_y2 - pre_y1\n current_w = cur_x2 - cur_x1\n current_h = cur_y2 - cur_y1\n prime_area = origin_h * origin_w\n current_area = current_h*current_w\n\n if pre_x1 > cur_x1:\n if pre_y1 > cur_y1:\n if cur_x2 - pre_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = cur_y2 - pre_y1\n if width > origin_w:\n width = origin_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n\n else:\n if cur_x2 - pre_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = pre_y2 - cur_y1\n if width > origin_w:\n width = origin_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n else:\n if pre_y1 > cur_y1:\n if pre_x2 - cur_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = cur_y2 - pre_y1\n if width > current_w:\n width = current_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n else:\n if pre_x2 - cur_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = pre_y2 - cur_y1\n if width > current_w:\n width = current_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n\n if lap_area != 0:\n sum_area = (prime_area + current_area - lap_area)\n iou_score = lap_area/sum_area\n if iou_score > max_threshold: # set the threshold of the iou scores, in line with the sort\n max_flag = 1\n elif iou_score > min_threshold:\n min_flag = 1\n\n if max_flag == 1 or min_flag == 0:\n all_boxes.append(pre_bboxes[i])\n\n if cnt_box != []:\n for index_box in range(cnt_box.shape[0]):\n all_boxes.append(cnt_box[index_box])\n\n return np.asarray(all_boxes)",
"def non_maxima_suppression(boxes, probs, classes_num, thr=0.2):\n for i, box in enumerate(boxes):\n if probs[i] == 0:\n continue\n for j in range(i+1, len(boxes)):\n if classes_num[i] == classes_num[j] and iou(box, boxes[j]) > thr:\n probs[j] = 0.0\n\n return probs",
"def non_maximum_suppression(boxes):\n\n boxes = sorted(boxes, key=lambda box: box[2]-box[0], reverse=True)\n nms_boxes = []\n overlap_threshold = 0.5\n\n for box in boxes:\n if not any([overlap_between(box, nms_box) > overlap_threshold for nms_box in nms_boxes]):\n nms_boxes.append(box)\n\n return nms_boxes",
"def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n box_predictions = []\n predicted_box_classes = []\n predicted_box_scores = []\n for label in range(len(self.class_names)):\n # for each class\n boxes = []\n class_tmp = []\n score_tmp = []\n for i in range(len(box_classes)):\n if box_classes[i] == label:\n boxes.append(filtered_boxes[i])\n class_tmp.append(box_classes[i])\n score_tmp.append(box_scores[i])\n\n class_tmp = np.array(class_tmp)\n while len(class_tmp) > 0 and np.amax(class_tmp) > -1:\n index = np.argmax(score_tmp)\n box_predictions.append(boxes[index])\n predicted_box_classes.append(class_tmp[index])\n predicted_box_scores.append(score_tmp[index])\n score_tmp[index] = -1\n class_tmp[index] = -1\n px1, py1, px2, py2 = boxes[index]\n p_area = (px2 - px1) * (py2 - py1)\n\n for box in range(len(boxes)):\n if class_tmp[box] != -1:\n bx1, by1, bx2, by2 = boxes[box]\n b_area = (bx2 - bx1) * (by2 - by1)\n ox1 = px1 if px1 > bx1 else bx1\n oy1 = py1 if py1 > by1 else by1\n ox2 = px2 if px2 < bx2 else bx2\n oy2 = py2 if py2 < by2 else by2\n if ox2 - ox1 <= 0 or oy2 - oy1 <= 0:\n continue\n # Calculate overlap area and IoU\n o_area = (ox2 - ox1) * (oy2 - oy1)\n u_area = p_area + b_area - o_area\n iou = o_area / u_area\n\n if iou > self.nms_t:\n class_tmp[box] = -1\n score_tmp[box] = -1\n\n box_predictions = np.array(box_predictions)\n predicted_box_classes = np.array(predicted_box_classes)\n predicted_box_scores = np.array(predicted_box_scores)\n return (box_predictions, predicted_box_classes, predicted_box_scores)",
"def bboxes_nms_fast(classes, scores, bboxes, threshold=0.45):\n pass",
"def nms(bboxes, iou_threshold, sigma = 0.3, method = 'nms'):\n \"\"\" takes bboxes with the shape of (num_of_box, 6), where 6 => (xmin, ymin, xmax, ymax, score, class) \"\"\"\n \n # remove duplicates in classes\n classes_in_img = list(set(bboxes[:, 5]))\n \n # initialise list to store best bboxes\n best_bboxes = []\n \n # iterate over each class\n for cls in classes_in_img:\n \n # get mask for bboxes with the same class and apply on bboxes to obtain array of bboxes with same class\n cls_mask = (bboxes[:, 5] == cls)\n cls_bboxes = bboxes[cls_mask]\n \n # iterate while there are still bboxes in cls_bboxes\n while len(cls_bboxes) > 0:\n \n # select index of the bbox with the highest score \n max_ind = np.argmax(cls_bboxes[:, 4])\n \n # select bbox with highest score \n best_bbox = cls_bboxes[max_ind]\n \n # append to best _bbox list \n best_bboxes.append(best_bbox)\n \n # obtain cls_bboxes without best bbox\n cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])\n \n # calculate iou of remaining bboxes with best bbox \n iou = bbox_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])\n \n weight = np.ones((len(iou), ), dtype = np.float32)\n \n # assert method to be either 'nms' or 'soft_nms'\n assert method in ['nms', 'soft_nms']\n \n if method == 'nms':\n \n # obtain nms iou mask based on threshold\n iou_mask = iou > iou_threshold\n \n # apply mask on weights\n weight[iou_mask.numpy()] = 0.0\n \n if method == 'soft_nms':\n \n # obtain soft_nms weights\n weight = np.exp(-(1.0 * iou ** 2 / sigma))\n \n # apply weights on cls_bboxes\n cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight\n \n # obtain score mask of scores greater than zero\n score_mask = cls_bboxes[:, 4] > 0.\n \n # apply mask on cls_bboxes \n cls_bboxes = cls_bboxes[score_mask]\n\n return best_bboxes",
"def apply_non_max_suppression(boxes, scores, iou_thresh=.45, top_k=200):\n\n selected_indices = np.zeros(shape=len(scores))\n if boxes is None or len(boxes) == 0:\n return selected_indices\n x_min = boxes[:, 0]\n y_min = boxes[:, 1]\n x_max = boxes[:, 2]\n y_max = boxes[:, 3]\n areas = (x_max - x_min) * (y_max - y_min)\n remaining_sorted_box_indices = np.argsort(scores)\n remaining_sorted_box_indices = remaining_sorted_box_indices[-top_k:]\n\n num_selected_boxes = 0\n while len(remaining_sorted_box_indices) > 0:\n best_score_args = remaining_sorted_box_indices[-1]\n selected_indices[num_selected_boxes] = best_score_args\n num_selected_boxes = num_selected_boxes + 1\n if len(remaining_sorted_box_indices) == 1:\n break\n\n remaining_sorted_box_indices = remaining_sorted_box_indices[:-1]\n\n best_x_min = x_min[best_score_args]\n best_y_min = y_min[best_score_args]\n best_x_max = x_max[best_score_args]\n best_y_max = y_max[best_score_args]\n\n remaining_x_min = x_min[remaining_sorted_box_indices]\n remaining_y_min = y_min[remaining_sorted_box_indices]\n remaining_x_max = x_max[remaining_sorted_box_indices]\n remaining_y_max = y_max[remaining_sorted_box_indices]\n\n inner_x_min = np.maximum(remaining_x_min, best_x_min)\n inner_y_min = np.maximum(remaining_y_min, best_y_min)\n inner_x_max = np.minimum(remaining_x_max, best_x_max)\n inner_y_max = np.minimum(remaining_y_max, best_y_max)\n\n inner_box_widths = inner_x_max - inner_x_min\n inner_box_heights = inner_y_max - inner_y_min\n\n inner_box_widths = np.maximum(inner_box_widths, 0.0)\n inner_box_heights = np.maximum(inner_box_heights, 0.0)\n\n intersections = inner_box_widths * inner_box_heights\n remaining_box_areas = areas[remaining_sorted_box_indices]\n best_area = areas[best_score_args]\n unions = remaining_box_areas + best_area - intersections\n intersec_over_union = intersections / unions\n intersec_over_union_mask = intersec_over_union <= iou_thresh\n remaining_sorted_box_indices = remaining_sorted_box_indices[\n intersec_over_union_mask]\n\n return selected_indices.astype(int), num_selected_boxes",
"def non_max_suppression(boxes, scores, threshold):\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n return np.array(pick, dtype=np.int32)",
"def non_max_suppression_all_classes(boxes, scores, labels, iou_threshold=0.5):\n excluded_indices = []\n for i in range(0,len(boxes)):\n obj1_box, _, obj1_label = boxes[i], scores[i], labels[i]\n for j in range(i+1,len(boxes)):\n obj2_box, _, obj2_label = boxes[j], scores[j], labels[j]\n if (get_iou(obj1_box, obj2_box) > iou_threshold):\n #print('excluding idx={}, class={}, score={}, bbox={}'.format(j, obj2_label, obj2_score, obj2_box))\n excluded_indices.append(j)\n \n excluded_indices = list(set(excluded_indices)) #Elimina indices repetidos\n included_indices = [idx for idx in range(len(boxes)) if idx not in excluded_indices]\n #print(included_indices)\n return included_indices",
"def non_max_suppression(boxes, scores, threshold):\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n polygons = convert_format(boxes)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(polygons[i], polygons[ixs[1:]])\n # Identify boxes with IoU over the threshold. This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n\n return np.array(pick, dtype=np.int32)",
"def nms(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\tassert type(bboxes) == list\n\tbboxes = [box for box in bboxes if box[1] > threshold]\n\tbboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n\tbboxes_after_nms = []\n\n\twhile bboxes:\n\t\tchosen_box = bboxes.pop(index=0)\n\t\tbboxes = [box for box in bboxes \n\t\t\t\t\t\t\tif box[0] != chosen_box[0] or intersection_over_union\n\t\t\t\t\t\t\t(torch.tensor(chosen_box[2:]), \n\t\t\t\t\t\t\t\ttorch.tensor(chosen_box[2:]),\n\t\t\t \t\t\t\t\tbox_format=\"midpoint\") < iou_threshold]\n\t\tbboxes_after_nms.append(chosen_box)\n\n\treturn bboxes_after_nms",
"def non_max_suppression_fast(boxes, overlapThresh=0.2):\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n\n # initialize the list of picked indexes\n pick = []\n\n # grab the coordinates of the bounding boxes\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(y2)\n\n # keep looping while some indexes still remain in the indexes list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n\n # delete all indexes from the index list that have\n idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))\n\n # return only the bounding boxes that were picked using the\n # integer data type\n return boxes[pick].astype(\"int\"), pick",
"def non_max_suppression(boxes, max_bbox_overlap, scores=None):\n if len(boxes) == 0:\n return []\n\n boxes = boxes.astype(np.float)\n pick = []\n\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2] + boxes[:, 0]\n y2 = boxes[:, 3] + boxes[:, 1]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n if scores is not None:\n idxs = np.argsort(scores)\n else:\n idxs = np.argsort(y2)\n\n while len(idxs) > 0:\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n overlap = (w * h) / area[idxs[:last]]\n\n idxs = np.delete(\n idxs, np.concatenate(\n ([last], np.where(overlap > max_bbox_overlap)[0])))\n\n return pick",
"def non_max_suppression(pred_bboxes, pred_labels, **kwargs):\n return tf.image.combined_non_max_suppression(\n pred_bboxes,\n pred_labels,\n **kwargs\n )",
"def non_max_suppression_fast(boxes, probabilities=None, overlap_threshold=0.3):\n # if there are no boxes, return an empty list\n if boxes.shape[1] == 0:\n return []\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n # initialize the list of picked indexes\n pick = []\n # grab the coordinates of the bounding boxes\n x1 = boxes[:, 0] - (boxes[:, 2] / [2]) # center x - width/2\n y1 = boxes[:, 1] - (boxes[:, 3] / [2]) # center y - height/2\n x2 = boxes[:, 0] + (boxes[:, 2] / [2]) # center x + width/2\n y2 = boxes[:, 1] + (boxes[:, 3] / [2]) # center y + height/2\n\n # compute the area of the bounding boxes and grab the indexes to sort\n # (in the case that no probabilities are provided, simply sort on the\n # bottom-left y-coordinate)\n area = boxes[:, 2] * boxes[:, 3] # width * height\n idxs = y2\n\n\n # if probabilities are provided, sort on them instead\n if probabilities is not None:\n idxs = probabilities\n\n # sort the indexes\n idxs = np.argsort(idxs)\n # keep looping while some indexes still remain in the indexes\n # list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n # delete all indexes from the index list that have\n idxs = np.delete(idxs, np.concatenate(([last],\n np.where(overlap > overlap_threshold)[0])))\n # return only the bounding boxes that were picked\n return pick",
"def _filter_box_candidates(self, bboxes, labels):\n bbox_w = bboxes[:, 2] - bboxes[:, 0]\n bbox_h = bboxes[:, 3] - bboxes[:, 1]\n valid_inds = (bbox_w > self.min_bbox_size) & \\\n (bbox_h > self.min_bbox_size)\n valid_inds = np.nonzero(valid_inds)[0]\n return bboxes[valid_inds], labels[valid_inds]",
"def non_maximum_suppression(boxes, confs, overlap_threshold, top_k):\n eps = 1e-15\n \n boxes = np.asarray(boxes, dtype='float32')\n \n pick = []\n x1, y1, x2, y2 = boxes.T\n \n idxs = np.argsort(confs)\n area = (x2 - x1) * (y2 - y1)\n \n while len(idxs) > 0:\n i = idxs[-1]\n \n pick.append(i)\n if len(pick) >= top_k:\n break\n \n idxs = idxs[:-1]\n \n xx1 = np.maximum(x1[i], x1[idxs])\n yy1 = np.maximum(y1[i], y1[idxs])\n xx2 = np.minimum(x2[i], x2[idxs])\n yy2 = np.minimum(y2[i], y2[idxs])\n \n w = np.maximum(0, xx2 - xx1)\n h = np.maximum(0, yy2 - yy1)\n I = w * h\n \n overlap = I / (area[idxs] + eps)\n # as in Girshick et. al.\n \n #U = area[idxs] + area[i] - I\n #overlap = I / (U + eps)\n \n idxs = idxs[overlap <= overlap_threshold]\n \n return pick",
"def py_cpu_nms(boxes, scores, thresh=0.55):\n # x1、y1、x2、y2、以及score赋值\n boxes = boxes.detach().numpy()\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n scores = scores\n\n # 每一个检测框的面积\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # 按照score置信度降序排序\n # order = scores.argsort()[::-1]\n all_scores, order = scores.sort(descending=True)\n\n keep = [] # 保留的结果框集合\n # print(order)\n while int(len(order.detach().numpy())) > 0:\n i = order[0]\n keep.append(i.numpy()) # 保留该类剩余box中得分最高的一个\n # 得到相交区域,左上及右下\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n # 计算相交的面积,不重叠时面积为0\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n # 计算IoU:重叠面积 /(面积1+面积2-重叠面积)\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n # 保留IoU小于阈值的box\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1] # 因为ovr数组的长度比order数组少一个,所以这里要将所有下标后移一位\n\n return keep",
"def box_nms(boxes, scores, threshold=0.5):\n\n # Torchvision NMS:\n keep = torchvision.ops.boxes.nms(boxes, scores,threshold)\n return keep\n\n # Custom NMS: uncomment to use\n \"\"\"x1 = bboxes[:, 0]\n y1 = bboxes[:, 1]\n x2 = bboxes[:, 2]\n y2 = bboxes[:, 3]\n\n areas = (x2 - x1) * (y2 - y1)\n _, order = scores.sort(0, descending=True)\n keep = []\n while order.numel() > 0:\n try:\n i = order[0]\n except IndexError:\n break\n keep.append(i)\n\n if order.numel() == 1:\n break\n \n xx1 = x1[order[1:]].clamp(min=x1[i].item())\n yy1 = y1[order[1:]].clamp(min=y1[i].item())\n xx2 = x2[order[1:]].clamp(max=x2[i].item())\n yy2 = y2[order[1:]].clamp(max=y2[i].item())\n\n w = (xx2 - xx1).clamp(min=0)\n h = (yy2 - yy1).clamp(min=0)\n inter = w * h\n\n if mode == 'union':\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == 'min':\n ovr = inter / areas[order[1:]].clamp(max=areas[i])\n else:\n raise TypeError('Unknown nms mode: %s.' % mode)\n\n ids = (ovr < threshold).nonzero().squeeze()\n if ids.numel() == 0:\n break\n # because the length of the ovr is less than the order by 1\n # so we have to add to ids to get the right one\n order = order[ids + 1]\n return torch.LongTensor(keep)\"\"\"",
"def bboxes_nms(classes, scores, bboxes, nms_threshold=0.45):\n keep_bboxes = np.ones(scores.shape, dtype=np.bool)\n for i in range(scores.size - 1):\n if keep_bboxes[i]:\n # Computer overlap with bboxes which are following.\n overlap = bboxes_jaccard(bboxes[i], bboxes[(i + 1):])\n # Overlap threshold for keeping + checking part of the same class\n keep_overlap = np.logical_or(overlap < nms_threshold, classes[(i + 1):] != classes[i])\n keep_bboxes[(i + 1):] = np.logical_and(keep_bboxes[(i + 1):], keep_overlap)\n\n idxes = np.where(keep_bboxes)\n return classes[idxes], scores[idxes], bboxes[idxes]",
"def yolo_non_max_suppression(scores, boxes, classes, max_boxes=10, iou_threshold=0.5):\r\n\r\n # max_boxes_tensor = tf.Variable(max_boxes, dtype=tf.int32) # tensor to be used in tf.image.non_max_suppression()\r\n # tf.InteractiveSession().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor\r\n max_boxes_tensor = tf.constant(max_boxes, dtype=tf.int32)\r\n\r\n # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep\r\n ### START CODE HERE ### (≈ 1 line)\r\n nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)\r\n ### END CODE HERE ###\r\n\r\n # Use K.gather() to select only nms_indices from scores, boxes and classes\r\n ### START CODE HERE ### (≈ 3 lines)\r\n scores = tf.gather(scores, nms_indices)\r\n boxes = tf.gather(boxes, nms_indices)\r\n classes = tf.gather(classes, nms_indices)\r\n ### END CODE HERE ###\r\n\r\n return scores, boxes, classes",
"def non_maximum_suppression_slow(boxes, confs, iou_threshold, top_k):\n idxs = np.argsort(-confs)\n selected = []\n for idx in idxs:\n if np.any(iou(boxes[idx], boxes[selected]) >= iou_threshold):\n continue\n selected.append(idx)\n if len(selected) >= top_k:\n break\n return selected",
"def greedyNonMaximumSupression(boxlist,clipthresh=0.05,IOUthresh=0.5):\r\n NMSed_list=[]\r\n if len(boxlist)==0 or clipthresh>1:\r\n return NMSed_list\r\n \r\n # keep every box with largest score while doesn't overlap with all the other\r\n # boxes\r\n NMSed_list.append(boxlist[0])\r\n for i in range(1,len(boxlist)):\r\n keepflag=True\r\n \r\n if boxlist[i][4]<clipthresh:\r\n break # break when score of current box is lower than thresh\r\n else:\r\n #print('----NMS--{}----'.format(i))\r\n for j in range(len(NMSed_list)):\r\n iou=getIoU(boxlist[i],NMSed_list[j])\r\n #print(iou)\r\n if iou>IOUthresh:\r\n keepflag=False\r\n break\r\n if keepflag:\r\n NMSed_list.append(boxlist[i])\r\n \r\n return NMSed_list",
"def reduce_possibilities_by_box(self):\n x = self.targetCell.x\n y = self.targetCell.y\n if x < 3 and y < 3: #top left\n self.check_box1()\n if x > 2 and x < 6 and y < 3: #middle left\n self.check_box2()\n if x > 5 and y < 3: #bottom left\n self.check_box3()\n if x < 3 and y > 2 and y < 6: #top middle\n self.check_box4()\n if x > 2 and x < 6 and y > 2 and y < 6: #center\n self.check_box5()\n if x > 5 and y > 2 and y < 6: #bottom middle\n self.check_box6()\n if x < 3 and y > 5: #top right\n self.check_box7()\n if x > 2 and x < 6 and y > 5: #middle right\n self.check_box8()\n if x > 5 and y > 5: #bottom right\n self.check_box9()\n self.targetCell.box_neighbour_possibilities = flatten_list(self.targetCell.box_neighbour_possibilities)",
"def nms(bobj, cf_thresh, nms_thresh):\n bboxs = bobj[\"boxs\"]\n scores = bobj[\"scores\"]\n cfvalid_ids = np.where(scores >= cf_thresh)[0]\n if len(cfvalid_ids) == 0:\n return None, None\n bboxs = bobj[\"boxs\"][cfvalid_ids]\n scores = scores[cfvalid_ids]\n ids = bobj[\"ids\"][cfvalid_ids]\n masks = bobj[\"masks\"][cfvalid_ids]\n x1 = bboxs[:, 0]\n y1 = bboxs[:, 1]\n x2 = bboxs[:, 2]\n y2 = bboxs[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # cfvalid_ids = np.where(scores >= cf_thresh)[0]\n # scores = scores[cfvalid_ids]\n\n # order = scores.argsort()[::-1]\n mask_sizes = np.sum(masks, axis=(1, 2))\n order = mask_sizes.argsort()[::-1]\n keep = []\n suppress = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n # Because of we split the object cross the boundary in the cropped instance,\n # concatenating it to the original instance, thus we need also mask iou condition for nms\n mask_other = masks[order[1:], :, :]\n mask_cur = masks[i, :, :]\n mask_inter = np.sum(mask_cur & mask_other, axis=(1, 2))\n mask_union = np.sum(mask_cur | mask_other, axis=(1, 2))\n mask_iou = mask_inter / mask_union\n\n suppress_inds = np.where((iou > nms_thresh) | (mask_iou > nms_thresh))[0]\n sup_i = order[1:][suppress_inds] if suppress_inds.size != 0 else np.array([])\n suppress.append(sup_i)\n\n inds = np.where((iou <= nms_thresh) & (mask_iou <= nms_thresh))[0]\n order = order[inds + 1]\n\n for i, sup in enumerate(suppress):\n if sup.any():\n for sup_id in sup:\n # sup_id = s + 1\n keep_id = keep[i]\n # union the keep mask and the suppress mask\n masks[keep_id, :, :] = masks[keep_id, :, :] | masks[sup_id, :, :]\n if keep:\n return ids[keep], masks[keep]\n else:\n return [], []",
"def convert_batched_nms(self, boxes, scores, idxs, iou_thres, num_boxes, indices):\n scores = op.expand_dims(scores, axis=-1, num_newaxis=1)\n idxs = op.expand_dims(idxs, axis=-1, num_newaxis=1)\n idxs = op.cast(idxs, \"float32\")\n data = op.concatenate([idxs, scores, boxes], -1)\n data = op.expand_dims(data, 0, 1)\n\n top_k = max_out_size = -1\n out = op.vision.non_max_suppression(\n data=data,\n valid_count=num_boxes,\n indices=indices,\n max_output_size=max_out_size,\n iou_threshold=iou_thres,\n force_suppress=False,\n top_k=top_k,\n coord_start=2,\n score_index=1,\n id_index=0,\n return_indices=True,\n invalid_to_bottom=False,\n )\n return out.tuple_value",
"def bboxes_clip(bbox_ref, bboxes):\n bboxes = np.copy(bboxes)\n bboxes = np.transpose(bboxes)\n bbox_ref = np.transpose(bbox_ref)\n bboxes[0] = np.maximum(bboxes[0], bbox_ref[0])\n bboxes[1] = np.maximum(bboxes[1], bbox_ref[1])\n bboxes[2] = np.minimum(bboxes[2], bbox_ref[2])\n bboxes[3] = np.minimum(bboxes[3], bbox_ref[3])\n bboxes = np.transpose(bboxes)\n return bboxes",
"def single_roidb(entry):\n gt_boxes = entry['boxes']\n return gt_boxes.shape[0] <= 1",
"def _nms_boxes(self, boxes, box_confidences):\n x_coord = boxes[:, 0]\n y_coord = boxes[:, 1]\n width = boxes[:, 2]\n height = boxes[:, 3]\n\n areas = width * height\n ordered = box_confidences.argsort()[::-1]\n\n keep = list()\n while ordered.size > 0:\n # Index of the current element:\n i = ordered[0]\n keep.append(i)\n xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])\n yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])\n xx2 = np.minimum(x_coord[i] + width[i],\n x_coord[ordered[1:]] + width[ordered[1:]])\n yy2 = np.minimum(y_coord[i] + height[i],\n y_coord[ordered[1:]] + height[ordered[1:]])\n\n width1 = np.maximum(0.0, xx2 - xx1 + 1)\n height1 = np.maximum(0.0, yy2 - yy1 + 1)\n intersection = width1 * height1\n union = (areas[i] + areas[ordered[1:]] - intersection)\n\n # Compute the Intersection over Union (IoU) score:\n iou = intersection / union\n\n # The goal of the NMS algorithm is to reduce the number of adjacent bounding-box\n # candidates to a minimum. In this step, we keep only those elements whose overlap\n # with the current bounding box is lower than the threshold:\n indexes = np.where(iou <= self.nms_threshold)[0]\n ordered = ordered[indexes + 1]\n\n keep = np.array(keep)\n return keep",
"def _filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = box_confidences * box_class_probs\n box_classes = np.argmax(box_scores, axis=-1)\n box_class_scores = np.max(box_scores, axis=-1)\n pos = np.where(box_class_scores >= self.object_threshold)\n\n boxes = boxes[pos]\n classes = box_classes[pos]\n scores = box_class_scores[pos]\n\n return boxes, classes, scores"
]
| [
"0.7150832",
"0.7069101",
"0.68712294",
"0.6653853",
"0.65888387",
"0.65339005",
"0.64546716",
"0.6423793",
"0.6404525",
"0.63950497",
"0.6346387",
"0.6314987",
"0.63059807",
"0.62732834",
"0.62166923",
"0.61743283",
"0.607827",
"0.60195446",
"0.5984185",
"0.5977257",
"0.59766257",
"0.5940926",
"0.59384686",
"0.59252626",
"0.5852722",
"0.5846041",
"0.5807396",
"0.5775417",
"0.57649404",
"0.57624674"
]
| 0.7529759 | 0 |
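
Note on the record closed above: the listed snippets all reduce to IoU-based box filtering and non-maximum suppression. For reference, a minimal corner-format IoU helper is sketched here; it is an illustration only, not taken from any of the listed implementations, and it omits the +1 pixel convention several of them use.

def iou_xyxy(box_a, box_b):
    # boxes given as [x1, y1, x2, y2] corners
    xa1, ya1, xa2, ya2 = box_a
    xb1, yb1, xb2, yb2 = box_b
    inter_w = max(0.0, min(xa2, xb2) - max(xa1, xb1))
    inter_h = max(0.0, min(ya2, yb2) - max(ya1, yb1))
    inter = inter_w * inter_h
    union = (xa2 - xa1) * (ya2 - ya1) + (xb2 - xb1) * (yb2 - yb1) - inter
    return inter / union if union > 0 else 0.0

print(iou_xyxy([0, 0, 10, 10], [5, 5, 15, 15]))  # 25 / 175, roughly 0.143
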
outer product of state1 and state2 | def ketbra(state1, state2):
state1 = normalize(state1)
state2 = normalize(state2)
return np.outer(state1.conj(), state2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)",
"def braket(state1, state2):\n state1 = normalize(state1)\n state2 = normalize(state2)\n return np.dot(state1.conj(), state2)",
"def product(self, x, y):\n return self( x.lift() * y.lift() )",
"def prod(self, x, y):\n return (self.basic_operation.reduce(x.original+y.original),\n self.operation1.prod(x.left, y.left),\n self.operation2.prod(x.right, y.right))",
"def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.",
"def product_on_basis(self, g1, g2):\n return self.monomial(g1 * g2)",
"def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state",
"def test_is_product_entangled_state_2_sys():\n ent_vec = max_entangled(4)\n np.testing.assert_equal(is_product_vector(ent_vec, dim=[4, 4]), False)",
"def Chain(A, B):\n return _prodOperator(B, A)",
"def product_2(m1, m2):\r\n return make_mono_admissible_2(list(m1) + list(m2))",
"def product(a, b):\n return a * b",
"def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out",
"def product(self, x, y):\n return self._cached_product(x.value, y.value)",
"def pair_product(x1, x2):\n return np.multiply(x1, x2)",
"def dot_product(a,b):\n return sum(pairwise_mult(a,b))",
"def product2(a, b, *args ):\n valadd = a + b\n valmul = 1\n for i in args:\n valmul *= i\n return valadd, valmul",
"def cartesian_product(self, other, only_accessible_components=True):\n def function(*transitions):\n if equal(t.word_in for t in transitions):\n return (transitions[0].word_in,\n list(zip_longest(\n *(t.word_out for t in transitions)\n )))\n else:\n raise LookupError\n\n def final_function(*states):\n return list(zip_longest(*(s.final_word_out\n for s in states)))\n\n return self.product_FiniteStateMachine(\n other,\n function,\n final_function=final_function,\n only_accessible_components=only_accessible_components)",
"def _commuting_products(q_1: Q, q_2: Q) -> Dict:\n\n s_t, s_x, s_y, s_z = q_1.t, q_1.x, q_1.y, q_1.z\n q_2_t, q_2_x, q_2_y, q_2_z = q_2.t, q_2.x, q_2.y, q_2.z\n\n product_dict = {\n \"tt\": s_t * q_2_t,\n \"xx+yy+zz\": s_x * q_2_x + s_y * q_2_y + s_z * q_2_z,\n \"tx+xt\": s_t * q_2_x + s_x * q_2_t,\n \"ty+yt\": s_t * q_2_y + s_y * q_2_t,\n \"tz+zt\": s_t * q_2_z + s_z * q_2_t,\n }\n\n return product_dict",
"def cross_product(p0,p1,p2):\n\treturn (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))",
"def mult(p1, p2):\r\n p = 0\r\n while p2:\r\n if p2 & 0b1:\r\n p ^= p1\r\n p1 <<= 1\r\n if p1 & 0b10000:\r\n p1 ^= 0b11\r\n p2 >>= 1\r\n return p & 0b1111",
"def make_bprod(self):\n rhs1 = random.choice(self.nonterminals)\n rhs2 = random.choice(self.nonterminals)\n lhs = random.choice(self.nonterminals)\n return (lhs, (rhs1, rhs2))",
"def product(self):\n raise NotImplementedError",
"def cross_product(a, b):\n a1, a2, a3 = a\n b1, b2, b3 = b\n return (a2 * b3 - a3 * b2, a3 * b1 - a1 * b3, a1 * b2 - a2 * b1)",
"def same_side_product(p, q, a, b):\n return line_ccw(a, b, p) * line_ccw(a, b, q)",
"def __mul__(self, other):\r\n return self.prod(other)",
"def __init__(self, state):\n super().__init__(\"set_matrix_product_state\", len(state[0]), 0, [state])",
"def product(num_a, num_b):\r\n return num_a*num_b",
"def call(self, inputs, states):\r\n (out_prev, Vm_prev) = states\r\n\r\n #Vm = Vm_prev * (1.0 - out_prev)\r\n #Lateral inhibition logic:\r\n Vm = Vm_prev * (1.0 - tf.reduce_max(out_prev))\r\n\r\n Vm = Vm * self.decay\r\n Vm = Vm + tf.matmul(inputs, self.kernel)\r\n if self.recurrent:\r\n Vm = Vm + tf.matmul(out_prev, self.recurrent_kernel)\r\n Vm = self.g(Vm)\r\n overVth = Vm - self.bias\r\n out = self.activation(overVth)\r\n return out, (out, Vm)",
"def __call__(self, inputs, states):\n \"\"\"Now we have multiple states, state->states\"\"\"\n sigmoid = tf.sigmoid\n # Parameters of gates are concatenated into one multiply for efficiency.\n # states: size = time_lag\n if self._state_is_tuple:\n hs = ()\n for state in states:\n c, h = state # c and h: tensor_size = (batch_size, hidden_size)\n hs += (h,) # hs : size = time_lag, i.e. time_lag * (batch_size, hidden_size)\n else:\n hs = ()\n for state in states:\n c, h = array_ops.split(value=state,\n num_or_size_splits=2,\n axis=1)\n hs += (h,)\n \n meta_variable_size = 4 * self.output_size\n concat = Symmetric_MPS_wavefn(inputs,\n hs,\n meta_variable_size,\n self._num_orders,\n self._virtual_dim,\n True)\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(value=concat,\n num_or_size_splits=4,\n axis=1)\n\n new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))\n new_h = self._activation(new_c) * sigmoid(o)\n\n if self._state_is_tuple:\n new_state = LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat([new_c, new_h], 1)\n return new_h, new_state",
"def product_map(xs1, xs2):\n return jax.vmap(lambda x1: jax.vmap(lambda x2: pair_product(x1, x2))(xs2))(xs1)"
]
| [
"0.75618",
"0.6473393",
"0.6389227",
"0.61178523",
"0.6102681",
"0.6095291",
"0.6018713",
"0.601494",
"0.5975703",
"0.59749955",
"0.59558004",
"0.59287375",
"0.5914068",
"0.5913484",
"0.5894375",
"0.5887508",
"0.5875724",
"0.5860244",
"0.5821667",
"0.58075386",
"0.57790744",
"0.57539487",
"0.568767",
"0.56865007",
"0.5684541",
"0.56575257",
"0.56476665",
"0.5638651",
"0.56288606",
"0.56199104"
]
| 0.6515105 | 1 |
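
The ketbra document above builds the outer product of two normalized state vectors. A self-contained sketch of the same expression follows; the normalize helper here is an assumption (plain division by the Euclidean norm), standing in for whatever helper the original module defines.

import numpy as np

def normalize(state):
    # assumed helper: scale the vector to unit norm
    state = np.asarray(state, dtype=complex)
    return state / np.linalg.norm(state)

def ketbra_demo(state1, state2):
    # same expression as the record above: outer product of the
    # conjugated first state with the second
    return np.outer(normalize(state1).conj(), normalize(state2))

print(ketbra_demo([1, 0], [1, 0]))  # projector onto |0>
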
density matrix of an ensemble of quantum states | def density(ensembles):
if len(ensembles.shape) < 2:
return ketbra(ensembles)
else:
den_mat = ketbra(ensembles[0])
for i in range(1, len(ensembles)):
den_mat += ketbra(ensembles[i])
den_mat /= len(ensembles)
return den_mat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_density_matrix(states=None, dimensions=None):\n if states is None:\n tdim = np.prod(dimensions)\n dmtotal0 = np.eye(tdim) / tdim\n\n return dmtotal0\n\n dmtotal0 = np.eye(1, dtype=np.complex128)\n\n for i, s in enumerate(states):\n\n if not hasattr(s, \"__len__\"):\n # assume s is int or float showing the spin projection in the pure state\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n state_number = int(round((d - 1) / 2 - s))\n dm_nucleus[state_number, state_number] = 1\n\n else:\n if s.shape.__len__() == 1:\n d = dimensions[i]\n dm_nucleus = np.zeros((d, d), dtype=np.complex128)\n np.fill_diagonal(dm_nucleus, s)\n\n else:\n dm_nucleus = s\n\n dmtotal0 = np.kron(dmtotal0, dm_nucleus)\n\n return dmtotal0",
"def density(self):\n return self.nnz/self.dim",
"def getDensityOfStates(self, Elist):\n\t\trho = np.zeros((len(Elist)), np.float64)\n\t\trho0 = _modes.hinderedrotor_densityofstates(Elist, self.frequency, self.barrier)\n\t\tfor i in range(self.degeneracy):\n\t\t\trho = _modes.convolve(rho, rho0, Elist)\n\t\treturn rho",
"def get_density(matrix):\n return matrix.getnnz() / (matrix.shape[0] * matrix.shape[1])",
"def get_element_density(mt):\r\n fraction_matrix = zeros(100)\r\n \r\n composition = Composition(mt['pretty_formula'])\r\n \r\n for element in composition:\r\n fraction = composition.get_atomic_fraction(element) # get the atomic fraction.\r\n fraction_matrix[element.Z] = fraction\r\n \r\n return fraction_matrix",
"def density(self):\n return self.nnz / self.size",
"def dens_matrix(state):\n size = len(state)\n state_conj = np.conj(state)\n dm = np.zeros((size,) * 4, dtype=complex)\n\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n dm[p1, p2, p1_, p2_] = state[p1, p2] * state_conj[p1_, p2_]\n\n return dm",
"def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)",
"def matrix_to_density(mat):\n from sympy.physics.quantum.density import Density\n eigen = mat.eigenvects()\n args = [[matrix_to_qubit(Matrix(\n [vector, ])), x[0]] for x in eigen for vector in x[2] if x[0] != 0]\n if (len(args) == 0):\n return S.Zero\n else:\n return Density(*args)",
"def random_density_matrix(nqubits: int, dtype=np.complex128) -> np.ndarray:\n rho = random_numpy_hermitian(nqubits, dtype=dtype)\n # Normalize\n ids = np.arange(2 ** nqubits)\n rho[ids, ids] = rho[ids, ids] / np.trace(rho)\n return rho.astype(dtype)",
"def density(self):\n raise TypeError(\"The density function is not support on a Multigraph.\")",
"def density_matrix(wires) -> \"DensityMatrixMP\":\n wires = Wires(wires)\n return DensityMatrixMP(wires=wires)",
"def find_density(attr, D, h):\n d = D.shape[1]\n n = D.shape[0]\n total = 0\n for xi in D:\n kernel = find_kernel_value(attr, xi, h, d)\n total += kernel\n return total / (n * h ** d)",
"def dens_matrix_4ch(state):\n size = len(state)\n state_conj = np.conj(state)\n dens_matrix = np.zeros((size,) * 8, dtype=complex)\n\n for p1 in range(size):\n for p2 in range(size):\n for p3 in range(size):\n for p4 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n for p3_ in range(size):\n for p4_ in range(size):\n dens_matrix[p1, p2, p3, p4, p1_, p2_, p3_, p4_] = state[p1, p2, p3, p4] * state_conj[p1_, p2_, p3_, p4_]\n\n return dens_matrix",
"def computeChargeDensity(self):\n \n self.rho = np.zeros((self.ni, self.nj, self.nk))\n \n for species in self.speciesList:\n if species.charge!=0:\n self.rho += species.charge*species.den",
"def getDensityOfStates(self, Elist):\n\t\tpass",
"def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)",
"def getDensityOfStates(self, Elist, V=1.0):\n\t\treturn _modes.translation_densityofstates(Elist, self.mass, self.dimension, V)",
"def getDensityOfStates(self, Elist):\n\t\treturn _modes.freerotor_densityofstates(Elist, self.frequencies, 1 if self.linear else 0)",
"def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9",
"def density_of_state_plot(N=400,a=1.0,eita=0.01):\n foot_step=2*np.pi/N\n k=np.arange(0.0,2*np.pi/a,foot_step)\n Ek=band_energy(k)\n E=np.arange(-3.0,3.0,0.01)\n Ek.shape=(N,1)\n E.shape=(1,600)\n \"\"\"Reshape E and Ek series with broadcasting method.\"\"\"\n dirac_function=np.imag(np.true_divide(1/np.pi,np.subtract(E-Ek,1j*eita)))\n D=np.sum(np.true_divide(dirac_function,N),axis=0)\n \"\"\"Calculate the density of state with lorentzian broadenning method.\"\"\" \n E.shape=(600)\n plt.plot(D,E)",
"def getDensityOfStates(self, Elist, linear):\n\n\t\timport states\n\n\t\t# Create energies in cm^-1 at which to evaluate the density of states\n\t\tconv = constants.h * constants.c * 100.0 * constants.Na # [=] J/mol/cm^-1\n\t\tEmin = min(Elist) / conv\n\t\tEmax = max(Elist) / conv\n\t\tdE = (Elist[1] - Elist[0]) / conv\n\t\tElist0 = np.arange(Emin, Emax+dE/2, dE)\n\n\t\t# Prepare inputs for density of states function\n\t\tvib = np.array([mode.frequency for mode in self.modes if isinstance(mode, HarmonicOscillator)])\n\t\trot = np.array([mode.frequencies for mode in self.modes if isinstance(mode, RigidRotor)])\n\t\thind = np.array([[mode.frequency, mode.barrier] for mode in self.modes if isinstance(mode, HinderedRotor)])\n\t\tif len(hind) == 0: hind = np.zeros([0,2],np.float64)\n\t\tlinear = 1 if linear else 0\n\t\tsymm = self.symmetry\n\n\t\t# Calculate the density of states\n\t\tdensStates, msg = states.densityofstates(Elist0, vib, rot, hind, symm, linear)\n\t\tmsg = msg.strip()\n\t\tif msg != '':\n\t\t\traise Exception('Error while calculating the density of states for species %s: %s' % (self, msg))\n\n\t\t# Convert density of states from (cm^-1)^-1 to mol/J\n\t\tdensStates /= conv\n\n\t\t# Return result\n\t\treturn densStates",
"def tensor_density(self):\r\n from .converter import Converter\r\n return Converter.convert_density(self)",
"def density_ch(tensor):\n return 1 - sparsity_ch(tensor)",
"def test_densities():\n\n actual, r, wt = GridGenerator.make_grid(400)\n grid = 4*pi*r**2*wt\n\n data = AtomData()\n\n print(\"\\nINTEGRATED DENSITY TEST\")\n print(\"=======================\")\n for a in list(data.nuclear_charge.keys()):\n atom = Atom(a)\n Nel = data.electron_count[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n # Count electrons per spin channel\n s_occ = AtomData.s_occ.get(a, [0, 0])\n p_occ = AtomData.p_occ.get(a, [0, 0])\n d_occ = AtomData.d_occ.get(a, [0, 0])\n f_occ = AtomData.f_occ.get(a, [0, 0])\n nela = np.sum(s_occ[0])+np.sum(p_occ[0])+np.sum(d_occ[0])+np.sum(f_occ[0])\n nelb = np.sum(s_occ[1])+np.sum(p_occ[1])+np.sum(d_occ[1])+np.sum(f_occ[1])\n assert(nela+nelb == Nel)\n\n id0 = np.dot(d0, grid)\n id1 = np.dot(d1, grid)\n\n diff_0 = id0 - nela\n percent_diff_0 = 100*diff_0/nela\n\n # Check to catch for Hydrogen having no beta electrons\n if nelb > 0.0:\n diff_1 = id1 - nelb\n percent_diff_1 = 100*diff_1/nelb\n else:\n diff_1 = 0.0\n percent_diff_1 = 0.0\n\n print(\"{:>3} - N_0 = ({:4.1f}) {:+2.6e}%, N_1 = ({:4.1f}) {:+2.6e}%, {:}\".format(a, id0, percent_diff_0, id1, percent_diff_1, \"PASSED\" if max(abs(diff_0), abs(diff_1)) < 1e-4 else \"FAILED - \"))\n\n print(\"\\nINTEGRATED KINETIC TEST\")\n print(\"=======================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n t_bm = data.ke_test[a]\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n it0 = np.dot(t0, grid)\n it1 = np.dot(t1, grid)\n itot = it0 + it1\n\n diff = itot - t_bm\n print(\"{:>3} - T = {:+.6e}%, {:}\".format(a, 100*diff/t_bm, \"PASSED\" if abs(100*diff/t_bm) < 1e-2 else \"FAILED - \"))\n\n\n # The integral of the Laplacian over all space should be 0. Check that.\n print(\"\\nINTEGRATED LAPLACIAN TEST\")\n print(\"=========================\")\n for a in list(AtomData.ke_test.keys()):\n atom = Atom(a)\n\n d0, d1, g0, g1, t0, t1, l0, l1 = atom.get_densities(r)\n\n il0 = np.dot(grid, l0)\n il1 = np.dot(grid, l1)\n print(\"{:>3} - L_0 = {:+.6e}, L_1 = {:+.6e}, {:}\".format(a, il0, il1, \"PASSED\" if max(abs(il0), abs(il1)) < 1e-6 else \"FAILED - \"))\n\n\n print(\"\\nFINITE DIFFERENCE GRADIENT TEST\")\n print(\"===============================\")\n print(\"Testing gradient evaluation function against finite difference estimate...\")\n ne = Atom(\"Ne\") # Let's use \"the guvnor\"\n # We only need to test a few points around the core\n fdh = 1e-8\n fdr = np.arange(0.9, 0.9+fdh*10, fdh)\n d0, d1, g0, g1, t0, t1, l0, l1 = ne.get_densities(fdr)\n\n # First the first central difference\n fdiff = (d0[2:]-d0[:-2])/(2*fdh) # Construct the central difference\n if np.allclose(fdiff, g0[1:-1], atol=1e-1): # finite difference is not perfect, so lenient tollerance\n print(\"Gradient: PASSED\")\n else:\n print(\"Gradient: FAILED -\")\n\n print(\"\\nELEMENT COLOR FUNCTIONS TEST\")\n print(\"===========================\")\n test_obj = [Atom(\"H\"), Atom(\"C\"), Atom(\"O\")]\n test_str = [\"H\", \"C\", \"O\"]\n ref = np.array([[1., 1., 1.], [0.565, 0.565, 0.565], [1. 
, 0.051, 0.051]])\n\n if np.allclose( np.array(get_colors_for_elements(test_obj)), ref):\n print(\"\\nColor from objects: PASSED\")\n else:\n print(\"\\nColor from objects: FAILED -\")\n\n if np.allclose( np.array(get_colors_for_elements(test_str)), ref):\n print(\"Color from strings: PASSED\")\n else:\n print(\"Color from strings: FAILED -\")\n\n if HAVE_LIBXC:\n test_functional='GGA_X_PBE'\n print(\"\\nATOMIC EXCHANGE ENERGIES WITH {}\".format(test_functional))\n print(\"============================================\")\n for a in list(data.ke_test.keys()):\n atom = Atom(a)\n nE, vrho, vsigma, vtau, vlapl = atom.libxc_eval(r, functional=test_functional, restricted=False)\n Exc = (np.dot(nE, grid)).item()\n print('{:3s} {:.10f}'.format(a, Exc))\n else:\n print(\"\\nNot doing energy calculations due to lack of libxc.\\n\")",
"def linkDensity(self, time=None):\r\n listofDensities = list()\r\n for cell in self.cells:\r\n listofDensities.append(cell.cellDensity())\r\n return listofDensities",
"def getDensityEstimate(self):\n return self.density",
"def neighbor_dE(self, state):\n\n dE = np.zeros(self.n)\n for i in range(self.n):\n dE[i] = 2*state[i]*self.hJ[i] +2*state[i]*(state*self.Jmat[i]).sum()\n return dE",
"def idensity(n):\n I = zeroes(n, n)\n for i in range(n):\n I.g[i][i] = 1.0\n return I",
"def density(x, m, h):\n \n n = x.size\n rho = np.zeros((n,1))\n \n for i in range(0, n):\n # calculate vector between two particles\n uij = x[i] - x\n # calculate contribution due to neighbors\n rho_ij = m*kernel( uij, h, '0' )\n # accumulate contributions to the density\n rho[i] = rho[i] + np.sum(rho_ij)\n \n return rho"
]
| [
"0.7259972",
"0.6602903",
"0.65992695",
"0.6588799",
"0.642848",
"0.6414085",
"0.6389149",
"0.62670654",
"0.6260562",
"0.6256143",
"0.62500995",
"0.6240718",
"0.6226135",
"0.61993104",
"0.61880094",
"0.61302406",
"0.6075312",
"0.60418975",
"0.6032779",
"0.6008144",
"0.5978373",
"0.5939915",
"0.5926663",
"0.5923069",
"0.58977723",
"0.58969325",
"0.58677465",
"0.5855517",
"0.5837418",
"0.5834539"
]
| 0.6821482 | 1 |
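
The density document above averages the outer products |psi_i><psi_i| over an equal-weight ensemble. A compact stand-alone version, assuming the states arrive as rows of an array and are normalized in place, is:

import numpy as np

def ensemble_density(states):
    # rho = (1/N) * sum_i |psi_i><psi_i| with equal weights
    states = np.asarray(states, dtype=complex)
    states = states / np.linalg.norm(states, axis=1, keepdims=True)
    return sum(np.outer(s, s.conj()) for s in states) / len(states)

rho = ensemble_density([[1, 0], [0, 1]])  # equal mixture of |0> and |1>
print(np.allclose(rho, np.eye(2) / 2))    # True: the maximally mixed qubit state
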
fidelity between state1 and state2, only valid for pure state1 | def fidelity(state1, state2):
if len(state1.shape) > 1:
print("error: state1 must be a pure state.")
state1 = normalize(state1)
fid = 0
if len(state2.shape)<2:
state2 = normalize(state2)
fid = np.abs(
braket(state1, state2)
)**2
else:
for i in range(len(state2)):
state2[i] = normalize(state2[i])
fid += np.abs(
braket(state1, state2)
)**2
fid /= len(state2)
return fid.real | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_state2(self):\n pass",
"def test_update_state1(self):\n pass",
"def test_eq_not_two_states(self):\n assert not State(substance=\"water\") == 3\n assert not 3 == State(substance=\"water\")",
"def check_state(self):\n pass",
"async def test_multiple_same_state(hass):\n calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)\n\n await async_reproduce_states(hass, [\n State(ENTITY_1, 'on'),\n State(ENTITY_2, 'on'),\n ])\n\n await hass.async_block_till_done()\n\n assert len(calls_1) == 2\n # order is not guaranteed\n assert any(call.data == {'entity_id': ENTITY_1} for call in calls_1)\n assert any(call.data == {'entity_id': ENTITY_2} for call in calls_1)",
"async def test_multiple_different_state(hass):\n calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)\n calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)\n\n await async_reproduce_states(hass, [\n State(ENTITY_1, 'on'),\n State(ENTITY_2, 'off'),\n ])\n\n await hass.async_block_till_done()\n\n assert len(calls_1) == 1\n assert calls_1[0].data == {'entity_id': ENTITY_1}\n assert len(calls_2) == 1\n assert calls_2[0].data == {'entity_id': ENTITY_2}",
"def _see_state(self, new_state: State) -> None:\n entity_id = new_state.entity_id\n domain = new_state.domain\n state = new_state.state\n registry: GroupIntegrationRegistry = self.hass.data[REG_KEY]\n self._assumed[entity_id] = bool(new_state.attributes.get(ATTR_ASSUMED_STATE))\n\n if domain not in registry.on_states_by_domain:\n # Handle the group of a group case\n if state in registry.on_off_mapping:\n self._on_states.add(state)\n elif state in registry.off_on_mapping:\n self._on_states.add(registry.off_on_mapping[state])\n self._on_off[entity_id] = state in registry.on_off_mapping\n else:\n entity_on_state = registry.on_states_by_domain[domain]\n if domain in registry.on_states_by_domain:\n self._on_states.update(entity_on_state)\n self._on_off[entity_id] = state in entity_on_state",
"def testStateVariables(self):\n\n class S1(ClassWithCollections):\n v1 = StateVariable(enabled=True, doc=\"values1 is ...\")\n v1XXX = StateVariable(enabled=False, doc=\"values1 is ...\")\n\n\n class S2(ClassWithCollections):\n v2 = StateVariable(enabled=True, doc=\"values12 is ...\")\n\n class S1_(S1):\n pass\n\n class S1__(S1_):\n v1__ = StateVariable(enabled=False)\n\n class S12(S1__, S2):\n v12 = StateVariable()\n\n s1, s2, s1_, s1__, s12 = S1(), S2(), S1_(), S1__(), S12()\n\n self.failUnlessEqual(s1.states.isEnabled(\"v1\"), True)\n s1.v1 = 12\n s12.v1 = 120\n s2.v2 = 100\n\n self.failUnlessEqual(len(s2.states.listing), 1)\n\n self.failUnlessEqual(s1.v1, 12)\n try:\n tempvalue = s1__.v1__\n self.fail(\"Should have puked since values were not enabled yet\")\n except:\n pass",
"def assume_state(self, state):\n s = self.get_state()\n if s.id != state:\n raise GrblStateError(s)",
"def test_State_diff_id(self):\n U1 = State()\n U2 = State()\n U3 = State()\n self.assertNotEqual(U1.id, U2.id)\n self.assertNotEqual(U1.id, U3.id)\n self.assertNotEqual(U2.id, U3.id)",
"def state(params1):\n variational_circuit(params1)\n return qml.state()",
"def _transit_state(self) -> None:\n pass",
"def test_update_state3(self):\n pass",
"def successors(self, state):\n abstract",
"def choose(self, state: State) -> State:",
"def test_send_state_event_nonoverwriting(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n self.helper.send_state(\n r1, \"cat.hissing\", {\"value\": True}, tok=u1token, state_key=\"tabby\"\n )\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.send_state(\n r1, \"cat.hissing\", {\"value\": False}, tok=u1token, state_key=\"moggy\"\n )\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 1,\n )",
"def evaluate(self) :\n if self.inStates[0].getState() == self.inStates[1].getState(): return 0\n return 1",
"def test_update_state4(self):\n pass",
"def test_ids(self):\n state1 = State()\n state2 = State()\n state3 = State()\n self.assertFalse(state1.id == state2.id)\n self.assertFalse(state1.id == state3.id)\n self.assertFalse(state2.id == state3.id)",
"def goal_state(game_state):\n if game_state[0]:\n return False\n return True",
"def state(self, state: str) -> None:",
"def test_random_state_transfer(self):\r\n class Graph:\r\n def __init__(self, seed=123):\r\n self.rng = RandomStreams(seed)\r\n self.y = self.rng.uniform(size=(1,))\r\n g1 = Graph(seed=123)\r\n f1 = function([], g1.y)\r\n g2 = Graph(seed=987)\r\n f2 = function([], g2.y)\r\n\r\n for (su1, su2) in zip(g1.rng.state_updates, g2.rng.state_updates):\r\n su2[0].set_value(su1[0].get_value())\r\n\r\n numpy.testing.assert_array_almost_equal(f1(), f2(), decimal=6)",
"def state_changed(self, old_state, new_state, target_state):\n pass",
"def is_goal(state):\n pass",
"def __and__(self, other):\n if is_FiniteStateMachine(other):\n return self.intersection(other)",
"def test_not_eq_sub(self):\n st_1 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n st_2 = State(substance=\"ammonia\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n assert not st_1 == st_2",
"def is_different_from(self, another_state):\n return self.state_str != another_state.state_str",
"def comparable_to(self, from_state: _State, to_state: _State) -> bool:\n return (\n self.from_state.name == from_state.name\n and self.to_state.name == to_state.name\n )",
"def test_multiple_states(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Create a dummy event and get it back.\n event_id = boilerplate.createEvent(context)\n event = repo.LookupActivityEvent()(event_id)\n\n # Cancel when created.\n state_changer = request.state_changer\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n bm.Session.add(context)\n state_changer.perform(context, a.CANCEL, event)\n s1 = context.work_status.value\n self.assertEqual(s1, s.CANCELLED)\n\n # Cancel when started.\n c2 = model.factory(initial_state=s.STARTED)\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n state_changer.perform(c2, a.CANCEL, event)\n s2 = c2.work_status.value\n self.assertEqual(s2, s.CANCELLED)",
"def state(self):\n pass"
]
| [
"0.631732",
"0.6272566",
"0.62698925",
"0.6217125",
"0.6172236",
"0.6096079",
"0.6023768",
"0.5982629",
"0.59668",
"0.5952515",
"0.5916351",
"0.5878813",
"0.58518475",
"0.5844466",
"0.583131",
"0.5803019",
"0.58011234",
"0.5720957",
"0.57201564",
"0.5688706",
"0.56737256",
"0.56551075",
"0.56446296",
"0.56394655",
"0.56136054",
"0.5610463",
"0.55931574",
"0.5570892",
"0.5538748",
"0.55132496"
]
| 0.6372287 | 0 |
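
For two pure states the fidelity document above reduces to |<state1|state2>|^2 (and to an average of such terms when state2 is an ensemble). A pure-state sketch:

import numpy as np

def pure_state_fidelity(state1, state2):
    # F = |<state1|state2>|^2 for normalized pure states
    s1 = np.asarray(state1, dtype=complex)
    s2 = np.asarray(state2, dtype=complex)
    s1 = s1 / np.linalg.norm(s1)
    s2 = s2 / np.linalg.norm(s2)
    return abs(np.vdot(s1, s2)) ** 2  # np.vdot conjugates its first argument

print(pure_state_fidelity([1, 0], [1, 0]))  # 1.0, identical states
print(pure_state_fidelity([1, 0], [0, 1]))  # 0.0, orthogonal states
print(pure_state_fidelity([1, 0], [1, 1]))  # 0.5 (up to float error), |0> vs |+>
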
NXDOMAIN records to Redis. Scheduled with RedisHandler.submit(). | def nx_to_redis(self, backlog_timer, client_address, name):
if self.stop:
return
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT("START nx_to_redis")
if DNS_STATS:
timer = self.answer_to_redis_stats.start_timer()
self.redis_executor(self.nx_to_redis_, client_address, name.replace('\\;',';'))
if DNS_STATS:
timer.stop()
backlog_timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT("END nx_to_redis")
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(host):\n redis.setex('dispatcher',host,60)\n timer = threading.Timer(20.0, post, args=[host])\n timer.daemon = True\n timer.start()",
"def main():\n start = time.time()\n store = redis_stats_store.RedisStatsStore()\n\n delta_domains = get_delta_domains()\n for domain in delta_domains:\n store.note(domain)\n\n print 'Processed stats for {} domains in {} seconds'.format(\n len(delta_domains),\n round(time.time() - start, 2)\n )",
"def run(self):\n init()\n list_name = comet_config.REDIS_NAMESPACE + \"incoming/\" + self.service_name\n list_name_processing = list_name + \"/processing\"\n self.redis = r\n while True:\n try:\n item = self.redis.brpoplpush(list_name, list_name_processing)\n self.process_incoming(item)\n self.redis.lrem(list_name_processing, item)\n\n except redis.ConnectionError:\n pass",
"def nx_to_redis_(self, client_address, name):\n self.client_to_redis(client_address)\n k = '{};{};nx'.format(client_address, name)\n self.redis.incr(k)\n self.redis.expire(k, TTL_GRACE)\n return",
"def post_to_redis(self, message):\n \n if self.message_type and message.field('type')[1] != self.message_type:\n if self.performance_hint:\n logging.warn('PERFORMANCE HINT: Change your Dnstap config to restrict it to client response only.')\n self.performance_hint = False\n return\n # NOTE: Do these lookups AFTER verifying that we have the correct message type!\n response = message.field('response_message')[1]\n client_address = message.field('query_address')[1]\n \n question = None\n\n if self.ignore is not None:\n question = response.question[0].name.to_text().lower()\n for s in self.ignore:\n if s in question:\n return\n\n redis = self.redis\n\n if response.rcode() == rcode.NXDOMAIN:\n if response.question[0].rdtype in redis.ADDRESS_RECORDS:\n if question is None:\n question = response.question[0].name.to_text().lower()\n redis.submit(redis.nx_to_redis, client_address, question)\n elif response.rcode() == rcode.NOERROR:\n redis.submit(redis.answer_to_redis, client_address, response.answer)\n \n return",
"def sync_dns(self,):\n\n for server_name, server_ip in self.get_instances():\n self.dnsmanager.ensure_a_record(server_name, server_ip)",
"def report_to_redis(job, count=5):\n\n # it's important that these main python methods\n # don't call the Singleton - _connection needs to be None to be\n # properly serialized.\n r = StrictRedis.from_url(\"redis://10.0.0.10:6379\")\n for i in range(count):\n res = r.zpopmax('temp0')\n print(res)\n title = r.hget(res[0][0][:-1],res[0][0][-1:]+':t')\n r.set('success:'+str(job)+'|'+str(i), res[0][0]+'|%1.3f'%res[0][1])\n r.delete('temp0')\n return 0",
"def _records_to_redis_naive(self, records: List[Any]) -> bool:\n redis_client: Redis = self.redis_client\n\n queue_type: str = self._config[\"graph_queue_type\"]\n queue_key: str = self._config[\"graph_queue_key\"]\n\n try:\n redis_action = getattr(\n redis_client, self._redis_methods_map[queue_type].lower()\n )\n\n for r in records:\n gevent.sleep()\n redis_action(queue_key, json_dumps(r))\n\n except RedisError as e:\n self._logger.exception(\"Redis Exception: %s\", str(e)) # noqa: G200\n result = False\n\n else:\n result = True\n\n return result",
"def run_redis_example():\n\n try:\n print('\\nStep 1: Connect to Redis')\n r = login_redis_cloud()\n print('\\nStep 2: Cache some data in Redis and read it back')\n r.set('andy', '[email protected]')\n email = r.get('andy')\n print(f\"r.get('andy'): {email}\")\n\n print('\\nStep 3: Cache more data in Redis')\n r.set('pam', '[email protected]')\n r.set('fred', '[email protected]')\n\n print(\"\\nStep 4: Delete 'andy' from cache\")\n r.delete('andy')\n\n print('\\nStep 5: Make a unique ID and use it to count.')\n r.set('user_count', 21)\n r.incr('user_count')\n r.incr('user_count')\n r.decr('user_count')\n result = r.get('user_count')\n print(f'user_count=21+1+1-1={result}')\n\n print('\\nStep 6: Make richer data for a SKU')\n r.rpush('186675', 'chair')\n r.rpush('186675', 'red')\n r.rpush('186675', 'leather')\n r.rpush('186675', '5.99')\n\n print('\\nStep 7: Pull some data from the SKU structure')\n cover_type = r.lindex('186675', 2)\n print(f'Type of cover = {cover_type}')\n\n print('\\nStep 8: Add customer data for 6 customers')\n PHONE_IDX = 0\n ZIP_IDX = 1\n customer_data = {\n 'apple': {\n 'phone': '012-345-6789',\n 'zip': '01234'\n },\n 'lucky': {\n 'phone': '503-832-2833',\n 'zip': '53098'\n },\n 'zeke': {\n 'phone': '555-555-5555',\n 'zip': '98000'\n },\n 'blake': {\n 'phone': '838-608-0199',\n 'zip': '12011'\n },\n 'naomi': {\n 'phone': '721-608-8223',\n 'zip': '24587'\n },\n 'kale': {\n 'phone': '444-385-9115',\n 'zip': '62214'\n },\n }\n for customer, data in customer_data.items():\n print(f\"Inserting {customer}: [phone: {data['phone']}\"\n f\", zip: {data['zip']}]\")\n r.rpush(customer, data['phone'])\n r.rpush(customer, data['zip'])\n\n print('\\nStep 9. Retrieve zip and phone for blake')\n blake_phone = r.lindex('blake', PHONE_IDX)\n blake_zip = r.lindex('blake', ZIP_IDX)\n print(f\"Blake's info: [phone: {blake_phone}, zip: {blake_zip}]\")\n\n print('\\nFinally: Delete all data so we can start over.')\n r.flushdb()\n\n except Exception as e:\n print(f'Redis error: {e}')",
"def a_to_redis(self, client_address, name, ttl, address ):\n k = '{};{};dns'.format(client_address, address)\n ttl += TTL_GRACE\n name = ';{};'.format(name)\n names = self.redis.get(k) or ''\n if name not in names:\n self.redis.append(k, name)\n old_ttl = self.redis.ttl(k)\n if old_ttl is None or old_ttl < ttl:\n self.redis.expire(k, ttl)\n return",
"def __init__(self):\n self._redis = redis.Redis(host=\"localhost\", port=6379)\n self._redis.flushdb()",
"def redis_flush(self):\n def func(server):\n return server.server.flushall()\n self.__run_redis_cmd(func)",
"def __init__(self):\n self._redis = redis.Redis()\n self._redis.flushdb()",
"def run_worker():\n listen = ['default']\n conn = Redis(host=app.config['RQ_DEFAULT_HOST'],\n port=app.config['RQ_DEFAULT_PORT'],\n db=0,\n password=app.config['RQ_DEFAULT_PASSWORD'])\n\n with Connection(conn):\n worker = Worker(map(Queue, listen))\n worker.work()",
"def register_publisher(self, hostname, expire=-1):",
"def __run_redis_cmd(self, func, dbs=[0]):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n path_lib = u'%s/library/beehive/' % (self.ansible_path)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity, \n module=path_lib)\n hosts, vars = runner.get_inventory_with_vars(u'redis') \n \n resp = []\n for host in hosts:\n for db in dbs:\n redis_uri = u'%s;%s;%s' % (host, 6379, db)\n server = RedisManager(redis_uri)\n res = func(server)\n #res = server.ping()\n \n if isinstance(res, dict):\n for k,v in res.items():\n resp.append({u'host':str(host), u'db':db, \n u'response':u'%s = %s' % (k,v)})\n elif isinstance(res, list):\n for v in res:\n resp.append({u'host':str(host), u'db':db, \n u'response':v}) \n else:\n resp.append({u'host':str(host), u'db':db, u'response':res})\n self.logger.info(u'Ping redis %s : %s' % (redis_uri, resp))\n self.result(resp, headers=[u'host', u'db', u'response'])",
"def _redis2influx(self):\n messages = None\n\n try:\n messages = self.redis.queue_pop_all()\n logger.debug(f\"Read {len(messages)} messages from Redis queue\")\n\n if messages:\n messages_influx = list()\n for message_bytes in messages:\n message_str = message_bytes.decode()\n message_mqtt: MQTTMessage = string_2_mqtt_message(message_str)\n message_influx = mqtt_message_2_influx_message(message_mqtt)\n messages_influx.append(message_influx)\n\n self.influx.insert(*messages_influx)\n\n except Exception as ex:\n if messages:\n self.redis.queue_insert(*messages)\n raise ex",
"async def update_emotes():\n\n print(REDIS)\n r = await requests.get(url = URL)\n data = r.json()\n\n dictionary = {};\n\n for emote in data:\n dictionary[emote[\"emote\"][\"code\"]] = emote[\"emote\"][\"id\"]\n\n redis = await aioredis.create_redis_pool(REDIS)\n\n # TIL hmset_dict will UPDATE the emotes key with new key values in dictionary\n await redis.hmset_dict(\"emotes\", dictionary)\n\n redis.close()\n await redis.wait_closed()",
"def queue_domain(event, context):\n\n domain = event['domain']\n fetch_limit = int(os.environ['PAGE_FETCH_LIMIT'])\n if 'limit' in event:\n fetch_limit = int(event['limit'])\n\n index = os.environ['CC_INDEX']\n if 'index' in event:\n index = event['index']\n\n # pull all entries for this domain from index\n indices = list(get_warc_indices_for_domain(domain, index))\n\n # sample returned indices to 'limit' (where they exceed 'limit')\n sampled_indices = indices\n if fetch_limit < len(indices):\n sampled_indices = random.sample(indices, fetch_limit)\n\n # for each sampled index, get stored page text by URL\n lambda_client = boto3.client('lambda')\n\n results = list()\n\n for index in sampled_indices:\n results.append(\n lambda_client.invoke(\n FunctionName='fetch_wet_entry',\n Payload=json.dumps(index),\n InvocationType='Event'\n )\n )\n\n return {\n \"total_index_count\": len(indices),\n \"requested_indices\": sampled_indices\n }",
"def _mock_backend(self):\n for crawl_id in self.crawlQueue:\n # Retrieve page count from engine and set in central redis\n page_count = self.engine_redis.get(crawl_id + \"_count\")\n self.central_redis.set(crawl_id + \"_count\", page_count)\n self.central_redis.expire(crawl_id + \"_count\", 60*60)\n if page_count == \"-2\": # if complete\n self.crawlQueue.remove(crawl_id)",
"def dk_redis(request):\n return _dk_redis(request)",
"def newpublic_timeline(TTL=60):\n\n message_json = \"\"\n hash = hashlib.sha224(message_json).hexdigest()\n key = \"public_timeline_key:\" + hash\n # print \"Created Key\\t : %s\" % key\n\n############### REDIS SESSION CODE #####################\n\n # Check if data is in cache.\n if (R_SERVER.get(key)):\n print \"** Messages returned from Redis Cache **\"\n return cPickle.loads(R_SERVER.get(key))\n else:\n print \"** Messages returned from MongoDB **\"\n messages = public_timeline_query()\n data = []\n # print messages\n for row in messages:\n data.append({'user': row['username'], 'message': row['text'],\n 'pub_date': format_datetime(row['pub_date'])})\n\n public_timeline_JSON = jsonify(\n messages=data, Status_code=status.HTTP_200_OK)\n R_SERVER.set(key, cPickle.dumps(public_timeline_JSON))\n R_SERVER.expire(key, TTL)\n return public_timeline_JSON",
"async def test_update_emotes():\n\n dictionary = { \"Boo\": \"/Boo\" }\n\n redis = await aioredis.create_redis_pool(REDIS)\n await redis.hmset_dict(\"test_emotes\", dictionary)\n\n redis.close()\n await redis.wait_closed()",
"def cname_to_redis(self, client_address, oname, rname):\n k = '{};{};cname'.format(client_address, rname)\n oname = ';{};'.format(oname)\n names = self.redis.get(k) or ''\n if oname not in names:\n self.redis.append(k, oname)\n self.redis.expire(k, TTL_GRACE)\n return",
"def populate_redis(self, d):\n for k, v in d.items():\n self.redis_conn.set(k, v)",
"def run(self):\n redis_servers = settings.get_redis_servers()\n\n for redis_server in redis_servers:\n redis_password = redis_server.get(\"password\")\n self.ping(redis_server[\"server\"], redis_server[\"port\"], redis_password)\n\n if len(self.failedList) > 0:\n self.sendMail()",
"def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass",
"def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])",
"def send_notification(data):\n red = Redis(dd.REDIS_HOST, int(dd.REDIS_PORT))\n red.publish(\"all\", ['publish', data])",
"def app_index_job(cls):\n import time\n s = time.time()\n print('init--redis')\n news = json.dumps(DB.index_news(), ensure_ascii=False)\n mvs = json.dumps(DB.index_mvs('mv'), ensure_ascii=False)\n dsjs = json.dumps(DB.index_mvs('dsj'), ensure_ascii=False)\n dms = json.dumps(DB.index_mvs('dm'), ensure_ascii=False)\n zys = json.dumps(DB.index_mvs('zy'), ensure_ascii=False)\n mv_top = json.dumps(DB.index_tops('mv')[0:6], ensure_ascii=False)\n dsj_top = json.dumps(DB.index_tops('dsj')[0:6], ensure_ascii=False)\n zy_top = json.dumps(DB.index_tops('zy')[0:6], ensure_ascii=False)\n dm_top = json.dumps(DB.index_tops('dm')[0:6], ensure_ascii=False)\n # 今日更新和总视频数量\n today, total = DB.today_total(None)\n # 淘宝广告\n ads = json.dumps(TBApi.get_tb_goods(), ensure_ascii=False)\n cls.r.set('news', news)\n cls.r.set('mvs', mvs)\n cls.r.set('dsjs', dsjs)\n cls.r.set('dms', dms)\n cls.r.set('zys', zys)\n cls.r.set('mv_top', mv_top)\n cls.r.set('dsj_top', dsj_top)\n cls.r.set('zy_top', zy_top)\n cls.r.set('dm_top', dm_top)\n cls.r.set('today', today)\n cls.r.set('total', total)\n cls.r.set('ads', ads)\n del news, mvs, dsjs, dms, zys, mv_top, dsj_top, zy_top, dm_top, ads\n print(f'{time.time() - s}')"
]
| [
"0.62073916",
"0.56420434",
"0.5620912",
"0.55742466",
"0.552533",
"0.55196965",
"0.54764575",
"0.5447752",
"0.54184425",
"0.5291048",
"0.5289621",
"0.5279754",
"0.52736604",
"0.5273331",
"0.52584916",
"0.5249248",
"0.5233644",
"0.5203347",
"0.51823413",
"0.51620185",
"0.51219475",
"0.5119202",
"0.51126397",
"0.5084844",
"0.5059856",
"0.5034581",
"0.50134915",
"0.5008067",
"0.5008067",
"0.5003927"
]
| 0.568186 | 1 |
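
The nx_to_redis document above hands the actual write off to a helper (visible as nx_to_redis_ among the snippets in this record), which counts NXDOMAIN answers under a '{client};{name};nx' key and refreshes its TTL. A stripped-down sketch of that key scheme with redis-py follows; TTL_GRACE is an assumed placeholder, since the real value is defined elsewhere in the application.

import redis

TTL_GRACE = 900  # assumed placeholder; the real grace period comes from app config

def record_nxdomain(client, client_address, qname):
    # one counter per (client address, query name), kept alive for TTL_GRACE seconds
    key = '{};{};nx'.format(client_address, qname)
    client.incr(key)
    client.expire(key, TTL_GRACE)

# usage sketch:
# r = redis.Redis(host='localhost', port=6379)
# record_nxdomain(r, '192.0.2.10', 'does-not-exist.example.com')
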
Analyze and post to the ShoDoHFlo redis database. | def post_to_redis(self, message):
if self.message_type and message.field('type')[1] != self.message_type:
if self.performance_hint:
logging.warn('PERFORMANCE HINT: Change your Dnstap config to restrict it to client response only.')
self.performance_hint = False
return
# NOTE: Do these lookups AFTER verifying that we have the correct message type!
response = message.field('response_message')[1]
client_address = message.field('query_address')[1]
question = None
if self.ignore is not None:
question = response.question[0].name.to_text().lower()
for s in self.ignore:
if s in question:
return
redis = self.redis
if response.rcode() == rcode.NXDOMAIN:
if response.question[0].rdtype in redis.ADDRESS_RECORDS:
if question is None:
question = response.question[0].name.to_text().lower()
redis.submit(redis.nx_to_redis, client_address, question)
elif response.rcode() == rcode.NOERROR:
redis.submit(redis.answer_to_redis, client_address, response.answer)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(host):\n redis.setex('dispatcher',host,60)\n timer = threading.Timer(20.0, post, args=[host])\n timer.daemon = True\n timer.start()",
"def knock_sev_post(self,host):\n url = \"http://{}:{}/knock\".format(host, self.port)\n method = \"POST\"\n data = {\n \"Command\":\"Register\",\n \"AlgoHosts\":self.localhost\n }\n r = dict()\n try:\n r = yield retrieve_rds(url, method, **data)\n except Exception as e:\n pass\n # traceback.print_exc()\n raise gen.Return(r)",
"def postQuery(self):\n pass",
"def submit(id, host):",
"def app_index_job(cls):\n import time\n s = time.time()\n print('init--redis')\n news = json.dumps(DB.index_news(), ensure_ascii=False)\n mvs = json.dumps(DB.index_mvs('mv'), ensure_ascii=False)\n dsjs = json.dumps(DB.index_mvs('dsj'), ensure_ascii=False)\n dms = json.dumps(DB.index_mvs('dm'), ensure_ascii=False)\n zys = json.dumps(DB.index_mvs('zy'), ensure_ascii=False)\n mv_top = json.dumps(DB.index_tops('mv')[0:6], ensure_ascii=False)\n dsj_top = json.dumps(DB.index_tops('dsj')[0:6], ensure_ascii=False)\n zy_top = json.dumps(DB.index_tops('zy')[0:6], ensure_ascii=False)\n dm_top = json.dumps(DB.index_tops('dm')[0:6], ensure_ascii=False)\n # 今日更新和总视频数量\n today, total = DB.today_total(None)\n # 淘宝广告\n ads = json.dumps(TBApi.get_tb_goods(), ensure_ascii=False)\n cls.r.set('news', news)\n cls.r.set('mvs', mvs)\n cls.r.set('dsjs', dsjs)\n cls.r.set('dms', dms)\n cls.r.set('zys', zys)\n cls.r.set('mv_top', mv_top)\n cls.r.set('dsj_top', dsj_top)\n cls.r.set('zy_top', zy_top)\n cls.r.set('dm_top', dm_top)\n cls.r.set('today', today)\n cls.r.set('total', total)\n cls.r.set('ads', ads)\n del news, mvs, dsjs, dms, zys, mv_top, dsj_top, zy_top, dm_top, ads\n print(f'{time.time() - s}')",
"def handle(req):\n start = time()\n event = json.loads(req)\n\n user_id = event[\"user_id\"]\n post_id = event[\"post_id\"]\n timestamp = event[\"timestamp\"]\n\n myclient = pymongo.MongoClient(user_timeline_mongodb)\n mydb = myclient['user-timeline']\n mycol = mydb[\"user-timeline\"]\n\n myquery = { \"user_id\": user_id }\n mydoc = mycol.find(myquery)\n\n if mydoc.count() == 0:\n posts_j = {}\n posts_j[str(post_id)] = timestamp\n mydict = {\"user_id\": user_id, \"posts\": json.dumps(posts_j)}\n mycol.insert_one(mydict)\n else:\n posts_j = json.loads(mydoc.next()[\"posts\"])\n posts_j[str(post_id)] = timestamp\n posts_update = {\"$set\": {\"posts\": json.dumps(posts_j)}}\n mycol.update_one(myquery, posts_update)\n\n r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True)\n r.hset(user_id, post_id, timestamp)\n\n #r.hset(\"end_time\", event[\"req_id\"], str(time()))\n\n return str(time() - start)",
"def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass",
"def process_message(self, message):\n self.post_to_redis(message)\n return",
"def process(self):\n assert self.valid, 'cannot apply invalid op'\n from hive.indexer.cached_post import CachedPost\n\n action = self.action\n params = dict(\n date=self.date,\n community=self.community,\n community_id=self.community_id,\n actor=self.actor,\n actor_id=self.actor_id,\n account=self.account,\n account_id=self.account_id,\n post_id=self.post_id,\n role_id=self.role_id,\n notes=self.notes,\n title=self.title,\n )\n\n # Community-level commands\n if action == 'updateProps':\n bind = ', '.join([k+\" = :\"+k for k in list(self.props.keys())])\n DB.query(\"UPDATE hive_communities SET %s WHERE id = :id\" % bind,\n id=self.community_id, **self.props)\n self._notify('set_props', payload=json.dumps(read_key_dict(self.op, 'props')))\n\n elif action == 'subscribe':\n DB.query(\"\"\"INSERT INTO hive_subscriptions\n (account_id, community_id, created_at)\n VALUES (:actor_id, :community_id, :date)\"\"\", **params)\n DB.query(\"\"\"UPDATE hive_communities\n SET subscribers = subscribers + 1\n WHERE id = :community_id\"\"\", **params)\n self._notify('subscribe')\n elif action == 'unsubscribe':\n DB.query(\"\"\"DELETE FROM hive_subscriptions\n WHERE account_id = :actor_id\n AND community_id = :community_id\"\"\", **params)\n DB.query(\"\"\"UPDATE hive_communities\n SET subscribers = subscribers - 1\n WHERE id = :community_id\"\"\", **params)\n\n # Account-level actions\n elif action == 'setRole':\n DB.query(\"\"\"INSERT INTO hive_roles\n (account_id, community_id, role_id, created_at)\n VALUES (:account_id, :community_id, :role_id, :date)\n ON CONFLICT (account_id, community_id)\n DO UPDATE SET role_id = :role_id\"\"\", **params)\n self._notify('set_role', payload=Role(self.role_id).name)\n elif action == 'setUserTitle':\n DB.query(\"\"\"INSERT INTO hive_roles\n (account_id, community_id, title, created_at)\n VALUES (:account_id, :community_id, :title, :date)\n ON CONFLICT (account_id, community_id)\n DO UPDATE SET title = :title\"\"\", **params)\n self._notify('set_label', payload=self.title)\n\n # Post-level actions\n elif action == 'mutePost':\n DB.query(\"\"\"UPDATE hive_posts SET is_muted = '1'\n WHERE id = :post_id\"\"\", **params)\n self._notify('mute_post', payload=self.notes)\n if not DbState.is_initial_sync():\n CachedPost.update(self.account, self.permlink, self.post_id)\n\n elif action == 'unmutePost':\n DB.query(\"\"\"UPDATE hive_posts SET is_muted = '0'\n WHERE id = :post_id\"\"\", **params)\n self._notify('unmute_post', payload=self.notes)\n if not DbState.is_initial_sync():\n CachedPost.update(self.account, self.permlink, self.post_id)\n\n elif action == 'pinPost':\n DB.query(\"\"\"UPDATE hive_posts SET is_pinned = '1'\n WHERE id = :post_id\"\"\", **params)\n self._notify('pin_post', payload=self.notes)\n elif action == 'unpinPost':\n DB.query(\"\"\"UPDATE hive_posts SET is_pinned = '0'\n WHERE id = :post_id\"\"\", **params)\n self._notify('unpin_post', payload=self.notes)\n elif action == 'flagPost':\n self._notify('flag_post', payload=self.notes)\n\n return True",
"def sync_sev(self,host):\n url = \"http://{}:{}/syncalgo\".format(host, self.port)\n method = \"POST\"\n data = {\n \"Command\":\"ReportSev\",\n \"Dict_algo_phont\":self.Dict_algo_phont\n }\n r = dict()\n try:\n r = yield retrieve_rds(url, method, **data)\n except Exception as e:\n pass\n # traceback.print_exc()\n raise gen.Return(r)",
"def run(self):\n\t\tlogger.info(\"Uploading data... @ %f, PID: %d\" % (time.time(), os.getpid()))\n\n\t\tself.dump_db()",
"def _record_hits(self, hits):\n self.connection = mdb.connect(\n host=self.db_host, user=self.db_user, passwd=self.db_pass,\n db=self.db_name, charset='utf8')\n\n sql = \"\"\"\n INSERT INTO statistics_access (ip, filename, is_download, session_time,\n is_redirect, event_category, event_action, lineno, status,\n is_error, event_name, date, session_start_date, path,\n extension, referrer, userid, length, user_agent,\n generation_time_milli, query_string, is_robot, full_path,\n country_code, country, city, latitude, longitude,\n region, region_name, organization)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s)\n \"\"\"\n try:\n c = self.connection.cursor()\n for hit in hits:\n if hit.session_time > 0:\n hit.session_start_date = hit.date - timedelta(\n seconds=hit.session_time)\n user_info = hit.user_agent.split(\":\")\n hit.country_code = ''\n hit.country = ''\n hit.city = ''\n hit.latitude = ''\n hit.longitude = ''\n hit.region = ''\n hit.region_name = ''\n hit.organization = ''\n if len(user_info) == 1:\n hit.user_agent = user_info[0]\n elif len(user_info) == 6:\n hit.country_code = user_info[0]\n hit.country = user_info[1]\n hit.city = user_info[2]\n hit.latitude = user_info[3]\n hit.longitude = user_info[4]\n hit.user_agent = user_info[5]\n elif len(user_info) == 7:\n hit.country_code = user_info[0]\n hit.country = user_info[1]\n hit.city = user_info[2]\n hit.latitude = user_info[3]\n hit.longitude = user_info[4]\n hit.organization = user_info[5]\n hit.user_agent = user_info[6]\n elif len(user_info) == 9:\n hit.country_code = user_info[0]\n hit.country = user_info[1]\n hit.city = user_info[2]\n hit.latitude = user_info[3]\n hit.longitude = user_info[4]\n hit.region = user_info[5]\n hit.region_name = user_info[6]\n hit.organization = user_info[7]\n hit.user_agent = user_info[8]\n try:\n c.execute(sql, (hit.ip, hit.filename, hit.is_download,\n hit.session_time, hit.is_redirect,\n hit.event_category, hit.event_action,\n hit.lineno, hit.status, hit.is_error,\n hit.event_name, hit.date,\n hit.session_start_date, hit.path,\n hit.extension, hit.referrer,\n hit.userid, hit.length, hit.user_agent,\n hit.generation_time_milli,\n hit.query_string, hit.is_robot,\n hit.full_path, hit.country_code,\n hit.country, hit.city, hit.latitude,\n hit.longitude, hit.region,\n hit.region_name, hit.organization))\n except Exception, e:\n print e\n except Exception, e:\n print e\n self.connection.commit()\n self.connection.close()\n stats.count_lines_recorded.advance(len(hits))",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def thingspeak_job():\n try:\n # init thingspeak data dict\n data_d = dict()\n # populate it with valid redis values\n try:\n r_value = int(rdb.get('cvm16:good'))\n if r_value not in [0, 1]:\n raise ValueError\n data_d['field1'] = r_value\n except (TypeError, ValueError):\n logging.warning(f'unable to process redis key \"cvm16:good\" value must be 0 or 1')\n try:\n data_d['field2'] = round(float(rdb.get('cvm16:wobbe')), 2)\n except (TypeError, ValueError):\n logging.warning(f'unable to process redis key \"cvm16:wobbe\" value must be a valid float')\n # add API key\n data_d['api_key'] = API_KEY\n # do thingspeak request\n resp = urlopen(f'https://api.thingspeak.com/update?{urlencode(data_d)}', timeout=5.0)\n # print request status\n try:\n # HTTP request return current entry ID or 0 on error\n entry_id = int(resp.read())\n if entry_id < 1:\n raise ValueError\n logging.info(f'successful data update to entry ID: {entry_id}')\n except ValueError:\n logging.warning(f'unable to update data')\n except redis.RedisError as e:\n logging.error(f'redis error occur: {e!r}')\n except urllib.error.URLError as e:\n logging.error(f'network error occur: {e!r}')",
"def post(self):",
"def run(self):\n \n db = dbconnection()\n \n messageshash = {}\n postshash = {}\n postcommentshash = {}\n usershash = {}\n groupshash = {}\n eventshash = {}\n # TODO persist hashes in a json file\n while True:\n \n for i in range(3):\n cur = db.cursor()\n try: cur.execute('SELECT bus_id FROM intranet.messages limit 1')\n except: \n db = dbconnection()\n continue\n break\n\n\n \n notifications = {}\n \n cur.execute('SELECT bus_id, sha1(GROUP_CONCAT(msg_id,status,opened,flag)) FROM intranet.messages GROUP BY bus_id')\n for busid, h in cur:\n if not busid in messageshash: messageshash[busid] = ''\n if not busid in notifications: notifications[busid] = []\n #print '* comparing hashes `%s` == `%s`' %(messageshash[busid], h)\n \n if not h == messageshash[busid]:\n print '* messages changed for bus_id `%s`' %(busid)\n notifications[busid].append('messages')\n \n messageshash[busid] = h\n db.commit()\n \n \n cur.execute('SELECT bus_id, sha1(GROUP_CONCAT(post_id,active)) FROM intranet.posts GROUP BY bus_id')\n for busid, h in cur:\n if not busid in postshash: postshash[busid] = ''\n if not busid in notifications: notifications[busid] = []\n #print '* comparing hashes `%s` == `%s`' %(messageshash[busid], h)\n \n if not h == postshash[busid]: notifications[busid].append('posts')\n \n postshash[busid] = h\n db.commit()\n \n cur.execute('SELECT bus_id, sha1(GROUP_CONCAT(post_id,user_id,comment_body)) FROM intranet.post_comments GROUP BY bus_id')\n for busid, h in cur:\n if not busid in postcommentshash: postcommentshash[busid] = ''\n if not busid in notifications: notifications[busid] = []\n #print '* comparing hashes `%s` == `%s`' %(messageshash[busid], h)\n \n if not h == postcommentshash[busid]: notifications[busid].append('post_comments')\n \n postcommentshash[busid] = h\n db.commit()\n \n cur.execute('SELECT bus_id, sha1(GROUP_CONCAT(user_id,active,online)) FROM intranet.users GROUP BY bus_id')\n for busid, h in cur:\n if not busid in usershash: usershash[busid] = ''\n if not busid in notifications: notifications[busid] = []\n #print '* comparing hashes `%s` == `%s`' %(messageshash[busid], h)\n \n if not h == usershash[busid]: notifications[busid].append('users')\n \n usershash[busid] = h\n db.commit()\n \n cur.execute('SELECT bus_id, sha1(GROUP_CONCAT(group_id,user_id,title,active)) FROM intranet.groups GROUP BY bus_id')\n for busid, h in cur:\n if not busid in groupshash: groupshash[busid] = ''\n if not busid in notifications: notifications[busid] = []\n #print '* comparing hashes `%s` == `%s`' %(messageshash[busid], h)\n \n if not h == groupshash[busid]: notifications[busid].append('groups')\n \n groupshash[busid] = h\n db.commit()\n \n cur.execute('SELECT bus_id, sha1(GROUP_CONCAT(event_id,startdate,enddate,starttime,endtime,allday,url,type,flag,status)) FROM intranet.events GROUP BY bus_id')\n for busid, h in cur:\n if not busid in eventshash: eventshash[busid] = ''\n if not busid in notifications: notifications[busid] = []\n #print '* comparing hashes `%s` == `%s`' %(messageshash[busid], h)\n \n if not h == eventshash[busid]: notifications[busid].append('events')\n \n eventshash[busid] = h\n db.commit()\n \n self.callback (notifications )\n\n time.sleep(self.interval)",
"def post(self):\n code, status = run_handlers.handle_data_post(self.request.headers, self.request.body)\n self.set_status(code)\n self.write(status)\n self.finish()"
]
| [
"0.5725667",
"0.56653404",
"0.5566156",
"0.5477563",
"0.5476219",
"0.53017694",
"0.5296024",
"0.5260438",
"0.5248785",
"0.52383286",
"0.5230912",
"0.52285486",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5227938",
"0.5226289",
"0.5220566",
"0.5217642",
"0.52171"
]
| 0.5848072 | 0 |
u"""setParameters(IZParameters) > void Sets the IZParameters normally during creation of menu items. | def setParameters(self, izParameters): #$NON-NLS-1$
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()",
"def updateParameters(self, parameters):",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55",
"def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)",
"def updateParameters(self, parameters):\n\t\treturn",
"def parameters(self, parameters):\n\n self._parameters = parameters",
"def parameters(self, parameters):\n\n self._parameters = parameters",
"def parameters(self, parameters):\n\n self._parameters = parameters",
"def setParams(self, paramSet):\r\n pass",
"def set_params(self, params):",
"def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return"
]
| [
"0.6724673",
"0.6470492",
"0.6398909",
"0.6398909",
"0.6398909",
"0.6398909",
"0.6398909",
"0.6398909",
"0.6398909",
"0.6398909",
"0.63371325",
"0.6329999",
"0.6298071",
"0.62955827",
"0.62955827",
"0.62955827",
"0.6238463",
"0.6228565",
"0.6209343",
"0.619296",
"0.619296",
"0.619296",
"0.619296",
"0.619296",
"0.619296",
"0.619296",
"0.619296",
"0.619296",
"0.619296",
"0.619296"
]
| 0.8441474 | 0 |
NDVI with wrong bands | def _test_ndvi_incorrect_bands(self):
scene = Landsat8Scene(self.filenames)
self.assertEquals(scene.band_numbers, 8)
        scene2 = scene.select(['red', 'blue', 'green'])
        try:
scene2.ndvi()
except SatProcessError as e:
self.assertEquals(e.message, 'nir band is not provided')
scene2 = scene.select(['nir', 'blue', 'green'])
try:
scene2.ndvi()
except SatProcessError as e:
self.assertEquals(e.message, 'red band is not provided') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ndvi(in_nir_band, in_colour_band, in_rows, in_cols, in_geotransform, out_tiff, data_type=gdal.GDT_Float32):\r\n\r\n # Read the input bands as numpy arrays.\r\n np_nir = in_nir_band.ReadAsArray(0, 0, in_cols, in_rows)\r\n np_colour = in_colour_band.ReadAsArray(0, 0, in_cols, in_rows)\r\n\r\n # Convert the np arrays to 32-bit floating point to make sure division will occur properly.\r\n np_nir_as32 = np_nir.astype(np.float32)\r\n np_colour_as32 = np_colour.astype(np.float32)\r\n\r\n # Calculate the NDVI formula.\r\n numerator = subtract(np_nir_as32, np_colour_as32)\r\n denominator = add(np_nir_as32, np_colour_as32)\r\n result = divide(numerator, denominator)\r\n\r\n # Remove any NaNs cause by division by zero.\r\n ndvi_float32 = nan_to_num(result)\r\n\r\n # Initialize a geotiff driver.\r\n geotiff = GetDriverByName('GTiff')\r\n\r\n # If the desired output is an int16, map the domain [-1,1] to [0,255], create an int16 geotiff with one band and\r\n # write the contents of the int16 NDVI calculation to it. Otherwise, create a float32 geotiff with one band and\r\n # write the contents of the float32 NDVI calculation to it.\r\n if data_type == gdal.GDT_UInt16:\r\n ndvi_int16 = multiply((ndvi_float32 + 1), (2**7 - 1))\r\n output = geotiff.Create(out_tiff, in_cols, in_rows, 1, gdal.GDT_UInt16)\r\n output.GetRasterBand(1).WriteArray(ndvi_int16)\r\n elif data_type == gdal.GDT_Float32:\r\n output = geotiff.Create(out_tiff, in_cols, in_rows, 1, gdal.GDT_Float32)\r\n output.GetRasterBand(1).WriteArray(ndvi_float32)\r\n else:\r\n raise ValueError('Invalid output data type. Valid types are gdal.UInt16 or gdal.Float32.')\r\n\r\n # Set the geographic transformation as the input.\r\n output.SetGeoTransform(in_geotransform)\r\n\r\n # return the output image in case you want to do something else with it.\r\n return output",
"def calculate_ndvi(self):\n self.ndvi = (self.bands[\"n\"].astype(float) - self.bands[\"r\"].astype(float)) \\\n / (self.bands[\"n\"].astype(float) + self.bands[\"r\"].astype(float))",
"def ndvi(self,\n img):\n return img.normalizedDifference(['NIR', 'RED']).select([0], ['NDVI']).multiply(self.scale_factor)",
"def get_ndvi(image_path):\n image = cv2.imread(image_path) \n b, g, r = cv2.split(image)\n\n bottom = (r.astype(float) + b.astype(float))\n bottom[bottom == 0] = 0.00001 # Make sure we don't divide by zero!\n ndvi_image = (r.astype(float) - b) / bottom\n ndvi_image = contrast_stretch(ndvi_image)\n ndvi_image = ndvi_image.astype(np.uint8)\n return ndvi_image",
"def get_ndvi(image_path):\n image = cv2.imread(image_path)\n b, g, r = cv2.split(image)\n\n bottom = (r.astype(float) + b.astype(float))\n bottom[bottom == 0] = 0.00001 # Make sure we don't divide by zero!\n ndvi_image = (r.astype(float) - b) / bottom\n ndvi_image = contrast_stretch(ndvi_image)\n ndvi_image = scale_down(ndvi_image)\n ndvi_image = ndvi_image.astype(np.uint8)\n return ndvi_image",
"def calculate_ndvi ( red_filename, nir_filename ):\n\n g_red = gdal.Open ( red_filename )\n red = g_red.ReadAsArray()\n g_nir = gdal.Open ( nir_filename )\n nir = g_nir.ReadAsArray()\n if ( g_red.RasterXSize != g_nir.RasterXSize ) or \\\n ( g_red.RasterYSize != g_nir.RasterYSize ):\n print \"ERROR: Input datasets do't match!\"\n print \"\\t Red data shape is %dx%d\" % ( red.shape )\n print \"\\t NIR data shape is %dx%d\" % ( nir.shape )\n\n sys.exit ( -1 )\n passer = np.logical_and ( red > 1, nir > 1 )\n ndvi = np.where ( passer, (1.*nir - 1.*red ) / ( 1.*nir + 1.*red ), -999 )\n return ndvi",
"def addSTDdevIndices(img):\n\t\t\timg = img.addBands(img.normalizedDifference(['green','swir1']).rename(['ND_green_swir1'])); # NDSI, MNDWI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','red']).rename(['ND_nir_red'])); # NDVI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','swir2']).rename(['ND_nir_swir2'])); # NBR, MNDVI\n\t\t\t\n\t\t\treturn img;",
"def get_landsat8_ndvi(tile: Union[rasterio.DatasetReader, np.ndarray],\n window: Union[rasterio.windows.Window, Tuple] = None) -> np.ndarray:\n if isinstance(tile, rasterio.io.DatasetReader):\n if window and isinstance(window, Tuple):\n window = rasterio.windows.Window(window[0], window[1], window[2], window[3])\n\n band_red = tile.read(4, window=window, boundless=True, fill_value=0).squeeze()\n band_nir = tile.read(5, window=window, boundless=True, fill_value=0).squeeze()\n\n elif isinstance(tile, np.ndarray):\n if window and isinstance(window, rasterio.windows.Window):\n window = [window.col_off, window.row_off, window.width, window.height]\n # get the red and NIR bands\n tile_bands = []\n for numpy_band_index in (3, 4):\n if window:\n b = tile[\n window[1]: window[1] + window[3],\n window[0]: window[0] + window[2],\n numpy_band_index\n ]\n else:\n b = tile[:, :, numpy_band_index]\n\n tile_bands.append(b)\n band_red = tile_bands[0]\n band_nir = tile_bands[1]\n\n sum_red_nir = band_nir + band_red\n\n # sum of the NIR and red bands being zero is most likely because this section is empty\n # this workaround means that the final NDVI at such pixels are 0.\n sum_red_nir[sum_red_nir == 0.0] = 1\n\n ndvi = (band_nir - band_red) / sum_red_nir\n return ndvi",
"def ndwi(self,\n img):\n return img.normalizedDifference(['NIR', 'SWIR2']).select([0], ['NDWI']).multiply(self.scale_factor)",
"def NDVIscaler(ds, dates):\n\n\t# ========== Set up the layer params ==========\n\tky = 'ndvi'\n\tlong_name = \"normalized_difference_vegetation_index\"\n\tfill_val = -1.0\n\n\t# ========== Modify the ndvi values ==========\n\tndvi = ds.ndvi.values.astype(float)\n\tndvi /= 10000\n\tndvi[ndvi<-0.3] = np.NAN\n\n\n\t# ========== Start making the netcdf ==========\n\tDA, enc = DAbuilder(ndvi, ds, dates, ky, long_name, fill_val)\n\n\treturn DA, enc",
"def ndsi(self,\n img):\n return img.normalizedDifference(['GREEN', 'SWIR1']).select([0], ['NDSI']).multiply(self.scale_factor)",
"def n_band(self):\n pass",
"def savi(self,\n img):\n return (img.select(['NIR']).subtract(img.select(['RED'])).multiply(1 + self.const))\\\n .divide(img.select(['NIR']).add(img.select(['RED'])).add(self.const))\\\n .select([0], ['SAVI']).multiply(self.scale_factor).toInt16()",
"def test_nib_resample_image_4d(fake_4dimage_nib):\n img_r = resampling.resample_nib(fake_4dimage_nib, new_size=[2, 2, 1, 1], new_size_type='factor', interpolation='nn')\n assert img_r.get_data().shape == (18, 18, 9, 3)\n assert img_r.get_data()[8, 8, 4, 0] == 1.0 # make sure there is no displacement in world coordinate system\n assert img_r.get_data()[8, 8, 4, 1] == 0.0\n assert img_r.header.get_zooms() == (0.5, 0.5, 1.0, 1.0)",
"def test_mnir_image():\n # Initiate the sunglint correction class\n g = deglint.GlintCorr(odc_meta_file, sub_product)\n\n # ---------------------- #\n # NIR subtraction #\n # ---------------------- #\n mnir_xarrlist = g.glint_subtraction(\n vis_bands=[\"3\"],\n corr_band=\"6\",\n water_val=5,\n )\n\n sungc_band = mnir_xarrlist[0].lmbadj_green.values # 3D array\n\n # path to expected sunglint corrected output from NIR subtraction\n exp_sungc_band = (\n data_path\n / \"MINUS_NIR\"\n / \"ga_ls8c_lmbadj_3-2-0_091086_2014-11-06_final_band03-deglint-600m.tif\"\n )\n\n # ensure that all valid sungint corrected pixels match expected\n with rasterio.open(exp_sungc_band, \"r\") as exp_sungc_ds:\n urd_band = urd(sungc_band[0, :, :], exp_sungc_ds.read(1), exp_sungc_ds.nodata)\n assert urd_band.max() < 0.001",
"def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi",
"def writeNDI(self, outfile):\n if self.ndi is None or self.mask is None:\n print \"Error, gap filled ndi is not computed yet. Use fillNDIGap to generate a NDI array first\"\n return 1\n outformat = \"ENVI\"\n driver = gdal.GetDriverByName(outformat)\n outds = driver.Create(outfile, self.ndi.shape[1], self.ndi.shape[0], 2, gdal.GDT_Float32)\n outds.GetRasterBand(1).WriteArray(self.ndi)\n outds.FlushCache()\n outds.GetRasterBand(2).WriteArray(self.mask)\n outds.FlushCache()\n outds = None\n # write header file\n hdrfile = \".\".join(outfile.rsplit('.')[0:-1]) + \".hdr\"\n if os.path.isfile(hdrfile):\n os.remove(hdrfile)\n print \"Default ENVI header file generated by gdal is removed: \\n{0:s}\".format(hdrfile)\n hdrfile = outfile + \".hdr\"\n hdrstr = \\\n \"ENVI\\n\" + \\\n \"description = {\\n\" + \\\n \"NDI image with one-band-no-return shots filled from files, \\n\" + \\\n self.nirfile + \", \\n\" + \\\n self.swirfile + \", \\n\" + \\\n \"Create, [\" + time.strftime(\"%c\") + \"]}\\n\" + \\\n \"samples = \" + \"{0:d}\".format(self.ndi.shape[1]) + \"\\n\" \\\n \"lines = \" + \"{0:d}\".format(self.ndi.shape[0]) + \"\\n\" \\\n \"bands = 2\\n\" + \\\n \"header offset = 0\\n\" + \\\n \"file type = ENVI standard\\n\" + \\\n \"data type = 4\\n\" + \\\n \"interleave = bsq\\n\" + \\\n \"sensor type = Unknown\\n\" + \\\n \"byte order = 0\\n\" + \\\n \"wavelength units = unknown\\n\" + \\\n \"band names = {NDI, mask}\"\n with open(hdrfile, 'w') as hdrf:\n hdrf.write(hdrstr)\n\n print \"NDI writing done!\"\n return 0",
"def bands(self) -> int:\n ...",
"def make_agree_vis_nir(self,rad):\n print( 'Ratio-ing the NIR spectra to match VIS *** Only for 4STAR ***')\n ivis = range(1055,1069)\n inir = range(1004,1037)\n mean_vis = np.nanmean(mea['rad'][600,ivis])\n mean_nir = np.nanmean(mea['rad'][600,inir])\n s_ratio_vis_nir = mean_vis/mean_nir",
"def NII_ratio_ne(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n\n line1,line2 = '[NII]122','[NII]205'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ne_mw = getattr(GR,'ne_mw')[ratio != 0]\n ratio = ratio[ratio != 0]\n label = '%s / %s' % (line1,line2)\n\n fig,ax = plt.subplots(figsize=(10,8))\n ax.set_xlabel('log ' + getlabel('ne'))\n ax.set_ylabel(label)\n ax.plot(np.log10(ne_mw), ratio, 'o', color='grey', alpha=0.7) \n xs = np.arange(ax.get_xlim()[0],ax.get_xlim()[1],0.1)\n ax.plot(xs,aux.NII_from_logne(xs),'-b')\n\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/NII_ratio_ne_%s%s' % (p.sim_name,p.sim_run),dpi=300)",
"def make_sn(sn_thres=0, format=\"png\", snsig=False):\r\n ###############################################\r\n # Read values of S/N\r\n sn = np.loadtxt(outtable, usecols=(14,))\r\n if snsig:\r\n sigma = np.loadtxt(outtable, usecols=(3,))\r\n sn /= sigma / 100.\r\n ###############################################\r\n # Find good (and bad) regions according to S/N\r\n good = np.where(((~np.isnan(sn)) & (sn >= sn_thres)))[0]\r\n bad = np.where((sn < sn_thres))[0]\r\n ###############################################\r\n # Filter S/N\r\n sn = sn[good]\r\n ###############################################\r\n # Colorbar limits\r\n vmin, vmax = 10, 50\r\n # Set limits for the plot\r\n norm = Normalize(vmin, vmax)\r\n ###############################################\r\n # Set colormap\r\n cmap = \"cubelaw_r\"\r\n cmap = \"Spectral\"\r\n # Produces a collection of polygons with colors according to S/N values\r\n coll = PolyCollection(polygons_bins[good], array=sn, cmap=cmap,\r\n edgecolors='w', norm=norm, linewidths=1.)\r\n ###############################################\r\n # Initiate figure and axis for matplotlib\r\n fig, ax = plt.subplots(1, 1, figsize=(6.4, 6), )\r\n fig.subplots_adjust(left=0.09, right=0.985, bottom=0.092, top=0.98,\r\n hspace=0.05, wspace=0.06)\r\n ###############################################\r\n # ax.add_patch(Rectangle((-100, -100), 200, 200, facecolor=\"0.8\", zorder=0,\r\n # alpha=0.5))\r\n ###############################################\r\n # Draw the polygons\r\n draw_map(fig, ax, coll, lims=40)\r\n ###############################################\r\n # Add contours according to V-band image\r\n # draw_contours(\"residual\", fig, ax, c=\"k\")\r\n draw_contours(\"vband\", fig, ax, c=\"k\")\r\n # Draw actual slit positions\r\n # canvas.draw_slits(ax, slit_type=1, fc=\"r\", ec=\"r\", ignore=ignore_slits )\r\n # canvas.draw_slits(ax, slit_type=3, fc=\"r\", ec=\"r\", ignore=ignore_slits )\r\n canvas.draw_slits_ids(ax, slits, fc=\"r\", ec=\"r\")\r\n ###############################################\r\n # Draw white rectangle in the position of the colorbar so background\r\n # stars do not overplot the labels and ticks\r\n plt.gca().add_patch(Rectangle((18, -36), 20, 10, alpha=1, zorder=10000,\r\n color=\"w\"))\r\n ###############################################\r\n # Draw the colorbar\r\n label = r\"100 S/N [pix] / $\\sigma$\" if snsig else r\"S/N\"\r\n draw_colorbar(fig, ax, coll, ticks=np.linspace(vmin, vmax, 5),\r\n cblabel=label, cbar_pos=[0.16, 0.15, 0.17, 0.04])\r\n ##############################################\r\n # Write labels\r\n xylabels(ax)\r\n ##############################################\r\n # Draw positions of galaxies\r\n # draw_galaxies(fig, ax)\r\n ##############################################\r\n # Save the figure\r\n plt.savefig(\"figs/sn.{0}\".format(format), dpi=300)\r\n # plt.savefig(\"figs/sn.pdf\", dpi=100)\r\n # plt.savefig(\"figs/sn.eps\", dpi=2500, format=\"eps\")\r\n # plt.savefig(\"figs/sn.png\", dpi=300)\r\n return",
"def fillNDIGap(self, knn):\n nirrho = self.readImage(self.nirfile, self.rhoband).astype(np.float_)\n nirnhits = self.readImage(self.nirfile, self.nhitsband).astype(np.int)\n nirmask = self.readImage(self.nirfile, self.maskband).astype(np.bool_)\n \n swirrho = self.readImage(self.swirfile, self.rhoband).astype(np.float_)\n swirnhits = self.readImage(self.swirfile, self.nhitsband).astype(np.int)\n swirmask = self.readImage(self.swirfile, self.maskband).astype(np.bool_)\n\n hitmask = np.logical_and(np.greater(nirnhits, 0), np.greater(swirnhits, 0))\n if not hitmask.any():\n # no valid hit at all!\n print \"Error, no shot has returns! Check your data\"\n sys.exit()\n xhit, yhit = np.where(hitmask)\n nirrhohit = nirrho[hitmask]/nirnhits[hitmask]\n swirrhohit = swirrho[hitmask]/swirnhits[hitmask]\n\n ndi = np.zeros_like(nirrho)\n mask = np.zeros_like(nirrho, dtype=int) + 3\n tmpflag = np.logical_and(np.invert(nirmask), np.invert(swirmask))\n mask[tmpflag] = 0\n \n ndihit = (nirrhohit - swirrhohit) / (nirrhohit + swirrhohit)\n ndi[hitmask] = ndihit\n mask[hitmask] = 1\n \n nirgapmask = np.logical_and(np.equal(nirnhits, 0), np.greater(swirnhits, 0))\n swirgapmask = np.logical_and(np.greater(nirnhits, 0), np.equal(swirnhits, 0))\n\n if (not nirgapmask.any()) and (not swirgapmask.any()):\n # no gap\n print \"No fillable gap.\"\n return ndi, mask\n\n gapmask = np.logical_or(nirgapmask, swirgapmask)\n xgap, ygap = np.where(gapmask)\n\n X = np.hstack((xhit.reshape(len(xhit), 1), yhit.reshape(len(yhit), 1))).astype(np.float32)\n T = np.hstack((xgap.reshape(len(xgap), 1), ygap.reshape(len(ygap), 1))).astype(np.float32)\n ndigap = self.fillGap(X, ndihit, T, knn)\n ndi[gapmask] = ndigap\n mask[gapmask] = 2\n\n self.ndi = ndi\n self.mask = mask\n \n return ndi, mask",
"def visualize_svd():",
"def test_nib_resample_image_3d(fake_3dimage_nib):\n img_r = resampling.resample_nib(fake_3dimage_nib, new_size=[2, 2, 1], new_size_type='factor', interpolation='nn')\n assert img_r.get_data().shape == (18, 18, 9)\n assert img_r.get_data()[8, 8, 4] == 1.0 # make sure there is no displacement in world coordinate system\n assert img_r.header.get_zooms() == (0.5, 0.5, 1.0)\n # debug\n # nib.save(img_r, 'test_4.nii.gz')",
"def test_basic():\n\n spec = IGRINSSpectrum(file=file, order=10)\n\n assert spec is not None\n assert isinstance(spec, Spectrum1D)\n assert isinstance(spec.flux, np.ndarray)\n assert len(spec.flux) == len(spec.wavelength)\n assert spec.mask.sum() > 0\n\n new_spec = spec.remove_nans()\n\n assert new_spec.shape[0] < spec.shape[0]\n assert new_spec.shape[0] > 0\n assert new_spec.mask is not None\n\n new_spec = spec.normalize()\n\n assert new_spec.shape[0] == spec.shape[0]\n assert np.nanmedian(new_spec.flux) == 1\n\n new_spec = spec.remove_outliers(threshold=3)\n\n assert len(new_spec.flux) > 0\n assert new_spec.shape[0] <= spec.shape[0]\n assert new_spec.shape[0] > 0\n assert new_spec.mask is not None\n\n new_spec = spec.trim_edges()\n\n assert new_spec.shape[0] < spec.shape[0]\n assert new_spec.shape[0] > 0\n assert new_spec.mask is not None\n\n ax = new_spec.plot(label=\"demo\", color=\"r\")\n assert ax is not None",
"def test_get_nsamples_no_squeeze():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n nsample_array = utils.get_nsample_array(test_uv, reds=baseline_array, squeeze=False)\n\n test_samples = np.zeros(\n (test_uv.Npols, test_uv.Nbls, test_uv.Ntimes, test_uv.Nfreqs),\n dtype=np.float32,\n )\n\n pol_array = uvutils.polnum2str(test_uv.polarization_array)\n for pol_cnt, pol in enumerate(pol_array):\n for cnt, baseline in enumerate(list(set(test_uv.baseline_array))):\n ant_1, ant_2 = test_uv.baseline_to_antnums(baseline)\n test_samples[pol_cnt, cnt] = test_uv.get_nsamples(ant_1, ant_2)\n\n assert np.all(test_samples == nsample_array)",
"def test_mnir_bands():\n g = deglint.GlintCorr(odc_meta_file, sub_product)\n\n with pytest.raises(Exception) as excinfo:\n g.glint_subtraction(\n vis_bands=[\"20\"], # this band id doesn't exist\n corr_band=\"6\",\n water_val=5,\n )\n assert \"is missing from bands\" in str(excinfo)\n\n with pytest.raises(Exception) as excinfo:\n g.glint_subtraction(\n vis_bands=[\"3\"],\n corr_band=\"20\", # this band id doesn't exist\n water_val=5,\n )\n assert \"is missing from bands\" in str(excinfo)",
"def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! (to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. 
dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')",
"def _allowed_bands():\n pass",
"def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3"
]
| [
"0.71685034",
"0.71119153",
"0.6962683",
"0.6600374",
"0.6548575",
"0.648904",
"0.63722575",
"0.63132954",
"0.61180866",
"0.6112589",
"0.6094235",
"0.6086239",
"0.59720784",
"0.5760393",
"0.5739196",
"0.5679314",
"0.5637874",
"0.561567",
"0.55735964",
"0.55613995",
"0.5546381",
"0.54975396",
"0.5496064",
"0.546776",
"0.545338",
"0.5448812",
"0.544816",
"0.5416686",
"0.53651977",
"0.5354156"
]
| 0.7226453 | 0 |
Get the patients that are currently in the Intensive Care. | def get_patients_in_ic(self):
query = "SELECT * FROM patients WHERE datetime_discharge IS NULL"
return self.mysql_obj.fetch_rows(query) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_patients(self):\n return",
"def get_all_incidents():\n allIncidents = Incident.get_all()\n #allCops = get_all_cops()\n incidents = []\n for i in allIncidents:\n if(\n (i['operations_center']['id'] in allCops) and\n (inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem)\n ):\n \n i['operations_center']['id'] = changeCop(i['operations_center']['id'])\n incidents.append(i)\n \n return incidents",
"def get_available_cops():\n allIncidents = Incident.get_all()\n cops = []\n \n for i in allIncidents:\n if(inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem):\n cops.append(i['operations_center']['id'])\n \n allReports = RelatoDeSituacao.get_all()\n \n for r in allReports:\n if (\n inicioAmostragem <= r.data_hora and \n r.data_hora <=terminoAmostragem and\n 'cop' in r.relator and # todos tem que ter o COP\n 'id' in r.relator['cop'] # todos tem que ter o id \n ):\n cops.append(r.relator['cop']['id'])\n \n return set(cops)",
"def facial_incidences(self):\n try:\n return self._facial_incidences\n except AttributeError:\n self._facial_incidences = \\\n [ [ h.index(), \n [v.index() for v in h.incident()] \n ] for h in self.Hrepresentation() ]\n return self._facial_incidences",
"def get_patient_list(self):\n return self._patient_list",
"def get_resilient_incidents(self):\n r_incidents = []\n query_uri = '/incidents/query?return_level=partial'\n query = {\n 'filters': [{\n 'conditions': [\n {\n 'field_name': 'plan_status',\n 'method': 'equals',\n 'value': 'A'\n }\n ]\n }],\n 'sorts': [{\n 'field_name': 'create_date',\n 'type': 'desc'\n }]\n }\n try:\n r_incidents = self.rest_client().post(query_uri, query)\n except SimpleHTTPException as ex:\n LOG.error(\"Failed to pull incidents:%s\", ex)\n r_incidents = 'Failed'\n return r_incidents",
"def get_patient_cases(patient):\n # ----- Get database connection\n db = connect_to_db()\n try:\n c1 = db.cursor()\n try:\n c1.execute(\n \"\"\"SELECT tc.SLABEL \"\"\"\n \"\"\"FROM BOM.PATIENT pt \"\"\"\n \"\"\" INNER JOIN BOM.TCASE tc ON pt.SUID = tc.SPATIENTUID \"\"\"\n \"\"\"WHERE \"\"\"\n \"\"\" pt.SID = '%s' \"\"\" %\n patient)\n res = c1.fetchall()\n cases = []\n for re in res:\n cases.append(re[0])\n finally:\n c1.close()\n finally:\n db.close()\n return cases",
"def get_all_incidents(self):\n sql = f\"SELECT * FROM incidences\"\n curr = Db().cur\n curr.execute(sql)\n output = curr.fetchall()\n return output",
"def getCertifications(self):\n return [c for c in self.objectValues('InstrumentCertification') if c]",
"def get_invasive_ventilation_patients(con) -> pd.DataFrame:\n print('Creating cohort invasive mechanical ventilation during ICU admission...')\n print('Querying reason for admission...')\n combined_diagnoses = get_reason_for_admission(con)\n\n # use mechanical ventilation query\n print('Querying mechanical ventilation (including possible non-invasive ventilation)...')\n ventilation = query('lifesupport/mechanical_ventilation.sql', con)\n\n # merge dataframes\n ventilation = pd.merge(combined_diagnoses, ventilation, on='admissionid', how='left')\n\n print('Selecting patients with invasive ventilation...')\n ventilation = ventilation[ventilation['invasive_bool'].fillna(False)]\n\n return ventilation",
"def get_surgical_patients(con) -> pd.DataFrame:\n combined_diagnoses = get_reason_for_admission(con)\n return combined_diagnoses[combined_diagnoses['surgical'] == 1]",
"def getMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() == 1):\n r.append(p)\n return r",
"def get_patient_list(self, client):\n self._patient_list = client.get_patient_list(self.id)",
"def accepting_medicaid_patients(self):\n return self._accepting_medicaid_patients",
"def getInquiriesForDisplay(self):\n return [self.context]",
"def getPatients(self):\n if not self.patients.choices:\n db = DBStorage()\n options = []\n for patient in db.all_patients():\n options.append((patient.id, '{} {}'.format(\n patient.name, patient.last_name)))\n self.patients.choices = options\n self.patients.default = 1",
"def accepting_medicare_patients(self):\n return self._accepting_medicare_patients",
"def get_medical_patients(con) -> pd.DataFrame:\n combined_diagnoses = get_reason_for_admission(con)\n return combined_diagnoses[combined_diagnoses['surgical'] == 0]",
"def incidents(self) -> 'outputs.MTPDataConnectorDataTypesResponseIncidents':\n return pulumi.get(self, \"incidents\")",
"def get_infection_patients(con) -> pd.DataFrame:\n print('Querying reason for admission...')\n combined_diagnoses = get_reason_for_admission(con)\n print('Selecting patients with presumed infection...')\n infection = combined_diagnoses[\n (\n (\n # use reasons for admission\n # surgical admissions with sepsis\n (combined_diagnoses['surgical'] == 1)\n & (combined_diagnoses['diagnosis'].str.contains(re_sepsis_surg, na=False, flags=re.IGNORECASE))\n ) | (\n # medical admissions with sepsis\n (combined_diagnoses['surgical'] == 0)\n & (combined_diagnoses['diagnosis'].str.contains(re_sepsis_med, na=False, flags=re.IGNORECASE))\n ) | (\n # uses documentation at admission form (Early Goal Directed Therapy)\n (combined_diagnoses['sepsis_at_admission'] == 1)\n ) | (\n # uses administered (therapeutic) antibiotics for determining sepsis\n (combined_diagnoses['sepsis_antibiotics_bool'] == 1)\n ) | (\n # uses combination of administered antibiotics (that sometimes are used as prophylaxis) AND\n # drawn cultures for determining sepsis\n (combined_diagnoses['other_antibiotics_bool'] == 1)\n & (combined_diagnoses['sepsis_cultures_bool'] == 1)\n )\n ) & ~((combined_diagnoses['sepsis_at_admission'] == 0).fillna(False))\n # exclude all diagnoses where explicitly 'no sepsis' was documented, forces comparing `pd.NA`\n # to be considered False\n ]\n return infection",
"def get_critics(self):\n actors = [ddpg_agent.critic for ddpg_agent in self.maddpg_agent]\n return actors",
"def getAllCaptains(self):\n staffObject_list = self.mainObject.getStaffIO()\n captainObject_list = []\n for staffMember in staffObject_list:\n if staffMember.getRank() == 'captain':\n captainObject_list.append(staffMember)\n return captainObject_list",
"def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))",
"def get_politicians_from_cityrep(self, charge_ids, cityrep_id):\n\n lookup = get_lookup(CITYREP_CHANNEL_NAME)\n politician_charges = []\n politician_ids = []\n\n cityrep_data = lookup.get_objects([cityrep_id])[0]['city_representatives']\n\n for institution, institution_kinds in INSTITUTIONS.items():\n\n for inst_kind in institution_kinds:\n cityreps_of_kind = cityrep_data[institution][inst_kind]\n\n for politician in cityreps_of_kind:\n if politician['charge_id'] in charge_ids:\n politician_charges.append(politician['charge_id'])\n politician_ids.append(politician['politician_id'])\n\n return politician_charges, politician_ids",
"def diseases(self):\n\t\treturn Disease.DiseasesByPatient(self.id, self.host)",
"def getMyCaptains(self, empireID):\n d = {}\n for captainID, myCaptain in self.captains.iteritems():\n if myCaptain.empireID == empireID:\n d[captainID] = myCaptain.getMyInfoAsDict()\n return d",
"def get(self):\n all_patients = model_patient.query.all()\n return jsonify(all_patients)",
"def get_cardio_patients(con) -> pd.DataFrame:\n combined_diagnoses = get_reason_for_admission(con)\n cardio = combined_diagnoses[\n (combined_diagnoses['surgical'] == 0)\n & (combined_diagnoses['diagnosis'].str.contains(re_cardio, na=False, flags=re.IGNORECASE))\n ]\n\n return cardio",
"def get_adc(data):\n return [patient[2] for i, patient in enumerate(data) if i in good_patients]",
"def get_vip_clinical():\n\n db = app.data.driver.db\n\n # limit access to service account only\n auth = request.authorization\n if not auth:\n return json.dumps({\"error\": \"no authorization supplied\"})\n\n accounts = db.user\n user = accounts.find_one({'token': auth.username})\n if not user:\n return json.dumps({\"error\": \"not authorized\"})\n\n query = {}\n params = request.args.get('where', None)\n if params is not None:\n query = json.loads(request.args.get('where'))\n\n if 'get_new_patients_only' in query:\n query['_created'] = {'$gte': datetime.datetime.strptime(query['data_push_id'], '%Y-%m-%d %X')}\n del query['get_new_patients_only']\n\n clinical_ll = list(db.clinical.find(query))\n for clinical in clinical_ll:\n for field, val in clinical.items():\n if not isinstance(field, float) and not isinstance(field, int):\n try:\n clinical[field] = str(val)\n except UnicodeEncodeError:\n continue\n\n return json.dumps(clinical_ll)"
]
| [
"0.6540609",
"0.6131111",
"0.6062366",
"0.59744585",
"0.59557164",
"0.5854854",
"0.58444697",
"0.5811601",
"0.58045304",
"0.57877827",
"0.56264734",
"0.555936",
"0.55263805",
"0.551021",
"0.550255",
"0.55022293",
"0.5453711",
"0.5433522",
"0.5396434",
"0.53827906",
"0.5377944",
"0.535407",
"0.53115076",
"0.5272269",
"0.5258252",
"0.5218089",
"0.5175484",
"0.51470476",
"0.5113118",
"0.5108447"
]
| 0.6811366 | 0 |
Get all signal values for patient. | def get_signal_values_for_patient(self, patient_id):
query = \
"""
SELECT s.name, psv.value, psv.time
FROM patient_signal_values psv
INNER JOIN signals s
ON psv.signal_id = s.id
WHERE patient_id = %(patient_id)s
"""
params = {
"patient_id": patient_id
}
return self.mysql_obj.fetch_rows(query, params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_raw_signals(self):\n signals, fields = wfdb.rdsamp(self.patient_number, pb_dir='mitdb', warn_empty=True)\n logging.info(\"Patient {} additional info: {}\".format(self.patient_number, fields))\n return signals, fields",
"def return_values(self):\r\n\r\n values = list(self.piDD.values())\r\n return values",
"def read_all_signals(self):\n return [pio.sample(signal_idx)\n for signal_idx in self._signals_idx]",
"def build_signal_dataset(self):\n return np.abs(self.bandpassed).mean(axis=-2)",
"def get_signals(self):\n return QFDataFrame(data=self._signals, index=self._signals_dates)",
"def values(self):\n return self.data['values'].values",
"def evaluate(self, signal_values: Array) -> Array:\n pass",
"def values(self):\n\t\treturn self.myVals",
"def get_array(self):\r\n samples = getattr(self.instrument, self.devchan+'_samples')()\r\n sample_rate = getattr(self.instrument, self.devchan+'_sample_rate')()\r\n timeout = getattr(self.instrument, self.devchan+'_timeout')()\r\n return self.instrument.read_analog(self.devchan,\r\n samples,\r\n sample_rate,\r\n timeout,\r\n self.chan_config,\r\n self.minv,\r\n self.maxv,\r\n self.triggered,\r\n False\r\n )",
"def signal(self) -> list:\n raise NotImplementedError(\"You must implement signal\")",
"def valuerefs(self):\r\n return self.data.values()",
"def values(self):\n return self[\"values\"]",
"def values(self):\n return self[\"values\"]",
"def get_values(self):\n \n return []",
"def values(self):\n # Account for events generated with different versions of code.\n values = [getattr(self, x, \"\") for x in self.base_field_names()]\n values += [self.data.get(x, \"\") for x in self.data]\n return values",
"def get_all(self):\n try:\n return self.current_data\n except:\n print('No data received from sensor')",
"def GetSignals(cls):\n return []",
"def signal_values(filename):\n\n global standard_deviation\n f = h5py.File(filename, 'r')\n grp = np.array(f.get('/model'))\n\n for i in grp:\n ideal_signal_values[str(i[0])[2:-1]] = {cs.VALUE: i[1]}\n standard_deviation = np.array(grp[0][2])\n return ideal_signal_values",
"def values(self):\n return [i.value for i in self.value]",
"def getAllSpectrumMeasurements(self): \n return self.spectrum",
"def values(self):\n return [p.value for p in self]",
"def getValues(self):\n return self.__get('values')",
"def get_values(self):\n raise NotImplementedError(\"Abstract method not implemented.\")",
"def values(self):\n self._remove_expired()\n\n return self._d.values()",
"def values(self) -> ndarray:\n return self._vals",
"def Values(self):\r\n\t\treturn self._get_attribute('values')",
"def values(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"values\")",
"def get_samples_per_signal(self):\n return np.array([self.samples_in_file(chn) for chn in range(self.signals_in_file)])",
"def values (self):\n return self._values",
"def values (self):\n return self._values"
]
| [
"0.6766118",
"0.6505692",
"0.6368694",
"0.63396835",
"0.60953635",
"0.60298645",
"0.58378303",
"0.58372927",
"0.58189297",
"0.57914597",
"0.57846195",
"0.5775973",
"0.5775973",
"0.577389",
"0.57266253",
"0.57230437",
"0.5665654",
"0.5658327",
"0.5649866",
"0.5648695",
"0.56453943",
"0.56276673",
"0.56138337",
"0.561365",
"0.5579486",
"0.5568019",
"0.5554228",
"0.55538476",
"0.5549075",
"0.5549075"
]
| 0.77991635 | 0 |
computes diag(v1) dot M dot diag(v2). returns np.ndarray with same dimensions as M | def v1Mv2(v1, M, v2):
return v1[:, None] * M * v2[None, :] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vec_dot(v1,v2):\r\n \r\n return np.dot(v1,v2)",
"def diag(v, k=0):\n\n if not use_origin_backend(v):\n if not isinstance(v, dparray):\n pass\n else:\n return dpnp_diag(v, k)\n\n return call_origin(numpy.diag, v, k)",
"def vdot(a, b):\n return np.vdot(a.ravel(), b.ravel())",
"def diag(v, k=0):\n v = wrappers.mpmath.numeric.asmparray(v)\n s = v.shape\n if len(s) == 1:\n n = s[0]+abs(k)\n res = wrappers.mpmath.numeric.zeros((n, n), v.ctx)\n if k >= 0:\n i = k\n else:\n i = (-k) * n\n res[:n-k].flat[i::n+1] = v\n return res\n elif len(s) == 2:\n return np.diagonal(v, k)\n else:\n raise ValueError(\"Input must be 1- or 2-d.\")",
"def _listdot(d1, d2):\n return [np.dot(x[0].T, x[1]) for x in zip(d1, d2)]",
"def r_diag_dot_sparse(mat, diag):\n return mat @ sp.diags(diag)",
"def det_matrix_2x2(m: list):\n return m[0][0]*m[1][1] - m[0][1]*m[1][0]",
"def diag(v, k=0):\n if isinstance(v, cupy.ndarray):\n if v.ndim == 1:\n size = v.size + abs(k)\n ret = cupy.zeros((size, size), dtype=v.dtype)\n ret.diagonal(k)[:] = v\n return ret\n else:\n return v.diagonal(k)\n else:\n return cupy.array(numpy.diag(v, k))",
"def dot(a, b):\n return np.vdot(a.arr,b.arr)",
"def matrix_dot(self, other):\n assert self.qodulus == other.qodulus\n\n # The following essentially is a massive case statement on whether self\n # and other are scalars, vectors or matrices. Unwieldly, but efficient\n # and clear.\n if self.isscalar() and other.isscalar():\n return self * other\n else:\n res_dtype = np.result_type(self.dtype, other.dtype)\n res_charge = self._qod_func(self.charge + other.charge)\n res_invar = self.invar and other.invar\n\n # Vector times vector\n if len(self.shape) == 1 and len(other.shape) == 1:\n assert self.compatible_indices(other, 0, 0)\n if self.dirs[0] + other.dirs[0] != 0:\n warnings.warn(\n \"Automatically flipping dir 0 of other in dot.\"\n )\n other = other.flip_dir(0)\n res = 0\n for qnum in self.qhape[0]:\n try:\n a = self[(qnum,)]\n b = other[(qnum,)]\n except KeyError:\n # This block doesn't exist in one or the other matrix,\n # so it contributes zero.\n continue\n prod = np.dot(a, b)\n if prod:\n res += prod\n # Turn the single scalar number into a scalar tensor.\n res = type(self)(\n [],\n qhape=[],\n qodulus=self.qodulus,\n sects={},\n defval=res,\n dirs=[],\n dtype=res_dtype,\n charge=res_charge,\n invar=res_invar,\n )\n else:\n res_sects = {}\n\n # Vector times matrix\n if len(self.shape) == 1:\n assert other.invar\n assert other.defval == 0\n assert self.compatible_indices(other, 0, 0)\n if self.dirs[0] + other.dirs[0] != 0:\n warnings.warn(\n \"Automatically flipping dir 0 of self in dot.\"\n )\n self = self.flip_dir(0)\n res_shape = [other.shape[1]]\n res_qhape = [other.qhape[1]]\n res_dirs = [other.dirs[1]]\n flux = -other.dirs[0] * other.dirs[1]\n for sum_qnum in self.qhape[0]:\n b_qnum = self._qod_func(\n sum_qnum * flux + other.dirs[1] * other.charge\n )\n try:\n a = self[(sum_qnum,)]\n b = other[(sum_qnum, b_qnum)]\n res_sects[(b_qnum,)] = np.dot(a, b)\n except KeyError:\n # One of the blocks was zero so the resulting block\n # will be zero.\n continue\n\n # Matrix times vector\n elif len(other.shape) == 1:\n assert self.invar\n assert self.defval == 0\n assert self.compatible_indices(other, 1, 0)\n if self.dirs[1] + other.dirs[0] != 0:\n warnings.warn(\n \"Automatically flipping dir 0 of other in dot.\"\n )\n other = other.flip_dir(0)\n res_shape = [self.shape[0]]\n res_qhape = [self.qhape[0]]\n res_dirs = [self.dirs[0]]\n flux = -self.dirs[0] * self.dirs[1]\n for sum_qnum in self.qhape[1]:\n a_qnum = self._qod_func(\n sum_qnum * flux + self.dirs[0] * self.charge\n )\n try:\n a = self[(a_qnum, sum_qnum)]\n b = other[(sum_qnum,)]\n res_sects[(a_qnum,)] = np.dot(a, b)\n except KeyError:\n # One of the blocks was zero so the resulting block\n # will be zero.\n continue\n\n # Matrix times matrix\n else:\n assert self.invar and other.invar\n assert self.defval == other.defval == 0\n assert self.compatible_indices(other, 1, 0)\n if self.dirs[1] + other.dirs[0] != 0:\n warnings.warn(\n \"Automatically flipping dir 0 of other in dot.\"\n )\n other = other.flip_dir(0)\n res_shape = [self.shape[0], other.shape[1]]\n res_qhape = [self.qhape[0], other.qhape[1]]\n res_dirs = [self.dirs[0], other.dirs[1]]\n a_flux = -self.dirs[0] * self.dirs[1]\n b_flux = -other.dirs[0] * other.dirs[1]\n for sum_qnum in self.qhape[1]:\n a_qnum = self._qod_func(\n sum_qnum * a_flux + self.dirs[0] * self.charge\n )\n b_qnum = self._qod_func(\n sum_qnum * b_flux + other.dirs[1] * other.charge\n )\n try:\n a = self[a_qnum, sum_qnum]\n b = other[sum_qnum, b_qnum]\n res_sects[a_qnum, b_qnum] = np.dot(a, b)\n except KeyError:\n # One of the blocks was zero so the 
resulting block\n # will be zero.\n continue\n # Turn the dictionary of sectors into a tensor.\n res = type(self)(\n res_shape,\n qhape=res_qhape,\n qodulus=self.qodulus,\n sects=res_sects,\n dtype=res_dtype,\n dirs=res_dirs,\n charge=res_charge,\n invar=res_invar,\n )\n return res",
"def r_diag_dot_dense(mat, diag):\n if diag.size <= 128:\n return mul_dense(mat, diag.reshape(1, -1))\n else:\n out = np.empty_like(mat, dtype=common_type(diag, mat))\n _r_diag_dot_dense_par(mat, diag.ravel(), out)\n\n return out",
"def _dot(a, b):\n return np.einsum('ijk,ikl->ijl', a, b)",
"def vector_dot(v, w):\n return np.dot(v, w)",
"def _prod_vectorized(M1, M2):\n sh1 = M1.shape\n sh2 = M2.shape\n assert len(sh1) >= 2\n assert len(sh2) >= 2\n assert sh1[-1] == sh2[-2]\n\n ndim1 = len(sh1)\n t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]\n return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *\n M2[..., np.newaxis, :], -3)",
"def matrix(self, v1, v2, lengths):\n M = [[self.covariance(i, j, lengths) for j in v2] for i in v1]\n return array(M)",
"def get_mo_ovlp(mo1, mo2, ovlp):\n ovlp = np.asarray(ovlp)\n mo1 = np.asarray(mo1)\n mo2 = np.asarray(mo2)\n if mo1.ndim == 2:\n res = reduce(np.dot, (mo1.conj().T, ovlp, mo2))\n else:\n assert mo1.shape[0] == mo2.shape[0]\n spin, nao, nmo1 = mo1.shape\n nmo2 = mo2.shape[-1]\n res = np.zeros((spin, nmo1, nmo2), dtype=np.result_type(mo1, mo2))\n for s in range(spin):\n res[s] = reduce(np.dot, (mo1[s].conj().T, ovlp, mo2[s]))\n return res",
"def matDiag(vec):\n ret=matZeros((len(vec),len(vec)))\n for i in range(len(vec)):\n matSet(ret,i,i,vec[i])\n return ret",
"def l_diag_dot_sparse(diag, mat):\n return sp.diags(diag) @ mat",
"def dotProduct(v1, v2):\n n1 = normalize(v1)\n n2 = normalize(v2)\n return n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2]",
"def l_diag_dot_dense(diag, mat):\n\n if diag.size <= 128:\n return mul_dense(diag.reshape(-1, 1), mat)\n else:\n out = np.empty_like(mat, dtype=common_type(diag, mat))\n _l_diag_dot_dense_par(diag.ravel(), mat, out)\n\n return out",
"def vector_dot(v1,v2):\n return (v1.x * v2.x) + (v1.y * v2.y) + (v1.z * v2.z)",
"def np_matmul(mat1, mat2):\n return mat1.dot(mat2)",
"def _gu_matvec(x1, x2):\n return (x1 @ x2[..., np.newaxis])[..., 0]",
"def diagflat(v, k=0):\n\n if not use_origin_backend(v):\n if not isinstance(v, dparray):\n pass\n else:\n return dpnp_diag(v.ravel(), k)\n\n return call_origin(numpy.diagflat, v, k)",
"def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z",
"def reflection_matrix(v):\n n = len(v)\n v = np.array(v)[np.newaxis]\n return np.eye(n) - 2 * np.dot(v.T, v)",
"def dot(array1, array2):\n return Nd4jArray(array1.array.mmul(array2.array))",
"def diag(M,idx=0):\n n, m = shape_mat(M)\n if idx >= 0:\n return [ M[i][i+idx] for i in xrange( min( n, m-idx ) ) ]\n else:\n return [ M[i-idx][i] for i in xrange( min( n+idx, m ) ) ]",
"def DM(self, masses=None):\n N = len(self.diameters)\n rs = self.rs\n d = self.ndim\n M = np.zeros((d * N, d * N))\n\n for i in range(N):\n sigi = self.diameters[i]\n for j in range(i):\n rijvec = rs[i, :] - rs[j, :]\n rijvec = rijvec - np.around(rijvec)\n rijsq = np.sum(rijvec**2)\n dij = (sigi + self.diameters[j]) / 2\n dijsq = dij**2\n if rijsq < dijsq:\n rij = np.sqrt(rijsq)\n rijouter = np.outer(rijvec, rijvec)\n # U(r) = ½(1 - r/d)²\n # d²U/dxdy = (dr/dx)(dr/dy)/d² - (1 - r/d)(d²r/dxdy)/d\n # dr/dx = x/r\n # d²r/dxdy = -(x y) / r³\n # d²U/dxdy = -(x y)/(r² d²) + (1 - r/d)((x y)/r²)/(d r)\n # d²U/dx² = (dr/dx)²/d² - (1 - r/d)(d²r/dx²)/d\n # d²r/dx² = -x² / r³ + 1/r\n # d²U/dxᵢdxⱼ = -(xᵢ xⱼ)/(r² d²) + (1 - r/d)((xᵢ xⱼ)/r² -\n # δᵢⱼ)/(d r)\n\n Mij1 = -rijouter / rijsq / dijsq\n Mij2 = (1 - rij / dij) * \\\n (rijouter / rijsq - np.eye(d)) / rij / dij\n Mij = Mij1 + Mij2\n\n M[d * i:d * i + d, d * j:d * j + d] = Mij\n M[d * j:d * j + d, d * i:d * i + d] = Mij\n M[d * i:d * i + d, d * i:d * i + d] -= Mij\n M[d * j:d * j + d, d * j:d * j + d] -= Mij\n\n np.divide(M, self.L**2, out=M)\n if masses is None:\n return M\n\n # TODO: is the mass part of this really part of this?\n marr = np.array(masses)\n assert np.shape(masses) == np.shape(self.diameters)\n marr = np.array([masses] * d)\n marr = marr.T.flatten()\n # marr is now [m1,m1,m2,m2,...] (in 2D)\n mm = np.eye(d * N)\n np.multiply(mm, marr**-.5, out=mm)\n # mm is now M^-½, where M is the mass matrix\n\n mm.dot(M, out=M)\n M.dot(mm, out=M)\n return M",
"def test_dot_mm(self):\n self.check_dot_mm(dot2, dot3, \"np.dot()\")"
]
| [
"0.6502854",
"0.6264067",
"0.6235242",
"0.6111132",
"0.60820174",
"0.6053548",
"0.6019043",
"0.60024",
"0.59885395",
"0.59714615",
"0.5968381",
"0.596071",
"0.5959117",
"0.59306157",
"0.5902412",
"0.5900423",
"0.58945966",
"0.58917797",
"0.58533454",
"0.58418703",
"0.58413756",
"0.5840591",
"0.58262444",
"0.5776026",
"0.5769444",
"0.57418776",
"0.57358146",
"0.57259804",
"0.5687578",
"0.5683599"
]
| 0.64991665 | 1 |
Return windows of indices into the flattened data. data[index_matrix[i]] returns the flattened window around the ith element. | def create_index_matrix(data_shape, window_shape):
n_data = np.prod(data_shape)
n_window = np.prod(window_shape)
box = np.indices(window_shape)
index_matrix = np.zeros((n_data, n_window), dtype=np.int32)
shifts = np.unravel_index(np.arange(n_data), data_shape)
offset = (np.array(window_shape)-1)//2
for i, shift in enumerate(zip(*shifts)):
shift = np.array(shift)-offset
window = (box.T + shift).T
index_matrix[i] = np.ravel_multi_index(window, data_shape, 'wrap') \
.flatten()
return index_matrix | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_flatten_indices(actual_index, num_samples, skip_coef=1, window_size=5):\n n_pre_zeros = 0\n window_indices = []\n n_post_zeros = 0\n for i in range(window_size * 2 + 1):\n if (actual_index - window_size*skip_coef) + i*skip_coef >= 0 and (actual_index - window_size*skip_coef) + i*skip_coef < num_samples: \n window_indices.append((actual_index - window_size*skip_coef) + i*skip_coef)\n elif (actual_index - window_size*skip_coef) + i*skip_coef < 0 :\n n_pre_zeros = n_pre_zeros + 1\n elif (actual_index - window_size*skip_coef) + i*skip_coef >= num_samples:\n n_post_zeros = n_post_zeros + 1\n else:\n raise Exception(\"Isophonics get_flatten_indices faced to unexptected situation.\")\n\n return n_pre_zeros, window_indices, n_post_zeros",
"def all_windows(x, system_shape, window_shape):\n index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))\n return tf.transpose(\n tf.gather_nd(tf.transpose(x),\n tf.expand_dims(index_matrix, 2)),\n [2, 0, 1])",
"def window_inds(dataset, window_sz, overlap):\r\n\tdata_len = len(dataset[0])\r\n\tassert window_sz < data_len\r\n\tind1 = 0\r\n\tind2 = window_sz-1\r\n\tind_list = []\r\n\tov_ind_diff = int(np.ceil(np.abs(overlap*window_sz)))\r\n\tif ov_ind_diff == window_sz:\r\n\t\tov_ind_diff += -1\r\n\twhile ind2 < data_len:\r\n\t\tind_list.append((ind1,ind2))\r\n\t\tind1 += window_sz-ov_ind_diff\r\n\t\tind2 += window_sz-ov_ind_diff\r\n\treturn ind_list",
"def dataset_to_windows(dataset, windowsize):\n windows = []\n row, col = dataset.shape\n for i in range(col):\n if i > 0:\n windows.append(lag(np.array(dataset)[:,i], windowsize))\n return np.array(windows)",
"def windows(X, width, skip_last):\n ret = []\n n = X.shape[0]\n for i in range(n - width + 1 - skip_last):\n window = X[i:i + width, :]\n ret.append([tuple(x) for x in window[:]])\n return np.array(ret)",
"def rolling_window_sequences(X, index, window_size, target_size, target_column):\n out_X = list()\n out_y = list()\n X_index = list()\n y_index = list()\n\n target = X[:, target_column]\n\n for start in range(len(X) - window_size - target_size + 1):\n end = start + window_size\n out_X.append(X[start:end])\n out_y.append(target[end:end + target_size])\n X_index.append(index[start])\n y_index.append(index[end])\n\n return np.asarray(out_X), np.asarray(out_y), np.asarray(X_index), np.asarray(y_index)",
"def gather_windows(x, centers, system_shape, window_shape):\n window_size = np.prod(window_shape)\n batch_size = tf.shape(x)[0]\n index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))\n window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \\\n tf.ones(window_size, dtype=tf.int32)[None, :]\n indices = tf.stack((window_range, tf.gather(index_matrix, centers)), 2)\n return tf.gather_nd(x, indices)",
"def window_data(data: np.ndarray):\n\n w_len = 128\n stride = w_len // 2\n\n no_offset_windows = np.split(data, 10)\n offset_windows = np.split(data[stride:-stride], 9)\n windows = [0] * 19\n windows[::2] = no_offset_windows\n windows[1::2] = offset_windows\n windows = np.array(windows, dtype=np.float32)\n\n return windows",
"def window(spectrogram: np.ndarray, wlength: int) -> Iterator[np.ndarray]:\n\n y = spectrogram.shape[1]\n for j in range(y):\n ymin = j\n ymax = j + wlength if j + wlength <= y else y\n if ymax == y:\n break\n yield spectrogram[:, ymin:ymax]",
"def matrix_to_flat(self, idx_rows):\n idx = []\n for i in range(self.nts):\n idx.append(self._matrix_to_flat_by_ts(idx_rows, i))\n return idx",
"def _get_indices_split ( indices, number_of_folds ):\n # Split the indicies by the number of folds\n return np.array_split ( indices, indices_or_sections = number_of_folds )\n # End get_indices_split()",
"def _get_indices_split ( indices, number_of_folds ):\n # Split the indicies by the number of folds\n return np.array_split ( indices, indices_or_sections = number_of_folds )\n # End get_indices_split()",
"def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret",
"def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret",
"def extract_window_data(df, window_len=10, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_zero_base(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)",
"def get_indices_entire_sequence(data: pd.Dataframe, window_size: int, step_size: int) -> list:\n stop_position = len(data)-1 # 1- because of 0 indexing\n\n # Start the first sub-sequence at index position 0\n subseq_first_idx = 0\n\n subseq_last_idx = subseq_first_idx + window_size\n\n indices = []\n\n while subseq_last_idx <= stop_position:\n indices.append((subseq_first_idx, subseq_last_idx))\n subseq_first_idx += step_size\n subseq_last_idx += step_size\n return indices",
"def compute_indices_pandas(data) -> pd.Series:\n d = data.ravel()\n f = lambda x: np.unravel_index(x.index, data.shape)\n return pd.Series(d).groupby(d).apply(f)",
"def window_partition(x, window_size):\n B, D, H, W, C = x.shape\n x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C)\n windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)\n return windows",
"def _sliding_windows(a, N):\n a = np.asarray(a)\n p = np.zeros(N - 1, dtype=a.dtype)\n b = np.concatenate((p, a, p))\n s = b.strides[0]\n return np.lib.stride_tricks.as_strided(\n b[N - 1:],\n shape=(N, len(a) + N - 1),\n strides=(-s, s),\n )",
"def window_blocks(large_array, window_size):\n y_size = large_array.shape[0]/window_size\n blocks_array = large_array.reshape(y_size, window_size)\n return blocks_array",
"def extract_window_data(df, window_len=30, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_min_max(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)\n #return window_data",
"def fold(nb_splits, dataset):\r\n index = np.arange(np.shape(dataset)[0])\r\n splits = np.split(index, nb_splits)\r\n\r\n index = []\r\n\r\n for n_fold in np.arange(nb_splits):\r\n index.append((splits[n_fold].tolist(),(np.concatenate([x for i,x in enumerate(splits) if i!=n_fold])).tolist()))\r\n\r\n return index",
"def sample_data_input_fn(params):\n window_size = params['window_size']\n batch_size = params['batch_size']\n\n dataset_names = sample_data.get_data_names()\n all_downsampled = [sample_data.get_downsampled_data(name) for name in dataset_names]\n np_dtype = all_downsampled[0].dtype\n _, num_columns = all_downsampled[0].shape\n assert num_columns == 3\n\n # For each data item, this computes\n time_diffs = [(x[1:, 0] - x[:-1, 0]) for x in all_downsampled]\n median_time_diff = np.median(np.concatenate(time_diffs, axis=0))\n lower, upper = median_time_diff * 0.8, median_time_diff * 1.2\n valid_start_window_indices = [\n get_window_valid_indices(d, lower, upper, window_size) for d in time_diffs\n ]\n for name, valid_indices in zip(dataset_names, valid_start_window_indices):\n if np.size(valid_indices) == 0:\n raise ValueError(\"{} has no valid window ranges\".format(name))\n\n def get_samples_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n assert idx_array.shape == (batch_size, )\n samp_results = np.zeros((batch_size, window_size, num_columns), dtype=np_dtype)\n for i, sample_idx in enumerate(idx_array):\n start_idx = random.choice(valid_start_window_indices[sample_idx])\n samp_results[i, :, :] = all_downsampled[sample_idx][start_idx: (\n start_idx + window_size)]\n assert samp_results.shape == (batch_size, window_size, num_columns)\n return samp_results\n\n def get_window_sample(idx_tensor):\n samples = tf.py_func(get_samples_py_op, [idx_tensor], np_dtype)\n samples.set_shape((batch_size, window_size, num_columns))\n return samples\n\n def random_negative_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n neg_idx_array = np.copy(idx_array)\n for i, idx in enumerate(idx_array):\n while neg_idx_array[i] == idx_array[i]:\n neg_idx_array[i] = random.randint(0, len(all_downsampled) - 1)\n return neg_idx_array\n\n def get_negative_window_sample(idx_tensor):\n neg_idx_tensor = tf.py_func(\n random_negative_py_op,\n [idx_tensor],\n idx_tensor.dtype)\n return get_window_sample(neg_idx_tensor)\n\n # Current sample method: First select sample index, then select window.\n num_samples = len(all_downsampled)\n if num_samples < 2:\n raise ValueError(\"Need at least 2 light curves for negative samples!\")\n dataset = tf.data.Dataset.range(num_samples)\n dataset = dataset.repeat().shuffle(num_samples * 2).batch(batch_size)\n\n positive = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_window_sample(idx_tensor),\n 'goal': tf.constant([1.0] * batch_size, dtype=tf.float64)\n })\n negative = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_negative_window_sample(idx_tensor),\n 'goal': tf.constant([0.0] * batch_size, dtype=tf.float64)\n })\n\n # TODO(gatoatigrado): Experiment with shuffling positive & negative within a batch.\n # Currently each batch is just positive or negative.\n assert positive.output_shapes == negative.output_shapes\n assert negative.output_types == positive.output_types\n dataset = tf.contrib.data.sample_from_datasets((positive, negative))\n assert dataset.output_shapes == negative.output_shapes\n return dataset",
"def windows(self,windowSize):\n for i in range(0,len(self)-windowSize):\n yield (i,i+windowSize)",
"def vec_to_windows(x, wlen):\n n = len(x)\n # number of windows\n m = n // wlen\n # total samples to be kept\n s = m * wlen\n return jnp.reshape(x[:s], (m, wlen)).T",
"def _get_split_indices(self):\n\n cumsum = np.cumsum(\n np.concatenate((np.array([0], dtype=np.int8), self.split_sizes)))\n \n fold_inds = np.array(\n [(cumsum[n], cumsum[n + 1]) for n in range(self.n_splits)])\n\n return fold_inds",
"def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):\n return (row_indices * num_cols) + col_indices",
"def broadcast_index(values, indices):\r\n assert_array(indices, shape=(...,) + values.shape[:-1])\r\n indexed_values = jp.take_along_axis(\r\n values.reshape((1,) + values.shape),\r\n indices.reshape((-1,) + values.shape[:-1] + (1,)),\r\n axis=-1,\r\n )\r\n flat_result = jp.squeeze(indexed_values, axis=-1)\r\n return flat_result.reshape(indices.shape)",
"def _construct_windows(self, Nw, ti, i0=0, i1=None):\n if i1 is None:\n i1 = Nw\n\n # get data for windowing period\n df = self.data.get_data(ti-self.dtw, ti+(Nw-1)*self.dto)[self.data_streams]\n\n # create windows\n dfs = []\n for i in range(i0, i1):\n dfi = df[:].iloc[i*(self.iw-self.io):i*(self.iw-self.io)+self.iw]\n try:\n dfi['id'] = pd.Series(np.ones(self.iw, dtype=int)*i, index=dfi.index)\n except ValueError:\n print('hi')\n dfs.append(dfi)\n df = pd.concat(dfs)\n window_dates = [ti + i*self.dto for i in range(Nw)]\n return df, window_dates[i0:i1]",
"def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product(postshape[1:])) #1 if applied to empty\n #The index on this axis moves stride elements in flat\n outidx = idx.flatten() * stride #makes a copy\n #First add the offsets to get us to [..., idx @ axis = 0, 0...)\n outidx += numpy.repeat(\n numpy.arange(0, len(outidx), int(numpy.product(postshape)),\n dtype=idx.dtype),\n numpy.product(postshape))\n #Now offsets for non-zero on the trailing axes [0, 0, ... 0@axis, ...]\n outidx += numpy.tile(numpy.arange(0, stride, dtype=idx.dtype),\n int(numpy.product(preshape)) * idx.shape[axis])\n return outidx"
]
| [
"0.6340699",
"0.5813027",
"0.5748229",
"0.56743836",
"0.55570626",
"0.55411845",
"0.5432381",
"0.5309236",
"0.5209544",
"0.52076346",
"0.52067953",
"0.52067953",
"0.51609695",
"0.51609695",
"0.5154295",
"0.5130914",
"0.5128103",
"0.50972",
"0.5071069",
"0.49966234",
"0.49941477",
"0.49778488",
"0.4956917",
"0.495351",
"0.49500495",
"0.49351314",
"0.4911626",
"0.4903829",
"0.48841846",
"0.4878735"
]
| 0.6024071 | 1 |
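A minimal usage sketch of the `create_index_matrix` document above, assuming the function is in scope. It builds wrapped 3×3 window indices for a 4×4 grid; indexing the flattened data with one row of the matrix returns the window around that element, with neighbours wrapping around the edges:

```python
# Sketch: wrapped 3x3 windows over a 4x4 grid via create_index_matrix() above.
import numpy as np

data_shape, window_shape = (4, 4), (3, 3)
index_matrix = create_index_matrix(data_shape, window_shape)

flat = np.arange(np.prod(data_shape))      # flattened 4x4 grid: 0..15

# Window centred on element 0 (row 0, col 0); neighbours wrap around the edges.
print(flat[index_matrix[0]].reshape(window_shape))
# [[15 12 13]
#  [ 3  0  1]
#  [ 7  4  5]]
```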
Decorate tensorflow op graph building function with name_scope. Name defaults to function name. | def scope_op(name=None):
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
with tf.name_scope(name or function.__name__):
return function(*args, **kwargs)
return wrapper
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def function_name_scope(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n with tf.name_scope(function.__name__):\n return function(*args, **kwargs)\n return wrapper",
"def build(self, **kwargs):\n if not self.built:\n with tf.name_scope(self.name):\n self._build(**kwargs)\n self.built = True\n return self",
"def _graph_mode_decorator(f, args, kwargs):\n # TODO(rsepassi): Add support for kwargs\n if kwargs:\n raise ValueError(\n \"The custom_gradient decorator currently supports keywords \"\n \"arguments only when eager execution is enabled.\")\n name = generate_name()\n args = variable_utils.convert_variables_to_tensors(args)\n args = nest.map_structure(ops.convert_to_tensor, args, expand_composites=True)\n\n # Checking global and local variables attempts to ensure that no non-resource\n # Variables are added to the graph.\n current_var_scope = variable_scope.get_variable_scope()\n before_vars = set([\n v.ref() for v in current_var_scope.global_variables() +\n current_var_scope.local_variables()\n ])\n with record.VariableWatcher() as variable_watcher:\n result, grad_fn = f(*args)\n\n flat_args = composite_tensor_gradient.get_flat_tensors_for_gradients(\n nest.flatten(args))\n flat_result = composite_tensor_gradient.get_flat_tensors_for_gradients(\n nest.flatten(result))\n flat_result_len = len(flat_result)\n\n after_vars = set([\n v.ref() for v in current_var_scope.global_variables() +\n current_var_scope.local_variables()\n ])\n new_vars = after_vars - before_vars\n new_vars_list = [v.deref() for v in new_vars]\n for v in new_vars_list:\n if not resource_variable_ops.is_resource_variable(v):\n raise TypeError(\n \"All variables used by a function wrapped with @custom_gradient must \"\n \"be `ResourceVariable`s. Ensure that no `variable_scope` is created \"\n \"with `use_resource=False`.\")\n\n # The variables that grad_fn needs to return gradients for are the set of\n # variables used that are *not* part of the inputs.\n variables_in_tape = frozenset([\n v.ref() for v in variable_watcher.watched_variables()\n ])\n\n graphs = {getattr(o, \"graph\", None) for o in flat_result}\n # Not all results may be tensors. 
However, we want to ensure all tensor\n # outputs are from the same graph and get a list of captured inputs for\n # variable search\n graphs.discard(None) # Discard non-graph outputs\n if graphs:\n if len(graphs) > 1:\n raise ValueError(\n \"All custom_gradient outputs should be from the same graph\")\n output_graph = graphs.pop()\n filtered_input_tensors = []\n for i in flat_args:\n if i.graph == output_graph:\n filtered_input_tensors.append(i)\n else:\n filtered_input_tensors = flat_args\n\n variables_in_subgraph = frozenset([\n v.ref() for v in _get_dependent_variables(\n input_ops=filtered_input_tensors, output_ops=flat_result)\n ])\n variables = sorted(\n [v.deref() for v in variables_in_subgraph.union(variables_in_tape)],\n key=lambda v: v.name)\n\n grad_argspec = tf_inspect.getfullargspec(grad_fn)\n variables_in_signature = (\"variables\" in grad_argspec.args or\n \"variables\" in grad_argspec.kwonlyargs or\n grad_argspec.varkw)\n if variables and not variables_in_signature:\n raise TypeError(\n \"@tf.custom_gradient grad_fn must accept keyword argument 'variables', \"\n \"since function uses variables: {}\".format(variables))\n if variables_in_signature and not variables:\n # User seems to intend to use variables but none were captured.\n logging.vlog(\n 1, \"@custom_gradient grad_fn has 'variables' in signature, \"\n \"but no ResourceVariables were used on the forward pass.\")\n\n all_tensors = flat_result + flat_args + variables\n\n def tape_grad_fn(*result_grad_components):\n \"\"\"Custom grad fn wrapper.\"\"\"\n result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients(\n nest.flatten(result), result_grad_components[:flat_result_len])\n if not isinstance(result_grads, (list, tuple)):\n result_grads = [result_grads]\n\n if variables:\n input_grads, variable_grads = grad_fn(*result_grads, variables=variables)\n if len(variable_grads) != len(variables):\n raise ValueError(\"Must return gradient for each variable from \"\n \"@custom_gradient grad_fn.\")\n else:\n input_grads = grad_fn(*result_grads)\n variable_grads = []\n\n # Need to return one value per input to the IdentityN, so pad the\n # gradients of the inputs of the custom_gradient function with the\n # gradients of the outputs as well.\n input_grads = composite_tensor_gradient.get_flat_tensors_for_gradients(\n nest.flatten(input_grads))\n return ([None] * flat_result_len) + input_grads + variable_grads\n\n @ops.RegisterGradient(name)\n def internal_grad_fn(unused_op, *result_grads): # pylint: disable=unused-variable\n \"\"\"Custom grad fn wrapper.\"\"\"\n return tape_grad_fn(*result_grads)\n\n original_tensors = all_tensors\n with ops.get_default_graph().gradient_override_map({\"IdentityN\": name}):\n all_tensors = array_ops.identity_n(all_tensors)\n\n original_tensors = [ops.convert_to_tensor(x) for x in original_tensors]\n\n # Propagate handle data for happier shape inference for resource variables.\n for i, t in enumerate(original_tensors):\n if t.dtype == dtypes.resource and hasattr(t, \"_handle_data\"):\n all_tensors[i]._handle_data = t._handle_data # pylint: disable=protected-access\n record.record_operation(\n f.__name__, all_tensors, original_tensors, tape_grad_fn)\n for ot, t in zip(original_tensors, all_tensors):\n handle_data_util.copy_handle_data(ot, t)\n flat_result = composite_tensor_gradient.replace_flat_tensors_for_gradients(\n nest.flatten(result), all_tensors[:flat_result_len])\n return nest.pack_sequence_as(result, flat_result)",
"def shared_name_scope(name, graph, name_scopes):\n with graph.as_default():\n if name not in name_scopes:\n with tf.name_scope(name) as scope:\n name_scopes[name] = scope\n return tf.name_scope(name_scopes[name])",
"def graph_callable(shape_and_dtypes):\n # TODO(alive,apassos): support initialized_value and friends from tf.Variable.\n assert context.in_eager_mode(), (\n \"graph_callable can only be used when Eager execution is enabled.\")\n def decorator(func):\n return tf_decorator.make_decorator(func,\n _graph_callable_internal(\n func, shape_and_dtypes))\n\n return decorator",
"def command_(self, name):\n def decorator(func):\n func.__name__ = name\n return self.command(func)\n return decorator",
"def define_scope(function, scope=None, *args, **kwargs):\n attribute = '_cache_' + function.__name__\n name = scope or function.__name__\n @property\n @functools.wraps(function)\n def decorator(self):\n if not hasattr(self, attribute):\n with tf.variable_scope(name, *args, **kwargs):\n setattr(self, attribute, function(self))\n return getattr(self, attribute)\n return decorator",
"def define_scope(function, scope=None, *args, **kwargs):\n attribute = '_cache_' + function.__name__\n name = scope or function.__name__\n @property\n @functools.wraps(function)\n def decorator(self):\n if not hasattr(self, attribute):\n with tf.variable_scope(name, *args, **kwargs):\n setattr(self, attribute, function(self))\n return getattr(self, attribute)\n return decorator",
"def define_scope(function, scope=None, *args, **kwargs):\n attribute = '_cache_' + function.__name__\n name = scope or function.__name__\n\n @property\n @functools.wraps(function)\n def decorator(self):\n if not hasattr(self, attribute):\n with tf.variable_scope(name, *args, **kwargs):\n setattr(self, attribute, function(self))\n return getattr(self, attribute)\n\n return decorator",
"def build_graph(self, name):\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n # change shape of input for when adding score\n self.input_positions = tf.placeholder(tf.float32, shape=(None, 1, 2,6), name='inputs')\n self.target_q = tf.placeholder(shape=[None], dtype=tf.float32, name='target')\n net = self.input_positions\n\n net = tf.layers.conv2d(inputs=net, filters=128, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=128, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=64, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n\n net = tf.layers.flatten(net)\n\n net = self.add_dense_layer(net, 12, tf.nn.relu)\n\n self.value = self.add_dense_layer(net, 1, name='state_q_value')\n self.advantage = self.add_dense_layer(net, 12, name='action_advantage')\n\n self.q_values = tf.add(self.value, tf.subtract(self.advantage,\n tf.reduce_mean(self.advantage, axis=1, keepdims=True)),\n name=\"action_q_values\")\n\n self.probabilities = tf.nn.softmax(self.q_values, name='probabilities')\n\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name='actions')\n self.actions_onehot = tf.one_hot(self.actions, 12, dtype=tf.float32)\n self.q = tf.reduce_sum(tf.multiply(self.q_values, self.actions_onehot), axis=1, name=\"selected_action_q\")\n\n tf.summary.histogram(\"Action_Q_values\", self.q)\n\n self.td_error = tf.square(self.target_q - self.q)\n self.loss = tf.reduce_mean(self.td_error, name=\"q_loss\")\n\n tf.summary.scalar(\"Q_Loss\", self.loss)\n self.reg_losses = tf.identity(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=name),\n name=\"reg_losses\")\n\n reg_loss = self.beta * tf.reduce_mean(self.reg_losses)\n tf.summary.scalar(\"Regularization_loss\", reg_loss)\n\n self.merge = tf.summary.merge_all()\n\n self.total_loss = tf.add(self.loss, reg_loss, name=\"total_loss\")\n self.train_step = tf.train.GradientDescentOptimizer(learning_rate=self.learningRate). \\\n minimize(self.total_loss, name='train')",
"def build_tf_graph(self):\n raise NotImplementedError",
"def get_graph_func(name):\n if name == \"chain\":\n f = generate_chain\n elif name == \"bidiag\":\n f = generate_bidiag\n elif name == \"collider\":\n f = generate_collider\n elif name == \"jungle\":\n f = generate_jungle\n elif name == \"full\":\n f = generate_full\n elif name == \"regular\":\n f = generate_regular_graph\n elif name == \"random\":\n f = generate_random_graph\n elif name.startswith(\"random_max_\"): # Random graph with maximum number of parents\n max_parents = int(name.split(\"_\")[-1])\n f = lambda *args, **kwargs: generate_random_graph(*args, max_parents=max_parents, **kwargs)\n else:\n f = generate_random_graph\n return f",
"def node(func, name=None):\n return NamedFunc(func, name)",
"def add_scope(scope=None, scope_fn=None):\n def decorator(f):\n\n @functools.wraps(f)\n def decorated(*args, **kwargs):\n # Python 2 hack for keyword only args\n name = kwargs.pop(\"name\", None)\n with scope_fn(name or scope or f.__name__):\n return f(*args, **kwargs)\n return decorated\n\n return decorator",
"def build(self):\n if not self.built:\n with tf.name_scope(self.name):\n self._build()\n self.built = True\n return self",
"def build(self, graph, name_scopes, training):\n raise NotImplementedError('Must be overridden by concrete subclass')",
"def register_fn(cls, f):\n def inner(self, *args, **kwargs):\n try:\n query, projection, options = cls.unpack_scope(f(*args, **kwargs))\n new_query = deepcopy(self.query)\n new_projection = deepcopy(self.projection)\n new_options = deepcopy(self.options)\n deep_merge(query, new_query)\n new_projection.update(projection)\n new_options.update(options)\n return ScopeBuilder(self.model, self.fns, new_query,\n new_projection, new_options)\n except ValueError:\n raise ValueError(\"Scope function \\\"{}\\ returns an invalid scope\".format(f.__name__))\n\n setattr(cls, f.__name__, inner)",
"def __init__(self, name='batch_norm'):\n with tf.variable_scope(name):\n self.name = name",
"def custom_gradient(f=None):\n\n if f is None:\n return lambda f: custom_gradient(f=f)\n\n @Bind.decorator\n def decorated(wrapped, args, kwargs):\n \"\"\"Decorated function with custom gradient.\"\"\"\n if context.executing_eagerly():\n return _eager_mode_decorator(wrapped, args, kwargs)\n else:\n return _graph_mode_decorator(wrapped, args, kwargs)\n\n return tf_decorator.make_decorator(f, decorated(f)) # pylint: disable=no-value-for-parameter",
"def node(self, func_or_name):\n\n self._compiled = None\n\n def _decorator(fn):\n self.graph[func_or_name] = fn\n return fn\n\n if callable(func_or_name):\n self.graph[func_or_name.__name__] = func_or_name\n return func_or_name\n else:\n return _decorator",
"def scope_name():\n return tf.get_variable_scope().name",
"def __init__(self, inputs, outputs,\n session=tf.get_default_session, name='function'):\n self.session, self.name = session, name\n self.inputs, self.outputs = inputs, outputs",
"def decorator(func):\n\t\treturn push_aspect(name or func.__name__, func)",
"def Optimizer(loss,name_scope=\"Regress\"):\n Vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope=name_scope) \n opt = tf.train.AdamOptimizer(lr).minimize(loss,var_list=Vars)\n return opt",
"def scope_name():\n return tf.compat.v1.get_variable_scope().name",
"def tf_op(\n self, py_fun):\n with tf.name_scope('tf_op'):\n return self.context.as_nql(py_fun(self.tf), self._type_name)",
"def namespace(f):\n @functools.wraps(f)\n def _wrapped(*arg, **kw):\n parent = kw.get('parent', get_default_parent())\n name = kw.get('name', '')\n name = '_warmns_' + name + ('-' if name else '') + f.__name__\n name = _auto_name(name, parent)\n kw['name'] = name.replace('_warmns_', '')\n return f(*arg, **kw)\n return _wrapped",
"def name(self) -> str:\n return \"UnnamedOptimizer\"",
"def build(self):\n if not self.built:\n logger.debug(f\"Building {self.name}...\")\n with tf.name_scope(self.name):\n self._build()\n self.built = True\n return self",
"def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration"
]
| [
"0.6776843",
"0.6052667",
"0.6024638",
"0.6017519",
"0.5850662",
"0.582437",
"0.578642",
"0.57745945",
"0.5770974",
"0.57384944",
"0.57371694",
"0.57050693",
"0.56976104",
"0.5680395",
"0.5660873",
"0.56570977",
"0.5645926",
"0.56300974",
"0.55482644",
"0.55344635",
"0.5514956",
"0.54904217",
"0.5466635",
"0.54607797",
"0.5449279",
"0.54434425",
"0.5437559",
"0.54252094",
"0.5423492",
"0.541954"
]
| 0.78752005 | 0 |
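A short usage sketch of the `scope_op` decorator above, assuming TensorFlow 1.x graph mode (consistent with the `tf.name_scope` graph-building style of the surrounding code); the printed op names are indicative only, since TensorFlow uniquifies scopes on collisions:

```python
# Sketch: ops built inside a decorated function pick up the name scope.
import tensorflow as tf

@scope_op()                # scope defaults to the function name
def doubled(x):
    return tf.add(x, x, name="out")

@scope_op("custom_scope")  # explicit scope name
def tripled(x):
    return tf.multiply(x, 3.0, name="out")

x = tf.placeholder(tf.float32, [None], name="x")
print(doubled(x).op.name)  # e.g. "doubled/out"
print(tripled(x).op.name)  # e.g. "custom_scope/out"
```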
Gather windows of tensor around centers. Uses wrapped padding. | def gather_windows(x, centers, system_shape, window_shape):
window_size = np.prod(window_shape)
batch_size = tf.shape(x)[0]
index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))
window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \
tf.ones(window_size, dtype=tf.int32)[None, :]
indices = tf.stack((window_range, tf.gather(index_matrix, centers)), 2)
return tf.gather_nd(x, indices) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_windows(x, centers, updates, mask, system_shape, window_shape):\n window_size = np.prod(window_shape)\n batch_size = tf.shape(x)[0]\n index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))\n window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \\\n tf.ones(window_size, dtype=tf.int32)[None, :]\n indices = tf.stack((window_range, tf.gather(index_matrix, centers)), 2)\n return tf.scatter_nd_update(\n x, tf.boolean_mask(indices, mask), tf.boolean_mask(updates, mask))",
"def find_window_centroids(self, warped):\n window_width = self.window_width\n window_height = self.window_height\n margin = self.margin\n \n window_centroids = [] # store the (left,right) window centroids positions per level\n window = np.ones(window_width) # Create window templates for doing convolutions\n \n # First find the two starting positions right and left lines by using np.sum to get the vertical image slice\n # and then np.convolve the vertical image slice with the window template\n \n # sum the bottom quarter of image to get slice\n l_sum = np.sum(warped[int(3*warped.shape[0]/4):,:int(3*warped.shape[1]/2)], axis=0)\n l_center = np.argmax(np.convolve(window,l_sum)) - window_width/2\n r_sum = np.sum(warped[int(3*warped.shape[0]/4):int(3*warped.shape[1]/2),:], axis=0)\n r_center = np.argmax(np.convolve(window,r_sum)) - window_width/2 + int(warped.shape[1]/2)\n \n # add what we found for the first layer\n window_centroids.append((l_center,r_center))\n \n # Go through each layer looking for max pixel location\n for level in range(1,(int)(warped.shape[0]/window_height)):\n # convolve the window into the vertical slice of the image\n image_layer = np.sum(warped[int(warped.shape[0]-(level+1)*window_height):int(warped.shape[0]-level*window_height),:], axis=0)\n conv_signal = np.convolve(window,image_layer)\n # Find the best left centroid by using past left center as a reference \n # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window\n offset = window_width/2\n l_min_index = int(max(l_center+offset-margin,0))\n l_max_index = int(max(l_center+offset+margin,warped.shape[1]))\n l_center = np.argmax(conv_signal[l_min_index:l_max_index]) + l_min_index - offset\n \n # Find the best right centroid by using past right center as a reference\n r_min_index = int(max(r_center+offset-margin,0))\n r_max_index = int(max(r_center+offset+margin,warped.shape[1]))\n r_center = np.argmax(conv_signal[r_min_index:r_max_index]) + r_min_index - offset\n # Add what we found for the layer\n window_centroids.append((l_center,r_center))\n \n self.recent_centers.append(window_centroids)\n # return averaged value of of the line centers, helps to keep the markers from jumping around too much\n average_line_centers = np.average(self.recent_centers[-self.smooth_factor:], axis=0)\n return average_line_centers",
"def windows(X, width, skip_last):\n ret = []\n n = X.shape[0]\n for i in range(n - width + 1 - skip_last):\n window = X[i:i + width, :]\n ret.append([tuple(x) for x in window[:]])\n return np.array(ret)",
"def crop_to_center(tensor, border=50):\n return tensor[:, :, border:-border, border:-border]",
"def smdm_normalize(images, window, padding, name=\"unnamed_smdm_normalize\"):\n\tMEDIAN_JITTER = tf.constant(1e-8)\n\t\n\tif window % 2 == 0:\n\t\traise ValueError(\"attempted to smdm_normalize() with even-sized window\")\n\n\timages = tf.cast(images, tf.float32)\n\tbatch_size, height, width, channels = tf.shape(images)[0], tf.shape(images)[1], tf.shape(images)[2], tf.shape(images)[3]\n\n\tspatial_last = tf.transpose(images, (0, 3, 1, 2))\n\tspatial_last_and_flat = tf.reshape(spatial_last, (batch_size, channels, -1))\n\tn = tf.multiply(height, width)\n\tk = tf.to_int32(tf.divide(n, 2)) + 1\n\ttop_k = tf.nn.top_k(spatial_last_and_flat, k, name=name + \"_top_half_of_images\")[0]\n\tmedians_spatial_last_and_flat = tf.cond(\n\t\ttf.equal(tf.mod(n, 2), 0),\n\t\tlambda: tf.reduce_mean(top_k[:, :, k - 2: k], -1, keep_dims=True),\n\t\tlambda: top_k[:, :, k - 1]\n\t)\n\tmedians_spatial_last_and_flat = tf.add(\n\t\tmedians_spatial_last_and_flat,\n\t\ttf.fill(tf.shape(medians_spatial_last_and_flat), MEDIAN_JITTER)\n\t)\n\tmedians_spatial_last = tf.expand_dims(medians_spatial_last_and_flat, 3)\n\tmedians = tf.transpose(medians_spatial_last, (0, 2, 3, 1))\n\timages = tf.divide(images, medians, name=name + \"_divide_images_by_medians\")\n\n\tpadding_amount = int((window - 1) / 2)\n\tpadding_amounts = ((0, 0), (padding_amount, padding_amount), (padding_amount, padding_amount), (0, 0))\n\timages_padded = tf.pad(images, padding_amounts, padding)\n\tlocal_means = tf.nn.pool(images_padded, (window, window), \"AVG\", \"VALID\", name=name + \"_local_means_of_images\")\n\timages = tf.subtract(images, local_means, name=name + \"_subtract_local_means_from_images\")\n\n\treturn images",
"def _construct_window_around_peak(fs, irs, tleft, tright, alpha=0.5):\n orig_shape = irs.shape\n flat_irs = irs.reshape(-1, irs.shape[-1])\n\n sleft = int(fs * tleft)\n sright = int(fs * tright)\n\n windows = np.ones(flat_irs.shape)\n for i in range(flat_irs.shape[0]):\n ipeak = np.argmax(np.abs(flat_irs[i]))\n iwstart = max(ipeak - sleft, 0)\n iwend = min(ipeak + sright, flat_irs.shape[-1])\n\n window = tukey(iwend - iwstart, alpha=alpha)\n\n windows[i, iwstart:iwend] *= window\n windows[i, :iwstart] = 0\n windows[i, iwend:] = 0\n\n return windows.reshape(orig_shape)",
"def window_partition(x, window_size):\n B, D, H, W, C = x.shape\n x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C)\n windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)\n return windows",
"def rolling_window(a, window, pad=None):\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1], )\n out = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n if pad is not None:\n blankpad = np.empty((window // 2, window))\n blankpad[:] = pad\n return np.concatenate([blankpad, out, blankpad])\n else:\n return out",
"def make_grid_bbox(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0, draw_line=False):\n\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n # print(box[0].size())\n # print(box[1].size())\n # assert False\n # num_curr_box = box[0][k].size(0)\n num_curr_box = box[k][0].size(0)\n for z in irange(num_curr_box):\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n print(box)\n print(k)\n assert False\n \n if label != -1:\n block = draw_box(block, box[k][0][z], label, draw_line)\n # print(k, z)\n else:\n break\n # copy to the grid\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid",
"def apply_windows(slided_signals, window_type=\"hann\"):\n # length of each slided signal\n n = slided_signals.shape[-1]\n window = signal.get_window(window_type, n)\n windowed_signals = numpy.multiply(slided_signals, window)\n return windowed_signals",
"def get_windows(self, x_train, y_train):\n\n def roundMultiple(x, base=4):\n \"\"\"Round n up to nearest multiple of base.\"\"\"\n return int(base * round(float(x)/base))\n\n def auto_set_stride():\n self.stride = roundMultiple(\n int(self.window_size / 10), base=2)\n debug(\"Stride auto set to \", self.stride)\n\n def auto_set_window_size(sequence):\n threshold = (self.left_epsilon + self.right_epsilon) * 2\n time_arr = sequence[:, self.X_TIME_COLUMN]\n self.window_size = roundMultiple(\n np.argmax(time_arr > threshold), base=4)\n debug(\"Window size auto set to \", self.window_size)\n\n windows_x = []\n windows_y = []\n debug(\"Making windows...\")\n if self.window_size is None:\n auto_set_window_size(x_train[0])\n if self.stride is None:\n auto_set_stride()\n\n for index in tqdm(range(len(x_train))):\n sequence_extractions, sequence_extraction_labels = \\\n self.get_windows_for_sequence(\n x_train[index], y_train[index])\n windows_x.append(sequence_extractions)\n windows_y.append(sequence_extraction_labels)\n return np.array(windows_x), np.array(windows_y)",
"def _sliding_windows(a, N):\n a = np.asarray(a)\n p = np.zeros(N - 1, dtype=a.dtype)\n b = np.concatenate((p, a, p))\n s = b.strides[0]\n return np.lib.stride_tricks.as_strided(\n b[N - 1:],\n shape=(N, len(a) + N - 1),\n strides=(-s, s),\n )",
"def apply_sliding_windows(self, binary_warped, leftx_base, rightx_base):\r\n # Choose the number of sliding windows\r\n nwindows = 9\r\n # Set height of windows\r\n window_height = np.int(binary_warped.shape[0] / nwindows)\r\n # Identify the x and y positions of all nonzero pixels in the image\r\n nonzero = binary_warped.nonzero()\r\n nonzeroy, nonzerox = np.array(nonzero[0]), np.array(nonzero[1])\r\n # Current positions to be updated for each window\r\n leftx_current, rightx_current = leftx_base, rightx_base\r\n # Set the width of the windows +/- margin\r\n margin = 100\r\n # Set minimum number of pixels found to recenter window\r\n minpix = 50\r\n # Create empty lists to receive left and right lane pixel indices\r\n left_lane_inds, right_lane_inds = [], []\r\n\r\n # Step through the windows one by one\r\n # Create an output image to draw on and visualize the result\r\n out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255\r\n for window in range(nwindows):\r\n # Identify window boundaries in x and y (and right and left)\r\n win_y_low = binary_warped.shape[0] - (window + 1) * window_height\r\n win_y_high = binary_warped.shape[0] - window * window_height\r\n win_xleft_low, win_xleft_high = leftx_current - margin, leftx_current + margin\r\n win_xright_low, win_xright_high = rightx_current - margin, rightx_current + margin\r\n # Draw the windows on the visualization image\r\n if self.debug:\r\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)\r\n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)\r\n # Identify the nonzero pixels in x and y within the window\r\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (\r\n nonzerox < win_xleft_high)).nonzero()[0]\r\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (\r\n nonzerox < win_xright_high)).nonzero()[0]\r\n # Append these indices to the lists\r\n left_lane_inds.append(good_left_inds)\r\n right_lane_inds.append(good_right_inds)\r\n # If you found > minpix pixels, recenter next window on their mean position\r\n if len(good_left_inds) > minpix:\r\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\r\n if len(good_right_inds) > minpix:\r\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\r\n if self.debug:\r\n cv_rgb = cv2.cvtColor(out_img.astype(np.uint8), cv2.COLOR_BGR2RGB)\r\n plt.imshow(cv_rgb)\r\n #cv2.imshow('Sliding window computation',out_img)\r\n # Concatenate the arrays of indices\r\n left_lane_inds = np.concatenate(left_lane_inds)\r\n right_lane_inds = np.concatenate(right_lane_inds)\r\n if self.debug:\r\n self.fit_dict['left_lane_inds'] = left_lane_inds\r\n self.fit_dict['right_lane_inds'] = right_lane_inds\r\n\r\n # Extract left and right line pixel positions\r\n leftx, lefty = nonzerox[left_lane_inds], nonzeroy[left_lane_inds]\r\n rightx, righty = nonzerox[right_lane_inds], nonzeroy[right_lane_inds]\r\n return leftx, lefty, rightx, righty",
"def window_data(data: np.ndarray):\n\n w_len = 128\n stride = w_len // 2\n\n no_offset_windows = np.split(data, 10)\n offset_windows = np.split(data[stride:-stride], 9)\n windows = [0] * 19\n windows[::2] = no_offset_windows\n windows[1::2] = offset_windows\n windows = np.array(windows, dtype=np.float32)\n\n return windows",
"def windows_partition(x, window_size):\n\n B, H, W, C = x.shape\n x = x.reshape([B, H//window_size, window_size, W//window_size, window_size, C])\n x = x.transpose([0, 1, 3, 2, 4, 5])\n x = x.reshape([-1, window_size, window_size, C]) #(num_windows*B, window_size, window_size, C)\n return x",
"def pool(inputs, init, reduce_fn, window_shape, strides, padding):\n num_batch_dims = inputs.ndim - (len(window_shape) + 1)\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(\n strides\n ), f\"len({window_shape}) must equal len({strides})\"\n strides = (1,) * num_batch_dims + strides + (1,)\n dims = (1,) * num_batch_dims + window_shape + (1,)\n\n is_single_input = False\n if num_batch_dims == 0:\n # add singleton batch dimension because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n strides = (1,) + strides\n dims = (1,) + dims\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\"\n )\n assert all(\n [len(x) == 2 for x in padding]\n ), f\"each entry in padding {padding} must be length 2\"\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y",
"def make_grid_floor_plan(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0):\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n\n wall_thickness = 2\n wall_symbol = 2.0\n\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n num_curr_box = box[k][0].size(0)\n \n # sorted the box according to their size\n sorted_box = {}\n for z in irange(num_curr_box):\n curr_box = box[k][0][z]\n x1, y1, x2, y2 = curr_box[0], curr_box[1], curr_box[2], curr_box[3]\n sorted_box[z] = (x2-x1)*(y2-y1)\n # to get sorted id\n sorted_box = sorted(sorted_box.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)\n\n # obtain the sorted box and corresponding label\n for m in irange(num_curr_box):\n # get sorted id\n z = sorted_box[m][0]\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n assert False\n # draw box in the current image\n if label != -1:\n block = draw_floor_plan(block, box[k][0][z], label)\n # print(k, z)\n else:\n break\n\n # copy the current image to the grid\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid",
"def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2])) # w, h",
"def slide_window(a, window):\n shape = (a.shape[0] - window + 1, window) + a.shape[1:]\n strides = (a.strides[0],) + a.strides\n examples = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\n inp = examples[:-1]\n out = examples[1:]\n return inp, out",
"def _rolling_windows(a, window):\n\n if window > a.shape[0]:\n raise ValueError(\n \"Specified `window` length of {0} exceeds length of\"\n \" `a`, {1}.\".format(window, a.shape[0])\n )\n if isinstance(a, (pd.Series, pd.DataFrame)):\n a = a.values\n if a.ndim == 1:\n a = a.reshape(-1, 1)\n shape = (a.shape[0] - window + 1, window) + a.shape[1:]\n strides = (a.strides[0],) + a.strides\n windows = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n if windows.ndim == 1:\n windows = np.atleast_2d(windows)\n return windows",
"def slidingWindow(self, img):\n # 720 x 1280\n # y --> 720 (0)\n # x --> 1280 (1)\n\n sizeY, sizeX = img.shape\n\n outputImg = np.dstack((img, img, img)) * 255\n\n # Compute histogram for the bottom half of the image along the x-axis\n hist = np.sum(img[sizeY//2:,:], axis=0)\n\n # Height of each window\n window_height = np.int(sizeY // self.nwindows)\n\n # Check indexes != 0\n nonzero = np.nonzero(img)\n nonzeroInY = np.array(nonzero[0])\n nonzeroInX = np.array(nonzero[1])\n\n # Split the image in two and set the centers\n leftXCenter = np.argmax(hist[:sizeX // 2])\n rightXCenter = np.argmax(hist[sizeX // 2:]) + sizeX // 2\n\n # Set the x-center of the boxes, which will be corrected over time\n leftXCurrent = leftXCenter\n rightXCurrent = rightXCenter\n \n # Lists to save indexes of pixel inside the rectangle\n leftSidePixels = []\n rightSidePixels = []\n\n for window in range(self.nwindows):\n # Make the boxes\n # Calculate the Y coords\n yLow = sizeY - (1 + window) * window_height\n yHigh = sizeY - window * window_height\n \n # Calculate the X coords for the left and right side\n xLowLeft = leftXCurrent - self.margin\n xHighLeft = leftXCurrent + self.margin\n xLowRight = rightXCurrent - self.margin\n xHighRight = rightXCurrent + self.margin\n\n # Draw rectangle for the left lane\n cv2.rectangle(outputImg, (xLowLeft, yLow), (xHighLeft, yHigh), (0, 255, 0), 3)\n \n # Draw rectangle for the right lane\n cv2.rectangle(outputImg, (xLowRight, yLow), (xHighRight, yHigh), (0, 255, 0), 3)\n\n # Check if pixels's values != 0 are inside the window (rectanle)\n\n # Check if the indexes are in the boxes and their values != 0\n leftSidePixelsInsideBox = ((nonzeroInX >= xLowLeft) & (nonzeroInX <= xHighLeft) & (nonzeroInY >= yLow) & (nonzeroInY <= yHigh)).nonzero()[0]\n rightSidePixelsInsideBox = ((nonzeroInX >= xLowRight) & (nonzeroInX <=xHighRight) & (nonzeroInY >= yLow) & (nonzeroInY <= yHigh)).nonzero()[0]\n\n leftSidePixels.append(leftSidePixelsInsideBox)\n rightSidePixels.append(rightSidePixelsInsideBox)\n\n if len(leftSidePixelsInsideBox) > self.minpixels:\n leftXCurrent = np.int(np.mean(nonzeroInX[leftSidePixelsInsideBox]))\n\n if len(rightSidePixelsInsideBox) > self.minpixels:\n rightXCurrent = np.int(np.mean(nonzeroInX[rightSidePixelsInsideBox]))\n\n try:\n leftSidePixels = np.concatenate(leftSidePixels)\n rightSidePixels = np.concatenate(rightSidePixels)\n except ValueError:\n # Avoids an error if the above is not implemented fully\n pass\n\n leftLaneY = nonzeroInY[leftSidePixels]\n leftLaneX = nonzeroInX[leftSidePixels]\n rightLaneY = nonzeroInY[rightSidePixels]\n rightLaneX = nonzeroInX[rightSidePixels]\n\n # Get the coefficients (A, B, C)\n leftFit = np.polyfit(leftLaneX, leftLaneY, 2)\n rightFit = np.polyfit(rightLaneX, rightLaneY, 2)\n \n # Generate x values. 
These will be the y for plotting\n ploty = np.linspace(0, outputImg.shape[0]-1, outputImg.shape[0])\n \n try:\n leftFitX = ploty*leftFit[0]**2 + ploty*leftFit[1] + leftFit[2]\n rightFitX = ploty*rightFit[0]**2 + ploty*rightFit[1] + leftFit[2]\n \n except TypeError:\n # In case there is no C\n leftFitX = ploty*leftFit[0]**2 + ploty*leftFit[1]\n rightFitX = ploty*rightFit[0]**2 + ploty*rightFit[1]\n\n windowImg = np.zeros_like(outputImg)\n\n outputImg[leftLaneY, leftLaneX] = [255, 0, 0]\n outputImg[rightLaneY, rightLaneX] = [0, 0, 255]\n\n leftLineWindow1 = np.array([np.transpose(np.vstack([leftFitX - self.margin, ploty]))])\n leftLineWindow2 = np.array([np.flipud(np.transpose(np.vstack([leftFitX + self.margin, ploty])))])\n leftLinePts = np.hstack((leftLineWindow1, leftLineWindow2))\n \n rightLineWindow1 = np.array([np.transpose(np.vstack([rightFitX - self.margin, ploty]))])\n rightLineWindow2 = np.array([np.flipud(np.transpose(np.vstack([rightFitX + self.margin, ploty])))])\n rightLinePts = np.hstack((rightLineWindow1, rightLineWindow2))\n\n cv2.fillPoly(windowImg, np.int_([leftLinePts]), (0, 255, 0))\n cv2.fillPoly(windowImg, np.int_([rightLinePts]), (0, 255, 0))\n result = cv2.addWeighted(outputImg, 1, windowImg, 0.3, 0)\n\n plt.plot(leftFitX, ploty, color = 'yellow')\n plt.plot(rightFitX, ploty, color = 'yellow')\n\n # leftFitX -> Formula for the left lane\n # rightFitX -> Formula for the right lane\n # leftLaneX -> X - index inside the left window and their values != 0\n # rightLaneX -> X - index inside the right window and their values != 0\n return leftFitX, leftLaneX, rightFitX, rightLaneX, result",
"def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data",
"def windowing(input):\n return input * hamming(input.shape[1], sym=0)",
"def all_windows(x, system_shape, window_shape):\n index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))\n return tf.transpose(\n tf.gather_nd(tf.transpose(x),\n tf.expand_dims(index_matrix, 2)),\n [2, 0, 1])",
"def _get_mlc_window(\n self, leaf_center, leaf_width, approx_idx, spacing\n ) -> np.ndarray:\n leaf_width_px = leaf_width * self.image.dpmm\n leaf_center_px = leaf_center * self.image.dpmm + (\n self.image.shape[0] / 2\n if self.orientation == Orientation.UP_DOWN\n else self.image.shape[1] / 2\n )\n if self.orientation == Orientation.UP_DOWN:\n # crop edges to image boundary if need be; if the pickets are too close to edge we could spill outside\n left_edge = max(int(approx_idx - spacing / 2), 0)\n right_edge = min(int(approx_idx + spacing / 2), self.image.shape[1])\n top_edge = max(int(leaf_center_px - leaf_width_px / 2), 0)\n bottom_edge = min(\n int(leaf_center_px + leaf_width_px / 2), self.image.shape[0]\n )\n array = self.image[top_edge:bottom_edge, left_edge:right_edge]\n else:\n top_edge = max(int(approx_idx - spacing / 2), 0)\n bottom_edge = min(int(approx_idx + spacing / 2), self.image.shape[0])\n left_edge = max(int(leaf_center_px - leaf_width_px / 2), 0)\n right_edge = min(\n int(leaf_center_px + leaf_width_px / 2), self.image.shape[1]\n )\n array = self.image[top_edge:bottom_edge, left_edge:right_edge]\n return array",
"def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])",
"def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,\n rate_or_const_rate):\n # Spatial dimensions of the filters and the upsampled filters in which we\n # introduce (rate - 1) zeros between consecutive filter values.\n filter_spatial_shape = filter_shape[:num_spatial_dims]\n pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate\n\n # When full_padding_shape is odd, we pad more at end, following the same\n # convention as conv2d.\n pad_extra_start = pad_extra_shape // 2\n pad_extra_end = pad_extra_shape - pad_extra_start\n base_paddings = array_ops_stack.stack(\n [[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])\n return base_paddings",
"def center_size(boxes):\n wh = boxes[:, 2:] - boxes[:, :2] + 1.0\n if isinstance(boxes, np.ndarray):\n return np.column_stack((boxes[:, :2] + 0.5 * wh, wh))\n return torch.cat((boxes[:, :2] + 0.5 * wh, wh), 1)",
"def output_shape_conv_and_pool_layer(rows: int,\n columns: int,\n kernel: int,\n stride: int = 1,\n padding: int = 0,\n dilatation: float = 1.) -> Tuple[int, int]:\n return (\n int((rows + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n int((columns + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n )",
"def _get_centers(self, lwidth, lheight, batch_size):\n x_left, y_left = tf.meshgrid(tf.range(0, lheight), tf.range(0, lwidth))\n x_y = K.stack([x_left, y_left], axis = -1)\n x_y = tf.cast(x_y, dtype = self.dtype)/tf.cast(lwidth, dtype = self.dtype)\n x_y = tf.repeat(tf.expand_dims(tf.repeat(tf.expand_dims(x_y, axis = -2), self._num, axis = -2), axis = 0), batch_size, axis = 0)\n return x_y"
]
| [
"0.62628543",
"0.6066326",
"0.58352035",
"0.56969947",
"0.5685777",
"0.5637248",
"0.55407983",
"0.553049",
"0.54852927",
"0.54782593",
"0.5473007",
"0.5462575",
"0.5458505",
"0.5457785",
"0.5445776",
"0.5361355",
"0.5351152",
"0.5341841",
"0.53008586",
"0.5300423",
"0.52744365",
"0.524856",
"0.5240066",
"0.52275854",
"0.5223487",
"0.52185875",
"0.52116394",
"0.5181227",
"0.51556295",
"0.5142023"
]
| 0.670488 | 0 |
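A hypothetical end-to-end sketch of `gather_windows` above, assuming TensorFlow 1.x graph mode and that `create_index_matrix` from the earlier record is in scope (the function calls it internally). It pulls the wrapped 3×3 window around a chosen center out of a batch of flattened 4×4 configurations:

```python
# Sketch: gather one wrapped 3x3 window per sample from flattened 4x4 grids.
import numpy as np
import tensorflow as tf

system_shape, window_shape = (4, 4), (3, 3)
x = tf.placeholder(tf.int32, [None, np.prod(system_shape)])
centers = tf.placeholder(tf.int32, [None])
windows = gather_windows(x, centers, system_shape, window_shape)

batch = np.arange(16)[None, :]   # one flattened 4x4 grid: 0..15
with tf.Session() as sess:
    out = sess.run(windows, {x: batch, centers: [5]})  # center at row 1, col 1
print(out.reshape(window_shape))
# [[ 0  1  2]
#  [ 4  5  6]
#  [ 8  9 10]]
```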
Update windows around centers with updates at rows where mask is True. | def update_windows(x, centers, updates, mask, system_shape, window_shape):
window_size = np.prod(window_shape)
batch_size = tf.shape(x)[0]
index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))
window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \
tf.ones(window_size, dtype=tf.int32)[None, :]
indices = tf.stack((window_range, tf.gather(index_matrix, centers)), 2)
return tf.scatter_nd_update(
x, tf.boolean_mask(indices, mask), tf.boolean_mask(updates, mask)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_mask(self, indices):\n\n indices = indices.view(self.batch_size, -1)\n updated_mask = torch.zeros_like(self.mask.squeeze(-1)).scatter_(1, indices, 1)\n\n return updated_mask.unsqueeze(-1)",
"def _modify_updates(self, updates):\n wxf = self.wxf\n wyf = self.wyf\n wxf_updated = updates[wxf]\n wyf_updated = updates[wyf]\n nwxf = (wxf_updated.std(0) + SMALL)[numpy.newaxis, :]\n nwyf = (wyf_updated.std(0) + SMALL)[numpy.newaxis, :]\n meannxf = nwxf.mean()\n meannyf = nwyf.mean()\n # Center filters\n centered_wxf = wxf_updated - wxf_updated.mean(0)\n centered_wyf = wyf_updated - wyf_updated.mean(0)\n # Fix standard deviation\n wxf_updated = centered_wxf * (meannxf / nwxf)\n wyf_updated = centered_wyf * (meannyf / nwyf)\n updates[wxf] = wxf_updated\n updates[wyf] = wyf_updated",
"def update_mask(self, mask):\n\n # Get general mask\n general_mask = self.general_mask\n\n # Complete with the input mask\n new_mask = (general_mask | mask)\n\n # Update attribute\n self.mask = new_mask\n\n # Correct i_bounds if it was not specified\n # self.update_i_bnds()\n\n # Re-compute weights\n self.weights, self.weights_k_idx = self.compute_weights()\n\n return",
"def flag_window(data):\n data.mask[:params.st_bp_window_t, :] = True\n data.mask[-1 * params.st_bp_window_t:, : ] = True\n data.mask[:, :params.st_bp_window_f] = True\n data.mask[:, -1 * params.st_bp_window_f:] = True\n return data.mask",
"def _modify_updates(self, updates):\n\n if self.max_kernel_norm is not None:\n W, = self.transformer.get_params()\n if W in updates:\n updated_W = updates[W]\n row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))\n desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)\n scales = desired_norms / (1e-7 + row_norms)\n updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))",
"def _update(self, mask):\n if self.reporting:\n for pin in self.pins:\n if pin.mode is INPUT:\n pin_nr = pin.pin_number - self.port_number * 8\n pin.value = (mask & (1 << pin_nr)) > 0",
"def update_masks(self, index, weight):\n # determine number of updates without actually updating the count\n if index not in self._index_update_count:\n num_update = self.begin_num_update\n else:\n num_update = self._index_update_count[index]\n num_update += 1\n num_update = max(num_update, self.num_update)\n\n # calculate epoch\n epoch = int((num_update - 1) / self.batches_per_epoch) + 1\n\n # determine if masks need to be updated, and get corresponding parameters\n if index == 0:\n self.masks_updated = True\n if self.epoch != epoch:\n self.epoch = epoch\n if epoch == 1:\n self.masks_updated = False\n if self.weight_sparsity is not None:\n logging.info(log + 'bias-sparsity={}, weight-sparsity={}'.format(self.bias_sparsity[0], self.weight_sparsity[0]))\n else:\n logging.info(log + 'bias-threshold={}, weight-threshold={}'.format(self.bias_threshold[0], self.weight_threshold[0]))\n if self.pruning_switch_epoch[0] + 1 == epoch:\n self.masks_updated = False\n self.pruning_switch_epoch.pop(0)\n if self.weight_sparsity is not None:\n self.weight_sparsity.pop(0)\n self.bias_sparsity.pop(0)\n logging.info(log + 'bias-sparsity={}, weight-sparsity={}'.format(self.bias_sparsity[0], self.weight_sparsity[0]))\n else:\n self.weight_threshold.pop(0)\n self.bias_threshold.pop(0)\n logging.info(log + 'bias-threshold={}, weight-threshold={}'.format(self.bias_threshold[0], self.weight_threshold[0]))\n\n # update masks if needed\n if not self.masks_updated:\n # initialize masks\n if epoch == 1:\n self.masks.append(None)\n # if percentages are given\n if self.weight_sparsity is not None:\n if len(weight.shape) == 1:\n sparsity = self.bias_sparsity[0]\n else:\n sparsity = self.weight_sparsity[0]\n number_unpruned = int((100.0 - sparsity) * weight.size / 100.0)\n self.masks[index] = topk(NDabs(weight), axis=None, ret_typ='mask',\n k=number_unpruned)\n # if thresholds are given\n else:\n if len(weight.shape) == 1:\n threshold = self.bias_threshold[0]\n else:\n threshold = self.weight_threshold[0]\n self.masks[index] = NDabs(weight) >= threshold\n\n return not self.masks_updated",
"def update_continuum_mask(self, refresh=False):\n\n ymin, ymax = (-1e8, 1e8)\n kwds = {\n \"xmin\": np.nan,\n \"xmax\": np.nan,\n \"ymin\": ymin,\n \"ymax\": ymax,\n \"facecolor\": \"r\",\n \"edgecolor\": \"none\",\n \"alpha\": 0.25,\n \"zorder\": -1\n }\n\n transform = lambda start, end, v=0: np.array([\n [start * (1 - v/c), ymin],\n [start * (1 - v/c), ymax],\n [end * (1 - v/c), ymax],\n [end * (1 - v/c), ymin],\n [start * (1 - v/c), ymin]\n ])\n\n mask = self._cache[\"masks\"][self.continuum_mask.currentText()]\n\n # Any added regions to mask out? v-stack these\n try:\n self._masked_wavelengths\n except AttributeError:\n self._masked_wavelengths = []\n self._masked_wavelengths_norm = []\n\n # Different kind of masks: rest_wavelength, obs_wavelength, pixels\n # rest_wavelength\n # The obsered spectrum is shifted to be at rest, so the continuum masks\n # will also be in the rest frame. So we don't need to shift the\n # 'rest_wavelength' mask, but we do need to shift the 'obs_wavelength'\n # mask\n\n # Get the applied velocity to shift some masks.\n try:\n rv_applied = self.parent.session.metadata[\"rv\"][\"rv_applied\"]\n except (AttributeError, KeyError):\n rv_applied = 0\n\n _ =self.parent.session.metadata[\"normalization\"][\"normalization_kwargs\"]\n \n masked_regions = [\n np.array(mask.get(\"rest_wavelength\", [])),\n np.array(mask.get(\"obs_wavelength\", [])) * (1 - rv_applied/c),\n np.array(_[self.current_order_index].get(\"exclude\", []))\n ]\n if \"pixel\" in mask:\n masked_regions.append(\n # MAGIC HACK\n self.current_order.dispersion[np.array(mask[\"pixel\"])] + 1e-3\n )\n\n for each in masked_regions:\n each.shape = (-1, 2)\n\n masked_regions = np.vstack(masked_regions)\n\n # Remove duplicate masked regions.\n _ = np.ascontiguousarray(masked_regions).view(\n np.dtype((\n np.void, \n masked_regions.dtype.itemsize * masked_regions.shape[1])))\n __, idx = np.unique(_, return_index=True)\n masked_regions = masked_regions[idx]\n\n i = 0\n for start, end in masked_regions:\n if i >= len(self._masked_wavelengths):\n # Create a polygon in the main axis.\n self._masked_wavelengths.append(\n self.ax_order.axvspan(**kwds))\n\n # And for the normalization axis.\n self._masked_wavelengths_norm.append(\n self.ax_order_norm.axvspan(**kwds))\n\n polygons = (\n self._masked_wavelengths[i],\n self._masked_wavelengths_norm[i]\n )\n for polygon in polygons:\n polygon.set_xy(transform(start, end))\n\n i += 1\n\n # Any leftover polygons?\n for polygon in self._masked_wavelengths[i:]:\n polygon.set_xy(transform(np.nan, np.nan))\n\n for polygon in self._masked_wavelengths_norm[i:]:\n polygon.set_xy(transform(np.nan, np.nan))\n\n\n if refresh:\n self.norm_plot.draw()\n return True",
"def __update(self):\n for b in self.__borders:\n b.redraw()\n\n for w in self.__allWins:\n w.refresh()",
"def setMask(self, mask):\n try:\n self.mask = mask\n self.inds = na.nonzero(self.mask.flat)[0]\n #print \"length of self.inds\",len(self.inds)\n #print self.inds\n self.dim = self.mask.shape[::-1]\n #print self.mask.shape\n return True\n except Exception as error:\n print(\"failed in setMask\", error)",
"def applymask(self,mask):\n self.spec[mask==0]=np.nan",
"def update_mask(self):\r\n \r\n # Binary mask from ML detection\r\n if len(self.selected_ML_Index) > 0:\r\n # Delete items in dictionary that are not roi items\r\n roi_dict = self.selected_cells_infor_dict.copy()\r\n del_key_list=[]\r\n for key in roi_dict:\r\n print(key)\r\n if 'ROIitem' not in key:\r\n del_key_list.append(key)\r\n for key in del_key_list:\r\n del roi_dict[key]\r\n \r\n self.MLmask = ProcessImage.ROIitem2Mask(roi_dict, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n # Binary mask of added rois\r\n self.addedROIitemMask = ProcessImage.ROIitem2Mask(self.roi_list_freehandl_added, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n \r\n self.intergrate_into_final_mask()",
"def __update_visible(self) -> None:\n for i in range(0, 8):\n visible_row = self.__row_position + Labyrinth.ALL_ROW_MOVE[i]\n visible_col = self.__col_position + Labyrinth.ALL_COL_MOVE[i]\n if 0 <= visible_row < self.__labyrinth.labyrinth_height and \\\n 0 <= visible_col < self.__labyrinth.labyrinth_width:\n self.__labyrinth.visible_cells[visible_row][visible_col] = 1",
"def _update_bbox_mask(self, bbox_coords, mask_coords):\n # Set target/mask regions to image bounding box values\n self.bbox_mask[0, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = bbox_coords[0]\n self.bbox_mask[1, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = bbox_coords[1]\n self.bbox_mask[2, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = bbox_coords[2]\n self.bbox_mask[3, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = bbox_coords[3]\n # Encode bounding box mask\n self.bbox_mask[0, :] = (self.index_matrix_x - self.bbox_mask[0, :])\n self.bbox_mask[1, :] = (self.index_matrix_y - self.bbox_mask[1, :])\n self.bbox_mask[2, :] = (self.bbox_mask[2, :] - self.index_matrix_x)\n self.bbox_mask[3, :] = (self.bbox_mask[3, :] - self.index_matrix_y)",
"def renewMasking(self, indices, colours_dict):\n for idx in self.abundance_df.index:\n if idx in indices:\n self.abundance_df.loc[idx, 'masked'] = False\n else:\n self.abundance_df.loc[idx, 'masked'] = True\n if idx in colours_dict:\n self.abundance_df.loc[idx, 'colour'] = colours_dict[idx]\n else: \n self.abundance_df.loc[idx, 'colour'] = 'undefined'",
"def testWarpMask(self):\n for kernelName, maskKernelName in (\n (\"bilinear\", \"bilinear\"),\n (\"lanczos3\", \"lanczos3\"),\n (\"lanczos3\", \"bilinear\"),\n (\"lanczos4\", \"lanczos3\"),\n ):\n for growFullMask in (0, 1, 3, 0xFFFF):\n self.verifyMaskWarp(\n kernelName=kernelName,\n maskKernelName=maskKernelName,\n growFullMask=growFullMask,\n )",
"def update_mean(img: np.ndarray, clustermask: np.ndarray):\n\n for k in range(numclusters):\n current_cluster_centers[k, 0, :] = np.mean(img[clustermask==k], axis=0)",
"def update_poi (POIn, POInm1, new, current_cell_mask):\n row, col = cuda.grid(2)\n\n if row < POIn.shape[0] and col < POIn.shape[1]:\n POIn[row,col] = 0 \n if current_cell_mask[row,col] == True:\n POIn[row,col] = POInm1[row,col] + new[row,col]",
"def _update_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n if self.to_be_updated[row_number][col_number]:\n self.cells[row_number][col_number].update()",
"def apply_mask(dataframe, mask_files):\n condition = '%f < x < %f & %f < y < %f & not masked'\n for file_ in mask_files:\n # Get mask polygon coordinates\n x_mask, y_mask = np.loadtxt(file_, unpack=True)\n\n # Subset grid to candidates\n df_candidates = dataframe.query(condition % (x_mask.min(), x_mask.max(),\n y_mask.min(), y_mask.max()))\n\n # Find points inside mask polygon\n inside = ps.in_polygon(df_candidates.x.values,\n df_candidates.y.values,\n x_mask,\n y_mask)\n target_index = df_candidates.loc[inside].index.values\n dataframe.at[target_index, 'masked'] = True\n\n # Convert to list for output\n return dataframe.query('not masked').reset_index()",
"def get_patch_centers(self):\n rows, cols = np.where(self.inv_overlap_mask)\n patch_centers = tuple(zip(rows, cols))\n # diff = np.diff(patch_centers)\n # ind_stop_cont = np.where(np.abs(np.diff(np.reshape(diff, diff.shape[0]))) > 1)[0][0]\n self.patch_centers = patch_centers[:: self.sampling_int // 2]\n print(\"# of samples: {}\".format(len(self.patch_centers)))",
"def apply_mask(self):\n for mask, param in self.masked_parameters:\n param.mul_(mask)",
"def apply_new_mask(ifgs, mask_old, mask_new):\n\n \n for ifg_n, ifg in enumerate(ifgs): # Loop through each source\n ifg_r2 = col_to_ma(ifg, mask_old) # turn it from a row vector into a rank 2 masked array \n ifg_r2_new_mask = ma.array(ifg_r2, mask = mask_new) # apply the new mask \n ifg_r1_new_mask = ma.compressed(ifg_r2_new_mask) # convert to row vector \n if ifg_n == 0: # if it's the first ifg.. \n n_pixs_new = ifg_r1_new_mask.shape[0] # get the new number of pixels \n ifgs_new_mask = np.zeros((ifgs.shape[0], n_pixs_new)) # initiate an array of the correct size\n ifgs_new_mask[ifg_n, :] = ifg_r1_new_mask # put the row vector into the array\n return ifgs_new_mask",
"def _change_coordinate_frame(self, boxes, window):\n with tf.name_scope('change_coordinate_frame'):\n\n ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)\n ymin -= window[0]\n xmin -= window[1]\n ymax -= window[0]\n xmax -= window[1]\n\n win_height = window[2] - window[0]\n win_width = window[3] - window[1]\n boxes = tf.stack([\n ymin/win_height, xmin/win_width,\n ymax/win_height, xmax/win_width\n ], axis=1)\n boxes = tf.cond(tf.greater(tf.shape(boxes)[0], 0),\n lambda: tf.clip_by_value(boxes, clip_value_min=0.0, clip_value_max=1.0),\n lambda: boxes\n )\n # boxes = tf.clip_by_value(boxes, clip_value_min=0.0, clip_value_max=1.0) - work_element_count > 0 (0 vs. 0)\n return boxes",
"def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()",
"def extend_mask_nonlocal(mask,kernel=np.ones((3,3))):\n\tassert (mask.dtype is np.dtype(np.bool)), \"input mask must be of bool type\"\n\n\text_mask = mask.copy()\n\tinp_ind = masked_indices(mask)\n\tim_h, im_w = mask.shape\n\n\tker_y, ker_x = kernel.shape\n\tassert(ker_x%2>0), \"kernel must have odd dimensions\"\n\tassert(ker_y%2>0), \"kernel must have odd dimensions\"\n\n\t# indices of the nonzero kernel elements\n\tker_ind_y, ker_ind_x = np.nonzero(kernel)\n\tker_ind_x -= ker_x//2\n\tker_ind_y -= ker_y//2\n\n\tfor ind_x, ind_y in zip(inp_ind%im_w, inp_ind//im_w):\n\t\tfor i, j in zip(ker_ind_x, ker_ind_y):\n\t\t\text_mask[min(max(0,ind_y+j),im_h-1),min(max(0,ind_x+i),im_w-1)] = True\n\n\treturn ext_mask",
"def apply_mask(face: np.array, mask: np.array) -> np.array:\n mask_h, mask_w, _ = mask.shape\n face_h, face_w, _ = face.shape\n\n # Resize the mask to fit on face\n factor = min(face_h / mask_h, face_w / mask_w)\n new_mask_w = int(factor * mask_w)\n new_mask_h = int(factor * mask_h)\n new_mask_shape = (new_mask_w, new_mask_h)\n resized_mask = cv2.resize(mask, new_mask_shape)\n\n # Add mask to face - ensure mask is centered\n face_with_mask = face.copy()\n non_white_pixels = (resized_mask < 250).all(axis=2)\n off_h = int((face_h - new_mask_h) / 2)\n off_w = int((face_w - new_mask_w) / 2)\n face_with_mask[off_h: off_h+new_mask_h, off_w: off_w+new_mask_w][non_white_pixels] = \\\n resized_mask[non_white_pixels]\n\n return face_with_mask",
"def apply_mask(query_imgs, masks, method):\n resulting_imgs = []\n for img, mask in zip(query_imgs, masks):\n positions = np.where(mask == 255)\n if method == CBHS: # Special treatment for cell-based bg segmentation to mantain \n x_min, x_max, y_min, y_max = positions[0][0], positions[0][-1], positions[1][0], positions[1][-1]\n img = img[x_min:x_max, y_min:y_max]\n else:\n mask = mask == 255\n img = img[mask].reshape(-1, 3)\n\n resulting_imgs.append(img)\n \n if isDebug():\n addDebugImage(img)\n if isDebug():\n showDebugImage()\n print(\"Finished to apply masks\")\n \n return resulting_imgs",
"def apply_mask(self, mask, parameters=None):\n if parameters is None:\n self.dates = self.dates[mask]\n for key in self.data.keys():\n self.data[key] = self.data[key][mask]\n\n self.manufacturer = self.manufacturer[mask]\n self.data_file = self.data_file[mask]\n self.serial_number = self.serial_number[mask]\n else:\n for parameter in parameters:\n self.data[parameter][~mask] = np.nan",
"def update_(self, k):\n for z in range(self.sweeps_per_update):\n new_u_grid = self.u_grid.copy()\n new_v_grid = self.v_grid.copy()\n for i in range(self.N):\n for j in range(self.N):\n\n deltaU = (self.D1*self.dt) * (self.laplacian_(self.u_grid, i, j))\\\n - self.dt * self.u_grid[i][j]*self.v_grid[i][j]**2 \\\n + self.dt * self.F*(1-self.u_grid[i][j])\n new_u_grid[i][j] += deltaU\n deltaV = (self.D2*self.dt) * (self.laplacian_(self.v_grid, i, j))\\\n + self.dt*self.u_grid[i][j]*self.v_grid[i][j]**2 \\\n - self.dt*(self.F+self.k)*self.v_grid[i][j]\n new_v_grid += deltaV\n self.u_grid = new_u_grid.copy()\n self.v_grid = new_v_grid.copy()\n if self.animation:\n self.fig.clear()\n plt.imshow(self.u_grid, interpolation='nearest',\n cmap='coolwarm', origin='lower')\n plt.colorbar()"
]
| [
"0.65512353",
"0.63234437",
"0.62542087",
"0.6015804",
"0.5854032",
"0.5760298",
"0.5750986",
"0.57493496",
"0.56922036",
"0.56810457",
"0.5634808",
"0.5616217",
"0.5529044",
"0.5510181",
"0.5471018",
"0.5466577",
"0.5422766",
"0.54003185",
"0.53856635",
"0.5350144",
"0.53002197",
"0.5295084",
"0.5280806",
"0.5272187",
"0.52540296",
"0.52477163",
"0.52294075",
"0.5224662",
"0.5224073",
"0.5220343"
]
| 0.7305941 | 0 |
Gather all windows of tensor. | def all_windows(x, system_shape, window_shape):
index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))
return tf.transpose(
tf.gather_nd(tf.transpose(x),
tf.expand_dims(index_matrix, 2)),
[2, 0, 1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gather_windows(x, centers, system_shape, window_shape):\n window_size = np.prod(window_shape)\n batch_size = tf.shape(x)[0]\n index_matrix = tf.constant(create_index_matrix(system_shape, window_shape))\n window_range = tf.range(batch_size, dtype=tf.int32)[:, None] * \\\n tf.ones(window_size, dtype=tf.int32)[None, :]\n indices = tf.stack((window_range, tf.gather(index_matrix, centers)), 2)\n return tf.gather_nd(x, indices)",
"def gather_from_all(tensor: torch.Tensor) -> torch.Tensor:\n if tensor.ndim == 0:\n # 0 dim tensors cannot be gathered. so unsqueeze\n tensor = tensor.unsqueeze(0)\n\n if is_distributed_training_run():\n tensor, orig_device = convert_to_distributed_tensor(tensor)\n gathered_tensors = GatherLayer.apply(tensor)\n gathered_tensors = [\n convert_to_normal_tensor(_tensor, orig_device)\n for _tensor in gathered_tensors\n ]\n else:\n gathered_tensors = [tensor]\n gathered_tensor = torch.cat(gathered_tensors, 0)\n return gathered_tensor",
"def windows(X, width, skip_last):\n ret = []\n n = X.shape[0]\n for i in range(n - width + 1 - skip_last):\n window = X[i:i + width, :]\n ret.append([tuple(x) for x in window[:]])\n return np.array(ret)",
"def get_train_windows(self, scene: Scene) -> List[Box]:\n raise NotImplementedError()",
"def get_train_windows(self, scene):\n pass",
"def dataset_to_windows(dataset, windowsize):\n windows = []\n row, col = dataset.shape\n for i in range(col):\n if i > 0:\n windows.append(lag(np.array(dataset)[:,i], windowsize))\n return np.array(windows)",
"def get_windows(self, x_train, y_train):\n\n def roundMultiple(x, base=4):\n \"\"\"Round n up to nearest multiple of base.\"\"\"\n return int(base * round(float(x)/base))\n\n def auto_set_stride():\n self.stride = roundMultiple(\n int(self.window_size / 10), base=2)\n debug(\"Stride auto set to \", self.stride)\n\n def auto_set_window_size(sequence):\n threshold = (self.left_epsilon + self.right_epsilon) * 2\n time_arr = sequence[:, self.X_TIME_COLUMN]\n self.window_size = roundMultiple(\n np.argmax(time_arr > threshold), base=4)\n debug(\"Window size auto set to \", self.window_size)\n\n windows_x = []\n windows_y = []\n debug(\"Making windows...\")\n if self.window_size is None:\n auto_set_window_size(x_train[0])\n if self.stride is None:\n auto_set_stride()\n\n for index in tqdm(range(len(x_train))):\n sequence_extractions, sequence_extraction_labels = \\\n self.get_windows_for_sequence(\n x_train[index], y_train[index])\n windows_x.append(sequence_extractions)\n windows_y.append(sequence_extraction_labels)\n return np.array(windows_x), np.array(windows_y)",
"def _complete_windows(it: Iterator[_T], window_size: int) -> Iterator[Tuple[_T, ...]]:\n win = deque(islice(it, window_size), window_size)\n if len(win) < window_size:\n return\n # cache method access for slight speed boost\n append = win.append\n yield tuple(win)\n for e in it:\n append(e)\n yield tuple(win)",
"def get_train_windows(self, scene: Scene) -> List[Box]:\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))",
"def featurize_windows(data, start, end, window_size = 1):\n ret = []\n for sentence, labels in data:\n from util import window_iterator\n sentence_ = []\n for window in window_iterator(sentence, window_size, beg=start, end=end):\n if Config.cnn:\n sentence_.append(sum([window], []))\n else:\n sentence_.append(sum(window, []))\n ret.append((sentence_, labels))\n return ret",
"def _flat_map_window(self, window_elements_dict):\n result = {}\n for key in window_elements_dict:\n # See https://github.com/tensorflow/tensorflow/issues/23581#issuecomment-529702702\n result[key] = tf.data.experimental.get_single_element( window_elements_dict[key].batch(self.sequence_length) )\n #return result\n return tf.data.Dataset.from_tensors(result)",
"def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks",
"def windows_to_inputs(X_windows, num_concepts, skip_last):\n n = X_windows.shape[0]\n return {f\"concept_{i}\": X_windows[:(-1 if skip_last else n), :, i] for i in range(num_concepts)}",
"def windows(self,windowSize):\n for i in range(0,len(self)-windowSize):\n yield (i,i+windowSize)",
"def extract_statistics_from_windows(data: Data_dict_type) -> Data_dict_type:\n for key, item in data.items():\n values, sample_rate = item\n functionals = []\n for window_idx in range(values.shape[0]):\n window_functionals = extract_statistics_from_2d_window(values[window_idx])\n functionals.append(window_functionals[np.newaxis, ...])\n functionals = np.concatenate(functionals, axis=0)\n # squeeze last dimension\n if functionals.shape[-1] == 1:\n functionals = functionals.reshape(functionals.shape[:-1])\n data[key] = (functionals, sample_rate)\n return data",
"def gather_all(self):\n size = tf.reduce_min(self._current_size)\n max_size = tf.reduce_max(self._current_size)\n tf.Assert(size == max_size, [\n \"Not all environment have the same size. min_size:\", size,\n \"max_size:\", max_size\n ])\n\n if size == self._max_length:\n return tf.nest.map_structure(lambda buf: buf.value(), self._buffer)\n else:\n return tf.nest.map_structure(lambda buf: buf[:, :size, ...],\n self._buffer)",
"def flatten_spectrograms(\n windows: numba.typed.List[np.ndarray],\n) -> numba.typed.List[np.ndarray]:\n\n return numba.typed.List([w.flatten() for w in windows])",
"def input_slice(self, inputs):\n result = []\n for i in range(int(len(inputs) / self.window_size)):\n result.append(inputs[i * self.window_size:(i + 1) * self.window_size])\n return result",
"def concat_all_gather(tensor):\n tensors_gather = [\n torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())\n ]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output",
"def time_conv_reshape(arr,window,stride):\n \n bat, steps, feat = arr.get_shape().as_list()\n r = tf.floormod((steps - window), stride)\n n = math.ceil((steps - window)/stride)\n \n def padder(n=n,r=r,feat=feat,steps=steps,bat=bat,arr=arr):\n \"\"\"Pad function.\"\"\"\n pad = tf.zeros([bat, stride - r, feat],tf.float32)\n return tf.concat([arr, pad], 1) \n \n arr = tf.cond(tf.equal(r,0), lambda: arr, padder)\n steps = tf.cond(tf.equal(r,0), lambda: steps, lambda: steps + stride -r)\n last_step = steps - window + 1 \n \n def c(i,a,b):\n \"\"\"Condition tf.while_loop\"\"\"\n return tf.less(i,window)\n \n def b(i,new_arr,arr):\n \"\"\"Body tf.while_loop. Appends ith value of windows to new_arr.\"\"\"\n new_arr = tf.concat([new_arr,arr[:, i:last_step + i:stride, :]], axis=2)\n return i+1,new_arr,arr\n \n i = tf.constant(1)\n new_arr = arr[:, 0: last_step: stride, :]\n new_arr.set_shape([bat,n+1,None])\n _,new_arr,_=tf.while_loop(c,\n b,\n loop_vars=[i,new_arr,arr],\n shape_invariants=[i.get_shape(),\n tf.TensorShape([bat,n+1,None]),\n arr.get_shape(),\n ],\n )\n new_arr.set_shape([bat,n+1,feat*window])\n return new_arr",
"def concat_all_gather(tensor):\n tensors_gather = [torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output",
"def concat_all_gather(tensor):\n tensors_gather = [torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output",
"def gather_all_tensors(result: Tensor, group: Optional[Any]=None) ->List[Tensor]:\n if group is None:\n group = torch.distributed.group.WORLD\n result = result.contiguous()\n world_size = torch.distributed.get_world_size(group)\n torch.distributed.barrier(group=group)\n if result.ndim == 0:\n return _simple_gather_all_tensors(result, group, world_size)\n local_size = torch.tensor(result.shape, device=result.device)\n local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)]\n torch.distributed.all_gather(local_sizes, local_size, group=group)\n max_size = torch.stack(local_sizes).max(dim=0).values\n all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)\n if all_sizes_equal:\n return _simple_gather_all_tensors(result, group, world_size)\n pad_dims = []\n pad_by = (max_size - local_size).detach().cpu()\n for val in reversed(pad_by):\n pad_dims.append(0)\n pad_dims.append(val.item())\n result_padded = F.pad(result, pad_dims)\n gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)]\n torch.distributed.all_gather(gathered_result, result_padded, group)\n for idx, item_size in enumerate(local_sizes):\n slice_param = [slice(dim_size) for dim_size in item_size]\n gathered_result[idx] = gathered_result[idx][slice_param]\n return gathered_result",
"def load_windows(articles, window_size, features=None, every_nth_window=1,\n only_labeled_windows=False):\n processed_windows = 0\n for article in articles:\n # count how many labels there are in the article\n count = article.count_labels()\n\n if only_labeled_windows and count == 0:\n # ignore articles completely that have no labels at all, if that was requested via\n # the parameters\n pass\n else:\n # split the tokens in the article to windows\n token_windows = split_to_chunks(article.tokens, window_size)\n token_windows = list(token_windows)\n for token_window in token_windows:\n window = Window([token for token in token_window])\n # ignore the window if it contains no labels and that was requested via parameters\n if not only_labeled_windows or window.count_labels() > 0:\n if processed_windows % every_nth_window == 0:\n # generate features for all tokens in the window\n if features is not None:\n window.apply_features(features)\n yield window\n processed_windows += 1",
"def window_partition(x, window_size):\n B, D, H, W, C = x.shape\n x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C)\n windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)\n return windows",
"def vec_to_windows(x, wlen):\n n = len(x)\n # number of windows\n m = n // wlen\n # total samples to be kept\n s = m * wlen\n return jnp.reshape(x[:s], (m, wlen)).T",
"def sliding_window(top, step=10, window_size=(20,20)):\n\tfor x in range(0, top.shape[0], step):\n\t\tif x + window_size[0] > top.shape[0]:\n\t\t\tx = top.shape[0] - window_size[0]\n\t\tfor y in range(0, top.shape[1], step):\n\t\t\tif y + window_size[1] > top.shape[1]:\n\t\t\t\ty = top.shape[1] - window_size[1]\n\t\t\tyield x, y, window_size[0], window_size[1]",
"def sliding_hog_windows(self, image):\n # initialization\n image_height, image_width = 48, 48\n window_size = 24\n window_step = 6\n hog_windows = []\n for y in range(0, image_height, window_step):\n for x in range(0, image_width, window_step):\n window = image[y:y+window_size, x:x+window_size]\n hog_windows.extend(hog(window, orientations=8, pixels_per_cell=(8, 8),\n cells_per_block=(1, 1)))\n return hog_windows",
"def get_all_windows(self):\n success, result = self.manager.c.eval(\n textwrap.dedent(\n \"\"\"\n [win.wid for win in self.core.mapped_windows]\n \"\"\"\n )\n )\n assert success\n return eval(result)",
"def get_windows(img):\n w_far = slide_window(img, x_start_stop=[0, 1280], y_start_stop=[350, 550],\n xy_window=(96,64), xy_overlap=(0.5, 0.5))\n w_medium_s = slide_window(img, x_start_stop=[0, 1280], y_start_stop=[400, 650],\n xy_window=(128,96), xy_overlap=(0.7, 0.7))\n w_medium_l = slide_window(img, x_start_stop=[0, 1280], y_start_stop=[400, 660],\n xy_window=(128,128), xy_overlap=(0.5, 0.5))\n far = slide_window(img, x_start_stop=[600, 1100], y_start_stop=[390, 530],\n xy_window=(48,48), xy_overlap=(0.8, 0.5))\n\n windows = w_far + w_medium_s + w_medium_l + far\n return windows"
]
| [
"0.67346734",
"0.61865276",
"0.6179608",
"0.60237753",
"0.59480226",
"0.5931195",
"0.5915982",
"0.58032703",
"0.5747572",
"0.5730282",
"0.57175064",
"0.5670163",
"0.5625128",
"0.5528307",
"0.5527471",
"0.5497974",
"0.54943347",
"0.5490933",
"0.54879826",
"0.5485261",
"0.54693425",
"0.54687357",
"0.54597485",
"0.54382706",
"0.53997564",
"0.53797626",
"0.53775775",
"0.53467995",
"0.53410804",
"0.53280437"
]
| 0.7017742 | 0 |
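
The all_windows document above relies on a create_index_matrix helper that is not part of this record, so the following NumPy sketch only illustrates the indexing idea it is assumed to implement: build a (num_windows, window_size) matrix of flat site indices (with periodic wrap-around) and fancy-index every window out of a batched state at once. The helper name and 1-D layout here are assumptions, not the original TensorFlow code.

import numpy as np

def index_matrix_1d(system_len, window_len):
    # hypothetical stand-in for create_index_matrix on a 1-D periodic system
    offsets = np.arange(window_len) - window_len // 2
    centers = np.arange(system_len)[:, None]
    return (centers + offsets) % system_len      # shape (system_len, window_len)

x = np.arange(12).reshape(2, 6)                  # batch of 2 states over 6 sites
idx = index_matrix_1d(6, 3)                      # one window per site
windows = x[:, idx]                              # shape (batch, num_windows, window_len)
print(windows.shape)                             # (2, 6, 3)
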
Initialize public and private key variables. | def __init__(self):
self.public_key = None
self._private_key = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self._keypair = RSA.generate(2048)\n self.public_key = self._keypair.publickey().exportKey()",
"def __init__(self):\n publicKeyFileName = \"serverPublicKey\"\n privateKeyFileName = \"serverPrivateKey.pem\"\n try:\n f = open(privateKeyFileName, 'rb')\n self.keys = RSA.importKey(f.read())\n except:\n self.keys = RSA.generate(1024)\n self.publickey = self.keys.publickey()\n # export public and private keys\n privHandle = open(privateKeyFileName, 'wb')\n privHandle.write(self.keys.exportKey('PEM'))\n privHandle.close()\n \n pubHandle = open(publicKeyFileName, 'wb')\n pubHandle.write(self.keys.publickey().exportKey())\n pubHandle.close()\n self.publickey = self.keys.publickey()",
"def setUp(self):\n\n self.private_key = self.get_new_key()\n self.public_key = self.private_key.public_key()\n\n self.pem_private_key = self.private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n )\n self.encrypted_pem_private_key = self.private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.BestAvailableEncryption(self.private_key_password),\n )\n\n self.pem_public_key = self.public_key.public_bytes(\n serialization.Encoding.PEM, serialization.PublicFormat.PKCS1\n )",
"def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")",
"def __init__(self, curve=None, private_key=None, public_key=None):\n self.curve = curve\n self.private_key = None\n self.public_key = None\n if private_key:\n self.load_private_key(private_key)\n if public_key:\n self.load_received_public_key(public_key)",
"def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)",
"def setup_keys(self, dh_object, public_key, private_key):\n public_numbers = DHPublicNumbers(public_key, dh_object.parameter_numbers)\n private_numbers = DHPrivateNumbers(private_key, public_numbers)\n dh_object.private_key = private_numbers.private_key(default_backend())",
"def _init_key_settings(self):\n self.minKeySize = 1023\n self.maxKeySize = 8193\n self.rsaSigHashes = list(RSA_SIGNATURE_HASHES)\n self.rsaSchemes = list(RSA_SCHEMES)\n self.dsaSigHashes = list(DSA_SIGNATURE_HASHES)\n self.virtual_hosts = []\n # DH key settings\n self.eccCurves = list(CURVE_NAMES)\n self.dhParams = None\n self.dhGroups = list(ALL_DH_GROUP_NAMES)\n self.defaultCurve = \"secp256r1\"\n self.keyShares = [\"secp256r1\", \"x25519\"]\n self.padding_cb = None\n self.use_heartbeat_extension = True\n self.heartbeat_response_callback = None",
"def init(x_in):\n global public_keys, secret_keys, x\n x = func.get_bits(x_in)\n\n public_keys, secret_keys = [], []\n\n elgamal.init_g_p_q()\n for i in range(3):\n create_keys(i)",
"def __init__(self, type_encryption, directory_key_private, directory_key_public):\n\t\t# class variables\n\t\tself.type_encryption = type_encryption\n\t\tself.directory_key_private = directory_key_private\n\t\tself.directory_key_public = directory_key_public\n\n\t\t# check keys\n\t\tself._publicKey = ''\n\t\tself._privateKey = ''\n\t\tself.verify_path() # verify the are not corrupted",
"def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)",
"def __init__(self, rsa_key):\r\n if isinstance(rsa_key, tuple):\r\n self.keypair = Crypto.PublicKey.RSA.construct(rsa_key)\r\n else:\r\n self._InitFromString(rsa_key)",
"def __init__(self, sk=None, n=None, h=None):\r\n if sk:\r\n self.n = sk.n\r\n self.h = sk.h\r\n elif n and h:\r\n self.n = n\r\n self.h = h\r\n else:\r\n raise Exception(\"Public Key construction failed: insufficient/wrong arguments\")\r\n\r\n self.signature_bound = Params[self.n][\"sig_bound\"]\r\n self.sig_bytelen = Params[self.n][\"sig_bytelen\"]",
"def __init__(self, **options):\n\n super().__init__(**options)\n\n self._private_key = None\n self._public_key = None\n\n self._load_keys(**options)",
"def __init__(self, private_key):\n if private_key:\n if isinstance(private_key, str): # base58 encoded string\n self.private_key = PrivateKey.from_b58check(private_key)\n else:\n self.private_key = private_key\n self.public_key = self.private_key.public_key\n else:\n self.private_key = None\n self.public_key = None",
"def _InitFromString(self, text):\r\n # First, remove all whitespace:\r\n text = re.sub(_WHITESPACE_RE, '', text)\r\n\r\n # Parse out the period-separated components\r\n match = _KEY_RE.match(text)\r\n if not match:\r\n raise ValueError('Badly formatted key string: \"%s\"', text)\r\n\r\n private_exp = match.group('private_exp')\r\n if private_exp:\r\n private_exp = _B64ToNum(private_exp)\r\n else:\r\n private_exp = None\r\n self.keypair = Crypto.PublicKey.RSA.construct(\r\n (_B64ToNum(match.group('mod')),\r\n _B64ToNum(match.group('exp')),\r\n private_exp))",
"def staticstatic(self, private_key, public_key):",
"def __init__(self, username, password):\n self.username = username\n self.password = password\n self.privkey = None\n\n # sets self.privkey\n self.__set_or_create_key_if_not_exist()",
"def __init__(self, key, initial_prng):\n self.cipher = key\n self.prng = initial_prng\n self.nonce = None",
"def __init__(self):\n self._init_key_settings()\n self._init_misc_extensions()\n self.minVersion = (3, 1)\n self.maxVersion = (3, 4)\n self.versions = [(3, 4), (3, 3), (3, 2), (3, 1)]\n self.cipherNames = list(CIPHER_NAMES)\n self.macNames = list(MAC_NAMES)\n self.keyExchangeNames = list(KEY_EXCHANGE_NAMES)\n self.cipherImplementations = list(CIPHER_IMPLEMENTATIONS)",
"def __init__(self, public_key=None):\n self.public_key = self.convert_public_key_to_ecdsa(public_key) if public_key else public_key",
"def __init__(self, gen_priv_key: bool = False, priv_key_path: str = None):\n self.priv_key = None\n self.pub_key = None\n\n # max size = (bytes(rsa) - 2 * bytes(hash) - 2),\n # currently hard-coded to 190 = 256 - 2 * 32 - 2\n self.max_encrypt_size = 190\n\n if gen_priv_key:\n self.priv_key = RSA.generate(2048)\n if priv_key_path is not None:\n path = pathlib.Path(priv_key_path)\n with open(path.as_posix(), 'w') as f:\n f.write(self.priv_key.export_key().decode('utf-8'))\n elif priv_key_path is not None:\n path = pathlib.Path(priv_key_path)\n if path.is_file():\n self.priv_key = RSA.importKey(open(path.as_posix()).read())\n else:\n raise Exception(\"Failed to open file {}\".format(path.as_posix))\n\n if self.priv_key is not None:\n self.pub_key = self.priv_key.publickey()\n\n # delegate encrypt/decrypt function\n self.cipher = PKCS1_OAEP.new(self.priv_key, hashAlgo=SHA256)\n self.decrypt = self.cipher.decrypt",
"def __init__(self, key_bytes, public=True):\n self.G = _globalECG\n if public:\n self.sec = None\n self.pub = EcPt.from_binary(key_bytes, self.G)\n self.optim = None\n else:\n self.sec = Bn.from_binary(sha256(key_bytes).digest())\n self.pub = self.sec * self.G.generator()\n self.optim = do_ecdsa_setup(self.G, self.sec)",
"def __init__(self, private_key):\n self._sk = ed25519.Ed25519PrivateKey.from_private_bytes(private_key.bytes)",
"def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)",
"def __init__(self,\r\n ephemeral_public_key=None,\r\n public_key_hash=None,\r\n transaction_id=None):\r\n\r\n # Initialize members of the class\r\n self.public_key_hash = public_key_hash\r\n self.ephemeral_public_key = ephemeral_public_key\r\n self.transaction_id = transaction_id",
"def __init__(self, globalKey, publicKey, resourceName, **rest):\n super(SshKey, self).__init__({\n \"globalKey\": globalKey,\n \"publicKey\": publicKey,\n \"resourceName\": resourceName,\n }, **rest)",
"def __init__(self, public_key):\n self._pk = ed25519.Ed25519PublicKey.from_public_bytes(public_key.bytes)",
"def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key",
"def __init__(self, public_key=None, signature=None, key_id=None, key_type=None):\n self.swagger_types = {\n 'public_key': 'str',\n 'signature': 'str',\n 'key_id': 'str',\n 'key_type': 'str'\n }\n\n self.attribute_map = {\n 'public_key': 'publicKey',\n 'signature': 'signature',\n 'key_id': 'keyId',\n 'key_type': 'keyType'\n }\n\n self._public_key = public_key\n self._signature = signature\n self._key_id = key_id\n self._key_type = key_type"
]
| [
"0.78923047",
"0.7381298",
"0.736876",
"0.7335485",
"0.7318558",
"0.7158979",
"0.7150423",
"0.6961174",
"0.68982685",
"0.68939155",
"0.6887066",
"0.68617594",
"0.6817887",
"0.68071634",
"0.6774524",
"0.6687083",
"0.6653473",
"0.66499335",
"0.6604744",
"0.65936434",
"0.6576135",
"0.6576049",
"0.6551072",
"0.6496512",
"0.64914775",
"0.6483124",
"0.64660656",
"0.6451279",
"0.643586",
"0.63901067"
]
| 0.79213834 | 0 |
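
The __init__ document above only declares the two key slots; how they get populated is not shown in the record. As a hedged illustration, a key pair in the (e, n) / (d, n) tuple format that the encrypt and decrypt documents below expect could be produced as follows. The primes are deliberately tiny textbook values (never adequate in practice), and pow(e, -1, phi) requires Python 3.8+.

def toy_generate_keys(p=61, q=53, e=17):
    # toy parameters, for showing the tuple format only
    n = p * q
    phi = (p - 1) * (q - 1)
    d = pow(e, -1, phi)                 # modular inverse of e, Python 3.8+
    return (e, n), (d, n)

public_key, private_key = toy_generate_keys()
print(public_key, private_key)          # (17, 3233) (2753, 3233)
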
Encrypt 'message' with a public key and return its encryption as a list of integers. If no key is provided, use the 'public_key' attribute to encrypt the message. | def encrypt(self, message, key=None):
        #Check validity of the public key: fall back to the stored attribute
        #when no key argument is given.
        if key is None:
            if self.public_key is None:
                raise Exception("invalid public key!")
            e = self.public_key[0]
            n = self.public_key[1]
        else:
            e = key[0]
            n = key[1]
        output = []
        #Split the message into blocks small enough to encrypt modulo n.
        pieces = partition(message, string_size(n), '~')
        for piece in pieces:
            temp_int = string_to_int(piece)
            output.append(pow(temp_int, e, n))
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encrypt(self, message, key=None):\n if key is None:\n key = self.public_key\n encrypter = RSA.importKey(key)\n return encrypter.encrypt(message, 2048)",
"def rsa_encrypt(message, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.encrypt)",
"def encrypt(message):\n setup()\n\n # Convert message to integer representation\n m = ''\n for letter in message:\n m += \"{0:0>2}\".format(ord(letter) - ord('a') + 1)\n m = int(m)\n\n # Read in e and n from the public key file\n ifp = open(\"public.rsa\")\n e, n = int(ifp.readline()), int(ifp.readline())\n\n # Encrypt m by using public n and e\n c = pow(m, e, n)\n return str(c)",
"def encrypt(message, pub_key):\n\n if not isinstance(pub_key, key.PublicKey):\n raise TypeError(\"You must use the public key with encrypt\")\n\n return chopstring(message, pub_key.e, pub_key.n, encrypt_int)",
"def encrypt_using_public_key(message, user_id, public_key=None):\n if public_key is None:\n public_key_path = os.path.join('public_keys', f'public.{user_id}.key')\n with open(public_key_path, 'rb') as file:\n public_key = RSA.importKey(file.read())\n else:\n public_key = RSA.importKey(public_key)\n\n cipher = PKCS1_OAEP.new(public_key)\n encrypted = cipher.encrypt(message.encode())\n return encrypted.hex()",
"def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")",
"def encrypt(self, message):\n return self._transform(message, self._encoder)",
"def encrypt(self, message):\n return self._transform(message, self._encoder)",
"def encrypt(public_key, message):\n symmetric_key = get_rand_bytes(32)\n msg_header = PKCS1_OAEP.new(public_key).encrypt(symmetric_key)\n assert len(msg_header) == 512\n msg_iv = get_rand_bytes(16)\n msg_body = AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).encrypt(message)\n return msg_header + msg_iv + msg_body",
"def encrypt(self, message):\n E = (((k + int_mapping(c)) % 26) for k, c in zip(cycle(self.key), preprocess(message)))\n return ''.join(char_mapping(n) for n in E)",
"def encrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.encrypt(message.encode())",
"def encrypt(self, data):\n\n if self.crypt_public == \"\":\n raise ValueError(\"Error encrypting: No public encryption key found for {}\".format(self))\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return key_public.Encrypt(data)",
"def rsa_encrypt_numbers(numbers, public_key):\n # TODO: Daniel\n\n encrypted_numbers = [1,2,3,4,5]\n\n return encrypted_numbers",
"def kem_encapsulate(self, public_key):\n shared_key = Buffer(self.kem_shared_key_len(key=public_key))\n encapsulated_key = Buffer(self.kem_encapsulated_key_len(public_key=public_key))\n status = self._lib_vscf_ecc.vscf_ecc_kem_encapsulate(self.ctx, public_key.c_impl, shared_key.c_buffer, encapsulated_key.c_buffer)\n VscfStatus.handle_status(status)\n return shared_key.get_bytes(), encapsulated_key.get_bytes()",
"def encrypt(self, public_key, data):\n d_data = Data(data)\n out = Buffer(self.encrypted_len(public_key=public_key, data_len=len(data)))\n status = self._lib_vscf_ecc.vscf_ecc_encrypt(self.ctx, public_key.c_impl, d_data.data, out.c_buffer)\n VscfStatus.handle_status(status)\n return out.get_bytes()",
"def encrypt_message(message: bytes, receiver_public_key: RsaKey, nbits: int = 256) -> bytes:\n aes_key = get_random_bytes(nbits // 8)\n cipher_aes = AES.new(aes_key, AES.MODE_CBC)\n return cipher_aes.iv + _encrypt_aes_key(aes_key, receiver_public_key) + cipher_aes.encrypt(\n pad(message, AES.block_size)) # Padding have to be added in case the size does not fit in exact blocks",
"def encrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)",
"def encrypt(self, message):\n output = []\n for letter in message:\n # preventing white spaces and numbers\n if letter == ' ' or isinstance(letter, int):\n output.append(letter)\n else:\n idx_in_plain = self.PLAIN_TEXT_ALPH.index(letter.upper())\n output.append(self.CIPHER_TEXT_ALPH[idx_in_plain])\n return \"\".join(output)",
"def encrypt(self, data):\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return b64encode(key_public.Encrypt(data))",
"def encrypt(message, key):\n\tnumericRepresentation = []\n\tfor c in message:\n\t\tnumericRepresentation.append(ord(c) - 65)\n\n\tcipher = \"\"\n\tfor x in numericRepresentation:\n\t\tcipher += chr((x + key) % 26 + 65)\n\n\treturn cipher",
"def encrypt(self, message, key):\n message = self.pkcs7_pad(message)\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(AES.block_size))\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n return iv + cipher.encrypt(message)",
"def encrypt_message(message,public_key,symetric_key):\n\tif message != None:\n\t\tnonce = os.urandom(12)\n\t\tmessage = AESCCM(symetric_key).encrypt(nonce,message.encode(\"iso-8859-1\"),None)\n\t\tnonce, *_ = encrypt(public_key,nonce)\n\t\tmessage ={'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\n\treturn message",
"def encrypt(self, key, plaintext):\n output = []\n padded_key = padd_key(key, plaintext)\n for i in range(len(plaintext)):\n enc_ascii = (ord(plaintext[i]) + ord(padded_key[i])) % 256\n output.append(chr(enc_ascii))\n return ''.join(output)",
"def __encryptRSA(msg, key):\n # Convert message to bytes\n msg = msg.encode('utf-8')\n return key.encrypt(\n msg,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )",
"def encrypt(self, bytes):\r\n paddedBytes = self._addPKCS1Padding(bytes, 2)\r\n m = bytesToNumber(paddedBytes)\r\n if m >= self.n:\r\n raise ValueError()\r\n c = self._rawPublicKeyOp(m)\r\n encBytes = numberToByteArray(c, numBytes(self.n))\r\n return encBytes",
"def dh_encrypt(pub, message):\n \n Group, private, public = dh_get_key()#generate new DH pair for Alice\n #private key is an integer/scalar and public key is a point on the curve \n \n #check whether public key of Bob is valid and on curve \n assert Group.check_point(pub)\n \n #Alice obtains shared secret by multiplying her private key with bob's forwarded public key\n key = pub.pt_mul(private)#dA* qB\n print \"key from enc is\", key\n \n hashedKey=sha256(key.export()).digest()\n\n \n plaintext = message.encode(\"utf8\")#encode message\n aes = Cipher(\"aes-128-gcm\")#select cipher\n iv = urandom(16)#generate initialization vector \n cipher, tag = aes.quick_gcm_enc(hashedKey[:16], iv, plaintext)#encrypt using shared key \n ciphertext = [iv,cipher,tag,public]\n\n return ciphertext",
"async def encrypt(self, message: Message, jids: Optional[List[JID]], tab: ChatTabs):\n\n raise NotImplementedError",
"def encrypt_message(self, message: dict) -> None:\n secure_message = {'type': 'SECURE_MESSAGE', 'content': None}\n content = json.dumps(message).encode()\n \n ct = self.crypto.encrypt(content)\n secure_message['content'] = base64.b64encode(ct).decode()\n self.encrypted_data += secure_message['content']\n\n return secure_message",
"def encrypt(self, message):\n\n IV = Random.new().read(self.BLOCK_SIZE)\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return base64.b64encode(IV + aes.encrypt(self._pad(message)))",
"def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)"
]
| [
"0.7807903",
"0.759414",
"0.7560335",
"0.7453449",
"0.7370985",
"0.70735997",
"0.7071769",
"0.7071769",
"0.7052295",
"0.68057853",
"0.67484885",
"0.6708029",
"0.66434103",
"0.664233",
"0.6539715",
"0.6480346",
"0.64450645",
"0.63244545",
"0.62741244",
"0.6272498",
"0.62405026",
"0.61861354",
"0.617456",
"0.61568797",
"0.611985",
"0.60683113",
"0.6055885",
"0.6042841",
"0.6016278",
"0.5954312"
]
| 0.84980357 | 0 |
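
The encrypt document above calls string_to_int, partition and string_size helpers that the record does not include. The definitions below are assumptions sketched only so the method can be exercised end to end; the original project may implement them differently (int_to_string is added for the decrypt side).

def string_to_int(s):
    return int.from_bytes(s.encode("utf-8"), "big")

def int_to_string(m):
    return m.to_bytes((m.bit_length() + 7) // 8, "big").decode("utf-8")

def string_size(n):
    # largest block length in bytes whose integer value is guaranteed to stay below n
    return max(1, (n.bit_length() - 1) // 8)

def partition(s, size, pad):
    # split s into fixed-size chunks, padding the last chunk with pad
    if len(s) % size:
        s += pad * (size - len(s) % size)
    return [s[i:i + size] for i in range(0, len(s), size)]

e, n = 17, 3233                          # toy public key from the earlier sketch
blocks = partition("hi", string_size(n), '~')
cipher = [pow(string_to_int(p), e, n) for p in blocks]
print(cipher)                            # two integers, one per single-byte block
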
Decrypt 'message' with the private key and return its decryption as a single string. You may assume that the format of 'message' is the same as the output of the encrypt() function. | def decrypt(self, message):
#check validity of _private_key
if self._private_key is None:
raise Exception("invalid private key")
output = ""
d = self._private_key[0]
n = self._private_key[1]
        for i in range(len(message)):
            m = pow(message[i], d, n)
            output += int_to_string(m)
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decrypt(self, message):\n return self._keypair.decrypt(message)",
"def decrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.decrypt(message)",
"def decrypt_message(encrypted_message):",
"def decrypt_using_private_key(message):\n public_key_path = os.path.join('keys', 'private.key')\n with open(public_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n cipher = PKCS1_OAEP.new(private_key)\n encrypted = cipher.decrypt(message)\n return encrypted.hex()",
"def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")",
"def decrypt_message(self, encrypted_message):\n f = Fernet(bytes(self.key))\n decrypted_message = f.decrypt(encrypted_message)\n return decrypted_message",
"def decrypt(message: str) -> str:\n return \"\".join(REVERSE_DICT[char] for char in message.split())",
"def decrypt_message(encrypted_message):\r\n\r\n # conversion to bytes\r\n encrypted_message = bytes(encrypted_message, \"ascii\")\r\n\r\n # loading key\r\n key = load_key()\r\n\r\n # creating a fernet object\r\n f = Fernet(key)\r\n\r\n # decrypting the messsage\r\n decrypted_message = f.decrypt(encrypted_message)\r\n\r\n return decrypted_message.decode()",
"def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed",
"def decrypt_str(message):\n filename = f'/tmp/{get_temp_filename()}'\n filename_encrypted = f'{filename}.pem'\n filename_plain = f'{filename}.plain'\n pem_file = open(filename_encrypted, 'w')\n pem_file.write(message)\n pem_file.close()\n cmd = [\n \"openssl\",\n \"cms\",\n \"-decrypt\",\n \"-inform\", \"PEM\",\n \"-in\", filename_encrypted,\n \"-inkey\", server_key_files[\"key\"],\n \"-recip\", server_key_files[\"crt\"],\n \"-out\", filename_plain\n ]\n res_text = \"\"\n try:\n exec_cmd(cmd)\n with open(filename_plain, \"r\") as plain:\n res_text = plain.read()\n plain.close()\n os.unlink(filename_plain)\n except (OSError, subprocess.CalledProcessError) as err:\n logging.error(\"decrypt_str failed: %s\", err)\n finally:\n os.unlink(filename_encrypted)\n\n return res_text",
"def decrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)",
"def decrypt_message(self):\r\n\r\n\t\t#Will not let user input useless messages that cannot be decrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to decrypt. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Your decrypted message is\")\r\n\t\tprint(self.my_code + \"|\")",
"def decrypt_message(self, cipher):\n\t\tmessage = cipher ** self.private_key % self.hidden_primes_product\n\t\treturn message",
"def decrypt(self, message):\n message = base64.b64decode(message)\n initialization_vector = message[:self._block_size]\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n raw_message = cipher.decrypt(message[self._block_size:])\n return self._remove_padding(raw_message).decode('utf-8')",
"def decrypt(self, message):\n output = []\n for letter in message:\n # preventing white spaces and numbers\n if letter == ' ' or isinstance(letter, int):\n output.append(letter)\n else:\n idx_in_plain = self.CIPHER_TEXT_ALPH.index(letter.upper())\n output.append(self.PLAIN_TEXT_ALPH[idx_in_plain])\n return \"\".join(output)",
"def decryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n decoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code-key\n new = chr(change)\n string += new\n key += key_increment\n decoded = ''.join(string)\n return ('Decoded Message:\\t' + decoded)",
"def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])",
"async def decrypt(self, message: Message, jid: Optional[JID], tab: ChatTab):\n\n raise NotImplementedError",
"def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)",
"def decrypt(library, message):\r\n\r\n # Make lists of keys and values\r\n keys = []\r\n values = []\r\n for entry in library:\r\n entry = entry.split()\r\n keys.append(entry[1])\r\n values.append(entry[0])\r\n\r\n # Decode the message\r\n decoded = ''\r\n message = message.split()\r\n for m in range(len(message)):\r\n for k in range(len(keys)):\r\n if message[m] == keys[k]:\r\n decoded += values[k]\r\n if message[m] not in keys:\r\n decoded += '?'\r\n return decoded",
"def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message",
"def decrypt(self, msg):\n if self.security_type is not None and self.security_type != 0:\n res, used, _ = gss.unwrap(self.ctx, msg)\n isconf = self.security_type == gss.RequirementFlag.confidentiality\n if (not used and isconf):\n raise GSSClientError('User requested encryption, '\n 'but the server sent an unencrypted '\n 'message!')\n return res.decode('utf-8')\n else:\n return msg.decode('utf-8')",
"def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))",
"def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)",
"def decrypt(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.decrypt(msg).data",
"def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain",
"def decrypt(self, message):\n # message = message.upper().split()\n # message = \"\".join(message)\n # desalting the message to remove 5 characters blocks\n padding = input(\"Have you used 5 characters blocks? y/n \")\n if padding == \"y\":\n message = message.replace(\" \", \"\")\n message = self.desalt_random(message)\n message = \"\".join(message)\n\n message = message.upper()\n message_list = []\n for ch in message:\n message_list.append(self.main_dict[ch][0])\n\n # OTP Encryption / process the message with OTP\n otp = input(\"What is the OTP that was generated for you during \"\n \"encryption process?: \")\n otp = otp.upper()\n random_otp = []\n for ch in otp:\n random_otp.append(self.main_dict[ch][0])\n\n # If OTP is correct, decrypt the message with mod27\n if len(message_list) != len(random_otp):\n print(\"You typed a wrong OTP.\")\n return None\n else:\n math_list = []\n for i, item in enumerate(message_list):\n if message_list[i] >= random_otp[i]:\n x = message_list[i] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n else:\n for key, value in self.main_dict.items():\n if item == value[0]:\n x = value[1] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n return \"\".join(math_list)",
"def decrypt(self, message):\r\n\r\n # Example string\r\n message = message.lower()\r\n # Everything we can encrypt\r\n SYMBOLS = \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n for counter, key in enumerate(range(len(SYMBOLS))):\r\n # try again with each key attempt\r\n translated = \"\"\r\n\r\n for character in message:\r\n if character in SYMBOLS:\r\n symbolIndex = SYMBOLS.find(character)\r\n translatedIndex = symbolIndex - key\r\n\r\n # In the event of wraparound\r\n if translatedIndex < 0:\r\n translatedIndex += len(SYMBOLS)\r\n\r\n translated += SYMBOLS[translatedIndex]\r\n\r\n else:\r\n # Append the symbol without encrypting or decrypting\r\n translated += character\r\n\r\n # Output each attempt\r\n result = self.lc.checkLanguage(translated)\r\n if result:\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": True,\r\n \"Plaintext\": translated,\r\n \"Cipher\": \"Caesar\",\r\n \"Extra Information\": f\"The rotation used is {counter}\",\r\n }\r\n # if none of them match English, return false!\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": False,\r\n \"Plaintext\": None,\r\n \"Cipher\": \"Caesar\",\r\n \"Extra Information\": None,\r\n }",
"def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))",
"def decrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"decrypt.php\",msg)) as f:\n decryptedmessage = f.read().decode('utf-8',\"strict\")\n return decryptedmessage"
]
| [
"0.8332212",
"0.82969767",
"0.81545275",
"0.80835545",
"0.7953939",
"0.7846777",
"0.78017974",
"0.77947813",
"0.7641488",
"0.7457096",
"0.7444166",
"0.74372023",
"0.7364984",
"0.7335487",
"0.72907263",
"0.724854",
"0.71047544",
"0.70548815",
"0.7027532",
"0.6998797",
"0.6997155",
"0.6963637",
"0.6938333",
"0.69359297",
"0.69097334",
"0.68452877",
"0.6834641",
"0.67906857",
"0.67578954",
"0.67562556"
]
| 0.84557134 | 0 |
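
Putting the encrypt and decrypt documents together, a self-contained round trip with the toy key looks roughly like the sketch below. It simplifies the block handling to one character per block so every block value stays below n and no '~' padding is needed; it illustrates the scheme, not the original project's tests.

e, d, n = 17, 2753, 3233                             # toy key pair, illustration only
message = "hello"
cipher = [pow(ord(ch), e, n) for ch in message]      # encrypt: one block per character
plain = "".join(chr(pow(c, d, n)) for c in cipher)   # decrypt with the private exponent
assert plain == message
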
Use Fermat's test for primality to see if 'n' is probably prime. Run the test at most five times, using integers randomly chosen from [2, n1] as possible witnesses. If a witness number is found, return the number of tries it took to find the witness. If no witness number is found after five tries, return 0. | def is_prime(n, number_of_tests=5):
passes = 0
prime = True #assume prime
for i in xrange(number_of_tests):
passes += 1
random_int = random.randint(2, n-1)
test = pow(random_int, n-1, n)
if test != 1:
prime = False
break
if prime:
return 0
else:
return passes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Ballie_PSW_test(n, max_trivial_trials=100):\n for i in range(max_trivial_trials):\n if primes[i] == n:\n return True\n if n % primes[i] == 0:\n return False\n if primes[i] ** 2 >= n:\n return True\n if not fermat_strong_test(n, 2):\n return False\n if not lucas_selfridge_test(n):\n return False\n return True",
"def fermats(n):\n randomlist = []\n for i in range(10):\n randomlist.append(random.randrange(2, n-1))\n i += 1\n for i in randomlist:\n if successivesquaring(i, n-1, n) != 1:\n return(\"n is composite\")\n return(\"n is probably prime\")",
"def solution(n: int = 2000000) -> int:\n\n return sum(takewhile(lambda x: x < n, prime_generator()))",
"def witness(a, n):\n t, u = witHelp(n - 1)\n x0 = xi = modExp(a, u, n)\n for i in range(1, t + 1):\n xi = (x0 ** 2) % n\n if xi == 1 and x0 != 1 and x0 != n - 1:\n return True\n x0 = xi\n if xi != 1:\n return True\n return False",
"def Z(n):\n count5 = 0\n i = 1\n while 1:\n a = pow(5, i)\n if a > n:\n return count5\n else:\n count5 += n/a\n i += 1",
"def isPrime(n, s=50):\n from random import randrange\n for x in primes:\n if x < n:\n if n % x == 0:\n return False\n for j in range(1, s + 1):\n a = randrange(1, n)\n if witness(a, n):\n return False\n return True",
"def rwh_primes1(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * int((n/2))\n for i in range(3,int(n**0.5)+1,2):\n if sieve[int(i/2)]:\n sieve[int(i*i/2)::i] = [False] * int(((n-i*i-1)/(2*i)+1))\n return [2] + [2*i+1 for i in range(1,int(n/2)) if sieve[int(i)]]",
"def isprime(n):\n if n!=int(n):\n return False\n n=int(n)\n #Miller-Rabin test for prime\n if n==0 or n==1 or n==4 or n==6 or n==8 or n==9:\n return False\n\n if n==2 or n==3 or n==5 or n==7:\n return True\n s = 0\n d = n-1\n while d%2==0:\n d>>=1\n s+=1\n assert(2**s * d == n-1)\n\n def trial_composite(a):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2**i * d, n) == n-1:\n return False\n return True\n\n for i in range(8):#number of trials\n a = random.randrange(2, n)\n if trial_composite(a):\n return False\n\n return True",
"def game1(n):\r\n\twin=0\r\n\tfor i in range(n):\r\n\t\tif game(1)==1:\r\n\t\t\twin+=1\r\n\tprob1=win/n\r\n\treturn prob1",
"def McNuggets(n):\n a=0\n b=0\n c=0\n result=0\n while result <= n:\n result = 6*a + 9*b + 20*c\n if result > n:\n return False\n elif result == n:\n return True\n else:\n a+=1\n ...",
"def is_prime(n):\n assert n > 3\n k = int(log2(n))\n m = n - 1\n d = 0\n while(m % 2 == 0):\n m //= 2\n d += 1\n for _ in range(k):\n a = randint(2, n - 2)\n x = pow(a, m, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(d - 1):\n x = pow(x, 2, n)\n if x == 1:\n return 0\n if x == n - 1:\n break\n if x != n - 1:\n return 0\n return 1",
"def fermat_prime(n: int, k: int) -> int:\n assert n > 3 and k >= 1\n for _ in range(k):\n a = random.randint(2, n - 2)\n if pow(a, n - 1, n) != 1: # (a**(n-1)%n) != 1:\n return False\n return True",
"def fermat(n, k=10):\n if n != 2 and n % 2 == 0:\n return False\n\n for i in range(k):\n a = random.randint(2, n - 2)\n result = pow(a, n-1, n)\n if result != 1:\n return False\n\n return True",
"def rwh_primes1(n):\n sieve = [True] * (n/2)\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i/2]:\n sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)\n return [2] + [2*i+1 for i in xrange(1,n/2) if sieve[i]]",
"def faculteit_iteratief(n):\n res = 1\n\n # Voeg de iteratie in: for ...\n\n return res",
"def potential_witnesses(n):\n if n < 1373653: return [2, 3] # n < 1,373,653\n if n < 9080191: return [31, 73] # n < 9,080,191\n if n < 4759123141: return [2, 7, 61] # n < 4,759,123,141\n if n < 2152302898747: return [2, 3, 5, 7, 11] # n < 2,152,302,898,747\n if n < 3474749660383: return [2, 3, 5, 7, 11, 13] # n < 3,474,749,660,383\n if n < 341550071728321: return [2, 3, 5, 7, 11, 13, 17] # n < 341,550,071,728,321\n return [random.randint(1,n-1) for _ in xrange(0,20)] # 99.999999999909051% (1 - .25**20) accuracy for n >= 341,550,071,728,321",
"def prime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True",
"def n_primes(n):\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,\n 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,\n 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,\n 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,\n 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,\n 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,\n 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,\n 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,\n 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,\n 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,\n 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,\n 953, 967, 971, 977, 983, 991, 997][:n]\n\n if len(primes) < n:\n big_number = 2000\n while 'Not enough primes':\n primes = primes_from_2_to(big_number)[:n]\n if len(primes) == n:\n break\n big_number += 1000\n\n return primes",
"def is_prime(number, num_trials=200):\n if number < 2:\n return False\n if number != 2 and number % 2 == 0:\n return False\n\n # Find largest odd factor of n-1.\n exp = number - 1\n while exp % 2 == 0:\n exp //= 2\n\n for _ in range(num_trials):\n rand_val = int(random.SystemRandom().randrange(1, number))\n new_exp = exp\n power = pow(rand_val, new_exp, number)\n while new_exp != number - 1 and power != 1 and power != number - 1:\n power = (power * power) % number\n new_exp *= 2\n if power != number - 1 and new_exp % 2 == 0:\n return False\n\n return True",
"def is_prime(n):\n\n if n in CONSTANTS.LOW_PRIMES:\n return True\n\n for prime in CONSTANTS.LOW_PRIMES:\n if n % prime == 0:\n return False\n\n if n == 2:\n return True\n if n % 2 == 0:\n return False\n s = 0\n d = n - 1\n while d % 2 == 0:\n d //= 2\n s += 1\n for _ in range(CONSTANTS.MILLER_RABIN_ITERATIONS):\n a = random.randint(2, n - 2)\n x = pow(a, d, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(s - 1):\n x = pow(x, 2, n)\n if x == n - 1:\n break\n else:\n return False\n return True",
"def ll_primality(n: int) -> bool:\n if n <= 2 or not trial_div(n):\n return False\n luc_leh = lucas_lehmer()\n for _ in range(n - 1):\n ll = next(luc_leh)\n return ll % (2**n - 1) == 0",
"def prob_no_match(n):\n return math.factorial(n)*math.comb(365,n)/(365**n)",
"def prob1(n):\n\n # create a giant draw from a normal distribution\n random_draws = np.random.normal(loc= 0, scale = 1, size = n)\n\n # mask the values\n mask = random_draws > 3\n\n return np.sum(mask)/float(n)",
"def fermat_strong_test(n, a):\n if n == 2:\n return True\n # n - 1 = d * 2 ^ s\n d, s = factor_twos(n - 1)\n\n # by Fermat theorem, if n is prime then\n # (a^d - 1)(a^d + 1)(a^2d + 1)(a^4d + 1)...(a^2^(s-1)d + 1) = 0 (mod n)\n a = powmod(a, d, n)\n if a == 1 or a == n - 1:\n return True\n for _ in range(s):\n a = a * a % n\n if a == n - 1:\n return True\n return False",
"def eg_ok(n=1):\n\n random.seed(n)",
"def GetNFactors(n, primes, n_pfactors, _):\n sqrtn = int(n ** 0.5) + 1\n\n for p in primes:\n if p > sqrtn:\n break\n if n % p == 0:\n n //= p\n if n % p == 0:\n return n_pfactors[n]\n else:\n return n_pfactors[n] + 1\n\n # n is primes\n primes.append(n)\n return 1",
"def nth_prime(n):\n\n upper_bound = 0\n if n >= 7022:\n upper_bound = int(n * log(n) + n * (log(log(n)) - 0.9385))\n elif n >= 6:\n upper_bound = int(n * log(n) + n * log(log(n)))\n else:\n upper_bound = 14\n prime_set = list(primes(upper_bound))\n return prime_set[n - 1]",
"def rwh_primes2(n):\n # flake8: noqa\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n correction = (n%6>1)\n n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]\n sieve = [True] * (n/3)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]",
"def factorPR(n):\n\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n)))\n\tfor additive in range(1,5):\n\t\tfast=slow=1; i=1\n\t\twhile i<numsteps:\n\t\t\tslow = (slow*slow + additive) % n\n\t\t\ti = i + 1\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tg = gcd(fast-slow,n)\n\t\t\tif (g != 1):\n\t\t\t\tif (g == n):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\treturn g\n\treturn 1",
"def game2(n):\r\n\twin=0\r\n\tfor i in range(n):\r\n\t\tif game(2)==1:\r\n\t\t\twin+=1\r\n\tprob2=win/n\r\n\treturn prob2"
]
| [
"0.65775406",
"0.6563441",
"0.64641523",
"0.6411732",
"0.6402925",
"0.6373394",
"0.63652146",
"0.62972295",
"0.62671983",
"0.62667334",
"0.626641",
"0.62386185",
"0.62371504",
"0.62341464",
"0.61839336",
"0.6167961",
"0.6142165",
"0.6119516",
"0.6093374",
"0.60730374",
"0.60506636",
"0.6045864",
"0.6034459",
"0.6024694",
"0.60145766",
"0.60144645",
"0.60142547",
"0.6005869",
"0.60056907",
"0.59833586"
]
| 0.6907187 | 0 |
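The paired snippet above is Python 2 (`xrange`) and omits its `random` import; a minimal self-contained Python 3 sketch of the same witness-counting behaviour, with the function name and the sample inputs chosen here purely for illustration, would be:

```python
import random

def fermat_witness_tries(n, number_of_tests=5):
    """Return the try on which a Fermat witness for n turned up, or 0 if none did."""
    for tries in range(1, number_of_tests + 1):
        a = random.randint(2, n - 1)        # candidate witness drawn from [2, n-1]
        if pow(a, n - 1, n) != 1:           # a**(n-1) % n != 1  =>  n is composite
            return tries
    return 0                                # no witness found: n is probably prime

print(fermat_witness_tries(7919))   # 7919 is prime, so this always prints 0
print(fermat_witness_tries(221))    # 221 = 13 * 17, so a witness usually shows up on try 1
```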
Initialize the _keypair and public_key attributes. | def __init__(self):
self._keypair = RSA.generate(2048)
self.public_key = self._keypair.publickey().exportKey() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.public_key = None\n self._private_key = None",
"def initialize(self):\n super(self.__class__, self).initialize()\n\n try:\n self.__keypair = nova_utils.get_keypair_by_name(\n self._nova, self.keypair_settings.name)\n return self.__keypair\n except Exception as e:\n logger.warn('Cannot load existing keypair - %s', e)",
"def __init__(self, rsa_key):\r\n if isinstance(rsa_key, tuple):\r\n self.keypair = Crypto.PublicKey.RSA.construct(rsa_key)\r\n else:\r\n self._InitFromString(rsa_key)",
"def __init__(self, os_creds, keypair_settings):\n super(self.__class__, self).__init__(os_creds)\n\n self.keypair_settings = keypair_settings\n self.__delete_keys_on_clean = True\n\n # Attributes instantiated on create()\n self.__keypair = None",
"def __init__(self, public_key=None, signature=None, key_id=None, key_type=None):\n self.swagger_types = {\n 'public_key': 'str',\n 'signature': 'str',\n 'key_id': 'str',\n 'key_type': 'str'\n }\n\n self.attribute_map = {\n 'public_key': 'publicKey',\n 'signature': 'signature',\n 'key_id': 'keyId',\n 'key_type': 'keyType'\n }\n\n self._public_key = public_key\n self._signature = signature\n self._key_id = key_id\n self._key_type = key_type",
"def __init__(self, curve=None, private_key=None, public_key=None):\n self.curve = curve\n self.private_key = None\n self.public_key = None\n if private_key:\n self.load_private_key(private_key)\n if public_key:\n self.load_received_public_key(public_key)",
"def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)",
"def __init__(self,\r\n ephemeral_public_key=None,\r\n public_key_hash=None,\r\n transaction_id=None):\r\n\r\n # Initialize members of the class\r\n self.public_key_hash = public_key_hash\r\n self.ephemeral_public_key = ephemeral_public_key\r\n self.transaction_id = transaction_id",
"def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key",
"def __init__(self, sk=None, n=None, h=None):\r\n if sk:\r\n self.n = sk.n\r\n self.h = sk.h\r\n elif n and h:\r\n self.n = n\r\n self.h = h\r\n else:\r\n raise Exception(\"Public Key construction failed: insufficient/wrong arguments\")\r\n\r\n self.signature_bound = Params[self.n][\"sig_bound\"]\r\n self.sig_bytelen = Params[self.n][\"sig_bytelen\"]",
"def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair",
"def new_key_pair(self):\n from plonevotecryptolib.KeyPair import KeyPair # avoids circular imports\n return KeyPair(self)",
"def __init__(self, public_key=None):\n self.public_key = self.convert_public_key_to_ecdsa(public_key) if public_key else public_key",
"def __init__(self, private_key):\n if private_key:\n if isinstance(private_key, str): # base58 encoded string\n self.private_key = PrivateKey.from_b58check(private_key)\n else:\n self.private_key = private_key\n self.public_key = self.private_key.public_key\n else:\n self.private_key = None\n self.public_key = None",
"def setUp(self):\n\n self.private_key = self.get_new_key()\n self.public_key = self.private_key.public_key()\n\n self.pem_private_key = self.private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n )\n self.encrypted_pem_private_key = self.private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.BestAvailableEncryption(self.private_key_password),\n )\n\n self.pem_public_key = self.public_key.public_bytes(\n serialization.Encoding.PEM, serialization.PublicFormat.PKCS1\n )",
"def __init__(self, public_key):\n self._pk = ed25519.Ed25519PublicKey.from_public_bytes(public_key.bytes)",
"def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")",
"def _InitFromString(self, text):\r\n # First, remove all whitespace:\r\n text = re.sub(_WHITESPACE_RE, '', text)\r\n\r\n # Parse out the period-separated components\r\n match = _KEY_RE.match(text)\r\n if not match:\r\n raise ValueError('Badly formatted key string: \"%s\"', text)\r\n\r\n private_exp = match.group('private_exp')\r\n if private_exp:\r\n private_exp = _B64ToNum(private_exp)\r\n else:\r\n private_exp = None\r\n self.keypair = Crypto.PublicKey.RSA.construct(\r\n (_B64ToNum(match.group('mod')),\r\n _B64ToNum(match.group('exp')),\r\n private_exp))",
"def __init__(self, **options):\n\n super().__init__(**options)\n\n self._private_key = None\n self._public_key = None\n\n self._load_keys(**options)",
"def __init__(self, key_info):\n if (key_info.type != client_pb2.KeyInfo.ECDSA):\n raise error.UnsupportedAlgorithmError(\n \"Expected ECDSA key, but got key type %d\" % key_info.type)\n\n # Will raise a PemError on invalid encoding\n self.__der, _ = pem.from_pem(key_info.pem_key, self.__READ_MARKERS)\n try:\n self.__key = ecdsa.VerifyingKey.from_der(self.__der)\n except ecdsa.der.UnexpectedDER as e:\n raise error.EncodingError(e)",
"def _init_key_settings(self):\n self.minKeySize = 1023\n self.maxKeySize = 8193\n self.rsaSigHashes = list(RSA_SIGNATURE_HASHES)\n self.rsaSchemes = list(RSA_SCHEMES)\n self.dsaSigHashes = list(DSA_SIGNATURE_HASHES)\n self.virtual_hosts = []\n # DH key settings\n self.eccCurves = list(CURVE_NAMES)\n self.dhParams = None\n self.dhGroups = list(ALL_DH_GROUP_NAMES)\n self.defaultCurve = \"secp256r1\"\n self.keyShares = [\"secp256r1\", \"x25519\"]\n self.padding_cb = None\n self.use_heartbeat_extension = True\n self.heartbeat_response_callback = None",
"def __init__(self, key, initial_prng):\n self.cipher = key\n self.prng = initial_prng\n self.nonce = None",
"def __init__(self, key=None):\n self.key = key",
"def __init__(self, private_key):\n self._sk = ed25519.Ed25519PrivateKey.from_private_bytes(private_key.bytes)",
"def __init__(self):\n publicKeyFileName = \"serverPublicKey\"\n privateKeyFileName = \"serverPrivateKey.pem\"\n try:\n f = open(privateKeyFileName, 'rb')\n self.keys = RSA.importKey(f.read())\n except:\n self.keys = RSA.generate(1024)\n self.publickey = self.keys.publickey()\n # export public and private keys\n privHandle = open(privateKeyFileName, 'wb')\n privHandle.write(self.keys.exportKey('PEM'))\n privHandle.close()\n \n pubHandle = open(publicKeyFileName, 'wb')\n pubHandle.write(self.keys.publickey().exportKey())\n pubHandle.close()\n self.publickey = self.keys.publickey()",
"def __init__(self, globalKey, publicKey, resourceName, **rest):\n super(SshKey, self).__init__({\n \"globalKey\": globalKey,\n \"publicKey\": publicKey,\n \"resourceName\": resourceName,\n }, **rest)",
"def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)",
"def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])",
"def __init__(self, key):\n self.key = key",
"def __init__(self, basekey=\"\"):\n self.basekey = basekey"
]
| [
"0.77278996",
"0.73476756",
"0.72197735",
"0.71278316",
"0.7043952",
"0.6906836",
"0.6847678",
"0.6793105",
"0.6780805",
"0.6697357",
"0.6651101",
"0.6648877",
"0.6648221",
"0.66463923",
"0.6603375",
"0.65970063",
"0.65599597",
"0.64603907",
"0.63687944",
"0.6349367",
"0.6348739",
"0.6348002",
"0.6331806",
"0.63209546",
"0.63158673",
"0.630954",
"0.629951",
"0.62603956",
"0.62077045",
"0.61975193"
]
| 0.75195104 | 1 |
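The keypair entry above only builds the two attributes; a short PyCryptodome sketch (the round-trip check is added here for illustration and is not part of the original class) shows what they contain:

```python
from Crypto.PublicKey import RSA   # PyCryptodome

keypair = RSA.generate(2048)                    # private key object, holds both halves
public_pem = keypair.publickey().exportKey()    # PEM-encoded public half, safe to share

# The exported PEM round-trips back into a usable public-key object.
reloaded = RSA.importKey(public_pem)
assert (reloaded.n, reloaded.e) == (keypair.n, keypair.e)
assert not reloaded.has_private()
```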
Encrypt 'message' with a public key and return its encryption. If no key is provided, use the '_keypair' attribute to encrypt 'message'. | def encrypt(self, message, key=None):
if key is None:
key = self.public_key
encrypter = RSA.importKey(key)
return encrypter.encrypt(message, 2048) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encrypt(message, pub_key):\n\n if not isinstance(pub_key, key.PublicKey):\n raise TypeError(\"You must use the public key with encrypt\")\n\n return chopstring(message, pub_key.e, pub_key.n, encrypt_int)",
"def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")",
"def rsa_encrypt(message, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.encrypt)",
"def encrypt_using_public_key(message, user_id, public_key=None):\n if public_key is None:\n public_key_path = os.path.join('public_keys', f'public.{user_id}.key')\n with open(public_key_path, 'rb') as file:\n public_key = RSA.importKey(file.read())\n else:\n public_key = RSA.importKey(public_key)\n\n cipher = PKCS1_OAEP.new(public_key)\n encrypted = cipher.encrypt(message.encode())\n return encrypted.hex()",
"def encrypt(public_key, message):\n symmetric_key = get_rand_bytes(32)\n msg_header = PKCS1_OAEP.new(public_key).encrypt(symmetric_key)\n assert len(msg_header) == 512\n msg_iv = get_rand_bytes(16)\n msg_body = AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).encrypt(message)\n return msg_header + msg_iv + msg_body",
"def encrypt(self, message, key=None):\n #Check validity of public key\n if self.public_key is None:\n raise Exception(\"invalid public key!\")\n elif pub_key is None:\t\t\n e = self.public_key[0]\n n = self.public_key[1]\n else:\n e = pub_key[0]\n n = pub_key[1]\n\n output = []\n m = string_to_int(message)\n pieces = partition(message, string_size(n), '~')\n\n for i in pieces:\n temp_int = string_to_int(i)\n output.append(pow(temp_int, e, n))\n \n return output",
"def encrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.encrypt(message.encode())",
"def encrypt(self, data):\n\n if self.crypt_public == \"\":\n raise ValueError(\"Error encrypting: No public encryption key found for {}\".format(self))\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return key_public.Encrypt(data)",
"def encrypt(message):\n setup()\n\n # Convert message to integer representation\n m = ''\n for letter in message:\n m += \"{0:0>2}\".format(ord(letter) - ord('a') + 1)\n m = int(m)\n\n # Read in e and n from the public key file\n ifp = open(\"public.rsa\")\n e, n = int(ifp.readline()), int(ifp.readline())\n\n # Encrypt m by using public n and e\n c = pow(m, e, n)\n return str(c)",
"def encrypt(self, message):\n return self._transform(message, self._encoder)",
"def encrypt(self, message):\n return self._transform(message, self._encoder)",
"def encrypt_message(message,public_key,symetric_key):\n\tif message != None:\n\t\tnonce = os.urandom(12)\n\t\tmessage = AESCCM(symetric_key).encrypt(nonce,message.encode(\"iso-8859-1\"),None)\n\t\tnonce, *_ = encrypt(public_key,nonce)\n\t\tmessage ={'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\n\treturn message",
"def dh_encrypt(pub, message):\n \n Group, private, public = dh_get_key()#generate new DH pair for Alice\n #private key is an integer/scalar and public key is a point on the curve \n \n #check whether public key of Bob is valid and on curve \n assert Group.check_point(pub)\n \n #Alice obtains shared secret by multiplying her private key with bob's forwarded public key\n key = pub.pt_mul(private)#dA* qB\n print \"key from enc is\", key\n \n hashedKey=sha256(key.export()).digest()\n\n \n plaintext = message.encode(\"utf8\")#encode message\n aes = Cipher(\"aes-128-gcm\")#select cipher\n iv = urandom(16)#generate initialization vector \n cipher, tag = aes.quick_gcm_enc(hashedKey[:16], iv, plaintext)#encrypt using shared key \n ciphertext = [iv,cipher,tag,public]\n\n return ciphertext",
"def kem_encapsulate(self, public_key):\n shared_key = Buffer(self.kem_shared_key_len(key=public_key))\n encapsulated_key = Buffer(self.kem_encapsulated_key_len(public_key=public_key))\n status = self._lib_vscf_ecc.vscf_ecc_kem_encapsulate(self.ctx, public_key.c_impl, shared_key.c_buffer, encapsulated_key.c_buffer)\n VscfStatus.handle_status(status)\n return shared_key.get_bytes(), encapsulated_key.get_bytes()",
"def __encryptRSA(msg, key):\n # Convert message to bytes\n msg = msg.encode('utf-8')\n return key.encrypt(\n msg,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )",
"def encrypt(self, s):\n public_key = serialization.load_pem_public_key(\n self.key.encode('utf-8'),\n backend=default_backend())\n\n encrypted = public_key.encrypt(\n s.encode('utf-8'),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None))\n # enc = bytes(encrypted).decode(\"utf-8\")\n return str(encrypted)",
"def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)",
"def encrypt_pk(pub_key, plaintext):\n try:\n ciphertext = pub_key.encrypt(\n plaintext,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to encrypt our own data is a fatal error\n # the most likely cause of this error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return b64encode(ciphertext)",
"def encrypt(self, public_key, data):\n d_data = Data(data)\n out = Buffer(self.encrypted_len(public_key=public_key, data_len=len(data)))\n status = self._lib_vscf_ecc.vscf_ecc_encrypt(self.ctx, public_key.c_impl, d_data.data, out.c_buffer)\n VscfStatus.handle_status(status)\n return out.get_bytes()",
"def encrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)",
"def encrypt(self, message, key):\n message = self.pkcs7_pad(message)\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(AES.block_size))\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n return iv + cipher.encrypt(message)",
"def encrypt_message(message: bytes, receiver_public_key: RsaKey, nbits: int = 256) -> bytes:\n aes_key = get_random_bytes(nbits // 8)\n cipher_aes = AES.new(aes_key, AES.MODE_CBC)\n return cipher_aes.iv + _encrypt_aes_key(aes_key, receiver_public_key) + cipher_aes.encrypt(\n pad(message, AES.block_size)) # Padding have to be added in case the size does not fit in exact blocks",
"def encrypt(self, data):\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return b64encode(key_public.Encrypt(data))",
"def encrypt(string,pub):\r\n string = livingDead.utfE(string)\r\n crypto = rsa.encrypt(string, pub)\r\n return crypto",
"def encrypt(self, message):\n E = (((k + int_mapping(c)) % 26) for k, c in zip(cycle(self.key), preprocess(message)))\n return ''.join(char_mapping(n) for n in E)",
"def encrypt(self, plaintext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass",
"def encrypt(self, message):\n return str.translate(message, self._encoder)",
"def encrypt_message(self, message: dict) -> None:\n secure_message = {'type': 'SECURE_MESSAGE', 'content': None}\n content = json.dumps(message).encode()\n \n ct = self.crypto.encrypt(content)\n secure_message['content'] = base64.b64encode(ct).decode()\n self.encrypted_data += secure_message['content']\n\n return secure_message",
"def encrypt(self, data):\n cipher_rsa = PKCS1_OAEP.new(self.key)\n return cipher_rsa.encrypt(data)",
"def encrypt(self, msg, iv=\"\", auth_data=None):\n if not iv:\n raise ValueError(\"Missing Nonce\")\n\n return self.key.encrypt(iv, msg, auth_data)"
]
| [
"0.7848979",
"0.77931744",
"0.7753139",
"0.7636173",
"0.7587493",
"0.7331755",
"0.7327657",
"0.7158712",
"0.7151961",
"0.709577",
"0.709577",
"0.70698965",
"0.6942818",
"0.6929247",
"0.68518966",
"0.68266696",
"0.66923577",
"0.66752476",
"0.66741943",
"0.6659982",
"0.6638486",
"0.663452",
"0.66097194",
"0.65654457",
"0.65470415",
"0.6481583",
"0.640828",
"0.6379783",
"0.62853134",
"0.6274921"
]
| 0.81159896 | 0 |
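The encrypt entry above calls the legacy PyCrypto `RSA` object's `encrypt(message, K)` method, which current PyCryptodome releases no longer support; a hedged modern equivalent (the function name is invented here, and OAEP limits the plaintext to a couple of hundred bytes with a 2048-bit key) is:

```python
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP

def encrypt_with_public_key(message: bytes, public_key_pem: bytes) -> bytes:
    """Encrypt a short message for whoever holds the matching private key."""
    key = RSA.importKey(public_key_pem)
    return PKCS1_OAEP.new(key).encrypt(message)
```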
Sends an XGRequest to the host and parses output into an XGResponse object. | def send_request(self, request, strip=None, retry=True):
data = request.to_xml()
if self.debug:
self.log('sending:\n{0}'.format(data))
try:
resp = self._handle.open(self.request_url, data)
resp_str = resp.read()
if self.debug:
self.log('received:\n{0}'.format(resp_str))
return XGResponse.fromstring(request, resp_str, strip)
except AuthenticationError as e:
self._closed = True
if self.keepalive and retry:
self.log('Attempting keepalive reconnect')
if self.login():
return self.send_request(request, strip, False)
raise e
except (urllib2.HTTPError, urllib2.URLError) as e:
msg = '{0}: {1}'.format(e.__class__.__name__, e)
self.log(msg)
raise NetworkError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _xml_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response",
"def do_external_request(self, cmd, extra_payload):\r\n xmlstr = etree.tostring(self.xml, pretty_print=True)\r\n payload = {\r\n 'xml': xmlstr,\r\n 'edX_cmd': cmd,\r\n 'edX_tests': self.tests,\r\n 'processor': self.code,\r\n }\r\n payload.update(extra_payload)\r\n\r\n try:\r\n # call external server. TODO: synchronous call, can block for a\r\n # long time\r\n req = requests.post(self.url, data=payload)\r\n except Exception as err:\r\n msg = 'Error {0} - cannot connect to external server url={1}'.format(err, self.url)\r\n log.error(msg)\r\n raise Exception(msg)\r\n\r\n if self.capa_system.DEBUG:\r\n log.info('response = %s', req.text)\r\n\r\n if (not req.text) or (not req.text.strip()):\r\n raise Exception(\r\n 'Error: no response from external server url=%s' % self.url)\r\n\r\n try:\r\n # response is XML; parse it\r\n rxml = etree.fromstring(req.text)\r\n except Exception as err:\r\n msg = 'Error {0} - cannot parse response from external server req.text={1}'.format(err, req.text)\r\n log.error(msg)\r\n raise Exception(msg)\r\n\r\n return rxml",
"def do_request(xml_location):\n request = open(xml_location,\"r\").read()\n webservice = httplib.HTTP(HOST,PORT)\n webservice.putrequest(\"POST\", API_URL)\n webservice.putheader(\"Host\", HOST)\n webservice.putheader(\"User-Agent\",\"Python post\")\n webservice.putheader(\"Content-type\", \"text/xml; charset=\\\"UTF-8\\\"\")\n webservice.putheader(\"Content-length\", \"%d\" % len(request))\n webservice.endheaders()\n webservice.send(request)\n statuscode, statusmessage, header = webservice.getreply()\n result = webservice.getfile().read()\n print statuscode, statusmessage, header\n print result",
"def _send_xml(self, url, xml):\n http = httplib2.Http()\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Authorization\": \"Basic %s\" % self.get_basic_auth()}\n return http.request(url, \"POST\", xml, headers=headers)",
"def request(self, host, handler, request_body, verbose):\n headers = {'User-Agent': self.user_agent,\n 'Content-Type': 'text/xml',\n }\n url = self._build_url(host, handler)\n kwargs = {}\n if StrictVersion(requests.__version__) >= StrictVersion('0.8.8'):\n kwargs['verify'] = True\n else:\n if self.use_https:\n warnings.warn(\n 'using https transport but no certificate '\n 'verification. (Hint: upgrade requests package.)')\n try:\n resp = requests.post(url, data=request_body, headers=headers,\n **kwargs)\n except ValueError:\n raise\n except Exception:\n raise # something went wrong\n else:\n try:\n resp.raise_for_status()\n except requests.RequestException as e:\n raise xmlrpc.ProtocolError(\n url, resp.status_code, str(e), resp.headers)\n else:\n return self.parse_response(resp)",
"def epg_xml() -> Response:\n xml = render_template('epg.xml',\n stations=locast_service.get_stations(),\n url_base=host_and_port)\n return Response(xml, mimetype='text/xml')",
"def send_request(self):\n\n # Get a formatted version of the request\n self.last_sent_request = self.request.format_request()\n\n # Send request in a byte-encoded format\n self.socket.sendall(self.last_sent_request.encode(\"utf-8\"))\n\n # If POST method is made, params are also sent\n if self.request.method.upper() == \"POST\":\n self.socket.sendall(self.request.params.encode('utf-8'))\n\n return self.get_server_response()",
"def _assemble_and_send_request(self):\r\n # Fire off the query.\r\n response = self.client.service.processShipment(WebAuthenticationDetail=self.WebAuthenticationDetail,\r\n ClientDetail=self.ClientDetail,\r\n TransactionDetail=self.TransactionDetail,\r\n Version=self.VersionId,\r\n RequestedShipment=self.RequestedShipment)\r\n return response",
"def request(self):\n response = self.__send(command[\"Request\"], bytearray())\n ret_val = self.__extract_values_from_response(response)\n return ret_val",
"def send(self):\n url = \"{}:{}\".format(self.url, self.port)\n headers = dict(self.request.get_headers())\n body = self.request.get_body()\n self.response = requests.post(url, data=body, headers=headers)",
"def send_request(request):\n auth()\n response = urllib2.urlopen(request)\n\n return BeautifulSoup(response).resultmessage.string",
"def request_externally(url):\n session = BQServer()\n #session = root\n session.authenticate_mex(identity.mex_authorization_token())\n session.root = request.host_url\n url = session.prepare_url(url)\n log.debug(\"begin routing externally: %s\" % url)\n try:\n resp = session.get(url, headers={'Content-Type':'text/xml'})\n except BQCommError as e:\n log.debug('%s' % str(e))\n return\n\n log.debug(\"end routing externally: status %s\" % resp.status_code)\n return resp",
"def make_request_xml(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`xml`\n if self.is_good_enough_xml(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None",
"def _send_response(self, request):\n request_line, headers = split_http_request(request)\n if DEBUG_LEVEL > 1:\n print \"Request: {}\\nHeaders: {}\".format(request_line, headers)\n\n request = HTTPRequest.HTTPRequest(request_line, headers, DEBUG_LEVEL)\n\n uri = request.get_uri_with_no_params()\n uri = uri[1:] if uri[0] == \"/\" else uri\n\n if uri in server_functions.AVAILABLE_FUNCTIONS.keys():\n response, flag = server_functions.\\\n AVAILABLE_FUNCTIONS[uri](request.get_params())\n self._client.send(response.build_response())\n return flag\n\n result = self._check_status_errors(request)\n if result == -1:\n return False\n elif result == 1:\n return True\n\n full_file_path = self._get_full_path(request)\n\n requested_file = open(full_file_path, \"r\")\n data = requested_file.read()\n requested_file.close()\n\n headers = HTTPHeaders.HTTPHeaders()\n public_response_functions.add_default_headers(headers)\n headers[\"Content-Length\"] = str(len(data))\n\n response = HTTPResponse.HTTPResponse(version=1.0, status_code=200,\n phrase=\"OK\", headers=headers)\n self._client.send(response.build_response() + data)\n return True",
"def send_request(self):\r\n x_pos = self.x_pos_list[self.num_points_done % self.x_num]\r\n y_pos = self.y_pos_list[self.num_points_done // self.x_num]\r\n \r\n # zigzag scanning to minimize backlash\r\n if np.where(self.y_pos_list == y_pos)[0][0] % 2 == 1: # for even-numbered rows\r\n original_index = self.num_points_done % self.x_num\r\n new_index = -1 * (original_index + 1) # counting from the end of the list\r\n x_pos = self.x_pos_list[new_index] # overwriting x_pos\r\n \r\n self.scan_request.emit(x_pos, y_pos, self.pmt_exposure_time_in_ms)",
"def sendResponse(self, to_url=None, from_url=None, sync_id=None, xml=None,\n domain=None, send=1, content_type='application/vnd.syncml+xml'):\n## LOG('sendResponse, domain.getPath(): ', INFO, domain.getPath())\n## LOG('sendResponse, to_url: ', INFO, to_url)\n## LOG('sendResponse, from_url: ', INFO, from_url)\n## LOG('sendResponse, sync_id: ', INFO, sync_id)\n## LOG('sendResponse, xml: \\n', INFO, xml)\n if content_type == 'application/vnd.syncml+wbxml':\n xml = xml2wbxml(xml)\n #LOG('sendHttpResponse, xml after wbxml: \\n', DEBUG, hexdump(xml))\n if domain is not None:\n gpg_key = domain.getGpgPublicKey()\n if gpg_key:\n filename = str(random.randrange(1,2147483600)) + '.txt'\n decrypted = file('/tmp/%s' % filename,'w')\n decrypted.write(xml)\n decrypted.close()\n (status,output)=commands.getstatusoutput('gzip /tmp/%s' % filename)\n (status,output)=commands.getstatusoutput('gpg --yes --homedir \\\n /var/lib/zope/Products/ERP5SyncML/gnupg_keys -r \"%s\" -se \\\n /tmp/%s.gz' % (gpg_key,filename))\n # LOG('sendResponse, gpg output:', DEBUG, output)\n encrypted = file('/tmp/%s.gz.gpg' % filename,'r')\n xml = encrypted.read()\n encrypted.close()\n commands.getstatusoutput('rm -f /tmp/%s.gz' % filename)\n commands.getstatusoutput('rm -f /tmp/%s.gz.gpg' % filename)\n if send:\n if isinstance(to_url, str):\n scheme = urlparse(to_url)[0]\n # XXX-Aurel a mapping between protocol-method should be\n # done instead of treating everything here\n if scheme in ('http', 'https'):\n if domain.getPortalType() == 'SyncML Publication' and not\\\n domain.getIsActivityEnabled():\n # not use activity\n # XXX Make sure this is not a problem\n return None\n #use activities to send send an http response\n #LOG('sendResponse, will start sendHttpResponse, xml', INFO, '')\n self.activate(activity='SQLQueue',\n tag=domain.getId(),\n priority=ACTIVITY_PRIORITY).sendHttpResponse(\n sync_id=sync_id,\n to_url=to_url,\n xml=xml,\n domain_path=domain.getPath(),\n content_type=content_type)\n elif scheme in ('file',):\n filename = to_url[len('file:/'):]\n stream = file(filename, 'w')\n stream.write(xml)\n stream.close()\n # we have to use local files (unit testing for example\n elif scheme in ('mailto',):\n # we will send an email\n to_address = to_url[len('mailto:'):]\n from_address = from_url[len('mailto:'):]\n self.sendMail(from_address, to_address, sync_id, xml)\n else:\n LOG(\"sendResponse\", WARNING, \"Unknown scheme %s for response %s : %s - %s\" %(domain.getPath(),\n scheme, to_url, xml))\n return xml",
"def send(self):\n \n # Generate the URL to call\n url = self._url + self._generate_query_string()\n logger.info('Sending request: %s' % url)\n \n # Generate GET request\n req = urllib2.Request(url=url)\n \n if not self._service.debug:\n try:\n f = urllib2.urlopen(req)\n data = f.read()\n f.close()\n \n # Log raw response\n logger.info('Raw response: %s' % data)\n \n except Exception, err:\n logger.exception('Request failed.')\n data = None\n else:\n # Debug data\n data = 'OK\\r\\nMessageID=1234'\n \n return self.parse_response(data)",
"def _send_in_request(self):\n try:\n req_params = urllib.urlencode(self._params)\n except Exception as ex:\n raise ProxyError('Error signing request string') \n \n try:\n self.logger.debug('Send api request to: %s' % self._api_url)\n self.logger.debug('Request params: %s' % req_params)\n self.logger.debug('Request timeout: %s' % self._timeout)\n if len(self._params) > 0:\n f = urllib2.urlopen(self._api_url, req_params, self._timeout)\n response = f.read()\n self.logger.debug('Response length: %s' % len(response))\n f.close() \n return response\n else:\n return \"{'command':'ping', 'message':'ok'}\" \n except (urllib2.URLError) as ex:\n self._error = json.loads(ex.fp.readline()).values()\n raise ProxyResponseError()\n except (IOError) as ex:\n raise ProxyError(ex)",
"def forward_request(self, method, hostname, params, permission):\n ogc_service = params.get('SERVICE', '')\n ogc_request = params.get('REQUEST', '').upper()\n\n stream = True\n if ogc_request in [\n 'GETCAPABILITIES', 'GETPROJECTSETTINGS', 'GETFEATUREINFO',\n 'DESCRIBEFEATURETYPE'\n ]:\n # do not stream if response is filtered\n stream = False\n\n # forward to QGIS server\n project_name = permission['qgs_project']\n url = urljoin(self.qgis_server_url, project_name)\n if method == 'POST':\n # log forward URL and params\n self.logger.info(\"Forward POST request to %s\" % url)\n self.logger.info(\" %s\" % (\"\\n \").join(\n (\"%s = %s\" % (k, v) for k, v, in params.items()))\n )\n\n response = requests.post(url, headers={'host': hostname},\n data=params, stream=stream)\n else:\n # log forward URL and params\n self.logger.info(\"Forward GET request to %s?%s\" %\n (url, urlencode(params)))\n\n response = requests.get(url, headers={'host': hostname},\n params=params, stream=stream)\n\n if response.status_code != requests.codes.ok:\n # handle internal server error\n self.logger.error(\"Internal Server Error:\\n\\n%s\" % response.text)\n\n exception = {\n 'code': \"UnknownError\",\n 'message': \"The server encountered an internal error or \"\n \"misconfiguration and was unable to complete your \"\n \"request.\"\n }\n return Response(\n self.service_exception(exception['code'], exception['message']),\n content_type='text/xml; charset=utf-8',\n status=200\n )\n # return filtered response\n elif ogc_service == 'WMS' and ogc_request in [\n 'GETCAPABILITIES', 'GETPROJECTSETTINGS'\n ]:\n return self.wms_getcapabilities(response, params, permission)\n elif ogc_service == 'WMS' and ogc_request == 'GETFEATUREINFO':\n return self.wms_getfeatureinfo(response, params, permission)\n # TODO: filter DescribeFeatureInfo\n else:\n # unfiltered streamed response\n return Response(\n stream_with_context(response.iter_content(chunk_size=16*1024)),\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def _send_request(self):\r\n headers = [\r\n \"GET /subscribe/%s/%s/0/%i?uuid=%s&auth=%s HTTP/1.1\" \\\r\n % (self.sub, self.chan, self.timestamp, self.uuid, self.auth),\r\n \"Accept-Encoding: gzip\",\r\n \"Host: pubsub.pubnub.com\",\r\n \"Connection: keep-alive\"]\r\n str_headers = \"%s\\r\\n\\r\\n\" % \"\\r\\n\".join(headers)\r\n self.sock.send(str_headers)\r\n return self._read_response_header()",
"def request(self, host, handler, request_body, verbose=0):\n self.verbose = verbose\n\n headers = {'Content-type': 'text/xml'}\n data = request_body\n req = urllib2.Request('http://' + host + handler, data, headers)\n\n response = self.opener.open(req)\n\n return self.parse_response(response)",
"def _assemble_and_send_request(self):\r\n client = self.client\r\n # Fire off the query.\r\n response = client.service.track(WebAuthenticationDetail=self.WebAuthenticationDetail,\r\n ClientDetail=self.ClientDetail,\r\n TransactionDetail=self.TransactionDetail,\r\n Version=self.VersionId,\r\n IncludeDetailedScans=self.IncludeDetailedScans,\r\n PackageIdentifier=self.TrackPackageIdentifier,\r\n TrackingNumberUniqueIdentifier = self.TrackingNumberUniqueIdentifier)\r\n\r\n return response",
"def __call__(self):\n params, method = parse_xmlrpc_request(self.request)\n return xmlrpc_response(getattr(self,method)(*params))",
"def execute(self):\n headers = {\n 'Content-type': 'application/x-www-form-urlencoded',\n 'Accept-Charset': 'utf-8',\n 'User-Agent': USER_AGENT\n }\n request = urllib2.Request(self.url(), headers=headers)\n response = urllib2.urlopen(request)\n \n return etree.parse(response)",
"def __send_response(self, response):\n logger.debug(' >>> %s', binascii.b2a_qp(response[0]))\n self.request.send(struct.pack('!I', len(response)))\n self.request.send(response)",
"def MySend(request_path, payload=None,\n content_type=\"application/octet-stream\",\n timeout=None, force_auth=True,\n **kwargs):\n # TODO: Don't require authentication. Let the server say\n # whether it is necessary.\n global rpc\n if rpc == None:\n \trpc = GetRpcServer(upload_options)\n self = rpc\n if not self.authenticated and force_auth:\n self._Authenticate()\n if request_path is None:\n return\n\n old_timeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(timeout)\n try:\n tries = 0\n while True:\n tries += 1\n args = dict(kwargs)\n url = \"http://%s%s\" % (self.host, request_path)\n if args:\n url += \"?\" + urllib.urlencode(args)\n req = self._CreateRequest(url=url, data=payload)\n req.add_header(\"Content-Type\", content_type)\n try:\n f = self.opener.open(req)\n response = f.read()\n f.close()\n return response\n except urllib2.HTTPError, e:\n if tries > 3:\n raise\n elif e.code == 401:\n self._Authenticate()\n elif e.code == 302:\n loc = e.info()[\"location\"]\n if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:\n return ''\n self._Authenticate()\n else:\n raise\n finally:\n socket.setdefaulttimeout(old_timeout)",
"def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()",
"def request(self, spec):\n r = rparse.parse_request(self.settings, spec)\n ret = r.serve(self.wfile, None, self.host)\n self.wfile.flush()\n return http.read_response(self.rfile, r.method, None)",
"def _send_receive(self, request):\n log.debug('Requesting from Stenograph: %s', request)\n response = self._machine.send_receive(request)\n log.debug('Response from Stenograph: %s', response)\n if response is None:\n \"\"\"No response implies device connection issue.\"\"\"\n raise IOError()\n elif response.packet_id == StenoPacket.ID_ERROR:\n \"\"\"Writer may reply with an error packet\"\"\"\n error_number = response.p1\n if error_number == 3:\n raise UnableToPerformRequestException()\n elif error_number == 7:\n raise FileNotAvailableException()\n elif error_number == 8:\n raise NoRealtimeFileException()\n elif error_number == 9:\n raise FinishedReadingClosedFileException()\n else:\n \"\"\"Writer has returned a packet\"\"\"\n if (response.packet_id != request.packet_id\n or response.sequence_number != request.sequence_number):\n raise ProtocolViolationException()\n return response",
"def do_POST(self):\n\n try:\n # get arguments\n data = self.rfile.read(int(self.headers[\"content-length\"]))\n # In previous versions of SimpleXMLRPCServer, _dispatch\n # could be overridden in this class, instead of in\n # SimpleXMLRPCDispatcher. To maintain backwards compatibility,\n # check to see if a subclass implements _dispatch and dispatch\n # using that method if present.\n response = self.server._marshaled_dispatch(\n data, getattr(self, '_dispatch', None)\n )\n except: # This should only happen if the module is buggy\n # internal error, report as HTTP server error\n self.send_response(500)\n self.end_headers()\n else:\n # got a valid XML RPC response\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/xml\")\n self.send_header(\"Content-length\", str(len(response)))\n self.end_headers()\n self.wfile.write(response)\n\n # shut down the connection\n self.wfile.flush()\n self.connection.shutdown() # Modified here!"
]
| [
"0.6229007",
"0.57445365",
"0.5311812",
"0.53040063",
"0.5297168",
"0.52688396",
"0.5243111",
"0.52420366",
"0.5171214",
"0.51392835",
"0.51179385",
"0.5113762",
"0.5070183",
"0.50646555",
"0.5036037",
"0.5022937",
"0.50217754",
"0.50087965",
"0.5005785",
"0.49964833",
"0.49796712",
"0.49443197",
"0.49441254",
"0.49427778",
"0.49240032",
"0.49092624",
"0.49009556",
"0.48788363",
"0.4877222",
"0.48752055"
]
| 0.57920414 | 1 |
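The send_request entry above is tied to the Violin vxg driver's XGRequest/XGResponse classes and its keepalive re-login; stripped of those, the round trip is an XML POST, sketched below with only the standard library (the URL and payload are caller-supplied placeholders):

```python
import urllib.request

def post_xml(url: str, xml_payload: bytes) -> bytes:
    """POST an XML body and return the raw response bytes for later parsing."""
    req = urllib.request.Request(url, data=xml_payload,
                                 headers={"Content-Type": "text/xml"})
    with urllib.request.urlopen(req) as resp:
        return resp.read()
```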
Retrieve a "flat" list of XGNode objects based on node_names. If you wish to perform some iteration over a representational hierarchy, use get_node_tree() instead. This takes similar arguments to get_node_values() but returns all the node infomation received from the gateway in terms of XGNode objects. | def get_nodes(self, node_names, nostate=False, noconfig=False):
return self._get_nodes(node_names, nostate, noconfig, flat=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nodes(self, names):\n nodes = []\n for name in names:\n node = self.get_node(name, prevent_error=True)\n if node == None:\n if verbose:\n print('Warning: could not find a TreeNode named {}.'.format(name))\n else:\n nodes.append(node)\n return nodes",
"def get_node_list(self):\n return [[node] for node in self.graph.nodes]",
"def get_node_names(self) -> List[str]:\n\t\t# Variables\n\t\tnames: List[str] = []\n\n\t\t# Iterate over nodes\n\t\tfor node in self.nodes:\n\t\t\tnames.append(node.name)\n\t\t# Return Names\n\t\treturn sorted(names, key=str.lower)",
"def all_nodes_as_iterable(self, include_metadata: bool = False) -> Generator:\n if include_metadata:\n return [\n (self._names.get_name(i), self._meta.get_node(self._names.get_name(i)))\n for i in self._nk_graph.iterNodes()\n ]\n return [self._names.get_name(i) for i in self._nk_graph.iterNodes()]",
"def get_nodes(self) -> List[Node]:\n\t\treturn sorted(self.nodes, key=lambda x: x.name.lower())",
"def get_nodes(self):\n self.map_graph_id()\n self.nodes_list = [\n self.NX_GRAPHS[self.graph_id].nodes[idx]['label'] \n for idx in range(len(self.NX_GRAPHS[self.graph_id].nodes))]",
"def get_nodes(self):\n return list(map(lambda x: x[0], self.__nodes))",
"def get_nodes(self):\n return [node for node in self._nodes.itervalues()]",
"def get_nodes(self):\n return [node for node in self._nodes.itervalues()]",
"def get_nodes(self) -> List[Node]:\n\t\t# Setup a node holder\n\t\tnode_holder: NodeSubNodeHolder = NodeSubNodeHolder()\n\n\t\t# Iterate over RootNodes\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Iterate over node in each RootNode\n\t\t\tfor node in rootnode.get_nodes():\n\t\t\t\t# Add the Nodes to the node_holder\n\t\t\t\tnode_holder.add_node(node)\n\n\t\t# Return the list of nodes from node_holder\n\t\treturn node_holder.get_nodes()",
"def node_names(self):\n\n for node_name in self.nodes.keys():\n\n yield node_name",
"def get_nodes():\n nodes_config_file = Settings.CONF_NODES_FILE\n current_nodes = load_node_names(nodes_config_file)\n\n return current_nodes",
"def get_nodes(self):\n all_nodes = [] \n if not self._root is None:\n all_nodes.append(self._root)\n i = 0\n while i < len(all_nodes):\n for node in all_nodes[i]._children:\n all_nodes.append(node)\n i += 1 \n return all_nodes",
"def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids",
"def getNodes(self):\n return [ node for node in sorted(self._nodes.values()) ]",
"def get_all_nodes(self):\n # NOTE: return copy, so no one will screw\n # our list?\n return self.nodes",
"def list_nodes(self):\n return self.ironic_client.node.list()",
"def list_nodes(self):\n nodes = self.nodes\n result = []\n for i_node in self.iapi.node.list():\n if i_node.name:\n name = i_node.name\n else:\n # Sometimes Ironic does not show the names, pull them from Nova if possible.\n selected_nova_node = None\n for nova_node in nodes:\n if getattr(\n nova_node, 'OS-EXT-SRV-ATTR:hypervisor_hostname', None) == i_node.uuid:\n selected_nova_node = nova_node\n break\n if selected_nova_node:\n name = selected_nova_node.name\n else:\n name = None\n result.append(Node(i_node.uuid, name, i_node.power_state, i_node.provision_state))\n return result",
"def get_node_list(self):\n return []",
"def getNodeNames(self, includeDisabled=False):",
"def getNodes(self):\n nodes = [{\"address\": \"http://0.0.0.0:100\"}\n ,{\"address\": \"http://0.0.0.0:200\"}\n ,{\"address\": \"http://0.0.0.0:300\"}\n ,{\"address\": \"http://0.0.0.0:400\"}\n ,{\"address\": \"http://0.0.0.0:500\"}]\n return nodes",
"def get_node_list(self):\n return self.node_list",
"def nodeNames(self):\n return self.backend.configuration.getNodeNames()",
"def get_node_names(self):\n return set({node.get_name() for node in self.get_nodeset()}) # return the set of names",
"def nodes(self):\n return list(self._nodes_dict.values())",
"def GetNodes(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n nodes = self._SendRequest(HTTP_GET, \"/%s/nodes\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return nodes\n else:\n return [n[\"id\"] for n in nodes]",
"def get_all_nodes(self):\n return self._get_all_nodes()",
"def get_nodes(self):\n return_set = set()\n for value in self._name:\n return_set.add(value)\n return return_set",
"def getNodes(self):\n return self.__allNodes",
"def get_nodes(self):\n\n return list(self.graph.nodes)"
]
| [
"0.70559436",
"0.6798552",
"0.67908376",
"0.6708074",
"0.6707484",
"0.6662926",
"0.6562542",
"0.6462647",
"0.6462647",
"0.6373096",
"0.63702697",
"0.63507134",
"0.6265725",
"0.6254135",
"0.6243567",
"0.6224585",
"0.6222014",
"0.6214804",
"0.62056595",
"0.6203174",
"0.6191699",
"0.61856294",
"0.61848915",
"0.6150531",
"0.6148216",
"0.6146966",
"0.6144925",
"0.6123805",
"0.6123769",
"0.6121948"
]
| 0.69900393 | 1 |
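To illustrate the flat-list-versus-hierarchy distinction drawn in the get_nodes docstring above, here is a toy example (the dictionary tree and paths are invented; they are not the gateway's real node layout):

```python
# Toy node tree, flattened into the kind of flat (path, value) list get_nodes()
# returns, as opposed to the nested form get_node_tree() would preserve.
tree = {"system": {"version": "7.5", "uptime": 12345}}

def flatten(prefix, subtree):
    for name, value in subtree.items():
        path = f"{prefix}/{name}"
        if isinstance(value, dict):
            yield from flatten(path, value)
        else:
            yield (path, value)

print(list(flatten("", tree)))   # [('/system/version', '7.5'), ('/system/uptime', 12345)]
```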
Performs a 'set' using the nodes specified and returns the result. | def perform_set(self, nodes=[]):
# Input validation
try:
# Works for XGNodeDict input
set_nodes = nodes.get_updates()
except (AttributeError, TypeError):
# Assume list instead
set_nodes = nodes
if not isinstance(set_nodes, list):
raise ValueError('Expecting nodes to be of type list')
else:
for x in set_nodes:
if not isinstance(x, XGNode):
raise ValueError('Invalid node: {0}'.format(x.__class__))
req = cinder.volume.drivers.violin.vxg.core.request.XGSet(set_nodes)
resp = self.send_request(req)
try:
# Works for XGNodeDict input, clear the tracked modifications
nodes.clear_updates()
except (AttributeError, TypeError):
pass
return resp.as_action_result() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_set(node):\n node.parent = node\n node.rank = 0",
"def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)",
"def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100",
"def Set(*args):\n return _XCAFDoc.XCAFDoc_GraphNode_Set(*args)",
"def put_node_set(self, node_set_id, node_set_node_list):\n ierr = exolib.py_expns(self.exoid, node_set_id,\n node_set_node_list + self._o)\n if ierr:\n raise ExodusIIWriterError(\"Error putting node set\")",
"def set():",
"def XCAFDoc_GraphNode_Set(*args):\n return _XCAFDoc.XCAFDoc_GraphNode_Set(*args)",
"def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s",
"def compute_nodeset(data):\n xset = NodeSet()\n for nodeset in data.split():\n xset.update(nodeset)\n return xset",
"def set_nodeset(self, nodeset):\n self.nodeset = set(nodeset) # overwrite the existing nodeset with the input nodeset\n\n self.__check_validity() # check if graph is valid - throws exception if not",
"def set(self, node, value):\n self.val[node] = value",
"def get_nodeset(self):\n return set(self.nodeset) # return the nodeset",
"def parseSet(cmds):\n if len(cmds) != 0:\n first = str.strip(cmds[0])\n if first[0] == 'w':\n pass\n elif first[0] == 'r':\n pass\n else:\n parseExpr(first)\n parseSet(cmds[1:])",
"def post_nodeset(body): # noqa: E501\n if connexion.request.is_json:\n body = NodeSet.from_dict(connexion.request.get_json()) # noqa: E501\n return NodesetController.post_nodeset(body)",
"def _apply_to_sets(self, func, operation, keys, *args):\n keys = self._list_or_args(keys, args)\n if not keys:\n raise TypeError(\"{} takes at least two arguments\".format(operation.lower()))\n left = self._get_set(keys[0], operation) or set()\n for key in keys[1:]:\n right = self._get_set(key, operation) or set()\n left = func(left, right)\n return left",
"def handle_set(self, agent) -> Tuple[Optional[str], Any]:\n ref_obj_d = {\"filters\": self.action_dict[\"filters\"]}\n ref_objs = self.subinterpret[\"reference_objects\"](\n self, self.speaker_name, ref_obj_d, extra_tags=[\"_physical_object\"]\n )\n if len(ref_objs) == 0:\n raise ErrorWithResponse(\"I don't know what you're referring to\")\n\n triples_d = self.action_dict[\"upsert\"][\"memory_data\"].get(\"triples\")\n if len(triples_d) == 1 and triples_d[0][\"pred_text\"] == \"has_name\":\n # the set has a name; check to see if one with that name exists,\n # if so add to it, else create one with that name\n name = triples_d[0][\"obj_text\"]\n set_memids, _ = self.memory.basic_search(\n \"SELECT MEMORY FROM Set WHERE (has_name={} OR name={})\".format(name, name)\n )\n if not set_memids:\n # make a new set, and name it\n set_memid = SetNode.create(self.memory)\n self.memory.add_triple(subj=set_memid, pred_text=\"has_name\", obj_text=name)\n else:\n # FIXME, which one\n set_memid = set_memids[0]\n else:\n # an anonymous set, assuming its new, and defined to hold the triple(s)\n set_memid = SetNode.create(self.memory)\n for t in triples_d:\n self.memory.add_triple(\n subj=set_memid, pred_text=t[\"pred_text\"], obj_text=t[\"obj_text\"]\n )\n for r in ref_objs:\n self.memory.add_triple(subj=r.memid, pred_text=\"member_of\", obj=set_memid)\n\n # FIXME point to the objects put in the set, otherwise explain this better\n self.memory.dialogue_stack_append_new(Say, \"OK made those objects into a set \")\n return None, None",
"def nodes(self, nodes_array):\n self.nodes_set = nodes_array",
"def select(elements, val=True):\n for el in elements:\n el.select_set(val)",
"def set():\n pass",
"def set(x):\n pass",
"def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s",
"def nodes(self, nodes):\n\n self._nodes = nodes",
"def nodeset(self):\n return self._nodeset",
"def xpathNewNodeSet(self):\n ret = libxml2mod.xmlXPathNewNodeSet(self._o)\n if ret is None:raise xpathError('xmlXPathNewNodeSet() failed')\n return xpathObjectRet(ret)",
"def list_to_set(llist : LinkedList) -> set:\n current_node = llist.head\n lset = set()\n while current_node is not None:\n lset.add(current_node.value)\n current_node = current_node.next\n \n return lset",
"def nodes(self, nodes):\n global g_npoints\n for osmid, tags, (lng, lat) in nodes:\n if 'name' in tags:\n\n # Build a synthetic value by copying the tags and\n # adding osmid, latitude and longitude.\n valobj = tags.copy()\n valobj['osmid'] = osmid\n valobj['latitude'] = lat\n valobj['longitude'] = lng\n valstr = json.dumps(valobj)\n\n # Construct a GeoJSON bin value to be indexed.\n locobj = { 'type': \"Point\", 'coordinates': [ lng, lat ] }\n locgeo = aerospike.GeoJSON(locobj)\n\n # Make a hash of the id to use for random selection.\n hshval = self.id_to_hash(osmid)\n\n key = (self.args.nspace, self.args.set, osmid)\n \n self.client.put(key, { VALBIN: valstr,\n LOCBIN: locgeo,\n MAPBIN: valobj,\n HSHBIN: hshval },\n policy={ 'timeout': 10000,\n 'retry': 10 })\n\n self.npoints += 1\n if self.npoints % 1000 == 0:\n sys.stderr.write('.')",
"def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')",
"def valueSet(rbt):\n try:\n vlist = lt.newList('SINGLE_LINKED', rbt['cmpfunction'])\n vlist = valueSetTree(rbt['root'], vlist)\n return vlist\n except Exception as exp:\n error.reraise(exp, 'RBT:valueSet')",
"def run(self, *args, **kwargs):\n\n nodeValuePairs = self.fetch('nodeValuePairs', None)\n\n for nodeValuePair in nodeValuePairs:\n node = nodeValuePair[0]\n value = nodeValuePair[1]\n self.setNodeDatum(node, value)\n\n self.puts(success=True)\n return",
"def set(node_key:str, property_name:str, value):\r\n node_names = split_node_key(node_key)\r\n node = root\r\n for node_name in node_names:\r\n node = node.nodes[node_name]\r\n node.properties[property_name] = value"
]
| [
"0.69223654",
"0.6857986",
"0.657144",
"0.64777136",
"0.6360099",
"0.6342601",
"0.6235472",
"0.6176727",
"0.616161",
"0.6120538",
"0.5858361",
"0.58278024",
"0.58103836",
"0.58078146",
"0.5798671",
"0.574837",
"0.5694906",
"0.5672775",
"0.5651913",
"0.56457376",
"0.56296605",
"0.5628349",
"0.55930424",
"0.55144024",
"0.5498592",
"0.5468185",
"0.5452013",
"0.5449529",
"0.54489255",
"0.5438072"
]
| 0.76450825 | 0 |
Sets nodes per dict with node name > value mappings. | def set_nodes_values(self, node_dict):
# Requires nodes to have type defined in lookup array
raise Exception("Not yet implemented.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_nodes(self, ndict):\n self.inode_ref = ndict[self.inode]\n self.jnode_ref = ndict[self.jnode]",
"def set_values(self, new_values):\n for name, value in new_values.items():\n self.nodes_db.loc[name][\"node\"].set_value(value)",
"def _init_nodes(self, nodes):\n attributes = self.get_node_attributes()\n for node in nodes:\n if not self._is_node_added(node):\n self._nodes.append(self._get_node_as_dictionary(node, attributes))",
"def remap_nodes(self, new_node_mapping):\n # because all nodes are SchemaNodeIDs (i.e. objects), we only need to reassign nodes one way\n # changes propagate to chains, chain root_nodes, and parents automatically\n for chain in self.chains:\n for edge in chain:\n head, tail = edge\n if head in new_node_mapping.keys():\n head.value = new_node_mapping[head]\n if tail in new_node_mapping.keys():\n tail.value = new_node_mapping[tail]",
"def node_mapping(self):\n ...",
"def __init__(self, nodes):\n self.parents = {}\n self.ranks = {}\n\n for node in nodes:\n self.parents[node] = node\n self.ranks[node] = 0",
"def set_node_value(self, node_name, node_dict: dict):\n\n message = SetValueMessage(id=node_name, values=node_dict)\n requests.post(self.channel, data=message.json(), params=\"set_node\")",
"def assign_parameters_to_specific_nodes(G, nodes, **kwargs):\r\n parameters = get_parameters(**kwargs)\r\n for node in nodes:\r\n for key, val in parameters.items():\r\n G.nodes[node][key] = val\r\n return G",
"async def set_nodes(self, node_callq: Dict):\n for svc in self._services:\n await svc.set_nodes(node_callq)",
"def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)",
"def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)",
"def set(node_key:str, property_name:str, value):\r\n node_names = split_node_key(node_key)\r\n node = root\r\n for node_name in node_names:\r\n node = node.nodes[node_name]\r\n node.properties[property_name] = value",
"def set_node_positions(self):",
"def adjust_dict(ndict, nodes, dist1, dist2, lcnt):\n if len(set(ndict)) < len(nodes):\n exdict = map_nodes(dist1, dist2, lcnt, 'unique')\n for i in range(0, len(nodes)):\n if i not in ndict:\n ndict[exdict[i]]=i\n return ndict",
"def _add_node_attributes(self):\n ensemble_mapping = SankeyLayout._ensemble_map(\n df=self.supergraph.gf.df, nxg=self.nxg, columns=SankeyLayout._COLUMNS\n )\n for idx, key in enumerate(ensemble_mapping):\n nx.set_node_attributes(self.nxg, name=key, values=ensemble_mapping[key])\n\n dataset_mapping = {}\n for run in self.runs:\n dataset_mapping[run] = SankeyLayout._dataset_map(\n df=self.supergraph.gf.df,\n nxg=self.nxg,\n tag=run,\n columns=SankeyLayout._COLUMNS,\n )\n nx.set_node_attributes(\n self.nxg, name=self.supergraph.tag, values=dataset_mapping[run]\n )",
"def set(cls, hierarchical_dict: dict, key: str, value: Any) -> None:\n # split according to '.'\n hierarchical_key = key.split(\".\")\n\n # go over the the dictionary according to the path, create the nodes that does not exist\n element = hierarchical_dict\n for key in hierarchical_key[:-1]:\n if key not in element:\n element[key] = {}\n element = element[key]\n\n # set the value\n element[hierarchical_key[-1]] = value",
"def test_set_node_properties(self):\n\n pass",
"def set(self, node, value):\n self.val[node] = value",
"def copy_node(self, from_: str, to_: str):\n self._nodes[to_] = dict(self._nodes[from_])",
"def add_node(self, n):\n self.node_dict.setdefault(n, OrderedDict())",
"def nodes(self, nodes):\n\n self._nodes = nodes",
"def _add_nodes_to_mapping(self, nodes: Iterable[graph.Node]) -> None:\n nodes = filter(lambda node: node.element_id not in self._id_to_obj, nodes)\n if not nodes:\n logger.debug(\n \"No nodes to parse packs because all of them in mapping\",\n self._id_to_obj,\n )\n return\n with Pool(processes=cpu_count()) as pool:\n results = pool.starmap(\n _parse_node, ((node.element_id, dict(node.items())) for node in nodes)\n )\n for result in results:\n assert result.database_id is not None\n self._id_to_obj[result.database_id] = result",
"def __init__(self, nodes):\n\t\t\n\t\tself.variables = dict([(n.name, n) for n in nodes])\n\t\tself.roots = [n for n in nodes if not n.parents]\n\t\tself.nodes = nodes",
"def set_nodeprops(self, nodeprops):\n assert isinstance(nodeprops, dict), \"nodeprops must be a dictionary, even if empty\"\n self.nodeprops = nodeprops",
"def nodes(self, nodes):\n global g_npoints\n for osmid, tags, (lng, lat) in nodes:\n if 'name' in tags:\n\n # Build a synthetic value by copying the tags and\n # adding osmid, latitude and longitude.\n valobj = tags.copy()\n valobj['osmid'] = osmid\n valobj['latitude'] = lat\n valobj['longitude'] = lng\n valstr = json.dumps(valobj)\n\n # Construct a GeoJSON bin value to be indexed.\n locobj = { 'type': \"Point\", 'coordinates': [ lng, lat ] }\n locgeo = aerospike.GeoJSON(locobj)\n\n # Make a hash of the id to use for random selection.\n hshval = self.id_to_hash(osmid)\n\n key = (self.args.nspace, self.args.set, osmid)\n \n self.client.put(key, { VALBIN: valstr,\n LOCBIN: locgeo,\n MAPBIN: valobj,\n HSHBIN: hshval },\n policy={ 'timeout': 10000,\n 'retry': 10 })\n\n self.npoints += 1\n if self.npoints % 1000 == 0:\n sys.stderr.write('.')",
"def assign_parameters_to_nodes(G, **kwargs):\r\n parameters = get_parameters(**kwargs)\r\n for key, val in parameters.items():\r\n nx.set_node_attributes(G, values=val, name=key)\r\n return G",
"def set_nodes(self, nodes):\n self._drv_nodes = nodes",
"def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node",
"def nodes_mapped(instance):\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n\n node_dict_mapped = {}\n\n for old_label, new_label in mapping.items():\n for node, ammentity in node_dict.items():\n if old_label == node:\n node_dict_mapped[new_label] = ammentity\n\n return node_dict_mapped",
"def _populate(self, json):\n if json != {}:\n new_nodes = [\n LKENodePoolNode(self._client, c) for c in json[\"nodes\"]\n ]\n json[\"nodes\"] = new_nodes\n\n super()._populate(json)"
]
| [
"0.74113405",
"0.67245656",
"0.6602044",
"0.6529335",
"0.643137",
"0.61999345",
"0.6081437",
"0.6069941",
"0.6063902",
"0.6053018",
"0.6053018",
"0.6028629",
"0.6014309",
"0.5999404",
"0.5981748",
"0.597711",
"0.59712344",
"0.595847",
"0.59258413",
"0.589504",
"0.58826923",
"0.58766645",
"0.5859602",
"0.5843177",
"0.58407074",
"0.5810581",
"0.5795605",
"0.5772524",
"0.57575786",
"0.57563955"
]
| 0.747575 | 0 |
returns relative frequency of a category, and words linked to categories | def getAll(text):
URIs = getURIs(text)
categories=getCategories(URIs[0], URIs[1] )
return categoryFrequency(categories[0]),categories[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def categoryFrequency(categoryList): #TODO delete units\n n=len(categoryList)\n freq = dict()\n for i in categoryList:\n if i in freq.keys():\n freq[i]=freq[i]+1/float(n)\n else:\n freq[i]=1/float(n)\n sortedFreq=sorted([(v,k) for (k,v) in freq.items()], reverse = True)[:10]\n freq=dict([(k,v) for (v,k) in sortedFreq])\n return freq",
"def word_frequencies(corpus):\n return frequencies(corpus, 1, to_lower=True)",
"def lp(word, category, unique, k, name=\"category\"):\n\t\tp1 = category.count(word) + k\n\t\tp2 = len(category) + unique\n\t\tprint(word + \" in \"+name+\": \" + str((p1 * 1.0) / (p2 * 1.0)))\n\t\treturn (p1 * 1.0) / (p2 * 1.0)",
"def cat_count(data, column_str, criteria):\r\n ct1 = []\r\n ct2 = []\r\n for i in range(len(find_cats_freq(data, column_str))):\r\n ct1.append(find_cats_freq(data[criteria], column_str)[i])\r\n ct2.append(find_cats_freq(data, column_str)[i])\r\n return np.array(ct1)/np.array(ct2)",
"def catsPerWord(self, thresh):\n totalCats = 0\n words = 0\n for word, wordFreqs in self.lex.items():\n dictSize = len([f for f in wordFreqs.values() if f >= thresh])\n totalCats += dictSize\n if sum(wordFreqs.values()) >= 20:\n words += 1\n return float(totalCats)/float(words)",
"def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)",
"def ngram_frequency(word):\r\n\tword = word.lower()\r\n\tword = re.sub(r'[^A-Za-z. ]','',word)\r\n\tngram_statistics = {}\r\n\tngram_categorization_model_keys = []\r\n\tngram_categorization_model_occurances = []\r\n\tres = [0 for _ in range(0,300)]\r\n\tfor ituple in ngram_categorization_model:\r\n\t\tngram_categorization_model_keys.append(ituple[0])\r\n\t\tngram_categorization_model_occurances.append(int(ituple[1]))\r\n\tfor grams in range(2,6):\r\n\t\tfor i in range(len(word)-grams+1):\r\n\t\t\tseq = word[i:i+grams]\r\n\t\t\tif seq not in ngram_statistics.keys():\r\n\t\t\t\tngram_statistics.update({seq:1})\r\n\t\t\telse:\r\n\t\t\t\tngram_occurances = ngram_statistics[seq]\r\n\t\t\t\tngram_statistics.update({seq:ngram_occurances+1})\r\n\tngram_frequency_keys = ngram_statistics.keys()\r\n\tngram_frequency_occurances = list(ngram_statistics.values())\r\n\tfor index, val in enumerate(ngram_categorization_model_keys):\r\n\t\tfor index1, val1 in enumerate(ngram_frequency_keys):\r\n\t\t\tif val == val1:\r\n\t\t\t\tres[index] = ngram_categorization_model_occurances[index]*ngram_frequency_occurances[index1]\r\n\treturn res",
"def word_frequencies(url):\n\ttexts = get_all_texts(url)\n\tcount = count_words_in_sentence_list(texts)\n\treturn count",
"def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END",
"def word_rel_freq(word,prefix,corpus=EL_corpus):\n try:\n return word_count.get(word,0) / prefix_count.get(prefix,0)\n except ZeroDivisionError, e:\n return 0",
"def get_category_count(self, category):\r\n if category in self.category_count:\r\n return float(self.category_count[category])\r\n else:\r\n return 0.0",
"def freq():",
"def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)",
"def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq",
"def score(self, doc, c):\n # >>> YOUR ANSWER HERE\n # the inner loop in the TEST NAIVE BAYES, sum up the logprior of the class and all words' loglikelihood\n sum = self.logprior[c]\n words = doc.split()\n for w in words:\n if w in self.vocabulary:\n sum += self.loglikelihood[(w, c)]\n return sum\n # >>> END YOUR ANSWER",
"def percent_frequencies(self):\n word_count = 0\n local = self.frequencies()\n for key in local.keys():\n i = local[key]\n word_count += int(i)\n for key in local.keys():\n i = local[key]\n percentage = float(i) / float(word_count)\n local[key] = percentage\n return local",
"def weights_by_category(self):\n cate_weights = {}\n for cate in self.unique_category:\n cate_weights[cate] = self.weights[self.category == cate].sum()\n return pd.Series(cate_weights, index=self.unique_category)",
"def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n for w in l1:\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n if w in l2:\n count += 1\n found_idfs.append(val)\n else:\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n\n\n return count / self.normalize_factor, avg_found, avg_unfound",
"def freq(self) -> int:",
"def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)",
"def _compute_frequencies(self, word_sent):\n freq = defaultdict(int)\n for s in word_sent:\n for word in s:\n if word not in self._stopwords:\n freq[word] += 1\n # frequencies normalization and fitering\n m = float(max(freq.values()))\n for w in freq.keys():\n freq[w] = freq[w]/m\n if freq[w] >= self._max_cut or freq[w] <= self._min_cut:\n del freq[w]\n return freq",
"def get_word_frequencies(topic_description):\n frequencies = {w:f for w,f in topic_description}\n return frequencies",
"def wordsByCategoryName(self, category):\n\t\ttry:\n\t\t\treturn (self.dictData[category])\n\t\texcept KeyError:\n\t\t\tprint (\"La categoría ingresada no existe.\")",
"def get_corpus_counts(x,y,label):\n corpus_counts = defaultdict(float)\n for pos, curr_label in enumerate(y):\n if curr_label == label:\n for word in x[pos]:\n corpus_counts[word] += x[pos][word]\n return corpus_counts",
"def _compute_frequencies( word_sent):\n\t\tfreq = defaultdict(int)\n\t\tfor s in word_sent:\n\t\t\tfor word in s:\n\t\t\t\tif word not in _stopwords:\n\t\t\t\t\tfreq[word] += 1\n\t\t\t\t# frequencies normalization and fitering\n\t\treturn freq",
"def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq",
"def compute_frequencies(num_words, documents):\n res = [0 for i in range(num_words)]\n sum = 0\n for word in documents:\n sum += 1\n tmp = set(word)\n for number in tmp:\n res[number] += 1\n \n res = [i / sum for i in res]\n return res",
"def get_freqs(self):\n dictionary = {}\n for word in self.word_list:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n letter_sorted = sorted(dictionary.items(), key=lambda entry: entry[0]) #sorts dictionary into alphabetized tuples\n count_sorted = sorted(letter_sorted, key=lambda seq: seq[1], reverse=True) #sorts alphabetical tuples into count order\n return count_sorted",
"def count_words(title_pair: np.array) -> float:\r\n title_1, title_2 = title_pair\r\n # Transform into sets of words\r\n title_1 = set(title_1.split())\r\n title_2 = set(title_2.split())\r\n # Divide length of intersection by length of union\r\n ratio = len(title_1.intersection(title_2)) / len(title_1.union(title_2))\r\n return ratio",
"def freqWords(self, words):\n return nltk.FreqDist(words)"
]
| [
"0.66775525",
"0.64714974",
"0.6468304",
"0.6424987",
"0.6397465",
"0.63883096",
"0.6299494",
"0.62394077",
"0.6195926",
"0.6182201",
"0.6174687",
"0.6160413",
"0.6156063",
"0.6153839",
"0.612197",
"0.6117892",
"0.60976714",
"0.609022",
"0.6082632",
"0.6076902",
"0.6010695",
"0.6010248",
"0.5988943",
"0.5973417",
"0.59673554",
"0.59655565",
"0.59575766",
"0.5940155",
"0.59227985",
"0.5912934"
]
| 0.70378643 | 0 |
Returns searcher with boosts applied | def apply_boosts(searcher):
return searcher.boost(
question_title=4.0,
question_content=3.0,
question_answer_content=3.0,
post_title=2.0,
post_content=1.0,
document_title=6.0,
document_content=1.0,
document_keywords=8.0,
document_summary=2.0,
# Text phrases in document titles and content get an extra boost.
document_title__match_phrase=10.0,
document_content__match_phrase=8.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_boost(self, search_boost):\n\n self._search_boost = search_boost",
"def search(self, query, maxhits=100):",
"def _add_better_search_words(self):\n for kw in self.better_search_kw:\n self.search_query += kw",
"def search(self, text, scope=None, limit=20):\n\t\tix = self.get_index()\n\n\t\tresults = None\n\n\t\tsearch_fields = self.get_fields_to_search()\n\t\tfieldboosts = {}\n\n\t\t# apply reducing boost on fields based on order. 1.0, 0.5, 0.33 and so on\n\t\tfor idx, field in enumerate(search_fields, start=1):\n\t\t\tfieldboosts[field] = 1.0 / idx\n\n\t\twith ix.searcher() as searcher:\n\t\t\tparser = MultifieldParser(\n\t\t\t\tsearch_fields, ix.schema, termclass=FuzzyTermExtended, fieldboosts=fieldboosts\n\t\t\t)\n\t\t\tparser.remove_plugin_class(FieldsPlugin)\n\t\t\tparser.remove_plugin_class(WildcardPlugin)\n\t\t\tquery = parser.parse(text)\n\n\t\t\tfilter_scoped = None\n\t\t\tif scope:\n\t\t\t\tfilter_scoped = Prefix(self.id, scope)\n\t\t\tresults = searcher.search(query, limit=limit, filter=filter_scoped)\n\n\t\t\treturn [self.parse_result(r) for r in results]",
"def searchRef(self, searchStr):\n filter = []\n attr = self.__listAttr()\n for name in attr:\n if searchStr.lower() in name.lower():\n doc = getattr(self, name)\n filter.append([name, doc]) \n # if in gloss, search for synonymes\n elif name in self.__glossIndex.keys():\n for altName in self.__glossIndex[name]['syn']:\n if searchStr in altName or altName in searchStr:\n doc = getattr(self, name)\n filter.append([name, doc])\n break\n \n return filter",
"def search(self, term):",
"def search(self, query, limit = 5000,\r\n weighting = None,\r\n sortedby = None, reverse = False):\r\n \r\n doc_reader = self.doc_reader\r\n \r\n t = time.time()\r\n if sortedby is not None:\r\n if isinstance(sortedby, basestring):\r\n sortedby = scoring.FieldSorter(sortedby)\r\n elif isinstance(sortedby, (list, tuple)):\r\n sortedby = scoring.MultiFieldSorter(sortedby)\r\n elif callable(sortedby):\r\n sortedby = sortedby()\r\n \r\n scored_list = sortedby.order(self, query.docs(self), reverse = reverse)\r\n scores = None\r\n docvector = BitVector(doc_reader.doc_count_all(),\r\n source = scored_list)\r\n if len(scored_list) > limit:\r\n scored_list = list(scored_list)[:limit]\r\n else:\r\n # Sort by scores\r\n topdocs = TopDocs(limit, doc_reader.doc_count_all())\r\n topdocs.add_all(query.doc_scores(self, weighting = weighting or self.weighting))\r\n \r\n best = topdocs.best()\r\n if best:\r\n # topdocs.best() returns a list like\r\n # [(docnum, score), (docnum, score), ... ]\r\n # This unpacks that into two lists: docnums and scores\r\n scored_list, scores = zip(*topdocs.best())\r\n else:\r\n scored_list = []\r\n scores = []\r\n \r\n docvector = topdocs.docs\r\n t = time.time() - t\r\n \r\n return Results(self,\r\n query,\r\n scored_list,\r\n docvector,\r\n runtime = t,\r\n scores = scores)",
"def find(terms):\n terms = ' '.join(terms)\n searcher = IndexSearcher(STORE)\n\n SHOULD = BooleanClause.Occur.SHOULD\n\n query = MultiFieldQueryParser.parse(terms, \n ['name_', 'full_text'], [SHOULD, SHOULD], StandardAnalyzer())\n hits = searcher.search(query)\n\n ret = []\n for i, hit in enumerate(hits):\n doc = Hit.cast_(hit).getDocument()\n ret.append(MyHit(doc, hits, i))\n if i == 10:\n break\n\n return ret",
"def search_products(phrase):\n sv = (SearchVector('name', weight='A') +\n SearchVector('description', weight='B'))\n rank = SearchRank(sv, SearchQuery(phrase))\n return Product.objects.annotate(rank=rank).filter(\n rank__gte=0.2).order_by('-rank')",
"def construct_search(barcodes, args):\n\tsearch = []\n\tfor i, set in enumerate(barcodes):\n\t\tname = list(barcodes[i].keys())[0]\n\t\t\n\t\t# if type is variable, construct regex to match \n\t\tif barcodes[i][name]['type'] == 'variable':\n\t\t\t\n\t\t\t# add type, name, and bool specifiying if we want to translate\n\t\t\tsearch_dict = {'type':'variable'}\n\t\t\tsearch_dict['name'] = name\n\t\t\tsearch_dict['trans'] = barcodes[i][name]['translate']\n\t\t\t\n\t\t\t# if we allow mismatches\n\t\t\tif 'mismatches' in barcodes[i][name]:\n\t\t\t\tmismatches = barcodes[i][name]['mismatches']\n\t\t\telse:\n\t\t\t\tmismatches = 0\n\t\t\t\n\t\t\t# construct regex for searching\n\t\t\tsearch_dict['forward'] = construct_variable_regex(barcodes[i][name]['before'], barcodes[i][name]['after'], mismatches)\n\t\t\t\n\t\t\t#search_dict['forward'] = f\"{barcodes[i][name]['before']}(.+){barcodes[i][name]['after']}\"\n\t\t\tsearch.append(search_dict)\n\t\t\t\n\t\t# if type is constant, we need to check if we are allowing mismatches or not\n\t\telif barcodes[i][name]['type'] == 'constant':\n\t\t\t# if number of mismatches is specified\n\t\t\tsearch_dict = create_barcodes_search_dict(barcodes[i][name], args)\n\t\t\tsearch_dict['name'] = name\n\t\t\tsearch.append(search_dict)\n\t\t\t\t\t\t\n\treturn search",
"def cookbook_search(search_term):\n return db.boxcar_cookbooks.find({'name': {'$regex':'^'+search_term}})",
"def search(self, query):",
"def test_search_multiple_scoring(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1, \"baz\": 4})\n add_document(\"foo\", {\"bar\": 1})\n\n # And I add scoring with params\n score = ScriptScore(\"s = custom_param + doc['bar'].value\", params={\"custom_param\": 1})\n t.score(score)\n\n boost = {\n \"boost_factor\": \"10\",\n \"filter\": Exists(\"baz\")\n }\n t.score(boost)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(2)\n results[0][\"_source\"][\"baz\"].should.equal(4)\n (\"baz\" in results[1][\"_source\"].keys()).should.be.false",
"def lookup_bm25(self) -> list:\n prox_by_doc = {}\n for token in self._tokenizer.tokenize(self._query):\n for token_info in self._index.get_token_search(token):\n doc = token_info.doc\n if doc not in prox_by_doc:\n prox_by_doc[doc] = 0\n prox_by_doc[doc] += token_info.weight\n\n return sorted(prox_by_doc.items(), key=lambda t: t[1], reverse=True)",
"def refinesearch(self) :\n\t\ttry :\n\t\t\treturn self._refinesearch\n\t\texcept Exception as e:\n\t\t\traise e",
"def search(query_string):",
"def dep_searcher(sents):\n \n result = []\n for s in sents:\n lks = []\n deps = get_deps(s, dep_type)\n tokens = s.tokens\n for opt, pat in search.items():\n pat = filtermaker(pat)\n if opt == 'g':\n for l in deps.links:\n if re.match(pat, l.governor.text):\n lks.append(s.get_token_by_id(l.dependent.idx))\n elif opt == 'd':\n for l in deps.links:\n if re.match(pat, l.dependent.text):\n lks.append(s.get_token_by_id(l.governor.idx))\n elif opt == 'f':\n for l in deps.links:\n if re.match(pat, l.type):\n lks.append(s.get_token_by_id(l.dependent.idx))\n elif opt == 'p':\n for tok in tokens:\n if re.match(pat, tok.pos):\n lks.append(tok)\n elif opt == 'l':\n for tok in tokens:\n if re.match(pat, tok.lemma):\n lks.append(tok)\n elif opt == 'w':\n for tok in tokens:\n if re.match(pat, tok.word):\n lks.append(tok)\n elif opt == 'i':\n for tok in tokens:\n if re.match(pat, str(tok.id)):\n lks.append(tok)\n\n # only return results if all conditions are met\n if searchmode == 'all':\n counted = Counter(lks)\n lks = [k for k, v in counted.items() if v >= len(search.keys())]\n\n lks = list(set([x for x in lks if re.search(regex_nonword_filter, x.word)]))\n\n if exclude is not False:\n to_remove = []\n for op, pat in exclude.items():\n pat = filtermaker(pat)\n for tok in lks:\n if op == 'g':\n for l in deps.links:\n if re.match(pat, l.governor.text):\n to_remove.append(s.get_token_by_id(l.governor.idx))\n elif op == 'd':\n for l in deps.links:\n if re.match(pat, l.dependent.text):\n to_remove.append(s.get_token_by_id(l.dependent.idx))\n elif op == 'f':\n for l in deps.links:\n if re.match(pat, l.type):\n to_remove.append(s.get_token_by_id(l.dependent.idx))\n elif op == 'p':\n for tok in tokens:\n if re.match(pat, tok.pos):\n to_remove.append(tok)\n elif op == 'l':\n for tok in tokens:\n if re.match(pat, tok.lemma):\n to_remove.append(tok)\n elif op == 'w':\n for tok in tokens:\n if re.match(pat, tok.word):\n to_remove.append(tok)\n elif op == 'i':\n for tok in tokens:\n if re.match(pat, str(tok.id)):\n to_remove.append(tok)\n\n if excludemode == 'all':\n counted = Counter(to_remove)\n to_remove = [k for k, v in counted.items() if v >= len(exclude.keys())]\n for i in to_remove:\n try:\n lks.remove(i)\n except ValueError:\n pass\n\n if only_count:\n result.append(len(lks))\n continue\n\n # figure out what to show\n for lk in lks:\n single_result = {}\n node = deps.get_node_by_idx(lk.id)\n\n if 'w' in show:\n single_result['w'] = 'none'\n if lemmatise:\n single_result['w'] = lk.lemma\n else:\n single_result['w'] = lk.word\n\n if 'l' in show:\n single_result['l'] = lk.lemma\n\n if 'p' in show:\n single_result['p'] = 'none'\n postag = lk.pos\n if lemmatise:\n if postag.lower() in taglemma.keys():\n single_result['p'] = taglemma[postag.lower()]\n else:\n single_result['p'] = postag.lower()\n else:\n single_result['p'] = postag\n if not single_result['p']:\n single_result['p'] == 'none'\n\n if 'f' in show:\n single_result['f'] = 'none'\n for i in deps.links:\n if i.dependent.idx == lk.id:\n single_result['f'] = i.type\n break\n if single_result['f'] == '':\n single_result['f'] = 'root'\n\n if 'g' in show:\n single_result['g'] = 'none'\n for i in deps.links:\n if i.dependent.idx == lk.id:\n if s.get_token_by_id(i.governor.idx):\n if lemmatise: \n single_result['g'] = s.get_token_by_id(i.governor.idx).lemma\n else:\n single_result['g'] = i.governor.text\n else:\n single_result['g'] = 'root'\n break\n\n if 'd' in show:\n single_result['d'] = 'none'\n for i in deps.links:\n if i.governor.idx == lk.id:\n if 
s.get_token_by_id(i.dependent.idx): \n if lemmatise:\n single_result['d'] = s.get_token_by_id(i.dependent.idx).lemma\n else:\n single_result['d'] = i.dependent.text\n break\n\n if 'r' in show:\n all_lks = [l for l in deps.links]\n distance = distancer(all_lks, lk)\n if distance:\n single_result['r'] = str(distance)\n else:\n single_result['r'] = '-1'\n\n if 'i' in show:\n single_result['i'] = str(lk.id)\n\n if not only_count:\n \n # add them in order\n out = []\n for i in show:\n out.append(single_result[i])\n\n result.append('/'.join(out))\n \n if 'c' in show:\n result = sum(result)\n\n return result",
"def _weight_boosting_algorithm(name: str):\n return hp.choice(name, [\"SAMME\", \"SAMME.R\"])",
"def boostScore(self, result: str, words:set ):\n found = 0;\n for word in words:\n if result in self.invertedIndex[word]:\n found += 1\n return found/len(words)",
"def search(self, **kwargs):\n return keyword_search(self._rq_list, **kwargs)",
"def find_books(self):\n search_query = unicode(self.search_input.data)\n q = u'%{}%'.format(search_query)\n\n # used for dummy emulation of caseinsensetive search\n qC = u'%{}%'.format(capfirst(search_query))\n\n books = Book.query.filter(db.or_(\n Book.authors.any(db.or_(\n Author.name.like(q),\n Author.name.like(qC))),\n Book.title.like(q),\n Book.title.like(qC)),)\n\n return books",
"def search(self, *args, **kwargs):",
"def do_search(self, *args, **kwargs):\n return [{}]",
"def __make_relevance(self, item, keywords, fltr, fuzzy=False):\n penalty = 1\n\n # Prepare attribute set\n values = []\n for prop in item.properties:\n values.append(prop.value)\n\n # Walk thru keywords\n if keywords:\n for keyword in keywords:\n\n # No exact match\n if not keyword in values:\n penalty *= 2\n\n # Penalty for not having an case insensitive match\n elif not keyword.lower() in [s.value.lower() for s in item.properties]:\n penalty *= 4\n\n # Penalty for not having the correct category\n elif fltr['category'] != \"all\" and fltr['category'].lower() != item['_type'].lower():\n penalty *= 2\n\n # Penalty for not having category in keywords\n if item._type in self.__search_aid['aliases']:\n if not set([t.lower() for t in self.__search_aid['aliases'][item._type]]).intersection(set([k.lower() for k in keywords])):\n penalty *= 6\n\n # Penalty for secondary\n if fltr['secondary'] == \"enabled\":\n penalty *= 10\n\n # Penalty for fuzzyness\n if fuzzy:\n penalty *= 10\n\n return penalty",
"def pre_search(self, qs):\n return qs",
"def search(self, query, n=500, filter_stopwords=False):\n\n def query_score(terms, title):\n \"\"\"Score the search query based on the title.\"\"\"\n\n def term_score(term, word):\n # print (term, word)\n if word.startswith(term):\n return float(len(term)) / len(word)\n else:\n return 0.0\n\n words = list(self._clean_words(title))\n return sum(term_score(t, w) for t, w in product(terms, words))\n\n terms = list(\n self._clean_words(query, filter_stopwords=filter_stopwords)\n )\n if not terms:\n raise gen.Return(final)\n term_groups = [terms]\n\n trie = self.get_ptrie()\n for term in terms:\n new_group = []\n for t in fuzzy_match(term, trie, 1):\n print \"T\", (term, t)\n new_group.append(t or term)\n if new_group not in term_groups:\n term_groups.append(new_group)\n #if t not in terms:\n # terms.append(t)\n #print \"TERMS\"\n #print terms\n\n def flatten(seq):\n nseq = []\n for item in seq:\n if isinstance(item, list):\n nseq.extend(flatten(item))\n else:\n nseq.append(item)\n return nseq\n\n final = {\n 'terms': flatten(term_groups),\n 'results': []\n }\n print term_groups\n all_results_sorted = []\n for terms in term_groups:\n with self._r.pipeline() as pipe:\n pipe.zinterstore('$tmp', terms, aggregate='max')\n pipe.zrevrange('$tmp', 0, n, True)\n # response = pipe.execute()\n response = yield gen.Task(pipe.execute)\n scored_ids = response[1]\n if not scored_ids:\n continue\n # raise gen.Return(final)\n titles = yield gen.Task(self._r.hmget, '$titles', [i[0] for i in scored_ids])\n results = imap(\n lambda x: x[0] + (titles[x[1]],),\n izip(scored_ids, titles)\n )\n # final['results'] = sorted(\n # results_sorted = sorted(\n # results,\n # key=lambda r: query_score(terms, r[2]) * r[1],\n # reverse=True\n # )\n all_results_sorted.extend(results)\n print \"all_results_sorted\"\n print all_results_sorted\n results_sorted = sorted(\n all_results_sorted,\n key=lambda r: r[1],\n reverse=True\n )\n\n final['results'] = results_sorted[:n]\n raise gen.Return(final)",
"def search(phrase):\n return {\n 'products': search_products(phrase),\n 'orders': search_orders(phrase),\n 'users': search_users(phrase)}",
"def _search(dork): \n retVal = [] \n paths = [] \n\n if not dork: \n return None \n\n headers = {} \n\n headers[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT) \n headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE \n\n gpage = conf.googlePage if conf.googlePage > 1 else 1 \n\n#polluted by xi4okv QQ£º48011203 \n\n for gpage in xrange(1,10): \n logger.info(\"using search result page #%d\" % gpage) \n\n url = \"https://m.baidu.com/s?\" \n url += \"word=%s&\" % urlencode(dork, convall=True) \n url += \"&pn=%d\" % ((gpage - 1) * 10) \n\n try: \n req = urllib2.Request(url, headers=headers) \n conn = urllib2.urlopen(req) \n\n requestMsg = \"HTTP request:\\nGET %s\" % url \n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str \n logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg) \n\n page = conn.read() \n code = conn.code \n status = conn.msg \n\n responseHeaders = conn.info() \n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\")) \n #print page \n\n responseMsg = \"HTTP response (%s - %d):\\n\" % (status, code) \n\n if conf.verbose <= 4: \n responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING) \n elif conf.verbose > 4: \n responseMsg += \"%s\\n%s\\n\" % (responseHeaders, page) \n\n logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg) \n except urllib2.HTTPError, e: \n pass \n\n urls = [urllib.unquote(match.group(0) or match.group(1)) for match in re.finditer(GOOGLE_REGEX, page, re.I)] \n #retVal = re.findall(GOOGLE_REGEX, page, re.I) \n\n import urlparse \n\n for url in urls: \n urls_pat = re.compile(r\"http://(.*)[^']\") \n aurl = re.findall(urls_pat, url) \n if \"?\" in url and \"baidu\" not in url: \n xpath = urlparse.urlparse(url).path \n if xpath not in paths: \n paths.append(xpath) \n retVal.append(aurl[0]) \n\n #print retVal \n\n return retVal",
"def get_candidates(beer):\n span = tracer.current_span()\n span.set_tags({'beer.name': beer.name, 'beer.hops': beer.hops})\n\n db = DonutStats.instance()\n\n # find our optimal sugar level Donuts above or below this level\n # will certainly not be a good match\n optimal_sugar_level = db.get_optimal_sugar_level(beer.hops)\n return db.get_by_sugar_level(optimal_sugar_level, limit=10)",
"def pool_search(self,**kwargs):\n\t\treturn self.DA.pool_search(self,**kwargs)"
]
| [
"0.60998726",
"0.59151304",
"0.574534",
"0.57388705",
"0.56903166",
"0.5567034",
"0.55486757",
"0.55217326",
"0.54974544",
"0.54808474",
"0.5383242",
"0.53806686",
"0.5372127",
"0.5334215",
"0.5333432",
"0.53207475",
"0.53197944",
"0.5305514",
"0.5304185",
"0.5290752",
"0.5283626",
"0.5274075",
"0.52659595",
"0.52342665",
"0.523215",
"0.5224025",
"0.522182",
"0.5221015",
"0.51736337",
"0.51655996"
]
| 0.7754596 | 0 |
Write given timeseries to Cloud Monitoring. | def write_time_series(host_project_id, series):
client = monitoring_v3.MetricServiceClient()
project_id = 'projects/%s' % host_project_id
try:
client.create_time_series(request={
'name': project_id,
'time_series': [series]
})
return True
except exceptions.GoogleAPIError as err:
logging.error(err)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_time_series(temperature, time_series_collector, file_name):\n with open(\"./Results/\" + file_name + \"-T{:.4f}.csv\".format(temperature), 'w') as f:\n for i, line in enumerate(zip(*time_series_collector)):\n if i < len(time_series_collector[0]) - 1:\n f.write(\"%s\\n\" % \", \".join([str(element) for element in line]))\n else:\n f.write(\"%s\" % \", \".join([str(element) for element in line]))",
"def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))",
"def write(self, data: List[str]):\n\n # explore:\n # write_api = client.write_api(write_options=ASYNCHRONOUS)\n #\n # _point1 = Point(\"my_measurement\").tag(\"location\", \"Prague\").field(\"temperature\",\n # 25.3)\n # _point2 = Point(\"my_measurement\").tag(\"location\", \"New York\").field(\n # \"temperature\", 24.3)\n #\n # async_result = write_api.write(bucket=\"my-bucket\", record=[_point1, _point2])\n # async_result.get()\n #\n # client.close()\n # or\n # with _client.write_api(write_options=WriteOptions(batch_size=500,\n # flush_interval=10_000,\n # jitter_interval=2_000,\n # retry_interval=5_000,\n # max_retries=5,\n # max_retry_delay=30_000,\n # exponential_base=2))\n # as _write_client:\n # see https://github.com/influxdata/influxdb-client-python\n\n # write_api = self.connection.write_api(write_options=SYNCHRONOUS)\n self.write_api.write(self.config.bucket, self.config.org, data)\n # async_result.get()",
"def insert_timeseries(pool, timeseries, tms_id, end_date=None):\n new_timeseries = []\n for t in [i for i in timeseries]:\n if len(t) > 1:\n # Insert EventId in front of timestamp, value list\n t.insert(0, tms_id)\n new_timeseries.append(t)\n else:\n print('Invalid timeseries data:: %s', t)\n\n if end_date is None:\n end_date = new_timeseries[-1][1]\n\n try:\n\n ts = Timeseries(pool=pool)\n\n ts.insert_data(timeseries=new_timeseries, upsert=True)\n ts.update_end_date(id_=tms_id, end_date=end_date)\n\n except Exception as e:\n traceback.print_exc()\n print(\"Exception occurred while pushing timeseries for tms_id {} to curw_obs\".format(tms_id))",
"def commit_timeseries(self, timeseries, **kwargs):\n extra_dataset_args = {\n 'dtype': 'f8',\n 'chunks': True,\n 'compression': 'gzip',\n }\n extra_dataset_args.update(kwargs)\n\n # Create the dataset in the HDF5 file (if necessary)\n if timeseries.dataset_name in self:\n dataset_hdf5 = self[timeseries.dataset_name]\n else:\n dataset_hdf5 = self.create_dataset(name=timeseries.dataset_name,\n shape=timeseries.dataset.shape,\n **extra_dataset_args)\n\n # when timestamp_key has been left empty, use the default\n timestamp_key = timeseries.timestamp_key\n if timestamp_key is None:\n timestamp_key = '/'.join(timeseries.dataset_name.split('/')[0:-1] \\\n + [DEFAULT_TIMESTAMP_DATASET])\n\n # Create the timestamps in the HDF5 file (if necessary) and calculate\n # where to insert our data into the HDF5's dataset\n if timestamp_key not in self:\n timestamps_hdf5 = self.create_dataset(name=timestamp_key,\n shape=timeseries.timestamps.shape,\n dtype='i8')\n # Copy the in-memory timestamp dataset into the HDF5 file\n timestamps_hdf5[:] = timeseries.timestamps[:]\n t_start = 0\n t_end = timeseries.timestamps.shape[0]\n start_timestamp = timeseries.timestamps[0]\n end_timestamp = timeseries.timestamps[-1] + timeseries.timestep\n else:\n existing_timestamps = self.get_timestamps(timeseries.dataset_name)\n t_start, t_end = get_insert_indices(timeseries.timestamps, existing_timestamps)\n\n if t_start < 0 \\\n or t_start > (len(existing_timestamps) - 2) \\\n or t_end < 1 \\\n or t_end > len(existing_timestamps):\n raise IndexError(\"cannot commit dataset that is not a subset of existing data\")\n\n start_timestamp = existing_timestamps[0]\n end_timestamp = existing_timestamps[-1] + timeseries.timestep\n\n # Make sure that the start/end timestamps are consistent with the HDF5\n # file's global time range\n if 'start' not in self.attrs:\n self.attrs['start'] = start_timestamp\n self.attrs['end'] = end_timestamp\n else:\n if self.attrs['start'] != start_timestamp \\\n or self.attrs['end'] != end_timestamp:\n# warnings.warn(\n raise IndexError(\"Mismatched start or end values: %d != %d or %d != %d\" % (\n start_timestamp, self.attrs['start'],\n end_timestamp, self.attrs['end']))\n\n # If we're updating an existing dataset, use its column names and ordering.\n # Otherwise sort the columns before committing them.\n if COLUMN_NAME_KEY in dataset_hdf5.attrs:\n timeseries.rearrange_columns(timeseries.columns)\n else:\n timeseries.sort_columns()\n\n # Copy the in-memory dataset into the HDF5 file\n dataset_hdf5[t_start:t_end, :] = timeseries.dataset[:, :]\n\n # Copy column names into metadata before committing metadata\n timeseries.dataset_metadata[COLUMN_NAME_KEY] = timeseries.columns\n timeseries.dataset_metadata['updated'] = int(time.mktime(datetime.datetime.now().timetuple()))\n\n # If timeseries.version was never set, don't set a dataset-level version in the HDF5\n if timeseries.version is not None:\n self.set_version(timeseries.version, dataset_name=timeseries.dataset_name)\n\n # Set the file's global version to indicate its schema\n if timeseries.global_version is not None:\n self['/'].attrs['version'] = timeseries.global_version\n\n # Insert/update dataset metadata\n for key, value in timeseries.dataset_metadata.items():\n # special hack for column names\n if key == COLUMN_NAME_KEY:\n # note: the behavior of numpy.string_(x) where\n # type(x) == numpy.array is _different_ in python2 vs. 
python3.\n # Python3 happily converts each element to a numpy.string_,\n # while Python2 first calls a.__repr__ to turn it into a single\n # string, then converts that to numpy.string_.\n dataset_hdf5.attrs[key] = numpy.array([numpy.string_(x) for x in value])\n elif tokio.common.isstr(value):\n dataset_hdf5.attrs[key] = numpy.string_(value)\n elif value is None:\n warnings.warn(\"Skipping attribute %s (null value) for %s\" % (key, timeseries.dataset_name))\n else:\n dataset_hdf5.attrs[key] = value\n\n # Insert/update group metadata\n for key, value in timeseries.group_metadata.items():\n if tokio.common.isstr(value):\n dataset_hdf5.parent.attrs[key] = numpy.string_(value)\n else:\n dataset_hdf5.parent.attrs[key] = value",
"def _push_to_server(self) -> None:\n timestamp = int(arrow.get().float_timestamp * 1000)\n\n datapoints: List[Dict[str, Union[str, List[Tuple[float, float]]]]] = []\n\n for metric in REGISTRY.collect():\n if type(metric) == Metric and metric.type in [\"gauge\", \"counter\"]:\n if len(metric.samples) == 0:\n continue\n\n external_id = self.external_id_prefix + metric.name\n datapoints.append({\"externalId\": external_id, \"datapoints\": [(timestamp, metric.samples[0].value)]})\n\n self.cdf_client.datapoints.insert_multiple(datapoints)\n self.logger.debug(\"Pushed metrics to CDF tenant '%s'\", self._cdf_project)",
"def write_outputs_to_influx(self, outputs):\n json_body = []\n base = {\n \"measurement\": self.site_id,\n \"time\": \"%s\" % self.current_datetime,\n }\n response = False\n # get each of the simulation output values and feed to the database\n for key in outputs.keys():\n if key != 'time':\n output_id = self.tagid_and_outputs[key]\n value = outputs[key]\n dis = self.id_and_dis[output_id]\n base[\"fields\"] = {\n \"value\": value\n }\n base[\"tags\"] = {\n \"id\": output_id,\n \"dis\": dis,\n \"siteRef\": self.site_id,\n \"point\": True,\n \"source\": 'alfalfa'\n }\n json_body.append(base.copy())\n try:\n print(\"Trying to write to influx\")\n response = self.ac.influx_client.write_points(points=json_body,\n time_precision='s',\n database=self.ac.influx_db_name)\n if response:\n print(\"Influx response received %s\" % response)\n except ConnectionError as e:\n print(\"Unable to write to influx: %s\" % e)",
"def export(self, data, **config):\n self.create_timeseries(data, **config)",
"def write_to_influx(df, tags, host, port, user, password, db_name, batch_size=10000, time_precision='s'):\n logger.debug(\"Write DataFrame with Tags {}, with length: {}\".format(tags, len(df)))\n client = DataFrameClient(host, port, user, password, db_name)\n if not client.write_points(df, db_name, tags, time_precision=time_precision, batch_size=batch_size):\n logger.error(\"Writing to influx failed for tags: {}\".format(tags))",
"def write_points(cls, points):\n\n ts_manager = srv_or_die(\"tsmanager\")\n ts_manager.write_points(points)",
"def insert_timeseries_data(message, device):\n # Get the product and check for any preprocessors\n product = device.product\n\n preprocessors = product.preprocessors.all()\n\n for preprocessor in preprocessors:\n preprocessor = get_preprocessor(preprocessor.preprocessor_name)\n if preprocessor:\n preprocessor(message.body, device=device, ts_cls=TimeSeriesData)\n else:\n logger.warning(\"No preprocessor handler called %s on product %s\",\n preprocessor.preprocessor_name, product.name)\n\n for sensor in device.sensors.all():\n sensor_name = sensor.sensor_type.sensor_name\n if message.body.get(sensor_name) is not None:\n new_datum = TimeSeriesData(\n ts=message.timestamp,\n sensor=sensor,\n value=message.body[sensor_name]\n )\n new_datum.save()\n\n # Evaluate any definitions data with new datapoint\n context = device.get_context(context=message.body, time=message.timestamp)\n logger.debug(\"device context %s\", context)\n redis_cache = RedisEventDefinitions(get_redis())\n\n triggered_events = device.evaluate_all_event_definitions(\n context, redis_cache, check_product=True\n )\n\n send_triggered_events(triggered_events, device, message.body)",
"def write_point(datum):\n measurement = {\n \"measurement\": \"weather\",\n \"tags\": {\n \"location\": LOCATION\n },\n \"time\": datetime.now().isoformat(),\n \"fields\": datum\n }\n CHANNEL.basic_publish(exchange='',\n routing_key='scribe',\n body=json.dumps(measurement))",
"def write_time_series(bc_file, ts, series_id):\n # construct the card line with series type, series number, and number of entries\n line = '{} {} {}'.format(ts.series_type, series_id, len(ts.time_series.index))\n # for output series, add the output units\n if ts.series_type == 'SERIES AWRITE':\n line = '{} {}'.format(line, ts.output_units) # todo this looks wrong\n # for wind and wave series, add the location and input/output units\n elif ts.series_type == 'SERIES WIND' or ts.series_type == 'SERIES WAVE':\n line = '{} {} {} {} {}'.format(line, ts.x_location, ts.y_location, ts.units,\n ts.output_units)\n else:\n # for all other series types, add input/output units\n line = '{} {} {}'.format(line, ts.units, ts.output_units)\n # write the constructed line\n bc_file.write('{}\\n'.format(line))\n bc_file.write(ts.time_series.to_csv(sep=' ', index=False, header=False, ).replace('\\r\\n', '\\n'))\n bc_file.write('\\n')",
"def write_to_splunk(**kwargs):\n event = helper.new_event(**kwargs)\n ew.write_event(event)",
"def write_crossing_times(temperature, crossing_times_collector, file_name):\n with open(\"./Results/\" + file_name + \"-T{:.4f}.csv\".format(temperature), 'w') as f:\n for crossing_times in crossing_times_collector:\n f.write(\"%s\\n\" % \", \".join([str(element) for element in crossing_times]))",
"def collect_to_file(sensor):\n temperature_settings = settings.SENSORS.get(\"TEMPERATURE\")\n\n frequency = float(temperature_settings[1][1])\n period = float( temperature_settings[2][1])\n last_collection_time = temperature_settings[4][1]\n\n while 1: \n s = []\n count = 0 \n logger.info(\"collecting\")\n \n while(count <= period):\n s.append(os.path.join(time.strftime(\"%Y_%j_%H_%M_%S_\"),str(sensor.readTemperature())))\n time.sleep(1)\n count = count + 1\n print count\n \n write_to_file(s)\n logger.info(\"done counting\")\n last_collection_time = datetime.datetime.utcnow()\n logger.info( last_collection_time)\n time.sleep(frequency)\n\n return True",
"def write_frame(location, channels, timeseries):\n # check if a single channel or a list of channels\n if type(channels) is list and type(timeseries) is list:\n channels = channels\n timeseries = timeseries\n else:\n channels = [channels]\n timeseries = [timeseries]\n\n # check that timeseries have the same start and end time\n gps_start_times = set([series.start_time for series in timeseries])\n gps_end_times = set([series.end_time for series in timeseries])\n if len(gps_start_times) != 1 or len(gps_end_times) != 1:\n raise ValueError(\"Start and end times of TimeSeries must be identical.\")\n\n # check that start, end time, and duration are integers\n gps_start_time = gps_start_times.pop()\n gps_end_time = gps_end_times.pop()\n duration = int(gps_end_time - gps_start_time)\n if gps_start_time % 1 or gps_end_time % 1:\n raise ValueError(\"Start and end times of TimeSeries must be integer seconds.\")\n\n # create frame\n frame = lalframe.FrameNew(epoch=gps_start_time, duration=duration,\n project='', run=1, frnum=1,\n detectorFlags=lal.LALDETECTORTYPE_ABSENT)\n\n for i,tseries in enumerate(timeseries):\n # get data type\n for seriestype in _fr_type_map.keys():\n if _fr_type_map[seriestype][1] == tseries.dtype:\n create_series_func = _fr_type_map[seriestype][2]\n create_sequence_func = _fr_type_map[seriestype][4]\n add_series_func = _fr_type_map[seriestype][5]\n break\n\n # add time series to frame\n series = create_series_func(channels[i], tseries.start_time,\n 0, tseries.delta_t, lal.ADCCountUnit,\n len(tseries.numpy()))\n series.data = create_sequence_func(len(tseries.numpy()))\n series.data.data = tseries.numpy()\n add_series_func(frame, series)\n\n # write frame\n lalframe.FrameWrite(frame, location)",
"def write_multiple(self, root_path, start_date, end_date, stackfile='stack.nc',\n **kwargs):\n timestamps = self.tstamps_for_daterange(start_date, end_date)\n for t in timestamps:\n self.read(t, **kwargs)\n if self.fid.image_missing:\n continue\n if stackfile is None:\n subdir = os.path.join(root_path, str(t.year))\n if not os.path.exists(subdir): os.makedirs(subdir)\n filepath = os.path.join(subdir, os.path.basename(self.fid.filename))\n else:\n filepath = os.path.join(root_path, stackfile)\n print(f\"{'Write' if not stackfile else 'Stack'} image for {str(t)}...\")\n self.fid.write(filepath)",
"def write_time_series_cards(bc_file, bc_class):\n # add header for the time series section\n bc_file.write('! Time Series\\n')\n awrite_key = -1\n dt_key = -1\n\n for key, ts in bc_class.time_series.items():\n if ts.series_type == 'SERIES AWRITE':\n # store the output series number\n awrite_key = key\n elif ts.series_type == 'SERIES DT':\n # store the timestep series number\n dt_key = key\n else:\n # write all other series\n write_time_series(bc_file, ts, key)\n bc_file.write('\\n') # blank line after Time Series\n\n # write the time step series\n if dt_key != -1:\n # write header for time step series section\n bc_file.write('! Time step time series\\n')\n write_time_series(bc_file, bc_class.time_series[dt_key], dt_key)\n bc_file.write('\\n') # blank line after Time step time series\n\n # write the output series\n if awrite_key != -1:\n # write header for time step series\n bc_file.write('! Output series\\n')\n write_time_series(bc_file, bc_class.time_series[awrite_key], awrite_key)\n bc_file.write('\\n') # blank line after Output series",
"def _create_influxdb_writer(influxdb_client, tags):\n\n def to_influxdf(data_list, retries=5, pause=5):\n logger = _logger()\n logger.debug(data_list)\n for i in range(retries):\n try:\n if influxdb_client.write_points(data_list, tags=tags):\n logger.debug(\"Success\")\n break\n else:\n sleep(pause)\n except InfluxDBClientError:\n logger.debug('Failed {} out of {}'.format(i, retries))\n else:\n logger.warning(\"Failed to write to Database\")\n\n return to_influxdf",
"def gcp_write_data(project_id: str, stats: BuildStats):\n client = monitoring_v3.MetricServiceClient()\n project_name = client.project_path(project_id)\n now = datetime.datetime.now()\n\n for desc_type, value in [\n [\"buildbots_percent_failed\", stats.percent_failed],\n [\"buildbots_builds_successful\", stats.successful],\n [\"buildbots_builds_failed\", stats.failed],\n [\"buildbots_builds_total\", stats.total],\n ]:\n series = monitoring_v3.types.TimeSeries()\n series.metric.type = 'custom.googleapis.com/buildbots_{}'.format(desc_type)\n series.resource.type = 'global'\n point = series.points.add()\n point.value.double_value = value\n point.interval.end_time.seconds = int(now.timestamp())\n client.create_time_series(project_name, [series])",
"def write_point(client, result, monitor_point):\n try:\n json_body = [\n {\n \"measurement\": monitor_point['measurement'],\n \"tags\": {\n \"type\": monitor_point['type'],\n \"host\": monitor_point['host'],\n \"type_instance\": monitor_point['instance']\n },\n # \"time\": c_time,\n \"fields\": {\n \"value\": result\n }\n }\n ]\n # print json_body # DEBUG\n client.write_points(json_body)\n return client\n except Exception as e:\n print 'write_point' + str(e)\n return client",
"def send_metrics(timestamp: Optional[float] = None) -> bool:\n\n def new_point(metric_name: str, result: float):\n series = monitoring_v3.types.TimeSeries()\n series.metric.type = f\"custom.googleapis.com/{metric_name}\"\n\n point = series.points.add()\n point.interval.end_time.seconds = now\n\n if isinstance(result, float):\n point.value.double_value = result\n else:\n point.value.int64_value = result\n return series\n\n now = int(time.time())\n prev_minute_tstamp = timestamp or (now - (now % 60) - 60)\n metrics_pattern = f\"{Monitoring.ACC_PREFIX}_{prev_minute_tstamp}_*\"\n monitoring_keys = redis_client.keys(metrics_pattern)\n all_series = []\n for metric_key in monitoring_keys:\n raw_value = redis_client.get(metric_key)\n values: List[str] = raw_value.split(\"|\") # type: ignore\n metric_name = values.pop(0) # metric name\n op = values.pop(0) # operation - SUM or AVG\n typ = values.pop(0) # INT or FLOAT\n if typ == \"INT\":\n result = sum(map(int, values))\n if op == \"AVG\":\n result = result // len(values)\n else:\n result = sum(map(float, values)) # type: ignore\n if op == \"AVG\":\n result = result / len(values) # type: ignore\n\n all_series.append(new_point(metric_name, result))\n if op == \"AVG\": # create count for AVG metric too\n all_series.append(new_point(f\"{metric_name}_COUNT\", len(values)))\n\n try:\n monitor_client.create_time_series(project_path, all_series)\n except InvalidArgument:\n logging.exception(\"mark_point failed\")\n return False\n else:\n return True",
"def logging_data(self):\n with open('sensor_data.log','w') as f:\n json.dump(self.read_continuous_data, f)",
"def makeTimeSeriesData(self,cluster,server,items):\n start = 0\n end = len(items)\n step = 1\n values = []\n for key,value in items.iteritems():\n values.append(value)\n \n name = cluster+\",\"+server+\",alert\"\n series = TimeSeries(name, start, end, step, values)\n #for key,value in items:\n return series",
"def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()",
"def _write_stream(self):\n enrich_df = self._process_stream()\n df_writer = enrich_df \\\n .writeStream \\\n .queryName(\"Agro Data Writer\") \\\n .foreachBatch(db_utils.foreach_batch_function) \\\n .option(\"checkpointLocation\", \"chk-point-dir\") \\\n .trigger(processingTime=\"1 minute\") \\\n .start()\n\n df_writer.awaitTermination()",
"def save(self, data):\n activities = [json.loads(activity['Json']) for activity in data]\n\n for i in range(len(activities)):\n activities[i]['created_at'] = to_datetime(activities[i]['created_at'])\n\n with Elastic(index='wink', doc_type='activity') as elastic:\n elastic.upload(activities, 'created_at')\n\n Log.info(\"Successfully uploaded wink activity data into elasticsearch.\")",
"def write(self, host, index):\n msg = []\n operation = \"WRITE\"\n if not self.create_uid(host, index):\n return False\n url = \"%s%s%s\" % (\"http://\", host, \"/api/put\")\n payload = {\"metric\": METRIC_NAME, \"timestamp\": TIMESTAMP_MILLIS(), \\\n \"value\": METRIC_VAL, \"tags\":{TAGK: \"%s.%d\" % (TAGV, index)}}\n headers = {\"content-type\": \"application/json\"}\n try:\n response = requests.post(url, data=json.dumps(payload), headers=headers)\n if response.status_code == 204:\n LOGGER.debug(\"Value 1 inserted to metric %s\", METRIC_NAME)\n self.process_resp([], operation, \"1\", index)\n return True\n response_dict = json.loads(response.text)\n msg.append(response_dict[\"error\"][\"message\"])\n LOGGER.warning(\"Unable to write 1, error message is %s\", \\\n response_dict[\"error\"][\"message\"])\n self.process_resp(msg, operation, \"0\", index)\n return False\n except requests.exceptions.ConnectionError as ex_message:\n LOGGER.warning(\"Unable to write 1, error message is %s\", str(ex_message))\n self.process_resp([str(ex_message)], operation, \"0\", index)\n return False",
"async def write_metrics(every: int, to: str):\n while True:\n line = f\"pyvast-threatbus,host={socket.gethostname()} \"\n start_length = len(line)\n for m in metrics:\n if not m.is_set:\n continue\n if type(m) is Gauge or type(m) is InfiniteGauge:\n if len(line) > start_length:\n line += \",\"\n line += f\"{m.name}={m.value}\"\n if type(m) is Summary:\n if len(line) > start_length:\n line += \",\"\n line += (\n f\"{m.name}_min={m.min},{m.name}_max={m.max},{m.name}_avg={m.avg}\"\n )\n m.reset()\n\n if len(line) > start_length:\n # only update the file if there were metrics collected.\n line += f\" {time.time_ns()}\" # append current nanoseconds ts\n with open(to, \"a\") as f:\n f.write(line + \"\\n\")\n await asyncio.sleep(every)"
]
| [
"0.66388404",
"0.6581932",
"0.6308372",
"0.6216045",
"0.6097348",
"0.6085458",
"0.59469795",
"0.57111454",
"0.5689555",
"0.5682473",
"0.5670478",
"0.5664158",
"0.5662826",
"0.5619124",
"0.55925524",
"0.5572947",
"0.554315",
"0.5539387",
"0.55369955",
"0.5442291",
"0.54096097",
"0.53978044",
"0.5396205",
"0.5349246",
"0.53453696",
"0.53178173",
"0.5300749",
"0.52904284",
"0.52799743",
"0.52552056"
]
| 0.76035273 | 0 |
Extract timeseries data from MQL query response. | def _extract_mql_timeseries_data(response):
lkeys = response['timeSeriesDescriptor'].get('labelDescriptors', [])
# (fixme): Is there a better way to fetch and extract this data?
for result in response.get('timeSeriesData', []):
lvalues = result.get('labelValues', [])
data = {
key['key']: val.get('stringValue', '')
for key, val in zip(lkeys, lvalues)
}
point_data = result.get('pointData', [])
if not point_data:
continue
# Returns all points.
for point in point_data:
values = point.get('values', [])
if not values:
continue
data.update(point['timeInterval'])
value_types = []
value_type_values = []
for value in values:
for key, val in value.items():
value_types.append(key)
value_type_values.append(val)
data['metric_value_types'] = value_types
data['metric_values'] = value_type_values
yield data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_timeseries_mql(project_id, mql):\n project_name = _PROJECTS % project_id\n client = gcp.monitoring_service()\n # pylint:disable=no-member\n request = client.projects().timeSeries().query(name=project_name,\n body={'query': mql})\n # pylint:enable=no-member\n response = gcp.execute_request(request)\n if response:\n return _extract_mql_timeseries_data(response)\n return []",
"def get_results():\n _, body = API.measurements(city='Los Angeles', parameter='pm25', limit=100)\n result = []\n for dict in body['results']:\n date = dict['date']['utc']\n value = dict['value']\n result.append((date, value))\n return result",
"def get_mdal_data(mdal_client, query):\n start = get_mdal_string_to_datetime(query[\"Time\"][\"T0\"])\n end = get_mdal_string_to_datetime(query[\"Time\"][\"T1\"])\n time_frame = end - start\n\n # get windowsize\n str_window = query[\"Time\"][\"WindowSize\"]\n assert str_window[-3:] == \"min\"\n WINDOW_SIZE = datetime.timedelta(minutes=int(str_window[:-3]))\n\n if time_frame < WINDOW_SIZE:\n raise Exception(\"WindowSize is less than the time interval for which data is requested.\")\n\n # To get logarithmic runtime we take splits which are powers of two.\n max_interval = datetime.timedelta(hours=12) # the maximum interval length in which to split the data.\n max_num_splits = int(time_frame.total_seconds() // max_interval.total_seconds())\n all_splits = [1]\n for _ in range(2, max_num_splits):\n power_split = all_splits[-1] * 2\n if power_split > max_num_splits:\n break\n all_splits.append(power_split)\n\n received_all_data = False\n outside_data = []\n # start loop to get data in time intervals of logarithmically decreasing size. This will hopefully find the\n # spot at which mdal returns data.\n for num_splits in all_splits:\n outside_data = []\n pre_look_ahead = time_frame / num_splits\n\n # to round down to nearest window size multiple\n num_window_in_pre_look = pre_look_ahead.total_seconds() // WINDOW_SIZE.total_seconds()\n look_ahead = datetime.timedelta(seconds=WINDOW_SIZE.total_seconds() * num_window_in_pre_look)\n\n print(\"Attempting to get data in %f day intervals.\" % (look_ahead.total_seconds() / (60 * 60 * 24)))\n\n temp_start = start\n temp_end = temp_start + look_ahead\n\n while temp_end <= end:\n query[\"Time\"][\"T0\"] = get_mdal_datetime_to_string(temp_start)\n query[\"Time\"][\"T1\"] = get_mdal_datetime_to_string(temp_end)\n mdal_outside_data = mdal_client.do_query(query, tz=\"UTC\")\n if mdal_outside_data == {}:\n print(\"Attempt failed.\")\n received_all_data = False\n break\n else:\n outside_data.append(mdal_outside_data[\"df\"])\n\n # advance temp_start and temp_end\n temp_start = temp_end + WINDOW_SIZE\n temp_end = temp_start + look_ahead\n\n # to get rest of data if look_ahead is not exact mutliple of time_between\n if temp_start < end < temp_end:\n temp_end = end\n\n # To know that we received all data.\n if end < temp_start:\n received_all_data = True\n\n # stop if we got the data\n if received_all_data:\n print(\"Succeeded.\")\n break\n\n if not received_all_data:\n raise Exception(\"WARNING: Unable to get data form MDAL.\")\n\n return pd.concat(outside_data)",
"def parse_result_series(result):\n if isinstance(result, np.ndarray):\n return result\n\n if result is None or not len(result):\n return None\n\n dates, values = result\n return pd.DataFrame({0:dates.astype(int)/1000,1:values})",
"def get_time_series_data():\r\n # Grab the requested years and columns from the query arguments\r\n ls_year = [int(year) for year in request.args.getlist(\"n\")]\r\n ls_col = request.args.getlist(\"m\")\r\n\r\n # Generate a list of all the months we need to get\r\n all_years = [str(year) for year in range(min(ls_year), max(ls_year) + 1)]\r\n\r\n # Grab all of the wanted months by filtering for the ones we want\r\n wanted_months = reduce(\r\n lambda a, b: a | b, (app.df[\"month\"].str.contains(year) for year in all_years)\r\n )\r\n\r\n # Create a new dataframe from the one that\r\n df_new = app.df[wanted_months][[\"month\"] + ls_col]\r\n\r\n # Convert all string dates into datetime objects and then sort them\r\n df_new[\"month\"] = pd.to_datetime(df_new[\"month\"])\r\n df_new = df_new.sort_values(by=[\"month\"])\r\n\r\n # Return the dataframe as json\r\n return df_new.to_json(), 200",
"def _parse_query(self, inv_obj, query_results, monitored_metrics):\n datapoints = []\n timestamp = int(time.time()) * 1000\n try:\n result = query_results[0]\n for metric in result.value:\n key = metric.id.counterId\n metric_name = monitored_metrics[key].name\n metric_type = monitored_metrics[key].metric_type\n dimensions = self._get_dimensions(inv_obj, metric)\n value = metric.value[0]\n if monitored_metrics[key].units == 'percent':\n value /= 100.0\n dp = self.Datapoint(metric_name, metric_type, value, dimensions, timestamp)\n datapoints.append(dp)\n except Exception as e:\n self._logger.error(\"Error while parsing query results: {0} : {1}\".format(query_results, e))\n\n return datapoints",
"def get_timeseries_data(self, table, datetime_start, datetime_end, timechunk=datetime.timedelta(hours=1)):\n table_schema = LMTDB_TABLES.get(table.upper())\n if table_schema is None:\n raise KeyError(\"Table '%s' is not valid\" % table)\n else:\n result_columns = ['TIMESTAMP'] + table_schema['columns']\n format_dict = {\n 'schema': ', '.join(result_columns).replace(\"TS_ID,\", \"TIMESTAMP_INFO.TS_ID,\"),\n 'table': table,\n }\n\n index0 = len(self.saved_results.get(table, {'rows': []})['rows'])\n chunk_start = datetime_start\n while chunk_start < datetime_end:\n if timechunk is None:\n chunk_end = datetime_end\n else:\n chunk_end = chunk_start + timechunk\n if chunk_end > datetime_end:\n chunk_end = datetime_end\n start_stamp = chunk_start.strftime(\"%Y-%m-%d %H:%M:%S\")\n end_stamp = chunk_end.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n query_str = \"\"\"SELECT\n %(schema)s\n FROM\n %(table)s\n INNER JOIN TIMESTAMP_INFO ON TIMESTAMP_INFO.TS_ID = %(table)s.TS_ID\n WHERE\n TIMESTAMP_INFO.TIMESTAMP >= %%(ps)s\n AND TIMESTAMP_INFO.TIMESTAMP < %%(ps)s\n \"\"\" % format_dict\n self.query(query_str, (start_stamp, end_stamp), table=table, table_schema=table_schema)\n if timechunk is not None:\n chunk_start += timechunk\n\n return self.saved_results[table]['rows'][index0:], result_columns",
"def retrieve_time_series(api, series_ID):\r\n #Retrieve Data By Series ID \r\n series_search = api.data_by_series(series=series_ID)\r\n ##Create a pandas dataframe from the retrieved time series\r\n df = pd.DataFrame(series_search)\r\n return df",
"def parse_response(response):\n data = []\n \n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n \n row_count = 0 \n for row in rows:\n #print '\\n\\n', 'ROW_COUNT: ', row_count, '\\n'\n data.append({}) \n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n #print header + ': ' + dimension\n data[row_count][header[3:]] = dimension\n \n for i, values in enumerate(dateRangeValues):\n #print 'Date range (' + str(i) + ')'\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n #print metricHeader.get('name') + ': ' + value\n data[row_count][metricHeader.get('name')[3:]] = value\n \n row_count += 1 \n \n return data",
"def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df",
"def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df",
"def get_meter_data(query):\n\n logger.debug(\"sMap: Getting meter data...\")\n r = requests.post(url, data=query)\n logger.debug(\"%s\", r)\n payload = r.json()\n logger.debug(\"%s\", payload)\n\n return payload",
"def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df",
"def unpack_query_response(self, query_result_array):\n result = []\n\n for row in query_result_array:\n result.append({\n 'case_id': row['f'][0]['v'],\n 'sample_id': row['f'][1]['v'],\n 'aliquot_id': row['f'][2]['v'],\n 'value': float(row['f'][3]['v'])\n })\n\n return result",
"def data_from_ucr_query(self):\n raise NotImplementedError",
"def _query_data(self, index, tag):\n version, datapoints = yield self.quasar.stream_get(self.name, tag, tag+(15*qdf.MINUTE))\n values = np.empty((BLOCK_SIZE,), dtype=(type(datapoints[0])))\n values[:] = None\n \n for point in datapoints:\n time = float(point.time - tag)\n time_index = int(round(time*SAMPLE_RATE/qdf.SECOND))\n values[time_index] = point\n\n self.cache[index][CACHE_INDEX_TAG] = tag\n self.cache[index][CACHE_INDEX_DATA] = values",
"def build_timeseries(self):\n timeseries = {\n \"metricKind\": \"DELTA\", \n \"metric\": {\n \"labels\": {\n \"response_code\": \"0\"}, \n \"type\": \"agent.googleapis.com/agent/request_count\"\n }, \n \"points\": [\n {\n \"interval\": {\n \"endTime\": \"2019-02-18T22:09:53.939194Z\", \n \"startTime\": \"2019-02-18T21:09:53.939194Z\"\n }, \n \"value\": {\n \"int64Value\": \"62\"\n }\n }, \n {\n \"interval\": {\n \"endTime\": \"2019-02-18T21:09:53.939194Z\", \n \"startTime\": \"2019-02-18T20:09:53.939194Z\"\n }, \n \"value\": {\n \"int64Value\": \"61\"\n }\n }\n ], \n \"resource\": {\n \"labels\": {\n \"instance_id\": \"9113659852587170607\", \n \"project_id\": \"YOUR_PROJECT_ID\", \n \"zone\": \"us-east4-a\"\n }, \n \"type\": \"gce_instance\"\n }, \n \"valueType\": \"INT64\"\n }\n\n return timeseries",
"def retrieve_data_timeseries(hfile, setname):\n dset = hfile[setname]\n sample_rate = dset.attrs[\"SamplingRate(Hz)\"]\n gps_epoch = construct_utc_from_metadata(dset.attrs[\"Date\"], dset.attrs[\"t0\"])\n data = retrieve_channel_data(hfile, setname)\n ts_data = TimeSeries(data, sample_rate=sample_rate, epoch=gps_epoch)\n return ts_data",
"def test_fetch():\n service = WebService(TestFactory())\n query = service.parse(\n parse_qs(\n \"id=BOU&starttime=2016-06-06\"\n \"&endtime=2016-06-07&elements=H,E,Z,F&sampling_period=60\"\n \"&format=iaga2002&type=variation\"\n )\n )\n timeseries = service.fetch(query)\n assert_equal(isinstance(timeseries, Stream), True)",
"def decode_timeseries(self, resp_ttb, tsobj,\n convert_timestamp=False):\n if resp_ttb is None:\n return tsobj\n\n self.maybe_err_ttb(resp_ttb)\n\n # NB: some queries return a BARE 'tsqueryresp' atom\n # catch that here:\n if resp_ttb == tsqueryresp_a:\n return tsobj\n\n # The response atom is the first element in the response tuple\n resp_a = resp_ttb[0]\n if resp_a == tsputresp_a:\n return\n elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a:\n resp_data = resp_ttb[1]\n if len(resp_data) == 0:\n return\n elif len(resp_data) == 3:\n resp_colnames = resp_data[0]\n resp_coltypes = resp_data[1]\n tsobj.columns = self.decode_timeseries_cols(\n resp_colnames, resp_coltypes)\n resp_rows = resp_data[2]\n tsobj.rows = []\n for resp_row in resp_rows:\n tsobj.rows.append(\n self.decode_timeseries_row(resp_row, resp_coltypes,\n convert_timestamp))\n else:\n raise RiakError(\n \"Expected 3-tuple in response, got: {}\".format(resp_data))\n else:\n raise RiakError(\"Unknown TTB response type: {}\".format(resp_a))",
"def create_series(self):\n series = []\n for timeline_object in self.timeline['results']:\n count = timeline_object[\"count\"]\n series.insert(0, count)\n self.query_total = self.query_total + count\n label = self.query[0:30]\n if len(self.query) > 30:\n label = label + \"...\"\n label = label + \" (\" + str(self.query_total) + \")\"\n series.insert(0, label)\n return series",
"def TimeSeries(self, header):\n data = self.DictData()\n time_series = [ (row[ \"Date\" ], float(row[ header ]) )for row in data ]\n return time_series",
"def _get_meas_times_web_service(self, last_meas_time):\n subst = ''\n if self._segment and self._segment_value:\n if self._segment['partition_value_type'] == 'int':\n subst = self._segment_value['value_int']\n elif self._segment['partition_value_type'] == 'varchar':\n subst = self._segment_value['value_varchar']\n data_fetch_command_bind_parameter = self._segment['data_fetch_command_bind_parameter']\n else:\n data_fetch_command_bind_parameter = ''\n subst = ''\n\n #meas_times = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst, 'get_meas_times', None)\n ret_data = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst)\n self._web_service_data = dict()\n meas_times = {'header':'meas_time', 'data': list()}\n for meas_time, meas_data in ret_data.iteritems():\n meas_times['data'].append([meas_time])\n self._web_service_data[meas_time] = meas_data \n \n return meas_times",
"def _parse_time_metadata(self, data, kwargs):\n try:\n time = self._get_time_range(data)\n except KeyError:\n time = []\n try:\n time_steps = data.coords[self.time_field].size\n except KeyError:\n time_steps = kwargs.get('limit')\n return time, time_steps",
"def _get_serieses(parsed_response: dict) -> list:\n serieses = parsed_response[\"message:GenericData\"][\"message:DataSet\"][\"generic:Series\"]\n if type(serieses) != list:\n serieses = [serieses]\n return serieses",
"def _get_meas_times_from_db(self):\n meas_times = []\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n dataset = self._jfile.get_current_stored_dataset()\n try:\n meas_time = datetime.datetime.strptime(dataset['meas_time'], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise Exception(\"Cannot unformat string %s to datetime\" % dataset['meas_time'])\n meas_times.append(meas_time)\n\n else:\n # for historical reports take measurement times from db datasets\n where_sql = ''\n where_sql_list = list()\n params = [self._id, self._segment_value_id]\n\n if self._process_dataset_ids:\n for dataset_id in self._process_dataset_ids:\n if type(dataset_id) == list:\n where_sql_list.append(\"(report_data_set_instance_id >= %s AND report_data_set_instance_id <= %s)\")\n if dataset_id[0] < dataset_id[1]:\n params.append(dataset_id[0])\n params.append(dataset_id[1])\n else:\n params.append(dataset_id[1])\n params.append(dataset_id[0])\n else:\n where_sql_list.append(\"report_data_set_instance_id = %s\")\n params.append(dataset_id)\n where_sql = ' AND (%s)' % ' OR '.join(where_sql_list)\n\n self._db.Query(\"\"\"SELECT measurement_time\n FROM report_data_set_instance\n WHERE\n `element_id`= %%s\n AND segment_value_id = %%s\n %s\n ORDER BY measurement_time ASC\"\"\" % where_sql, tuple(params))\n meas_times = [item['measurement_time'] for item in self._db.record]\n\n return meas_times",
"def time_series_daily(symbol: str, outputsize: str = 'compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_DAILY', outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = pd.DataFrame.from_dict(response_dict[f'Time Series (Daily)'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata",
"def _fetch_response_data(user):\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT q.pivot, q.pivot_type, q_time.is_correct, q_time.timestamp\n FROM (\n SELECT mco.question_id, mco.is_correct, ot.timestamp\n FROM (\n SELECT mcr.option_id, rbt.timestamp\n FROM (\n SELECT id, timestamp FROM drill_response\n WHERE user_id = %s\n ) AS rbt\n INNER JOIN drill_multiplechoiceresponse AS mcr\n ON mcr.response_ptr_id = rbt.id\n ) AS ot\n INNER JOIN drill_multiplechoiceoption AS mco\n ON mco.id = ot.option_id\n ) AS q_time\n INNER JOIN drill_question AS q\n ON q.id = q_time.question_id\n ORDER BY q_time.timestamp ASC\n \"\"\", (user.id, ))\n data = cursor.fetchall()\n return data",
"def series_query(survey_codes, indicator_code, char_grp_code, over_time):\n json_list = DatalabData.filter_minimal(survey_codes, indicator_code,\n char_grp_code, over_time)\n if over_time:\n series_list = DatalabData.data_to_time_series(json_list)\n else:\n series_list = DatalabData.data_to_series(json_list)\n return series_list",
"def prepareQuery(self, qid):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n\r\n if self.granularity == 'day':\r\n extractTime = \"TO_CHAR(t.START_DATE, 'yyyy,mm,dd'), TO_CHAR(t.END_DATE, 'yyyy,mm,dd')\"\r\n elif self.granularity == 'year':\r\n extractTime = \"EXTRACT(YEAR FROM t.START_DATE), EXTRACT(YEAR FROM t.END_DATE)\"\r\n \r\n cursor.execute(\"SELECT t.TYPE, t.GEOMETRY.Get_WKT(), \" + extractTime + \",\" + \\\r\n\"t.DATE_TYPE, t.Z_MIN, t.Z_MAX FROM \" + self.queriesTable + \"\"\" t \r\nWHERE id = \"\"\" + qid + \"\"\" AND dataset = '\"\"\" + self.dataset.lower() + \"'\")\r\n\r\n self.qtype, self.wkt, self.start_date, self.end_date, self.timeType, self.ozmin, self.ozmax = cursor.fetchall()[0]\r\n\r\n if self.wkt is not None:\r\n self.wkt = str(self.wkt)\r\n connection.close()\r\n \r\n # Setting up the missing variables along with transformations to the time encoding. \r\n if self.granularity == 'day':\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n self.end_date = map(int, self.end_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], \r\n self.start_date[1], self.start_date[2]) * self.scale, \r\n reader.daySinceEpoch(self.end_date[0], \r\n self.end_date[1], self.end_date[2]) * self.scale]]\r\n elif self.end_date is None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], self.start_date[1], self.start_date[2]) * self.scale, None]]\r\n else:\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n times = [[self.start_date * self.scale, self.end_date * self.scale]]\r\n elif self.end_date is None:\r\n times = [[self.start_date * self.scale, None]]\r\n\r\n if self.ozmin is None or self.ozmax is None: #no selectivity on z\r\n zmin = int(round((self.minz - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.maxz - self.offz)/self.scalez, 0))\r\n else:\r\n zmin = int(round((self.ozmin - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.ozmax - self.offz)/self.scalez, 0))\r\n\r\n # Preparing the different types of queries: Space and space - time\r\n continuous = True\r\n if self.wkt:\r\n if self.qtype.replace(' ', '').lower() != 'nn-search':\r\n ordinates = list(loads(self.wkt).exterior.coords)\r\n else:\r\n ordinates = list(loads(self.wkt).coords)\r\n \r\n if self.case == 1: #lxyt\r\n geometry = Polygon(self.list2ScaleOffset(ordinates)).wkt\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[0] #0, 0\r\n else:\r\n coarser = self.params[1] #4, 4\r\n \r\n elif self.case == 2: #lxyzt\r\n geometry = Polygon3D(Polygon(self.list2ScaleOffset(ordinates)), zmin, zmax)\r\n\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[2] #4, 4\r\n else:\r\n coarser = self.params[3] #3, 3\r\n\r\n elif self.case == 3: #dxyt\r\n geom = Polygon(self.list2ScaleOffset(ordinates)) \r\n if times[0][1] is None:\r\n continuous = False\r\n times[0][1] = times[0][0]\r\n coarser = self.params[4] #1, 8\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n continuous = False\r\n coarser = self.params[5] #-2, 1\r\n else:\r\n coarser = self.params[5] - 7\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[6] #0, 
2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[7] #3, 8\r\n \r\n if self.timeType == 'discrete' and (self.start_date is not None) and (self.end_date is not None):\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1]) \r\n \r\n elif self.case == 4: #dxyzt\r\n geom = Polygon(self.list2ScaleOffset(ordinates))\r\n if times[0][1] == None:\r\n continuous = False\r\n coarser = self.params[8] #4, 9\r\n times[0][1] = times[0][0]\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n coarser = self.params[9] #0, 2\r\n else:\r\n coarser = self.params[9] - 4\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[10] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[11] #4, 9\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else:\r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n else: #time queries\r\n if self.case == 1:\r\n geometry = []\r\n \r\n elif self.case == 2:\r\n geometry = []\r\n \r\n elif self.case == 3:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny), (self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n \r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[12] #3, 7\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[13] #0, 3\r\n else:\r\n coarser = self.params[14] #3, 8\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1])\r\n\r\n elif self.case == 4:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny),(self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[15] #4, 12\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[16] #1, 3\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[17] #4, 11\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else: \r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n\r\n \"\"\"The final lines have to do with the way of posing the query to the \r\n database. Two options are possible:\r\n (a) sql: A SQL query is posed to the database. 
The number of ranges is\r\n limited by a maximum number.\r\n (b) join: The table is joined explicitly with a table containing the \r\n ranges.\"\"\"\r\n if geometry == []:\r\n mortonWhere, self.mortonJoinWhere, ranges, rangeTab, morPrep, insert, Levels = ('', '', 0, None, 0, 0, 0)\r\n else:\r\n if self.method == 'join':\r\n rangeTab = (self.rangeTable + qid).upper()\r\n ranges, morPrep, insert, Levels = self.join(geometry, coarser, rangeTab, continuous)\r\n mortonWhere = self.mortonJoinWhere\r\n elif self.method == 'sql':\r\n rangeTab, insert = None, 0\r\n mortonWhere, ranges, morPrep, Levels = self.sql(geometry, coarser, continuous)\r\n \r\n # if deep the time is in the morton code\r\n if self.integration == 'deep' or (self.start_date is None and self.end_date is None and self.integration == 'loose'): \r\n timeWhere = ''\r\n elif self.integration == 'loose': \r\n timeWhere = whereClause.addTimeCondition(times, 'time', self.timeType)\r\n \r\n return whereClause.getWhereStatement([timeWhere, mortonWhere]), ranges, morPrep, insert, Levels, rangeTab"
]
| [
"0.6610051",
"0.627801",
"0.6274864",
"0.61707604",
"0.6128382",
"0.6069567",
"0.5944313",
"0.58993965",
"0.5889491",
"0.58621395",
"0.58621395",
"0.5862082",
"0.5802309",
"0.57957715",
"0.5789012",
"0.57394266",
"0.5738667",
"0.5714318",
"0.5689396",
"0.5662071",
"0.5643546",
"0.56370634",
"0.5585601",
"0.55558765",
"0.5536063",
"0.55151147",
"0.55067295",
"0.54898655",
"0.5488739",
"0.5464263"
]
| 0.84172064 | 0 |
Query timeseries for a project using mql. | def query_timeseries_mql(project_id, mql):
project_name = _PROJECTS % project_id
client = gcp.monitoring_service()
# pylint:disable=no-member
request = client.projects().timeSeries().query(name=project_name,
body={'query': mql})
# pylint:enable=no-member
response = gcp.execute_request(request)
if response:
return _extract_mql_timeseries_data(response)
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, query, project=\"odyssey-193217\"):\n\t\tfrom google.cloud import bigquery\n\t\tjob_config = bigquery.QueryJobConfig()\n\t\tclient = bigquery.Client(project=project)\n\t\tresult = client.query(query,job_config=job_config)\n\t\tjob_config.allowLargeResults = True\n\t\tresult.__done_timeout = 99999999\n\t\treturn list(result)",
"def query_project_tasks(self, project_data):\n\n # Get project ID.\n project_id = project_data[0][0]\n query = \"select task_datest, task_dateend, task_info, skill_descrpt, \" \\\n \"TS_Qty \" \\\n \"from skill, task_skills, task \" \\\n \"where task_skills.task_id = task.task_id \" \\\n \"and proj_id = '{}' \" \\\n \"and task_skills.skill_id = skill.skill_id \" \\\n \"order by task_datest\".format(project_id)\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)",
"def query_project(self, project_query_options):\n\n query = \"select * from project where \"\n row_names = [\"Proj_ID\", \"Cus_ID\", \"Emp_ID\", \"Proj_Date\",\n \"Proj_Descrpt\", \"Proj_EstDateSt\", \"Proj_EstDateEnd\",\n \"Proj_EstBudget\", \"Proj_ActDateSt\",\n \"Proj_ActDateEnd\", \"Proj_ActCost\"]\n\n entries = project_query_options\n options_index = []\n arguments = []\n\n index = 0\n for item in entries:\n if item is not None:\n arguments.append(item)\n options_index.append(index)\n index += 1\n\n count = 0\n for arg in arguments:\n if count == 0:\n query = query + \"{}='{}' \".format(\n row_names[options_index[count]],\n arg)\n else:\n query = query + \"and {}='{}' \".format(\n row_names[options_index[count]],\n arg)\n count += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)",
"def get_results_for_project(self, project, timestamp=None):\n session = self.session_factory()\n query = session.query(PipelineRun).filter(PipelineRun.project == project)\n if timestamp:\n query = query.filter(PipelineRun.timestamp <= timestamp)\n results = query.all()\n session.close()\n return results",
"def query(statement, project, **kwargs):\n\n with bqapi.connect(project) as conn:\n return conn.execute(statement, **kwargs).fetchall()",
"def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df",
"def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)",
"def execute_query(self):\n query_sum = self.initialize_totals()\n data = []\n\n with tenant_context(self.tenant):\n query = self.query_table.objects.filter(self.query_filter)\n query_data = query.annotate(**self.annotations)\n group_by_value = self._get_group_by()\n\n query_group_by = [\"date\"] + group_by_value\n query_order_by = [\"-date\"]\n query_order_by.extend([self.order]) # add implicit ordering\n\n query_data = query_data.values(*query_group_by).annotate(**self.report_annotations)\n\n if self._limit and query_data:\n query_data = self._group_by_ranks(query, query_data)\n if not self.parameters.get(\"order_by\"):\n # override implicit ordering when using ranked ordering.\n query_order_by[-1] = \"rank\"\n\n # Populate the 'total' section of the API response\n if query.exists():\n aggregates = self._mapper.report_type_map.get(\"aggregates\")\n metric_sum = query.aggregate(**aggregates)\n query_sum = {key: metric_sum.get(key) for key in aggregates}\n\n query_data, total_capacity = self.get_cluster_capacity(query_data)\n if total_capacity:\n query_sum.update(total_capacity)\n\n if self._delta:\n query_data = self.add_deltas(query_data, query_sum)\n is_csv_output = self.parameters.accept_type and \"text/csv\" in self.parameters.accept_type\n\n query_data = self.order_by(query_data, query_order_by)\n\n if is_csv_output:\n if self._limit:\n data = self._ranked_list(list(query_data))\n else:\n data = list(query_data)\n else:\n # Pass in a copy of the group by without the added\n # tag column name prefix\n groups = copy.deepcopy(query_group_by)\n groups.remove(\"date\")\n data = self._apply_group_by(list(query_data), groups)\n data = self._transform_data(query_group_by, 0, data)\n\n sum_init = {\"cost_units\": self._mapper.cost_units_key}\n if self._mapper.usage_units_key:\n sum_init[\"usage_units\"] = self._mapper.usage_units_key\n query_sum.update(sum_init)\n\n ordered_total = {\n total_key: query_sum[total_key] for total_key in self.report_annotations.keys() if total_key in query_sum\n }\n ordered_total.update(query_sum)\n\n self.query_sum = ordered_total\n self.query_data = data\n return self._format_query_response()",
"def query(self, qpath):\n return data.Query(self, qpath)",
"def __getQueryObject(self, query):\n try:\n query_request = self.bq.jobs()\n query_data = {'query': (query)}\n query_response = query_request.query(projectId=PROJECT_ID,\n body=query_data).execute()\n return query_response\n\n except HttpError as err:\n print('Error in listDatasets:')\n pprint.pprint(err.content)\n\n except AccessTokenRefreshError:\n print('Credentials have been revoked or expired, please re-run'\n 'the application to re-authorize')",
"def _query(self, mapping, from_date=None, to_date=None, max_count=None,\n offset=None, ascendingly=True, describe=False):\n group, key = mapping.data_var.split(self._data_var_separator)\n\n # build params\n params = 'describe={describe}&keys={key}'.format(describe=str(describe).lower(), key=key)\n if self._api['token'] is not None:\n params += '&apitoken={}'.format(self._api['token'])\n if from_date is not None:\n params += '&from-date={}'.format(from_date.isoformat())\n if to_date is not None:\n params += '&to-date={}'.format(to_date.isoformat())\n\n # build url\n url = '{}{}?{}'.format(self._api['host'], self._api['url'], params).format(group=group)\n\n r = requests.get(url)\n if r.status_code == 200:\n data = json.loads(r.content.decode('utf-8'))\n # return query result\n if not describe:\n # sort\n data = sorted(\n data,\n key=lambda k: k.get(self._timestampkey),\n reverse=(not ascendingly))\n # apply constraints\n if offset is not None:\n data = data[offset:]\n if max_count is not None:\n data = data[:max_count]\n # process to query result\n res = QueryResult(mapping.obs_uri)\n for r in data:\n res.add_row(\n dateutil.parser.parse(r.get(self._timestampkey)),\n r.get(self._valuekey))\n # return\n return res\n # return query result description\n else:\n min = data.get('mindate', None)\n if min is not None:\n min = dateutil.parser.parse(min)\n max = data.get('maxdate', None)\n if max is not None:\n max = dateutil.parser.parse(max)\n return QueryResultDescription(mapping.obs_uri, min, max, data.get('count', 0))\n else:\n # empty/erronous response\n self.pyerr(\"Failed calling API: {}\".format(url))\n if not describe:\n return QueryResult(mapping.obs_uri)\n return QueryResultDescription(mapping.obs_uri, None, None, 0)",
"def query(name: list, granularity: str, start: str = None, end: str = None, limit: int = 0) -> list:\n if granularity == \"province\":\n table = \"area_province\"\n deserilize_func = deserilize_edpidemic_province\n else:\n table = \"area_city\"\n deserilize_func = deserilize_edpidemic_city\n\n if start:\n start = datetime.datetime.strptime(start, DATE_FORMAT)\n\n if end:\n end = datetime.datetime.strptime(end, DATE_FORMAT)\n\n if limit <= 0:\n limit = None\n\n # table name is not transormbale by psycopg2\n SQL = \"\"\"SELECT * FROM {table} AS t \n WHERE \n (%(start)s is null or %(start)s <= t.curday) \n AND \n (%(end)s is null or %(end)s >= t.curday) \n LIMIT %(limit)s\"\"\".format(\n table=table\n )\n results = database.query(sql=SQL, limit=limit, start=start, end=end)\n return list(map(deserilize_func, results))",
"def pull_jobs(start_date, end_date=datetime.now(), limit=50000,\n project=['proj_codem']):\n logger.info(\"Pulling jobs from QPID API.\")\n logger.info(f\"Checking {start_date} to {end_date}\")\n dfs = []\n for p in project:\n logger.info(f\"Checking project {p}\")\n jobs = requests.get(QPID_API.format(cluster='fair'), params={\n 'limit': limit,\n 'project': p,\n 'ran_after': start_date,\n 'job_prefix': 'cod_'\n }).json()\n df = pd.DataFrame(jobs)\n dfs.append(df)\n df2 = pd.concat(dfs)\n return df2",
"def query_from(self, temporal):\n raise NotImplementedError()",
"def query(self, q, **kwargs):\n return self._client.query(self._db_name, q, **kwargs)",
"def query_datacube(product,latitude,longitude,time,measurements):\r\n\r\n dc = datacube.Datacube(app=\"Query\")\r\n\r\n xarr = dc.load(\r\n product=product, \r\n longitude=longitude, \r\n latitude=latitude,\r\n # Time format YYYY-MM-DD\r\n time=time, \r\n measurements=measurements\r\n )\r\n\r\n return xarr",
"def prepareQuery(self, qid):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n\r\n if self.granularity == 'day':\r\n extractTime = \"TO_CHAR(t.START_DATE, 'yyyy,mm,dd'), TO_CHAR(t.END_DATE, 'yyyy,mm,dd')\"\r\n elif self.granularity == 'year':\r\n extractTime = \"EXTRACT(YEAR FROM t.START_DATE), EXTRACT(YEAR FROM t.END_DATE)\"\r\n \r\n cursor.execute(\"SELECT t.TYPE, t.GEOMETRY.Get_WKT(), \" + extractTime + \",\" + \\\r\n\"t.DATE_TYPE, t.Z_MIN, t.Z_MAX FROM \" + self.queriesTable + \"\"\" t \r\nWHERE id = \"\"\" + qid + \"\"\" AND dataset = '\"\"\" + self.dataset.lower() + \"'\")\r\n\r\n self.qtype, self.wkt, self.start_date, self.end_date, self.timeType, self.ozmin, self.ozmax = cursor.fetchall()[0]\r\n\r\n if self.wkt is not None:\r\n self.wkt = str(self.wkt)\r\n connection.close()\r\n \r\n # Setting up the missing variables along with transformations to the time encoding. \r\n if self.granularity == 'day':\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n self.end_date = map(int, self.end_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], \r\n self.start_date[1], self.start_date[2]) * self.scale, \r\n reader.daySinceEpoch(self.end_date[0], \r\n self.end_date[1], self.end_date[2]) * self.scale]]\r\n elif self.end_date is None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], self.start_date[1], self.start_date[2]) * self.scale, None]]\r\n else:\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n times = [[self.start_date * self.scale, self.end_date * self.scale]]\r\n elif self.end_date is None:\r\n times = [[self.start_date * self.scale, None]]\r\n\r\n if self.ozmin is None or self.ozmax is None: #no selectivity on z\r\n zmin = int(round((self.minz - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.maxz - self.offz)/self.scalez, 0))\r\n else:\r\n zmin = int(round((self.ozmin - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.ozmax - self.offz)/self.scalez, 0))\r\n\r\n # Preparing the different types of queries: Space and space - time\r\n continuous = True\r\n if self.wkt:\r\n if self.qtype.replace(' ', '').lower() != 'nn-search':\r\n ordinates = list(loads(self.wkt).exterior.coords)\r\n else:\r\n ordinates = list(loads(self.wkt).coords)\r\n \r\n if self.case == 1: #lxyt\r\n geometry = Polygon(self.list2ScaleOffset(ordinates)).wkt\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[0] #0, 0\r\n else:\r\n coarser = self.params[1] #4, 4\r\n \r\n elif self.case == 2: #lxyzt\r\n geometry = Polygon3D(Polygon(self.list2ScaleOffset(ordinates)), zmin, zmax)\r\n\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[2] #4, 4\r\n else:\r\n coarser = self.params[3] #3, 3\r\n\r\n elif self.case == 3: #dxyt\r\n geom = Polygon(self.list2ScaleOffset(ordinates)) \r\n if times[0][1] is None:\r\n continuous = False\r\n times[0][1] = times[0][0]\r\n coarser = self.params[4] #1, 8\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n continuous = False\r\n coarser = self.params[5] #-2, 1\r\n else:\r\n coarser = self.params[5] - 7\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[6] #0, 
2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[7] #3, 8\r\n \r\n if self.timeType == 'discrete' and (self.start_date is not None) and (self.end_date is not None):\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1]) \r\n \r\n elif self.case == 4: #dxyzt\r\n geom = Polygon(self.list2ScaleOffset(ordinates))\r\n if times[0][1] == None:\r\n continuous = False\r\n coarser = self.params[8] #4, 9\r\n times[0][1] = times[0][0]\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n coarser = self.params[9] #0, 2\r\n else:\r\n coarser = self.params[9] - 4\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[10] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[11] #4, 9\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else:\r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n else: #time queries\r\n if self.case == 1:\r\n geometry = []\r\n \r\n elif self.case == 2:\r\n geometry = []\r\n \r\n elif self.case == 3:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny), (self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n \r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[12] #3, 7\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[13] #0, 3\r\n else:\r\n coarser = self.params[14] #3, 8\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1])\r\n\r\n elif self.case == 4:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny),(self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[15] #4, 12\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[16] #1, 3\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[17] #4, 11\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else: \r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n\r\n \"\"\"The final lines have to do with the way of posing the query to the \r\n database. Two options are possible:\r\n (a) sql: A SQL query is posed to the database. 
The number of ranges is\r\n limited by a maximum number.\r\n (b) join: The table is joined explicitly with a table containing the \r\n ranges.\"\"\"\r\n if geometry == []:\r\n mortonWhere, self.mortonJoinWhere, ranges, rangeTab, morPrep, insert, Levels = ('', '', 0, None, 0, 0, 0)\r\n else:\r\n if self.method == 'join':\r\n rangeTab = (self.rangeTable + qid).upper()\r\n ranges, morPrep, insert, Levels = self.join(geometry, coarser, rangeTab, continuous)\r\n mortonWhere = self.mortonJoinWhere\r\n elif self.method == 'sql':\r\n rangeTab, insert = None, 0\r\n mortonWhere, ranges, morPrep, Levels = self.sql(geometry, coarser, continuous)\r\n \r\n # if deep the time is in the morton code\r\n if self.integration == 'deep' or (self.start_date is None and self.end_date is None and self.integration == 'loose'): \r\n timeWhere = ''\r\n elif self.integration == 'loose': \r\n timeWhere = whereClause.addTimeCondition(times, 'time', self.timeType)\r\n \r\n return whereClause.getWhereStatement([timeWhere, mortonWhere]), ranges, morPrep, insert, Levels, rangeTab",
"def create_query_df(self):\n\n # display output message for timeframe\n print(\n f'{Fore.GREEN}\\nQuerying database for tags between the timeframe: '\n f'{Fore.LIGHTGREEN_EX}{str(self._start)}{Fore.GREEN} and {Fore.LIGHTGREEN_EX}{str(self._end)}'\n f'{Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}\\nTIMESPAN: '\n f'{Fore.LIGHTGREEN_EX}{self.time_span} hours'\n f'{Style.RESET_ALL}')\n\n engine = get_db_engine()\n offset = 0\n chunk_size = 100000\n\n dfs = []\n while True:\n sa_select = sa.select(\n [self.data_table],\n whereclause=sa.and_(\n self.data_table.c._TIMESTAMP > '{}'.format(self._start),\n self.data_table.c._TIMESTAMP <= '{}'.format(self._end)),\n limit=chunk_size,\n offset=offset,\n order_by=self.data_table.c._NUMERICID\n )\n dfs.append(pd.read_sql(sa_select, engine))\n offset += chunk_size\n if len(dfs[-1]) < chunk_size:\n break\n\n self.query_df = pd.concat(dfs)",
"def query(self, query):",
"def query(self, year=None, month=None, key=None,):\n if not key: key = self.key\n if (year < 1882) or not (0 < month < 13):\n # currently the Archive API only supports year >= 1882\n exception_str = 'Invalid query: See http://developer.nytimes.com/archive_api.json'\n raise InvalidQueryException(exception_str)\n url = self.root.format(year, month, key)\n r = requests.get(url)\n return r.json()",
"def get_query_metric_df(self, field_name, system):\n if system not in self.base_result.systems:\n raise ValueError(\"System not in result_list.\")\n return self._get_field_from_summary(field_name).loc[system]",
"def query(self, **kwargs):",
"def query(self, session, query):\n\t\ttry:\n\t\t\tstart = time.time()\n\t\t\tevent_docs = []\n\t\t\tfor event in self.model.events.query(**query):\n\t\t\t\tif event.PUBLIC:\n\t\t\t\t\tdoc = event.serialize()\n\t\t\t\t\tdoc['id'] = None\n\t\t\t\t\tevent_docs.append(doc)\n\t\t\t\t\n\t\t\t\n\t\t\tend = time.time()\n\t\texcept Exception:\n\t\t\tlogger.error(traceback.format_exc())\n\t\t\treturn responses.database_error(\"getting a set of events with query %s\" % query)\n\t\t\n\t\tquery['after'] = max(\n\t\t\tquery.get('after', 0), \n\t\t\ttime.time() - configuration.snuggle['changes_synchronizer']['max_age']\n\t\t)\n\t\t\n\t\ttry:\n\t\t\tsnuggler, data = user_data()\n\t\t\tevent = types.EventsQueried(\n\t\t\t\tquery,\n\t\t\t\tend-start,\n\t\t\t\tlen(event_docs),\n\t\t\t\tsnuggler,\n\t\t\t\tdata\n\t\t\t)\n\t\t\tself.model.events.insert(event)\n\t\texcept Exception as e:\n\t\t\tlogger.error(traceback.format_exc())\n\t\t\t\n\t\t\n\t\treturn responses.success(event_docs)",
"def model_query(dt_from, dt_to, model_id, sensor_id):\n logging.info(\n \"Calling model %i with parameters %s %s\"\n % (\n model_id,\n dt_from.strftime(CONST_TIMESTAMP_FORMAT),\n dt_to.strftime(CONST_TIMESTAMP_FORMAT),\n )\n )\n\n # subquery to get the last model run from a given model\n sbqr = (\n db.session.query(ModelRunClass)\n .filter(\n ModelRunClass.model_id == model_id,\n ModelRunClass.sensor_id == sensor_id,\n ModelRunClass.time_created >= dt_from,\n ModelRunClass.time_created <= dt_to,\n )\n .all()\n )\n if len(sbqr) > 0:\n sbqr = sbqr[-1]\n else:\n return None\n # query to get all the results from the model run in the subquery\n query = db.session.query(\n ModelClass.id,\n ModelClass.model_name,\n ModelRunClass.sensor_id,\n ModelMeasureClass.measure_name,\n ModelValueClass.prediction_value,\n ModelValueClass.prediction_index,\n ModelProductClass.run_id,\n ModelRunClass.time_created,\n ModelRunClass.time_forecast,\n ModelScenarioClass.ventilation_rate,\n ModelScenarioClass.num_dehumidifiers,\n ModelScenarioClass.lighting_shift,\n ModelScenarioClass.scenario_type,\n ).filter(\n and_(\n ModelClass.id == model_id,\n ModelRunClass.model_id == ModelClass.id,\n ModelRunClass.id == sbqr.id,\n ModelProductClass.run_id == ModelRunClass.id,\n ModelProductClass.measure_id == ModelMeasureClass.id,\n ModelValueClass.product_id == ModelProductClass.id,\n ModelMeasureClass.scenario_id == ModelScenarioClass.id,\n )\n )\n\n df = pd.read_sql(query.statement, query.session.bind)\n df[\"scenario_type\"] = df[\"scenario_type\"].astype(str)\n logging.info(\"Total number of records found: %d\" % (len(df.index)))\n\n if df.empty:\n logging.debug(\"WARNING: Query returned empty\")\n\n return df",
"def query(self):\r\n raise NotImplementedError",
"def generate_queries_count_of_one_month(cls, project_slug):\n today = timezone.now().date()\n last_30th_day = timezone.now().date() - timezone.timedelta(days=30)\n\n qs = cls.objects.filter(\n project__slug=project_slug,\n created__date__lte=today,\n created__date__gte=last_30th_day,\n ).order_by(\"-created\")\n\n # dict containing the total number of queries\n # of each day for the past 30 days (if present in database).\n count_dict = dict(\n qs.annotate(created_date=TruncDate(\"created\"))\n .values(\"created_date\")\n .order_by(\"created_date\")\n .annotate(count=Count(\"id\"))\n .values_list(\"created_date\", \"count\")\n )\n\n count_data = [count_dict.get(date) or 0 for date in _last_30_days_iter()]\n\n # format the date value to a more readable form\n # Eg. `16 Jul`\n last_30_days_str = [\n timezone.datetime.strftime(date, \"%d %b\") for date in _last_30_days_iter()\n ]\n\n final_data = {\n \"labels\": last_30_days_str,\n \"int_data\": count_data,\n }\n\n return final_data",
"def get_time_series_data():\r\n # Grab the requested years and columns from the query arguments\r\n ls_year = [int(year) for year in request.args.getlist(\"n\")]\r\n ls_col = request.args.getlist(\"m\")\r\n\r\n # Generate a list of all the months we need to get\r\n all_years = [str(year) for year in range(min(ls_year), max(ls_year) + 1)]\r\n\r\n # Grab all of the wanted months by filtering for the ones we want\r\n wanted_months = reduce(\r\n lambda a, b: a | b, (app.df[\"month\"].str.contains(year) for year in all_years)\r\n )\r\n\r\n # Create a new dataframe from the one that\r\n df_new = app.df[wanted_months][[\"month\"] + ls_col]\r\n\r\n # Convert all string dates into datetime objects and then sort them\r\n df_new[\"month\"] = pd.to_datetime(df_new[\"month\"])\r\n df_new = df_new.sort_values(by=[\"month\"])\r\n\r\n # Return the dataframe as json\r\n return df_new.to_json(), 200",
"def _execute_query(self, query, values):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(query,values)\n if not cursor.description:\n return []\n rs = RecordSet(initialData=cursor.fetchall(), recordType=next(zip(*cursor.description)))\n return rs",
"def query(self, **kwargs):\n\n return query.query(self._host, self._session, **kwargs)",
"def q(cls) -> Query:\n if not cls.s:\n raise M2Error('No DB session defined')\n return cls.s.query(cls)"
]
| [
"0.65136653",
"0.63618124",
"0.62450224",
"0.62362874",
"0.6012494",
"0.59513414",
"0.5846183",
"0.55958813",
"0.55835605",
"0.5576945",
"0.55751055",
"0.55694366",
"0.55660886",
"0.55609494",
"0.55261236",
"0.54521173",
"0.54411834",
"0.54324687",
"0.5397769",
"0.53974396",
"0.5395373",
"0.53844714",
"0.5383042",
"0.5351198",
"0.5342407",
"0.5329682",
"0.5320837",
"0.53133523",
"0.52894497",
"0.52883697"
]
| 0.802371 | 0 |
For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response body (as a dict). | def get_received_txn_response(self, transaction_id, origin):
return self.db.runInteraction(
"get_received_txn_response",
self._get_received_txn_response,
transaction_id,
origin,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_received_txn_response(self, transaction_id, origin, code, response_dict):\n\n return self.db.simple_insert(\n table=\"received_transactions\",\n values={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n \"response_code\": code,\n \"response_json\": db_binary_type(encode_canonical_json(response_dict)),\n \"ts\": self._clock.time_msec(),\n },\n or_ignore=True,\n desc=\"set_received_txn_response\",\n )",
"def receive_response(self, transaction):\n host, port = transaction.response.source\n key_token = hash(str(host) + str(port) + str(transaction.response.token))\n if key_token in self._block1_sent and transaction.response.block1 is not None:\n item = self._block1_sent[key_token]\n transaction.block_transfer = True\n if item.m == 0:\n transaction.block_transfer = False\n del transaction.request.block1\n return transaction\n n_num, n_m, n_size = transaction.response.block1\n if n_num != item.num: # pragma: no cover\n logger.warning(\"Blockwise num acknowledged error, expected \" + str(item.num) + \" received \" +\n str(n_num))\n return None\n if n_size < item.size:\n logger.debug(\"Scale down size, was \" + str(item.size) + \" become \" + str(n_size))\n item.size = n_size\n request = transaction.request\n del request.mid\n del request.block1\n request.payload = item.payload[item.byte: item.byte+item.size]\n item.num += 1\n item.byte += item.size\n if len(item.payload) <= item.byte:\n m = 0\n else:\n m = 1\n request.block1 = (item.num, m, item.size)\n elif transaction.response.block2 is not None:\n\n num, m, size = transaction.response.block2\n if m == 1:\n transaction.block_transfer = True\n if key_token in self._block2_sent:\n item = self._block2_sent[key_token]\n if num != item.num: # pragma: no cover\n logger.error(\"Receive unwanted block\")\n return self.error(transaction, defines.Codes.REQUEST_ENTITY_INCOMPLETE.number)\n if item.content_type is None:\n item.content_type = transaction.response.content_type\n if item.content_type != transaction.response.content_type: # pragma: no cover\n logger.error(\"Content-type Error\")\n return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)\n item.byte += size\n item.num = num + 1\n item.size = size\n item.m = m\n item.payload += transaction.response.payload\n else:\n item = BlockItem(size, num + 1, m, size, transaction.response.payload,\n transaction.response.content_type)\n self._block2_sent[key_token] = item\n request = transaction.request\n del request.mid\n del request.block2\n request.block2 = (item.num, 0, item.size)\n else:\n transaction.block_transfer = False\n if key_token in self._block2_sent:\n if self._block2_sent[key_token].content_type != transaction.response.content_type: # pragma: no cover\n logger.error(\"Content-type Error\")\n return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)\n transaction.response.payload = self._block2_sent[key_token].payload + transaction.response.payload\n del self._block2_sent[key_token]\n else:\n transaction.block_transfer = False\n return transaction",
"def process_response(_origin_details, response, jdata):\n data_portions = []\n if response.status != 200:\n print(\"ERROR: \" + str(response.status) + \": \" + str(jdata['errors'][0]['message']))\n print(f\"ERROR {response.status}: {jdata['errors'][0]['message']}\")\n if response.status == 429:\n exit()\n return 0\n else:\n if 'results' not in jdata:\n print(\"ERROR: API returned with no results.\")\n return {}\n\n # Include the origin city's data\n data_portions.append({_origin_details:\n {'destination': _origin_details,\n 'lon': jdata['results'][0]['points'][0]['lon'],\n 'lat': jdata['results'][0]['points'][0]['lat'],\n 'departure': 0,\n 'arrival': 0,\n 'train_time': 0,\n 'num_transfers': 0,\n 'intermediate_stations': 0,\n 'endnode': 0,\n 'hovertext': jdata['results'][0]['points'][0]['text'],\n }})\n\n # Duplicate the origin city's data, but with the auto-corrected name. Both may be important.\n data_portions.append({jdata['results'][0]['points'][0]['text']: data_portions[0][_origin_details].copy()})\n data_portions[-1][jdata['results'][0]['points'][0]['text']]['destination'] = jdata['results'][0]['points'][0]['text']\n\n # Iterate on the list of destinations given\n for i in range(len(jdata['results'])):\n if 'connections' not in jdata['results'][i]:\n continue\n\n # iterate on the connection for each destination\n for con in jdata['results'][i]['connections']:\n data_portion = {}\n departure_time = datetime_to_timestamp(con['departure'])\n stop_count = 0\n\n # iterate on the legs for each connection\n for leg in range(len(con['legs'])):\n end_node = 0\n if 'exit' in con['legs'][leg]:\n if 'to' in con['legs'][leg]:\n if con['legs'][leg]['exit']['name'] == con['legs'][leg]['to']:\n end_node = 1\n data_portion[con['legs'][leg]['exit']['name']] = \\\n {'destination': con['legs'][leg]['exit']['name'],\n 'lon': con['legs'][leg]['exit']['lon'],\n 'lat': con['legs'][leg]['exit']['lat'],\n 'departure': departure_time,\n 'arrival': datetime_to_timestamp(con['legs'][leg]['exit']['arrival']),\n 'train_time': datetime_to_timestamp(con['legs'][leg]['exit']['arrival']) - departure_time,\n 'num_transfers': leg - 1,\n 'intermediate_stations': stop_count,\n 'endnode': end_node,\n 'hovertext': con['legs'][leg]['exit']['name'] + '<br>' + core_func.sec_to_hhmm(\n datetime_to_timestamp(con['legs'][leg]['exit']['arrival']) - departure_time)\n }\n\n if 'stops' not in con['legs'][leg]:\n continue\n\n if ('departure' not in con['legs'][leg]) | (con['legs'][leg]['stops'] is None):\n end_node = 0\n if 'departure' not in con['legs'][leg]:\n end_node = 1\n data_portion[con['legs'][leg]['name']] = \\\n {'destination': con['legs'][leg]['name'],\n 'lon': con['legs'][leg]['lon'],\n 'lat': con['legs'][leg]['lat'],\n 'departure': departure_time,\n 'arrival': datetime_to_timestamp(con['legs'][leg]['arrival']),\n 'train_time': datetime_to_timestamp(con['legs'][leg]['arrival']) - departure_time,\n 'num_transfers': leg - 1,\n 'intermediate_stations': stop_count,\n 'endnode': end_node,\n 'hovertext': con['legs'][leg]['name'] + '<br>' +\n core_func.sec_to_hhmm(datetime_to_timestamp(con['legs'][leg]['arrival']) - departure_time)\n }\n continue\n\n # iterate on the stops for each leg\n for stop in con['legs'][leg]['stops']:\n if 'arrival' not in stop:\n continue\n train_time = datetime_to_timestamp(stop['arrival']) - departure_time\n if train_time < 86400:\n data_portion[stop['name']] = \\\n {'destination': stop['name'],\n 'lon': stop['lon'],\n 'lat': stop['lat'],\n 'departure': departure_time,\n 'arrival': 
datetime_to_timestamp(stop['arrival']),\n 'train_time': train_time,\n 'num_transfers': leg,\n 'intermediate_stations': stop_count,\n 'endnode': 0,\n 'hovertext': f\"{stop['name']}<br>{core_func.sec_to_hhmm(train_time)}\",\n }\n stop_count += 1\n data_portions.append(data_portion)\n\n # Data portions contains many multiple entries; delete duplicates (while minimizing train_time)\n output_data_portion = {}\n for conn in data_portions: # iterate through each data_portion (ie each connection)\n for city in conn: # iterate through each destination\n if city not in output_data_portion:\n output_data_portion[city] = conn[city]\n elif conn[city]['train_time'] < output_data_portion[city]['train_time']:\n output_data_portion[city].update(conn[city])\n elif conn[city]['train_time'] == output_data_portion[city]['train_time']:\n if conn[city]['arrival'] < output_data_portion[city]['arrival']:\n output_data_portion[city].update(conn[city])\n else:\n continue\n\n # If the city is not an endnode in any of the entries, then flag the destination as not an endnode\n for city in output_data_portion:\n for conn in data_portions:\n if city in conn:\n if 'endnode' in conn[city]:\n if conn[city]['endnode'] == 0:\n output_data_portion[city]['endnode'] = 0\n\n return output_data_portion",
"def process_incoming_response(self, response):\n # Validate the response.\n if not {\"__id\", \"__data\", \"__error\"}.issubset(iterkeys(response)):\n self.disconnect(\"Bad response received\")\n logger.warning(\"Response is missing some fields, ignoring.\")\n return\n\n # Determine the ID.\n id_ = response[\"__id\"]\n\n if id_ not in self.pending_outgoing_requests:\n logger.warning(\"No pending request with id %s found.\", id_)\n return\n\n request = self.pending_outgoing_requests.pop(id_)\n result = self.pending_outgoing_requests_results.pop(id_)\n error = response[\"__error\"]\n\n if error is not None:\n err_msg = \"%s signaled RPC for method %s was unsuccessful: %s.\" % (\n self.remote_service_coord, request[\"__method\"], error)\n logger.error(err_msg)\n result.set_exception(RPCError(error))\n else:\n result.set(response[\"__data\"])",
"async def _receive_response(\n self, request: Request, stream_id: int\n ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:\n while True:\n event = await self._receive_stream_event(request, stream_id)\n if isinstance(event, h2.events.ResponseReceived):\n break\n\n status_code = 200\n headers = []\n for k, v in event.headers:\n if k == b\":status\":\n status_code = int(v.decode(\"ascii\", errors=\"ignore\"))\n elif not k.startswith(b\":\"):\n headers.append((k, v))\n\n return (status_code, headers)",
"async def asyncio_process_response(_origin_details, response):\n # Simply awaiting the response throws a TypeError\n jdata = await response.json()\n return process_response(_origin_details, response, jdata)",
"def _response(request):\n with urllib.request.urlopen(request) as response:\n status = response.getcode()\n # print(status, response.info(), )\n data = json.loads(\n response.read().decode('utf-8')\n )\n # print(data)\n if status == 200 and data[\"ok\"]:\n return data, status\n elif status == 200 and not data[\"ok\"]:\n raise ValueError('client._response() - Server response is not good ' +\n json.dumps(data))\n else:\n raise ConnectionFault('client._response() - Connection Error: ' +\n str(response.getcode()))",
"def get_requests(status=None, origin=None, destination=None):\n result = ResponseEntity()\n try:\n if status is None:\n request_list = Request.objects.exclude(status=Enum.REQUEST_STATUS.Deleted.value)\n else:\n request_list = Request.objects.filter(status=status.value).order_by(\"-created_date\")\n\n if origin is not None:\n request_list = request_list.filter(origin_city=origin)\n if destination is not None:\n request_list = request_list.filter(destination_city=destination)\n\n request_entities = list_Request_to_RequestEntity(request_list)\n result.success = True\n result.data = request_entities\n except Exception as e:\n print str(e)\n result.message = str(e)\n result.success = False\n finally:\n return result",
"def _handle_response(self, url, raw_response, raw_request,\n status_code, headers, request_dict):\n if status_code != 200 and status_code != 201: # pylint: disable=consider-using-in\n response = {}\n # If the result can't be parsed into json, most likely is raw html.\n # Some response are neither json or raw html, handle them here:\n if raw_response:\n response = json_lib.loads(raw_response)\n # Pass raised error to error handler.\n self._handle_http_error(url, response, status_code,\n headers.get('pspReference'),\n raw_request, raw_response,\n headers, request_dict)\n\n try:\n if response['errorCode']:\n raise Adyen.exceptions.AdyenAPICommunicationError(\n \"Unexpected error while communicating with Adyen.\"\n \" Received the response data:'{}', HTTP Code:'{}'. \"\n \"Please reach out to [email protected] if the \"\n \"problem persists with the psp:{}\".format(\n raw_response,\n status_code,\n headers.get('pspReference')),\n status_code=status_code,\n raw_request=raw_request,\n raw_response=raw_response,\n url=url,\n psp=headers.get('pspReference'),\n headers=headers,\n error_code=response['errorCode'])\n except KeyError:\n erstr = 'KeyError: errorCode'\n raise Adyen.exceptions.AdyenAPICommunicationError(\n erstr) from KeyError\n else:\n try:\n response = json_lib.loads(raw_response)\n psp = headers.get('pspReference', response.get('pspReference'))\n return Adyen.client.AdyenResult(message=response, status_code=status_code,\n psp=psp, raw_request=raw_request,\n raw_response=raw_response)\n except ValueError:\n # Couldn't parse json so try to pull error from html.\n\n error = self._error_from_hpp(raw_response)\n\n message = request_dict\n\n reference = message.get(\"reference\",\n message.get(\"merchantReference\"))\n\n errorstring = \"\"\"Unable to retrieve payment \"\n list. Received the error: {}. Please verify your request \"\n and try again. If the issue persists, please reach out to \"\n [email protected] including the \"\n merchantReference: {}\"\"\".format(error, reference), # pylint: disable=trailing-comma-tuple\n\n raise Adyen.exceptions.AdyenInvalidRequestError(\n errorstring) from ValueError",
"def _check_response(self, response):\n if response.status_code == requests.codes.ok:\n # Since the ZenHub REST API does not send back 204 when there is\n # no content, we have to check the Content-Length for 0 :(\n if int(response.headers['Content-Length']):\n return response.json()\n elif response.status_code == requests.codes.not_found:\n return None\n else:\n return response.raise_for_status()",
"def is_response(code):\n return defines.RESPONSE_CODE_LOWER_BOUND <= code <= defines.RESPONSE_CODE_UPPER_BOUND",
"def __handler_get_tx_result(self, request, context):\n utils.logger.spam(f\"checking for test, code: {request.code}\")\n utils.logger.spam(f\"checking for test, channel name: {request.channel}\")\n utils.logger.spam(f\"checking for test, message: {request.message}\")\n utils.logger.spam(f\"checking for test, meta: {json.loads(request.meta)}\")\n\n params = json.loads(request.meta)\n\n utils.logger.spam(f\"params tx_hash({params['tx_hash']})\")\n\n return loopchain_pb2.Message(code=message_code.Response.success)",
"def send_response(self, transaction):\n host, port = transaction.request.source\n key_token = hash(str(host) + str(port) + str(transaction.request.token))\n if (key_token in self._block2_receive and transaction.response.payload is not None) or \\\n (transaction.response.payload is not None and len(transaction.response.payload) > defines.MAX_PAYLOAD):\n if key_token in self._block2_receive:\n\n byte = self._block2_receive[key_token].byte\n size = self._block2_receive[key_token].size\n num = self._block2_receive[key_token].num\n\n else:\n byte = 0\n num = 0\n size = defines.MAX_PAYLOAD\n m = 1\n\n self._block2_receive[key_token] = BlockItem(byte, num, m, size)\n\n if len(transaction.response.payload) > (byte + size):\n m = 1\n else:\n m = 0\n transaction.response.payload = transaction.response.payload[byte:byte + size]\n del transaction.response.block2\n transaction.response.block2 = (num, m, size)\n\n self._block2_receive[key_token].byte += size\n self._block2_receive[key_token].num += 1\n if m == 0:\n del self._block2_receive[key_token]\n\n return transaction",
"def _get_response(self, connection):\n\n response_header = self._receive(connection, 13)\n logger.debug('Response header: %s', response_header)\n\n if (not response_header.startswith(b'ZBXD\\x01')) or (len(response_header) != 13):\n logger.debug('Zabbix return not valid response.')\n result = False\n else:\n response_len = struct.unpack('<Q', response_header[5:])[0]\n response_body = connection.recv(response_len)\n result = json.loads(response_body.decode(\"utf-8\"))\n logger.debug('Data received: %s', result)\n\n try:\n connection.close()\n except socket.error:\n pass\n\n return result",
"def _verify_response(self, text_response, orig_otp, orig_nonce):\n response_dict = dict([line.strip(' ').split('=', 1) for line in\n re.split(r'\\r\\n', text_response)\n if line.strip()])\n\n if 'otp' in response_dict and response_dict['otp'] != orig_otp:\n raise YubiKeyVerificationError(\n \"Received response that does not match the OTP that was \"\n \"sent to be verified.\")\n\n if 'nonce' in response_dict and response_dict['nonce'] != orig_nonce:\n raise YubiKeyVerificationError(\n \"Received response that does not match the OTP that was \"\n \"sent to be verified.\")\n\n if self.api_key is not None:\n sig = sign_query(response_dict, self.api_key)\n if response_dict['h'].decode('base64') != sig.decode('base64'):\n raise YubiKeyVerificationError(\n \"Received a response whose signature is invalid\")\n\n return response_dict",
"def _process_response(self, request, content, status):\n if status == codes.ok:\n bis = ByteInputStream(bytearray(content))\n return self._process_ok_response(bis, request)\n self._process_not_ok_response(content, status)\n raise IllegalStateException('Unexpected http response status: ' +\n str(status))",
"def is_raw_response(self, response: object) -> bool:",
"def getResponseCode(self) -> int:\n ...",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n # import re\n # bool(re.match(r'^.*?\\.mydomain\\.com', origin))\n # allowed = super.check_origin(origin)\n if self.allow_origin == '*':\n return True\n\n host = self.request.headers.get(\"Host\")\n if origin is None:\n origin = self.request.headers.get(\"Origin\")\n\n # If no header is provided, assume we can't verify origin\n if origin is None:\n LOG.warning(\"user {0} Missing Origin header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n if host is None:\n LOG.warning(\"user {0} Missing Host header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n\n origin = origin.lower()\n origin_host = urlparse(origin).netloc\n\n # OK if origin matches host\n if origin_host == host:\n return True\n\n # Check CORS headers\n if self.allow_origin:\n allow = self.allow_origin == origin\n # elif self.allow_origin_pat:\n # allow = bool(self.allow_origin_pat.match(origin))\n else:\n # No CORS headers deny the request\n allow = False\n if not allow:\n LOG.warning(\"user {0} Blocking Cross Origin WebSocket Attempt. Origin: %s, Host: %s\",\n self.client_id, origin, host)\n return allow",
"def receive_request(self, transaction):\n if transaction.request.block2 is not None:\n host, port = transaction.request.source\n key_token = hash(str(host) + str(port) + str(transaction.request.token))\n num, m, size = transaction.request.block2\n if key_token in self._block2_receive:\n self._block2_receive[key_token].num = num\n self._block2_receive[key_token].size = size\n self._block2_receive[key_token].m = m\n del transaction.request.block2\n else:\n # early negotiation\n byte = 0\n self._block2_receive[key_token] = BlockItem(byte, num, m, size)\n del transaction.request.block2\n\n elif transaction.request.block1 is not None:\n # POST or PUT\n host, port = transaction.request.source\n key_token = hash(str(host) + str(port) + str(transaction.request.token))\n num, m, size = transaction.request.block1\n if key_token in self._block1_receive:\n content_type = transaction.request.content_type\n if num != self._block1_receive[key_token].num \\\n or content_type != self._block1_receive[key_token].content_type:\n # Error Incomplete\n return self.incomplete(transaction)\n self._block1_receive[key_token].payload += transaction.request.payload\n else:\n # first block\n if num != 0:\n # Error Incomplete\n return self.incomplete(transaction)\n content_type = transaction.request.content_type\n self._block1_receive[key_token] = BlockItem(size, num, m, size, transaction.request.payload,\n content_type)\n\n if m == 0:\n transaction.request.payload = self._block1_receive[key_token].payload\n # end of blockwise\n del transaction.request.block1\n transaction.block_transfer = False\n # TODO remove from _block1_receive\n return transaction\n else:\n # Continue\n transaction.block_transfer = True\n transaction.response = Response()\n transaction.response.destination = transaction.request.source\n transaction.response.token = transaction.request.token\n transaction.response.code = defines.Codes.CONTINUE.number\n transaction.response.block1 = (num, m, size)\n\n num += 1\n byte = size\n self._block1_receive[key_token].byte = byte\n self._block1_receive[key_token].num = num\n self._block1_receive[key_token].size = size\n self._block1_receive[key_token].m = m\n\n return transaction",
"def process_raw_response(self):\n non_excepts = self.non_exceptionals\n raw = self.raw_response\n\n #if the raw respones is an urllib2 error act accordingly.\n if isinstance(raw, non_excepts):\n self.error = raw\n if isinstance(raw, HTTPError):\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n else:\n #its a url error nothing to do\n pass\n\n else:\n #only urllib.addinfourl type should be now be possible\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n self.body = \"\".join(raw.readlines())",
"def process_incoming_request(self, request):\n # Validate the request.\n if not {\"__id\", \"__method\", \"__data\"}.issubset(iterkeys(request)):\n self.disconnect(\"Bad request received\")\n logger.warning(\"Request is missing some fields, ignoring.\")\n return\n\n # Determine the ID.\n id_ = request[\"__id\"]\n\n # Store the request.\n self.pending_incoming_requests_threads.add(gevent.getcurrent())\n\n # Build the response.\n response = {\"__id\": id_,\n \"__data\": None,\n \"__error\": None}\n\n method_name = request[\"__method\"]\n\n if not hasattr(self.local_service, method_name):\n response[\"__error\"] = \"Method %s doesn't exist.\" % method_name\n else:\n method = getattr(self.local_service, method_name)\n\n if not getattr(method, \"rpc_callable\", False):\n response[\"__error\"] = \"Method %s isn't callable.\" % method_name\n else:\n try:\n response[\"__data\"] = method(**request[\"__data\"])\n except Exception as error:\n response[\"__error\"] = \"%s: %s\\n%s\" % \\\n (error.__class__.__name__, error,\n traceback.format_exc())\n\n # Encode it.\n try:\n data = json.dumps(response).encode('utf-8')\n except (TypeError, ValueError):\n logger.warning(\"JSON encoding failed.\", exc_info=True)\n return\n\n # Send it.\n try:\n self._write(data)\n except IOError:\n # Log messages have already been produced.\n return",
"async def extra_make_response(self, pkt, source):\n if False:\n yield None",
"def is_responded(self):\n if not self.requires_response:\n return False, None, None\n for history_entry in self.history[::-1]:\n if history_entry.action == MessageAction.respond:\n return True, history_entry.timestamp, history_entry.username\n else:\n return False, None, None",
"def isResp(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '76270-8'",
"def confirm_transaction_response(response) -> dict:\n result = {}\n root = ET.fromstring(response)\n namespace_ = {\n \"SOAP-ENV\": \"http://schemas.xmlsoap.org/soap/envelope/\",\n \"ns1\": \"tns:ns\"}\n for child in root.findall(\"SOAP-ENV:Body\", namespace_):\n checkout_element = child.find(\n \"ns1:transactionConfirmResponse\", namespace_)\n result[\"status_code\"] = checkout_element.find(\"RETURN_CODE\").text\n result[\"desc\"] = checkout_element.find(\"DESCRIPTION\").text\n result[\"trans_id\"] = checkout_element.find(\"TRX_ID\").text\n result[\"merchant_trans_id\"] = checkout_element.find(\n \"MERCHANT_TRANSACTION_ID\").text\n\n return result"
]
| [
"0.603699",
"0.56335163",
"0.5395163",
"0.526941",
"0.5254303",
"0.52452505",
"0.52179694",
"0.5169635",
"0.5075448",
"0.504402",
"0.50247324",
"0.5017175",
"0.49992537",
"0.49695182",
"0.4865666",
"0.48077887",
"0.47865376",
"0.47796166",
"0.4770173",
"0.4770173",
"0.4770173",
"0.4770173",
"0.47693613",
"0.47640717",
"0.47371134",
"0.4732174",
"0.47227284",
"0.4710278",
"0.47078365",
"0.4690536"
]
| 0.6981153 | 0 |
Persist the response we returned for an incoming transaction, and that we should return again for subsequent transactions with the same transaction_id and origin. | def set_received_txn_response(self, transaction_id, origin, code, response_dict):
return self.db.simple_insert(
table="received_transactions",
values={
"transaction_id": transaction_id,
"origin": origin,
"response_code": code,
"response_json": db_binary_type(encode_canonical_json(response_dict)),
"ts": self._clock.time_msec(),
},
or_ignore=True,
desc="set_received_txn_response",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_response(self, request, response):\r\n if transaction.is_managed():\r\n if transaction.is_dirty():\r\n transaction.commit()\r\n transaction.leave_transaction_management()\r\n return response",
"def store_response(resp, response_dict):\n if response_dict is not None:\n response_dict['status'] = resp.status\n response_dict['reason'] = resp.reason\n response_dict['headers'] = resp_header_dict(resp)",
"def store_response(self, new_response):\n self.responses.append(new_response)",
"def save_response(self, key, response):\n self.responses[key] = response, datetime.now(timezone.utc)",
"def save_response(self):\n self.indexes['resp'] = attribute_index('resp', self)\n # Checking if the attribute \"resp\" is not empty:\n if not type(self.resp['coords']) == np.ndarray:\n print(\"Response is empty. Please run a simulation.\")\n # Checking if the target response has already been registered:\n elif self.indexes['resp'] == None:\n # Registering the synapse if necessary:\n self.indexes['syn'] = register_instance('syn', self)\n # Registering the response and setting its new index:\n self.indexes['resp'] = register_instance('resp', self)\n create_directory('resp', self)\n # Exporting the contents of the attribute \"resp\" to csv files:\n path_dir = path_directory('resp', self)\n coords_ref = create_coords_ref(self.resp['coords'])\n pd.DataFrame(self.resp['coords']).to_csv(os.path.join(path_dir, 'coords.csv'))\n for i in range(len(coords_ref)) :\n self.resp['glus'][i].to_csv(os.path.join(path_dir, 'resglu{}.csv'.format(coords_ref[i])), header=True)\n self.resp['AMPAtot'].to_csv(os.path.join(path_dir, 'resAMPAtot.csv'), header=True)\n self.resp['V'].to_csv(os.path.join(path_dir, 'resV.csv'), header=True)\n print(\"Saved: response at index {} for synapse {}.\".format(self.indexes['resp'], self.indexes['syn']))\n else:\n print(\"Response already registered at index {} for synapse {}.\".format(self.indexes['resp'], self.indexes['syn']))",
"def save_response(self, request, response):\n response_dict = self.process_response(request.path, response)\n try:\n self.ser.info(pickle.dumps(response_dict))\n self.ser.info(RESPONSE_UNIQUE_STRING)\n except (TypeError, pickle.PicklingError):\n #Can't pickle wsgi.error objects\n pass",
"def process_response(self, request, response):\n if hasattr(threadlocal, 'auditlog'):\n pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])\n\n return response",
"def get_received_txn_response(self, transaction_id, origin):\n\n return self.db.runInteraction(\n \"get_received_txn_response\",\n self._get_received_txn_response,\n transaction_id,\n origin,\n )",
"async def save_response(self, key: str, response: ClientResponse):\n if not self.is_cacheable(response):\n return\n logger.info(f'Saving response for key: {key}')\n\n expires = self.get_expiration_date(response)\n cached_response = await CachedResponse.from_client_response(response, expires)\n await self.responses.write(key, cached_response)\n\n # Alias any redirect requests to the same cache key\n for r in response.history:\n await self.redirects.write(self.create_key(r.method, r.url), key)",
"def store_response(self, resource):\n\n \"\"\"Get the content from the POST request.\"\"\"\n content_length = int(self.headers.getheader('Content-Length'))\n body = self.rfile.read(content_length)\n response = json.loads(body)\n\n \"\"\"Add the content to the configured resource queue\"\"\"\n if resource not in self.responses_qeues:\n self.responses_qeues[resource] = []\n self.responses_qeues[resource].append(response)\n else:\n self.responses_qeues[resource].append(response)\n\n \"\"\"Add the content to the dictionary of responses.\"\"\"\n #self.responses_dict.update(response)\n\n \"\"\"Send the response to the request.\"\"\"\n self.send_response(204)\n self.end_headers()",
"def _persist(self):\n trunk.set(self.uuid, self.json)",
"def process_non_adherent_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")",
"def store_producer_decision_and_response(producer_decision_and_response):\n pass",
"def process_response(request, response):\n # A higher middleware layer may return a request which does not contain\n # messages storage, so make no assumption that it will be there.\n if hasattr(request, '_events'):\n # noinspection PyProtectedMember\n unstored_events = request._events.update(response)\n if unstored_events and settings.DEBUG:\n raise ValueError('Not all temporary events could be stored.')\n return response",
"def process_non_adherent_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")",
"def process_response(self, request, response):\n return response",
"def process_response(self, request, response):\n return response",
"def _handle_orders(self, response):\n response_type = response['type']\n state_updated = False\n if response_type == \"subscription_ack\":\n # Insure the subscription details are expected. Don't do anything.\n account_id = response['accountId']\n # TODO: should we do anything with the subscription id?\n # subscription_id = response['subscriptionId']\n symbol_filter = response['symbolFilter']\n api_session_filter = response['apiSessionFilter']\n event_type_filter = response['eventTypeFilter']\n if len(symbol_filter) or len(event_type_filter):\n raise Exception(\"No symbol or event type were specified, but \"\n \"filters were registered.\")\n if len(api_session_filter) != 1:\n raise Exception(\"1 session filter should have been registered.\"\n f\"{len(api_session_filter)} were registered.\")\n accepted_key = api_session_filter[0]\n if accepted_key != self._api_credentials\\\n .api_key:\n raise Exception(\"The whitelisted api session key does not \"\n \"match our session key.\")\n elif response_type == \"initial\":\n # Create a new order record for the initial response.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n existing_order = self.exchange_state.order(new_order.order_id)\n if existing_order:\n raise Exception(\"An initial response was received for an \"\n \"existing order (id: {new_order.order_id}).\")\n self.exchange_state.set_order(new_order.order_id, new_order)\n state_updated = True\n elif response_type == \"accepted\":\n # Create a new order. Mark the corresponding action as successful.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order accept message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n # I don't know if we need this status.\n a.status = exchanges.Action.Status.SUCCESS\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order accept message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"rejected\":\n order_response = OrderResponse.from_json_dict(response)\n log.warning(f\"An order was rejected. Reason: \" + response['reason'])\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order reject message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n a.status = exchanges.Action.Status.FAILED\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order reject message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"booked\":\n # I don't think we need to act on this.\n log.info(\"Order booked. 
Order id:{response['order_id']}.\")\n elif response_type == \"fill\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a fill response for an unknown order \"\n f\"(id:{order_response.order_id}).\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n # TODO: we could add some checks here to see if our fee calculation\n # is correct.\n elif response_type == \"cancelled\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n reason = response.get('reason', 'No reason provided.')\n # Unused:\n # cancel_command_id = response.get('cancel_command_id', None)\n if not order:\n raise Exception(\"Received a cancelled response for an unknown \"\n f\"order (id:{order_response.order_id}). Reason:\"\n f\"{reason}\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}. Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel response but can't find a \"\n \"matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.SUCCESS\n state_updated = True\n elif response_type == \"cancel_rejected\":\n order_response = OrderResponse.from_json_dict(response)\n reason = response.get('reason', 'No reason provided.')\n log.warning(\"Failed to cancel order (id: \"\n f\"{order_response.order_id}). Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel rejected response but can't \"\n \"find a matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.FAILED\n state_updated = True\n elif response_type == \"closed\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a close response for an unknown order\"\n f\" (id:{order_response.order_id}).\")\n log.info(\"Order close response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n else:\n raise Exception(f\"Unexpected response type: {response_type}.\")\n return state_updated",
"def send_response(self, transaction):\n host, port = transaction.request.source\n key_token = hash(str(host) + str(port) + str(transaction.request.token))\n if (key_token in self._block2_receive and transaction.response.payload is not None) or \\\n (transaction.response.payload is not None and len(transaction.response.payload) > defines.MAX_PAYLOAD):\n if key_token in self._block2_receive:\n\n byte = self._block2_receive[key_token].byte\n size = self._block2_receive[key_token].size\n num = self._block2_receive[key_token].num\n\n else:\n byte = 0\n num = 0\n size = defines.MAX_PAYLOAD\n m = 1\n\n self._block2_receive[key_token] = BlockItem(byte, num, m, size)\n\n if len(transaction.response.payload) > (byte + size):\n m = 1\n else:\n m = 0\n transaction.response.payload = transaction.response.payload[byte:byte + size]\n del transaction.response.block2\n transaction.response.block2 = (num, m, size)\n\n self._block2_receive[key_token].byte += size\n self._block2_receive[key_token].num += 1\n if m == 0:\n del self._block2_receive[key_token]\n\n return transaction",
"def process_incoming_response(self, response):\n # Validate the response.\n if not {\"__id\", \"__data\", \"__error\"}.issubset(iterkeys(response)):\n self.disconnect(\"Bad response received\")\n logger.warning(\"Response is missing some fields, ignoring.\")\n return\n\n # Determine the ID.\n id_ = response[\"__id\"]\n\n if id_ not in self.pending_outgoing_requests:\n logger.warning(\"No pending request with id %s found.\", id_)\n return\n\n request = self.pending_outgoing_requests.pop(id_)\n result = self.pending_outgoing_requests_results.pop(id_)\n error = response[\"__error\"]\n\n if error is not None:\n err_msg = \"%s signaled RPC for method %s was unsuccessful: %s.\" % (\n self.remote_service_coord, request[\"__method\"], error)\n logger.error(err_msg)\n result.set_exception(RPCError(error))\n else:\n result.set(response[\"__data\"])",
"def handle_response(self, response):\n\n self._tmp_request_args = {}\n self.cache_response(response)",
"def record(self, response):\n self.get_recorder().record(self.request, response)",
"def process_response(self, request, response):\n if not hasattr(response, 'context_data'):\n return response\n\n if HONEST_AB_COOKIE_KEY in response.context_data:\n for key, value in response.context_data[HONEST_AB_COOKIE_KEY]['__cache__'].iteritems():\n response.set_signed_cookie(key, value['value'], salt=value['salt'])\n\n return response",
"def process_med_info_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")",
"def finish_transaction(self, *, transaction: \"Transaction\" = None) -> \"TransactionInfo\":\n response = super().finish_transaction(transaction=transaction)\n self._set_transaction_id(\"\")\n return response",
"def _store_transaction(account, transaction):\n tr_tx = transaction['tx']\n meta = transaction.get('meta', {})\n\n if meta.get('TransactionResult') != 'tesSUCCESS':\n return\n\n amount = meta.get('delivered_amount') or tr_tx.get('Amount', {})\n\n is_unprocessed = (\n tr_tx['TransactionType'] == 'Payment' and\n tr_tx['Destination'] == account and\n isinstance(amount, dict) and\n not Transaction.objects.filter(hash=tr_tx['hash'])\n )\n if is_unprocessed:\n logger.info(\n format_log_message(\n 'Saving transaction: %s', transaction\n )\n )\n\n transaction_object = Transaction.objects.create(\n account=tr_tx['Account'],\n hash=tr_tx['hash'],\n destination=account,\n ledger_index=tr_tx['ledger_index'],\n destination_tag=tr_tx.get('DestinationTag'),\n source_tag=tr_tx.get('SourceTag'),\n status=Transaction.RECEIVED,\n currency=amount['currency'],\n issuer=amount['issuer'],\n value=amount['value']\n )\n\n logger.info(\n format_log_message(\n \"Transaction saved: %s\", transaction_object\n )\n )",
"def process_response(self, response):\n return response",
"def save(self, fl_ctx: FLContext, shareable: Shareable, record_origin: str):\n pass",
"def save_response(self, res) -> None:\n file = open(\"response_{}.json\".format(self.num_res), \"w\")\n file.write(str(res))\n file.close()",
"def persist(self):\n pass"
]
| [
"0.6364257",
"0.6238919",
"0.61123395",
"0.6015873",
"0.5987165",
"0.5851384",
"0.5706261",
"0.5692146",
"0.5657159",
"0.56424594",
"0.56416804",
"0.54970986",
"0.5437243",
"0.5410561",
"0.53601235",
"0.53576314",
"0.53576314",
"0.5333208",
"0.5309181",
"0.5278578",
"0.5256496",
"0.52481717",
"0.52309465",
"0.5213536",
"0.5200779",
"0.5198186",
"0.5183184",
"0.5151661",
"0.5145319",
"0.5128693"
]
| 0.6618684 | 0 |
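
The two records above are the read and write halves of an idempotent-response store: the code and body sent for a given (origin, transaction_id) are persisted so a retransmitted transaction gets the same reply. A minimal in-memory sketch of that pattern follows; the class and method names and the dict-backed store are hypothetical illustrations, not the real database-backed API that writes to a received_transactions table.

import time


class InMemoryTxnResponseCache:
    """Illustrative stand-in for the received_transactions table used above."""

    def __init__(self):
        # (origin, transaction_id) -> (response_code, response_dict, stored_at)
        self._responses = {}

    def get_received_txn_response(self, transaction_id, origin):
        # Return (code, body) if we already answered this transaction, else None.
        entry = self._responses.get((origin, transaction_id))
        if entry is None:
            return None
        code, response_dict, _stored_at = entry
        return code, response_dict

    def set_received_txn_response(self, transaction_id, origin, code, response_dict):
        # "or_ignore" semantics: keep the first response recorded for this key.
        self._responses.setdefault(
            (origin, transaction_id), (code, response_dict, time.time())
        )


cache = InMemoryTxnResponseCache()
cache.set_received_txn_response("txn-1", "example.org", 200, {"ok": True})
assert cache.get_received_txn_response("txn-1", "example.org") == (200, {"ok": True})
assert cache.get_received_txn_response("txn-2", "example.org") is None
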
Gets the current retry timings (if any) for a given destination. | def get_destination_retry_timings(self, destination):
result = self._destination_retry_cache.get(destination, SENTINEL)
if result is not SENTINEL:
return result
result = yield self.db.runInteraction(
"get_destination_retry_timings",
self._get_destination_retry_timings,
destination,
)
# We don't hugely care about race conditions between getting and
# invalidating the cache, since we time out fairly quickly anyway.
self._destination_retry_cache[destination] = result
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_destination_retry_timings(\n self, destination, failure_ts, retry_last_ts, retry_interval\n ):\n\n self._destination_retry_cache.pop(destination, None)\n return self.db.runInteraction(\n \"set_destination_retry_timings\",\n self._set_destination_retry_timings,\n destination,\n failure_ts,\n retry_last_ts,\n retry_interval,\n )",
"def timings(self):\r\n return self._timings",
"def get_tries(self):\n return self._tries",
"def getDeliveryTime(ori, dest):\n\n start_time = time.time()\n\n routingApi = herepy.RoutingApi(os.getenv(\"HERE_KEY\"))\n gm = GoogleMaps(os.getenv(\"GOOGLE_KEY\"))\n\n try:\n response = routingApi.truck_route(ori.coords[::-1], dest.coords[::-1], [herepy.RouteMode.truck, herepy.RouteMode.fastest]).as_dict()\n distance = response.get('response').get('route')[0].get('summary').get('distance') / 1000\n except herepy.error.HEREError:\n try:\n response = gm.distance_matrix(ori.coords[::-1], dest.coords[::-1], mode=\"driving\", departure_time=dt.datetime.now(), traffic_model=\"pessimistic\")\n distance = response.get('rows')[0].get('elements')[0].get('distance').get('value') / 1000\n except Exception as e:\n capture_exception(e)\n raise e\n\n if distance < 51:\n deltime = 6\n elif distance > 50 and distance < 701:\n deltime = 24\n elif distance > 700 and distance < 1400:\n deltime = 48\n else:\n deltime = 72\n\n print('--- Tiempo de ejecucion calcDeliveryTime: {} segundos ---'.format((time.time() - start_time)))\n\n return deltime, distance",
"def get_drive_time(apiKey, origin, destination):\n import requests\n url = ('https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={}&destinations={}&key={}'\n .format(origin.replace(' ','+'),\n destination.replace(' ','+'),\n apiKey\n )\n )\n try:\n response = requests.get(url)\n resp_json_payload = response.json()\n drive_time = resp_json_payload['rows'][0]['elements'][0]['duration']['value']/60\n except:\n print('ERROR: {}, {}'.format(origin, destination))\n drive_time = 0\n return drive_time",
"def get_retry_count(self):\r\n return self.retried_nomax + self.retried_withmax",
"def travel_time(self, origin, destination):\n assert 2 <= len(origin) <= 3, \"Origin should by (x, y) or (x, y, z)\"\n assert 2 <= len(destination) <= 3, \"Origin should by (x, y) or (x, y, z)\"\n assert len(origin) == len(destination), \"Elevation should be present in origin and destination or absent in both\"\n if len(origin) == 2:\n xo, yo = origin\n xd, yd = destination\n zo = zd = 0\n else:\n assert len(origin) == 3\n xo, yo, zo = origin\n xd, yd, zd = destination\n\n ground_distance = np.sqrt((xd-xo)**2 + (yd-yo)**2)\n elevation_diff = zd - zo\n if elevation_diff >= 0:\n return max(ground_distance / self.max_airspeed, elevation_diff / self.max_rate_of_climb)\n else:\n return max(ground_distance / self.max_airspeed, -elevation_diff / self.max_rate_of_descent)",
"def get_times(self):\n raise NotImplementedError(\"Abstract method not implemented.\")",
"def get_retry_delay(self, last_delay):\n return last_delay * 2",
"def queue_times(self):\r\n return [task.scheduler_launch_time - self.__arrival_time\r\n for task in self.__tasks.values() if task.complete()]",
"def get_retry_interval(self) -> int:\n if self.retry_intervals is None:\n return 0\n number_of_intervals = len(self.retry_intervals)\n index = max(number_of_intervals - self.retries_left, 0)\n return self.retry_intervals[index]",
"def find_destination_hub_and_leg_travel_time(destination):\n milk_run_shipments = create_shipments_milk_run()\n for shipment in milk_run_shipments:\n for dest in shipment.milk_run.split('-'):\n if dest == destination:\n return shipment.hub, shipment.travel_time\n return 'not found', 0",
"def determine_sleep_times(self):\n\n determined_sleep_time = \\\n random.randrange(self.dns_conf.min_backoff_range,\n self.dns_conf.max_backoff_range)\n\n backoff = [(2 ** i) * determined_sleep_time for i in\n range(0, self.dns_conf.retries)]\n\n return backoff",
"def round_trip_time(self):\r\n return self.completion_time - self.launch_time",
"def getNextDest(self):\n\n if self.direction_forward:\n if len(self.destinations)-1 == self.current_loc: #if Autobuz reaches rightmost destination, it also takes a break and changes directions\n self.direction_forward = False #Autobuz changes direction\n self.updateOmLocation()\n return self.destinations[self.current_loc], (self.break_duration + self.trip_duration) #return destination reached and elapsed time\n \n else:\n self.current_loc += 1\n self.updateOmLocation()\n return self.destinations[self.current_loc], self.trip_duration\n \n else:\n if 0 == self.current_loc: #if Autobuz reaches leftmost destination, it also takes a break and changes directions\n self.direction_forward = True #Autobuz changes direction\n self.updateOmLocation()\n return self.destinations[self.current_loc], (self.break_duration + self.trip_duration)\n \n else:\n self.current_loc -= 1\n self.updateOmLocation()\n return self.destinations[self.current_loc], self.trip_duration",
"def getRetryCount():\n return int(webapp2.get_request().headers.get('X-Appengine-TaskRetryCount', 0))",
"def getTimes():",
"def getTimes():",
"def getTimes():",
"def travel_time(self, passenger_id):\n for passenger in self.passengers:\n if passenger_id == passenger.id:\n trip_time = self._passenger_trip_time(passenger)\n rounded_time = {key: round(val, 2)\n for key, val in trip_time.items()}\n return rounded_time\n return ValueError(\n f\"A passenger with Id={passenger_id} does not exist.\")",
"def get_time(self) -> int:\n return self._select_interface(self._rc_get_time, self._http_get_time)",
"def retry(self):\n return self._retry",
"def get_best_times(self):\n\n return self.best_times",
"def getTravelTimes(self): \n nrec = self.nrec\n nsrc = self.nsrc\n if (nrec < 1): \n print(\"No receiver locations set\")\n return None\n if (nsrc < 1):\n print(\"No sources\")\n ttr = ascontiguousarray(zeros(nrec*nsrc), dtype='float64')\n ttrPointer = ttr.ctypes.data_as(POINTER(c_double))\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_getTravelTimes64f(nrec, ttrPointer, ierr)\n if (ierr.value != 0): \n print(\"Error getting travel times\")\n return None\n if (nsrc > 1):\n ttr = reshape(ttr, [self.nrec, self.nsrc], order='F')\n return ttr",
"def determine_arrival_time(middle_destination):\r\n\r\n start_link = 'https://maps.googleapis.com/maps/api/directions''/json?'\r\n end_link = '&mode=transit&transit_mode=subway'\r\n final_link = start_link + 'origin=%s&destination=%s&key=%s&arrival_time=%s' % (\r\n origin, middle_destination, api_key, str(arrival_time),) + end_link\r\n # change to directions matrix\r\n json_total_routes = requests.get(final_link).json()\r\n # determines start time to get to destination and then adds 5 minute (300 sec) buffer, this is in unix form\r\n buffer_time = 300\r\n arrival_time_transit = json_total_routes['routes'][0]['legs'][0]['departure_time']['value'] - buffer_time\r\n return arrival_time_transit",
"def restore_traffic_time_to_healed_or_new_endpoint_in_minutes(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"restore_traffic_time_to_healed_or_new_endpoint_in_minutes\")",
"def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")",
"def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")",
"def queue_times(self):\r\n queue_times = []\r\n for task in self.__tasks.values():\r\n if task.complete():\r\n queue_times.append(task.queued_time())\r\n return queue_times",
"def _get_recordTtl(self):\n return self.__recordTtl"
]
| [
"0.67022175",
"0.58530706",
"0.5591563",
"0.546668",
"0.5392975",
"0.52803975",
"0.52738106",
"0.5146915",
"0.51433176",
"0.5128729",
"0.5097669",
"0.50913644",
"0.5060083",
"0.5018542",
"0.5013286",
"0.49948463",
"0.49795264",
"0.49795264",
"0.49795264",
"0.4966824",
"0.49573004",
"0.49568945",
"0.4942928",
"0.4934503",
"0.49304378",
"0.49295262",
"0.49112192",
"0.49112192",
"0.49003896",
"0.48858717"
]
| 0.8384712 | 0 |
Sets the current retry timings for a given destination. Both timings should be zero if retrying is no longer occurring. | def set_destination_retry_timings(
self, destination, failure_ts, retry_last_ts, retry_interval
):
self._destination_retry_cache.pop(destination, None)
return self.db.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings,
destination,
failure_ts,
retry_last_ts,
retry_interval,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_destination_retry_timings(self, destination):\n\n result = self._destination_retry_cache.get(destination, SENTINEL)\n if result is not SENTINEL:\n return result\n\n result = yield self.db.runInteraction(\n \"get_destination_retry_timings\",\n self._get_destination_retry_timings,\n destination,\n )\n\n # We don't hugely care about race conditions between getting and\n # invalidating the cache, since we time out fairly quickly anyway.\n self._destination_retry_cache[destination] = result\n return result",
"def set_retry_timeout(self, retry_timeout):",
"def set_tries(self,lives):\n self._tries = lives",
"def round_trip_times(self, round_trip_times):\n self._round_trip_times = round_trip_times",
"def SetIterationsAndRetries(self, iterations, retries):\n if not isinstance(iterations, int) or iterations == 0 or iterations < -1:\n raise ValueError(\n 'In test %s, Iterations must be a positive integer or -1, not %r' % (\n self.path, iterations))\n if not isinstance(retries, int) or retries < -1:\n raise ValueError(\n 'In test %s, Retries must be a positive integer, 0, or -1, not %r' % (\n self.path, retries))\n self.iterations = float('inf') if iterations == -1 else iterations\n self.retries = float('inf') if retries == -1 else retries",
"def retries(self, count: int):\n if count < 0:\n raise ValueError(\"negative\")\n\n self._retries = count",
"def set_timeouts(self, timeouts):\n self._timeouts = timeouts",
"def set_retry_after(self, value):\n self._retry_after = self._to_datetime(value or 1)\n return self",
"def reset_rate_limit(self):\n self.rate_limit_remaining += 1\n self.rate_limit_remaining = min(\n self.rate_limit_remaining, self.rate_limit_limit)\n\n # Countdown of retry sleep seconds\n if self.rate_limit_sleep:\n self.rate_limit_sleep -= 1",
"def retry_strategy(self, retry_strat):\n self.retry_strategy = retry_strat\n return self",
"def mark_retry(self, eta=None, delay=None, trace=None):\n if delay is not None:\n eta = timezone.now() + delay\n self.eta = eta\n self.status = self.RETRY\n self.traceback = trace\n self.save(update_fields={'eta', 'status', 'traceback', 'retries', 'updated_at'})",
"def timing(self, timing):\n\n self._timing = timing",
"def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)",
"def retry_timer(which_retry, retry_base_interval, mode = None):\n\n if mode == None:\n mode = 'random'\n\n if mode == 'random':\n retry_wait_interval = retry_base_interval * random.random()\n elif mode == 'multiply':\n retry_wait_interval = which_retry * retry_base_interval\n elif mode == 'multirand':\n retry_wait_interval = which_retry * retry_base_interval * random.random()\n\n return {'mode': mode, 'interval': retry_wait_interval, 'retry': which_retry }",
"def settimeout(self, value: int) -> None:\n ...",
"def timeout_set(self, x):\n if not self._dtr_enabled:\n self._resume_time = self.__micros() + x",
"def set_throttle_timer(self, view, value):\n for cls in view.throttle_classes:\n cls.timer = lambda self: value",
"def set_throttle(self, limit=None, units=None):\n self.delay = 0\n self.max_requests = 1e16\n self.made_requests = 0",
"def restore_traffic_time_to_healed_or_new_endpoint_in_minutes(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"restore_traffic_time_to_healed_or_new_endpoint_in_minutes\")",
"def set_timeout(self, seconds):\n self._timeout = seconds",
"def reps(self, r: int) -> None:\n self._invalidate()\n self._reps = r",
"def __init__(self, tries, exceptions=None, delay=1):\r\n self.tries = tries\r\n if exceptions is None:\r\n exceptions = retry.default_exceptions\r\n self.exceptions = exceptions\r\n self.delay = delay",
"def get_retry_count(self):\r\n return self.retried_nomax + self.retried_withmax",
"def test_change_default_throttling_settings_http_with_overwrite_throttled():",
"def set_num_rounds(cls, new_num_rounds):\n raise NotImplementedError(\"subclasses need to override this method\")",
"def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.controller.row = self.rps * value",
"def __init__(self, tries , exceptions=None, delay=0.01):\n self.tries = tries\n if exceptions is None:\n exceptions = Retry.default_exceptions\n self.exceptions = exceptions\n self.delay = delay",
"def set_destination(self):\n # TODO: consider new implementation with multiple paths possible.\n self.destination = self.network[self.current_node]['next']\n lead_time = self.network[self.current_node]['path'].lead_time\n return datetime.timedelta(hours=lead_time)",
"def setDelays(self, d):\n raise NotImplementedError",
"def set_limit_per_second(self, rate_limit_per_second):\n pass"
]
| [
"0.64224875",
"0.62720156",
"0.56372",
"0.555946",
"0.5518571",
"0.53955126",
"0.53396565",
"0.531217",
"0.52713865",
"0.52484846",
"0.5234268",
"0.5068851",
"0.50572044",
"0.5030623",
"0.50164175",
"0.49849373",
"0.49810487",
"0.48878133",
"0.4884664",
"0.48801792",
"0.4871506",
"0.48679605",
"0.48669907",
"0.48501462",
"0.48459557",
"0.48455828",
"0.48406863",
"0.48344168",
"0.48318717",
"0.48181957"
]
| 0.7926547 | 0 |
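
The two retry-timing records above only read and write (failure_ts, retry_last_ts, retry_interval) for a destination; the backoff policy itself lives elsewhere. Below is a hedged sketch of how such timings are typically consumed — decide whether to retry now, double the interval on failure, and zero both timings on success. The RetryState dataclass, the in-memory dict, and the interval bounds are assumptions for illustration, not the cache/database layer shown above.

import time
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class RetryState:
    failure_ts: Optional[float] = None  # when the current run of failures started
    retry_last_ts: float = 0.0          # when we last tried this destination
    retry_interval: float = 0.0         # 0 means "not currently backing off"


_retry_timings: Dict[str, RetryState] = {}


def get_destination_retry_timings(destination):
    return _retry_timings.setdefault(destination, RetryState())


def set_destination_retry_timings(destination, failure_ts, retry_last_ts, retry_interval):
    _retry_timings[destination] = RetryState(failure_ts, retry_last_ts, retry_interval)


def should_retry(destination, now=None):
    # True once the backoff interval since the last attempt has elapsed.
    now = time.time() if now is None else now
    state = get_destination_retry_timings(destination)
    return now - state.retry_last_ts >= state.retry_interval


def record_failure(destination, min_interval=10.0, max_interval=3600.0):
    # Exponential backoff: double the interval, bounded below and above.
    now = time.time()
    state = get_destination_retry_timings(destination)
    interval = min(max(state.retry_interval * 2, min_interval), max_interval)
    set_destination_retry_timings(destination, state.failure_ts or now, now, interval)


def record_success(destination):
    # Both timings go back to zero once retrying is no longer occurring.
    set_destination_retry_timings(destination, None, 0.0, 0.0)
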
Returns the smallest string of at most max_len characters accepted by this NFA that is lexicographically greater than sequence. | def next_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:
max_hi = num_seqs_with_max_len(len(self.alphabet), max_len)
desired_num_accepted = self.num_accepts_ge(max_len, sequence) - self.accepts(sequence)
lo = seq_to_num(sequence, self.inverse_alphabet, max_len) + 1
# We don't have an upper bound on where the next_accepted string is, so we look at an exponentially increasing
# gap above low until we find at least one accepted string.
diff = 1
while True:
hi = lo + diff
if hi >= max_hi:
hi = max_hi - 1
break
hi_seq = "".join(num_to_seq(hi, self.alphabet, max_len))
hi_num_accepted = self.num_accepts_ge(max_len, hi_seq)
if hi_num_accepted < desired_num_accepted:
break
lo = hi
diff *= 2
# Now we know that the string we're looking for is [lo, hi]. The upper bound is inclusive!
while lo <= hi:
mid = (lo + hi) // 2
mid_seq = "".join(num_to_seq(mid, self.alphabet, max_len))
mid_num_accepted = self.num_accepts_ge(max_len, mid_seq)
if mid_num_accepted < desired_num_accepted:
hi = mid - 1
elif mid_num_accepted == desired_num_accepted and self.accepts(mid_seq):
return mid_seq
else: # elif mid_num_accepted > desired_num_accepted:
lo = mid + 1
return None # There is no next sequence! | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _model_string_maxlen():\n # hardcoded for convenience. Could be dynamically set in future.\n # the current longest is: BLOSUM62+I+G+X, i.e. 14 chars.\n # so we just over double it, for safety\n\n return 30",
"def filter_max_length(self, string):\n newstring = string\n length = len(newstring)\n max_length = 63\n if length > max_length:\n newstring = newstring[0:max_length]\n\n return newstring",
"def prev_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:\n desired_num_accepted = self.num_accepts_ge(max_len, sequence) + 1\n hi = seq_to_num(sequence, self.inverse_alphabet, max_len)\n\n # We don't have a lower bound on where the next_accepted string is, so we look at an exponentially increasing\n # gap above low until we find at least one accepted string.\n diff = 1\n while True:\n lo = hi - diff\n if lo <= 0:\n lo = 0\n break\n lo_seq = \"\".join(num_to_seq(lo, self.alphabet, max_len))\n lo_num_accepted = self.num_accepts_ge(max_len, lo_seq)\n if lo_num_accepted > desired_num_accepted:\n break\n diff *= 2\n\n while lo <= hi:\n mid = (lo + hi) // 2\n mid_seq = \"\".join(num_to_seq(mid, self.alphabet, max_len))\n mid_num_accepted = self.num_accepts_ge(max_len, mid_seq)\n if mid_num_accepted < desired_num_accepted:\n hi = mid - 1\n elif mid_num_accepted == desired_num_accepted and self.accepts(mid_seq):\n return mid_seq\n else: # elif mid_num_accepted > desired_num_accepted:\n lo = mid + 1\n return None # There is no previous sequence!",
"def question_15(list_str: str) -> str:\n return max(list_str, key=len)",
"def check_this_input(self, inp, max_len):\n\t\tst = inp.get()\n\t\tst = st.strip()\n\t\treturn st if len(st) > 0 and len(st) < max_len else None",
"def get_max_character(strings):\n m=0\n for string in strings:\n for char in string:\n if char>m:\n m=char\n return m",
"def len_of_longest_string(s):\n return len(max(s, key=len))",
"def max_length(self):\n\t\treturn self._max_length",
"def minimum_length(char_list, length):\n return len(char_list)**length + length - 1",
"def filter_min_length(self, string):\n newstring = string\n length = len(newstring)\n min_length = 3\n num_to_add = min_length - length\n while num_to_add > 0:\n newstring = newstring + \"x\"\n num_to_add = num_to_add - 1\n\n return newstring",
"def min_length(self) -> int:\n return pulumi.get(self, \"min_length\")",
"def _min_length(self):\n return self.__min_length",
"def minimum_length(self):\n\n return self._minimum_length.value",
"def max_seq_len() -> int:\n return 8",
"def _max_length(self):\n return self.__max_length",
"def min_length(self) -> int | None:\n return self._underlying.min_length",
"def question_16(list_str: str) -> str:\n return min(list_str, key=len)",
"def longest_ORF(dna):\n orfs = find_all_ORFs_both_strands(dna)\n maxorf =orfs[1];\n for s in orfs:\n if len(s)>len(maxorf):\n maxorf=s\n return maxorf",
"def str_maxed(arg, maxlen, ellipsis_str=\"..\"):\n s = str(arg)\n if maxlen <= 0 or len(s) <= maxlen:\n return s\n else:\n return \"%s%s\" % (s[:maxlen], ellipsis_str)",
"def longestAwesome(self, s: str) -> int:\n\n # So we are moving right, and reducing length by 1\n # for every time we move right - we start from the longest substring that can be formed to lowest one\n # So the moment, we find something we can instantly breal\n\n max_length = 0\n\n if s == s[::-1]:\n return len(s)\n\n for i in range(0, len(s)):\n left = i\n right = len(s)\n\n if right - left > max_length:\n\n while right > left:\n\n candidate = s[left:right]\n # print(f\"The candidate is: {candidate}\")\n ctr = Counter(candidate)\n\n # initial base check\n odd_cnt = 0\n fl = False\n for k, v in ctr.items():\n if v & 1:\n odd_cnt += 1\n if odd_cnt > 1:\n fl = True\n break\n\n if not fl:\n if max_length < (right - left):\n max_length = right - left\n # max_length = max(max_length, len(candidate))\n\n right -= 1\n\n return max_length",
"def max_value(self, num_characters):\n return pow(self.alphabet_len, num_characters) - 1",
"def get_minmaxlen(record, field_name, minlen, maxlen=-1):\n val = recordval(record, field_name)\n if minlen > 0 and val == \"\":\n parser_error(\"missing value in \"+field_name+\": '\"+val+\"'-- field required.\")\n elif minlen > 0 and len(val) < minlen:\n parser_error(\"value not long enough in \"+field_name+\": '\"+val+\"'-- \"+\n \"requires %d characters\" % minlen)\n elif maxlen > 0 and len(val) > maxlen:\n parser_error(\"value too long in \" + field_name + \": '\" + val[0:maxlen] + \"'--\" +\n \"requires less than %d characters\" % maxlen)\n return val",
"def str(self, max_length=20):\n length = self.int(max_length) + 1\n characters = [\n self.rng.choice(self.alphabet)\n for _ in range(self.rng.integers(length))\n ]\n return \"\".join(characters)",
"def upperLimit(lenFact1):\n\tfact2 = lenFact1*fact9\n\tlenfact2 = len(str(fact2))\n\tmaxFact = lenfact2*fact9\n\tlenMax = len(str(maxFact))\n\tif lenMax == lenfact2: \n\t\treturn maxFact\n\telse:\n\t\treturn upperLimit(lenMax)",
"def longest_ORF(dna):\n both_strings=find_all_ORFs_both_strands(dna)\n L=max(both_strings,key=len)\n Q=len(L)\n return Q\n\n #save out put of find all orfboth string to some variable",
"def longest_word_length(words):",
"def max_length(self) -> int | None:\n return self._underlying.max_length",
"def test__limit_string_length(string, max_length):\n return limit_string_length(string, max_length)",
"def checkio_best(text):\n text = text.lower()\n # text.count为函数,返回指定char的数量\n return max(string.ascii_lowercase, key=text.count)",
"def max_chars(self):\n return self.range_field[0] * self.range_field[1]"
]
| [
"0.68739814",
"0.670806",
"0.6582716",
"0.6533468",
"0.64901865",
"0.6415257",
"0.635727",
"0.6322649",
"0.6315081",
"0.62874407",
"0.62872374",
"0.6286444",
"0.6286076",
"0.62738734",
"0.6236837",
"0.62363255",
"0.61767066",
"0.61482036",
"0.61432",
"0.61285764",
"0.60883576",
"0.6088212",
"0.60836214",
"0.605859",
"0.60372955",
"0.6036558",
"0.60253686",
"0.6019862",
"0.5993774",
"0.59830004"
]
| 0.67607486 | 1 |
Returns counts of the sequences accepted by this NFA. Counts accepted sequences of length less than or equal to max_len and returns a tuple of three counts: those lexicographically less than, equal to, and greater than bound. | def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:
lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)
lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)
eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)
eq2: Dict[FrozenSet[int], int] = collections.defaultdict(int)
gt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)
gt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)
eq1[frozenset(self.start_nodes)] = 1
num_accepted_le = int(self.accepts(""))
num_accepted_gt = 0
for c in itertools.islice(itertools.chain(bound, itertools.repeat(None)), 0, max_len):
for nodes, count in lt1.items():
for element in self.possible_transitions(nodes):
next_nodes = frozenset(self.next_nodes(nodes, element))
lt2[next_nodes] += count
for nodes, count in eq1.items():
for element in self.possible_transitions(nodes):
next_nodes = frozenset(self.next_nodes(nodes, element))
if c is None or (element is not None and element > c):
gt2[next_nodes] += count
elif element == c:
eq2[next_nodes] += count
else:
lt2[next_nodes] += count
for nodes, count in gt1.items():
for element in self.possible_transitions(nodes):
next_nodes = frozenset(self.next_nodes(nodes, element))
gt2[next_nodes] += count
num_accepted_le += self._sum_tables(eq2)
num_accepted_le += self._sum_tables(lt2)
num_accepted_gt += self._sum_tables(gt2)
if not lt2 and not eq2 and not gt2:
break # Exit early if we know this regex cannot accept any more strings.
lt1, lt2 = lt2, collections.defaultdict(int)
eq1, eq2 = eq2, collections.defaultdict(int)
gt1, gt2 = gt2, collections.defaultdict(int)
num_accepted_eq = int(len(bound) <= max_len and self.accepts(bound))
return num_accepted_le - num_accepted_eq, num_accepted_eq, num_accepted_gt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_accepts_ge(self, max_len: int, bound: Sequence[Text] = ()) -> int:\n _, num_accepted_eq, num_accepted_gt = self.num_accepts(max_len, bound)\n return num_accepted_eq + num_accepted_gt",
"def length_of_sequences(self):\n return self._seq_length",
"def max_sequence_length(self) -> int:\n return self._max_request_length",
"def max_request_length(self) -> int:\n return self.max_sequence_length",
"def max_seq_len() -> int:\n return 8",
"def determine_max_length(sequences, ids):\n max_len = 0\n for i in ids:\n if len(sequences[i]) > max_len:\n max_len = len(sequences[i])\n\n return max_len",
"def max_length(self) -> int | None:\n return self._underlying.max_length",
"def __len__(self):\n return self.total_num_sequences",
"def aln_length(self) -> int:\n return len(self)",
"def next_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:\n max_hi = num_seqs_with_max_len(len(self.alphabet), max_len)\n desired_num_accepted = self.num_accepts_ge(max_len, sequence) - self.accepts(sequence)\n lo = seq_to_num(sequence, self.inverse_alphabet, max_len) + 1\n\n # We don't have an upper bound on where the next_accepted string is, so we look at an exponentially increasing\n # gap above low until we find at least one accepted string.\n diff = 1\n while True:\n hi = lo + diff\n if hi >= max_hi:\n hi = max_hi - 1\n break\n hi_seq = \"\".join(num_to_seq(hi, self.alphabet, max_len))\n hi_num_accepted = self.num_accepts_ge(max_len, hi_seq)\n if hi_num_accepted < desired_num_accepted:\n break\n lo = hi\n diff *= 2\n\n # Now we know that the string we're looking for is [lo, hi]. The upper bound is inclusive!\n while lo <= hi:\n mid = (lo + hi) // 2\n mid_seq = \"\".join(num_to_seq(mid, self.alphabet, max_len))\n mid_num_accepted = self.num_accepts_ge(max_len, mid_seq)\n if mid_num_accepted < desired_num_accepted:\n hi = mid - 1\n elif mid_num_accepted == desired_num_accepted and self.accepts(mid_seq):\n return mid_seq\n else: # elif mid_num_accepted > desired_num_accepted:\n lo = mid + 1\n return None # There is no next sequence!",
"def prev_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:\n desired_num_accepted = self.num_accepts_ge(max_len, sequence) + 1\n hi = seq_to_num(sequence, self.inverse_alphabet, max_len)\n\n # We don't have a lower bound on where the next_accepted string is, so we look at an exponentially increasing\n # gap above low until we find at least one accepted string.\n diff = 1\n while True:\n lo = hi - diff\n if lo <= 0:\n lo = 0\n break\n lo_seq = \"\".join(num_to_seq(lo, self.alphabet, max_len))\n lo_num_accepted = self.num_accepts_ge(max_len, lo_seq)\n if lo_num_accepted > desired_num_accepted:\n break\n diff *= 2\n\n while lo <= hi:\n mid = (lo + hi) // 2\n mid_seq = \"\".join(num_to_seq(mid, self.alphabet, max_len))\n mid_num_accepted = self.num_accepts_ge(max_len, mid_seq)\n if mid_num_accepted < desired_num_accepted:\n hi = mid - 1\n elif mid_num_accepted == desired_num_accepted and self.accepts(mid_seq):\n return mid_seq\n else: # elif mid_num_accepted > desired_num_accepted:\n lo = mid + 1\n return None # There is no previous sequence!",
"def maxContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['largestContig']",
"def max_length(self):\n\t\treturn self._max_length",
"def maxLength(self, arr: List[str]) -> int:\r\n res = 0\r\n for p in powerset(arr):\r\n allChars = \"\".join(w for w in p)\r\n if len(allChars) == len(set(allChars)):\r\n res = max(res, len(allChars))\r\n return res",
"def _getMaxSize(self):\n \n # get gene list and related seqs\n geneList = map(lambda l: l.strip('\\n'), open(self.mdapArgs[1]).readlines())\n self.coRegSeqs = MDAP_defs.seqSubSet(geneList,self.mdapArgs[0])\n \n # warn user if there are missing genes\n if self.coRegSeqs[1]:\n sys.stderr.write('Warning: %s seqs in your gene list were not found in the fasta file provided.\\nA list of names follows:\\n%s\\n'\\\n % (len(self.coRegSeqs[1]),str(self.coRegSeqs[1])))\n \n # Concatonate, get and set self.maxsize\n return len(''.join(self.coRegSeqs[0].values()))\n \n \n #----------- ",
"def maxlen(self):\n \n return reduce(max, list(map(len, self.tags)))",
"def f_get_range_length(self):\n if not self.f_has_range():\n raise TypeError(\"Not applicable, parameter does not have a range\")\n elif hasattr(self, \"__len__\"):\n return len(self)\n else:\n raise NotImplementedError(\"Should have implemented this.\")",
"def sequence_length(self):\n return self.get_sequence_length()",
"def RISCC_N_non_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n return len(self.RISCC_genome_side_aligned_reads) - self.RISCC_N_confirming_seqs(max_distance)",
"def get_encoded_seq_len(self, seq_len: int) -> int:\n return int(ceil(seq_len / self.pool_stride))",
"def number_of_sequences(self):\n return self.sequence_last() + 1",
"def aln_length(self) -> int:\n return sum([l for l, _ in self])",
"def GetSequenceLength(num_nodes: int) -> int:\n return num_nodes * (3 + (num_nodes - 1) * 2)",
"def maximumORFLength(self):\n return max(len(orf) for orf in self.ORFs())",
"def get_max_length(item_list: list) -> int:\n max_value = -float('inf')\n for item in item_list:\n if len(str(item)) > max_value:\n max_value = len(str(item))\n return max_value",
"def sequence_length(self):\n return self._sequence_length",
"def max_length(lines):\n return max([len(s.split()) for s in lines])",
"def len_max(self):\n return 16 + 16 + 8 + 8 + Tools.bin_to_dec(self.get_data_size()) + Tools.bin_to_dec(self.get_verification_size())",
"def get_total_gti_length(gti, minlen=0):\n lengths = get_gti_lengths(gti)\n return np.sum(lengths[lengths >= minlen])",
"def length_aln_on_sequence(start, end):\n return end - start + 1"
]
| [
"0.7504999",
"0.65972674",
"0.6497466",
"0.6494008",
"0.6415731",
"0.63933873",
"0.6205498",
"0.6124807",
"0.59868175",
"0.5957853",
"0.5955138",
"0.5945578",
"0.59398663",
"0.59378064",
"0.5933087",
"0.59312755",
"0.59160244",
"0.59088737",
"0.59002274",
"0.5860422",
"0.5850322",
"0.5850083",
"0.58310795",
"0.581696",
"0.5802713",
"0.5799617",
"0.57930374",
"0.5790908",
"0.57806635",
"0.5771303"
]
| 0.7307848 | 1 |
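
An illustrative aside on the row above: num_accepts counts accepted sequences of length at most max_len, split by lexicographic comparison with bound, using a dynamic program over sets of NFA states. On a tiny alphabet the same (lt, eq, gt) triple can be cross-checked by brute force; the sketch below does exactly that, with a hypothetical accepts predicate standing in for the NFA.

import itertools

def brute_force_counts(alphabet, max_len, accepts, bound):
    # Enumerate every string of length <= max_len and bucket the accepted
    # ones by lexicographic comparison with bound: (less, equal, greater).
    lt = eq = gt = 0
    for n in range(max_len + 1):
        for tup in itertools.product(sorted(alphabet), repeat=n):
            s = "".join(tup)
            if not accepts(s):
                continue
            if s < bound:
                lt += 1
            elif s == bound:
                eq += 1
            else:
                gt += 1
    return lt, eq, gt

# Example: strings over {a, b} that contain at least one 'b', max length 3.
print(brute_force_counts("ab", 3, lambda s: "b" in s, "ba"))  # (5, 1, 5)
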
Returns the number of sequences >= bound with len() <= max_len accepted by this NFA. | def num_accepts_ge(self, max_len: int, bound: Sequence[Text] = ()) -> int:
_, num_accepted_eq, num_accepted_gt = self.num_accepts(max_len, bound)
return num_accepted_eq + num_accepted_gt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:\n lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1[frozenset(self.start_nodes)] = 1\n num_accepted_le = int(self.accepts(\"\"))\n num_accepted_gt = 0\n for c in itertools.islice(itertools.chain(bound, itertools.repeat(None)), 0, max_len):\n for nodes, count in lt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n lt2[next_nodes] += count\n for nodes, count in eq1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n if c is None or (element is not None and element > c):\n gt2[next_nodes] += count\n elif element == c:\n eq2[next_nodes] += count\n else:\n lt2[next_nodes] += count\n for nodes, count in gt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n gt2[next_nodes] += count\n num_accepted_le += self._sum_tables(eq2)\n num_accepted_le += self._sum_tables(lt2)\n num_accepted_gt += self._sum_tables(gt2)\n if not lt2 and not eq2 and not gt2:\n break # Exit early if we know this regex cannot accept anymore strings.\n lt1, lt2 = lt2, collections.defaultdict(int)\n eq1, eq2 = eq2, collections.defaultdict(int)\n gt1, gt2 = gt2, collections.defaultdict(int)\n num_accepted_eq = int(len(bound) <= max_len and self.accepts(bound))\n return num_accepted_le - num_accepted_eq, num_accepted_eq, num_accepted_gt",
"def length_of_sequences(self):\n return self._seq_length",
"def __len__(self):\n return self.total_num_sequences",
"def max_seq_len() -> int:\n return 8",
"def length_aln_on_sequence(start, end):\n return end - start + 1",
"def within_length(flowgram, minlength=0, maxlength=400):\r\n seq = flowgram.toSeq()\r\n l = len(seq)\r\n return (l >= minlength and l <= maxlength)",
"def __len__(self):\n return self.get_num_sequence()",
"def determine_max_length(sequences, ids):\n max_len = 0\n for i in ids:\n if len(sequences[i]) > max_len:\n max_len = len(sequences[i])\n\n return max_len",
"def number_of_sequences(self):\n return self.sequence_last() + 1",
"def max_request_length(self) -> int:\n return self.max_sequence_length",
"def max_sequence_length(self) -> int:\n return self._max_request_length",
"def GetSequenceLength(num_nodes: int) -> int:\n return num_nodes * (3 + (num_nodes - 1) * 2)",
"def __len__(self) -> int:\n return self._num_ann",
"def aln_length(self) -> int:\n return len(self)",
"def sequence_length(self):\n return self.get_sequence_length()",
"def __len__(self):\n return len(self.seq)",
"def __len__(self):\n return len(self.seq)",
"def lena(self) -> int:\n return self._core.lena()",
"def __len__(self):\n return len(self._ngrams)",
"def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens",
"def length(self):\n return len(self._sequence)",
"def test_len_seq(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n self.assertEqual(len_seq(m), 6)",
"def get_total_gti_length(gti, minlen=0):\n lengths = get_gti_lengths(gti)\n return np.sum(lengths[lengths >= minlen])",
"def next_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:\n max_hi = num_seqs_with_max_len(len(self.alphabet), max_len)\n desired_num_accepted = self.num_accepts_ge(max_len, sequence) - self.accepts(sequence)\n lo = seq_to_num(sequence, self.inverse_alphabet, max_len) + 1\n\n # We don't have an upper bound on where the next_accepted string is, so we look at an exponentially increasing\n # gap above low until we find at least one accepted string.\n diff = 1\n while True:\n hi = lo + diff\n if hi >= max_hi:\n hi = max_hi - 1\n break\n hi_seq = \"\".join(num_to_seq(hi, self.alphabet, max_len))\n hi_num_accepted = self.num_accepts_ge(max_len, hi_seq)\n if hi_num_accepted < desired_num_accepted:\n break\n lo = hi\n diff *= 2\n\n # Now we know that the string we're looking for is [lo, hi]. The upper bound is inclusive!\n while lo <= hi:\n mid = (lo + hi) // 2\n mid_seq = \"\".join(num_to_seq(mid, self.alphabet, max_len))\n mid_num_accepted = self.num_accepts_ge(max_len, mid_seq)\n if mid_num_accepted < desired_num_accepted:\n hi = mid - 1\n elif mid_num_accepted == desired_num_accepted and self.accepts(mid_seq):\n return mid_seq\n else: # elif mid_num_accepted > desired_num_accepted:\n lo = mid + 1\n return None # There is no next sequence!",
"def f_get_range_length(self):\n if not self.f_has_range():\n raise TypeError(\"Not applicable, parameter does not have a range\")\n elif hasattr(self, \"__len__\"):\n return len(self)\n else:\n raise NotImplementedError(\"Should have implemented this.\")",
"def sequence_length(self):\n return self._sequence_length",
"def RISCC_N_non_confirming_seqs(self, max_distance=MAX_POSITION_DISTANCE):\n return len(self.RISCC_genome_side_aligned_reads) - self.RISCC_N_confirming_seqs(max_distance)",
"def __len__(self):\n return len(self.sequence)",
"def __len__(self):\n return len(self.sequence)",
"def aln_length(self) -> int:\n return sum([l for l, _ in self])"
]
| [
"0.71217704",
"0.6733206",
"0.67215616",
"0.66500634",
"0.6458716",
"0.64107955",
"0.6339628",
"0.63114727",
"0.6308476",
"0.6282204",
"0.6279598",
"0.62426364",
"0.62255585",
"0.6195235",
"0.6125995",
"0.6120646",
"0.6120646",
"0.61179775",
"0.6109793",
"0.6084972",
"0.60760224",
"0.60624707",
"0.60587984",
"0.6035416",
"0.60333645",
"0.60311335",
"0.6028572",
"0.6022372",
"0.6022372",
"0.6009924"
]
| 0.74793345 | 0 |
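
The counting routine above becomes useful for search once strings of length at most max_len are mapped to integers in an order-preserving way, which is what the seq_to_num / num_to_seq calls in the next_accepted and prev_accepted snippets listed above rely on for their bisection. The sketch below shows one plausible such ranking; it is an illustration of the idea, not the library's actual encoding.

def num_strings_up_to(k: int, max_len: int) -> int:
    # Number of strings of length <= max_len over an alphabet of size k.
    return (k ** (max_len + 1) - 1) // (k - 1) if k > 1 else max_len + 1

def rank(s: str, alphabet: str, max_len: int) -> int:
    # Lexicographic rank of s among all strings of length <= max_len,
    # with shorter prefixes ordered before their extensions.
    assert len(s) <= max_len
    k = len(alphabet)
    r = 0
    remaining = max_len
    for ch in s:
        remaining -= 1
        r += 1 + alphabet.index(ch) * num_strings_up_to(k, remaining)
    return r

# For alphabet "ab" and max_len 2 the lexicographic order is:
# "", "a", "aa", "ab", "b", "ba", "bb"
assert [rank(s, "ab", 2) for s in ["", "a", "aa", "ab", "b", "ba", "bb"]] == list(range(7))
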
Returns a string representation of this NFA as a GraphViz .dot file. | def build_dot_str(self) -> Text:
s = []
s.append("digraph {")
for node in self.nodes:
label = str(node)
if node in self.start_nodes:
label += "S"
if node in self.accept_nodes:
label += "A"
s.append(f' "{node}" [label="{label}"];')
s.append("")
for from_node, transitions in self.nodes.items():
for transition, to_nodes in transitions.items():
if not transition:
transition = "ε"
for to_node in to_nodes:
s.append(f' "{from_node}" -> "{to_node}" [label="{transition}"];')
s.append("}")
return "\n".join(s) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dump_graph(self) -> str:\n graph_dot_file = f'{self._name}.dot'\n graph_diagram_file = f'{self._name}.svg'\n write_dot(self._graph, graph_dot_file)\n subprocess.check_output(\n shlex.split(f'dot -Tsvg {graph_dot_file} -o {graph_diagram_file}')\n )\n return graph_diagram_file",
"def _to_dot(self, detailed=False):\n g = ast_to_labeled_graph(self, detailed)\n import tulip.graphics as _graphics\n return _graphics.networkx_to_graphviz(g)",
"def to_dot(self, name='BDD'): # pragma: no cover\n\t\t# print(\"to_dot\")\n\t\tparts = ['graph', name, '{']\n\t\tfor node in self.dfs_postorder():\n\t\t\tif node is BDDNODEZERO:\n\t\t\t\tparts += ['n' + str(id(node)), '[label=0,shape=box];']\n\t\t\telif node is BDDNODEONE:\n\t\t\t\tparts += ['n' + str(id(node)), '[label=1,shape=box];']\n\t\t\telse:\n\t\t\t\tv = _VARS[node.root]\n\t\t\t\tparts.append('n' + str(id(node)))\n\t\t\t\tparts.append('[label=\"{}\",shape=circle];'.format(v))\n\t\tfor node in self.dfs_postorder():\n\t\t\tif node is not BDDNODEZERO and node is not BDDNODEONE:\n\t\t\t\tparts += ['n' + str(id(node)), '--',\n\t\t\t\t\t\t 'n' + str(id(node.lo)),\n\t\t\t\t\t\t '[label=0,style=dashed];']\n\t\t\t\tparts += ['n' + str(id(node)), '--',\n\t\t\t\t\t\t 'n' + str(id(node.hi)),\n\t\t\t\t\t\t '[label=1];']\n\t\tparts.append('}')\n\t\treturn \" \".join(parts)",
"def dot(self) -> str:\n dot = to_pydot(self._graph)\n return dot.to_string()",
"def generate_dot_file(self):\n dot_text = \"digraph blockchain {\"\n frontier = [self.root]\n while frontier != []:\n parent = frontier.pop(0)\n children = parent.children\n for child in children:\n frontier.append(child)\n dot_text += \"\\n\\t{c} -> {p};\".format(p='<' + str(parent.block) + '>',\n c='<' + str(child.block) + '>'\n )\n dot_text += \"\\n}\"\n with open(\"blockchain.gv\", \"w\") as writeFile:\n writeFile.write(dot_text)",
"def saveGraph (self, filename) :\n\t\tss = \"digraph {\\n\"\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\tfor rule in rules :\n\t\t\t\tr = [op.val for op in rule]\n\t\t\t\tr = [i.replace (\"-\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\".\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\"\\'\\'\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"\\\"\\\"\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"/\", \"_\") for i in r]\n\t\t\t\tk = key.replace (\"-\", \"\")\n\t\t\t\tk = k.replace (\"/\", \"_\")\n\t\t\t\tk = k.replace (\".\", \"_tok\")\n\t\t\t\tss += \"\\t\" + k + \" -> \" \n\t\t\t\tss += \" -> \".join (r)\n\t\t\t\tss += \" ;\\n\"\n\t\tss += \"}\"\n\t\tfilestream = open (filename + '.dot', 'w') \n\t\tfilestream.write(ss)\n\t\tfilestream.close ()\n\t\tcmd = 'dot -Tpng -o ' + filename + '.png ' + filename + '.dot'\n\t\tos.system (cmd)\n\t\tcmd = 'rm ' + filename + '.dot'\n\t\tos.system (cmd)",
"def write_dot(graph: Graph, f: IO[str], directed=False):\n if directed:\n f.write('digraph G {\\n')\n else:\n f.write('graph G {\\n')\n\n name = {}\n next_name = 0\n\n for v in graph:\n name[v] = next_name\n next_name += 1\n options = 'penwidth=3,'\n\n if hasattr(v, 'label'):\n options += 'label=\"' + str(v.label) + '\",'\n\n if hasattr(v, 'colour'):\n options += 'color=' + str(v.colour % NUM_COLORS) + ', colorscheme=' + DEFAULT_COLOR_SCHEME + ','\n if v.colour >= NUM_COLORS:\n options += 'style=filled,fillcolor=' + str((v.colour // NUM_COLORS) % NUM_COLORS) + ','\n if len(options) > 0:\n f.write(' ' + str(name[v]) + ' [' + options[:-1] + ']\\n')\n else:\n f.write(' ' + str(name[v]) + '\\n')\n f.write('\\n')\n\n for e in graph.edges():\n options = ','\n if len(options) > 0:\n options = '[penwidth=2]'\n if directed:\n f.write(' ' + str(name[e.tail]) + ' -> ' + str(name[e.head]) + options + '\\n')\n else:\n f.write(' ' + str(name[e.tail]) + '--' + str(name[e.head]) + options + '\\n')\n\n f.write('}')",
"def __str__(self):\n built_string = \"Graph(\"\n built_string += str(self.get_nodes())\n built_string += \", \"\n built_string += str(self.get_edges())\n built_string += \")\"\n return built_string",
"def __str__(self):\n built_string = \"Graph(\"\n built_string += str(self.get_nodes())\n built_string += \", \"\n built_string += str(self.get_edges())\n built_string += \")\"\n return built_string",
"def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))",
"def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))",
"def write_dot(graph: Graph, f: IO[str], directed=False):\n if directed:\n f.write('digraph G {\\n')\n else:\n f.write('graph G {\\n')\n\n name = {}\n next_name = 0\n for v in graph:\n name[v] = next_name\n next_name += 1\n options = 'penwidth=3,'\n if hasattr(v, 'label'):\n options += 'label=\"' + str(v.label) + '\",'\n if hasattr(v, 'colortext'):\n options += 'color=\"' + v.colortext + '\",'\n elif hasattr(v, 'colornum'):\n options += 'color=' + str(v.colornum % NUM_COLORS + 1) + ', colorscheme=' + DEFAULT_COLOR_SCHEME + ','\n if v.colornum >= NUM_COLORS:\n options += 'style=filled,fillcolor=' + str((v.colornum // NUM_COLORS) % NUM_COLORS + 1) + ','\n if len(options) > 0:\n f.write(' ' + str(name[v]) + ' [' + options[:-1] + ']\\n')\n else:\n f.write(' ' + str(name[v]) + '\\n')\n f.write('\\n')\n\n for e in graph.edges:\n options = 'penwidth=2,'\n if hasattr(e, 'weight'):\n options += 'label=\"' + str(e.weight) + '\",'\n if hasattr(e, 'colortext'):\n options += 'color=\"' + e.colortext + '\",'\n elif hasattr(e, 'colornum'):\n options += 'color=' + str(e.colornum % NUM_COLORS + 1) + ', colorscheme=' + DEFAULT_COLOR_SCHEME + ','\n if len(options) > 0:\n options = ' [' + options[:-1] + ']'\n if directed:\n f.write(' ' + str(name[e.tail]) + ' -> ' + str(name[e.head]) + options + '\\n')\n else:\n f.write(' ' + str(name[e.tail]) + '--' + str(name[e.head]) + options + '\\n')\n\n f.write('}')",
"def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")",
"def generate_graph_dot(self, dot_path):\n\n dot_str = []\n visited_cells = set()\n queue = self.sources[:]\n\n # BFS through graph\n while len(queue) > 0:\n cell = queue.pop(0)\n if cell in visited_cells:\n continue\n visited_cells.add(cell)\n\n nexts = cell.get_nexts()\n queue.extend(nexts)\n \n # Encode cell\n nexts_names = [next.name for next in nexts]\n nexts_str = \"\\t%s -> { %s }\\n\" % (cell.name, \" \".join(nexts_names))\n dot_str.append(nexts_str) \n\n # Write encoded graph to file\n dot_str = \"strict digraph {\\n\" + \"\".join(dot_str) + \"}\"\n try:\n with open(dot_path, 'w') as dot_file:\n dot_file.write(dot_str)\n except:\n raise EnvironmentError(\"Unable to open %s\" % (dot_path))",
"def __str__(self):\n return np.array2string(self.graph.toarray())",
"def __str__(self):\n out = [f'{v}: {self.adj_list[v]}' for v in self.adj_list]\n out = '\\n '.join(out)\n if len(out) < 70:\n out = out.replace('\\n ', ', ')\n return f'GRAPH: {{{out}}}'\n return f'GRAPH: {{\\n {out}}}'",
"def __str__(self):\n stringRepresentation = []\n for node in self.getNodes():\n stringRepresentation.append(\"->\".join(\n (str(node), str(self.graph[node]))))\n\n return str(stringRepresentation)",
"def dot_format(out, graph, name=\"digraph\"):\n\n out.write(\"digraph %s {\\n\" % name)\n for step, deps in each_step(graph):\n for dep in deps:\n out.write(\" \\\"%s\\\" -> \\\"%s\\\";\\n\" % (step, dep))\n\n out.write(\"}\\n\")",
"def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'",
"def __str__(self):\n s = f\"GraphViaEdges '{self.name}',\\nedges :\\n\"\n for edge, edgetype in self.edges.items():\n s += f\" {edge[0]} {edgetype.value} {edge[1]}\\n\"\n\n return s",
"def dot(self, name):\n nodes = \" \".join(\"_%s_%s;\" % (x, name) for x in self)\n edges = \" \".join(\n '_%s_%s -> _%s_%s [label=\"%.2f/%s\"];'\n % (s, name, t, name, self.get_score(s, t), self.get_label(s, t))\n for s, t in self.iteredges()\n )\n return \"digraph _%s {%s %s}\" % (name, nodes, edges)",
"def get_graph_drawing(self):\r\n graph_drawing = graphviz.Digraph(comment=\"Directed Graph\", format=\"png\")\r\n for vertex in self.get_all_vertices():\r\n graph_drawing.node(str(vertex))\r\n for _from, _to, _cost in self.get_all_edges():\r\n graph_drawing.edge(str(_from), str(_to), label=str(_cost))\r\n return graph_drawing",
"def __str__(self):\n outstr = [\"\\n<%s: %s>\" % (self.__class__, self.name)]\n outstr.append(\"%d graphs\" % len(self._graphs))\n outstr = \"\\n\".join(outstr)\n return outstr",
"def __str__(self):\n\t\treturn str(self.graph)",
"def WriteToDotFile(self, filename: str) -> None:\n with open(\"16.dot\", \"w\") as dotFile:\n print(\"digraph packets {\", file=dotFile)\n for line in self.DotRepresentation():\n print(line, file=dotFile)\n print(\"}\", file=dotFile)",
"def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s",
"def dot_string(self) -> str:\n\n ret = \"{0}[label = \\\"{1}\\\"];\\n\".format(self._index, self.label)\n ret += \"{0}--{{\".format(self._index)\n ret += ''.join([f'{child.index} ' for child in self._children])\n ret += \"}\\n\"\n self._is_drawn = True\n ret += ''.join([child.dot_string() for child in self._children])\n\n return ret",
"def __str__(self):\n out = 'Content of Graph stored in file ' + self.filename + '\\n'\n out += 'Content of headers:' + '\\n'\n out += str(self.headers)\n out += '\\nNumber of Curves: '+str(self.length())\n return out",
"def write(self, classes):\n assert isinstance(classes, list)\n dot_string = \"digraph \\\"class\\\" {\\n\"\n dot_string = self.write_node(dot_string, classes)\n dot_string = self.write_edge(dot_string, classes)\n dot_string += \"}\"\n print(dot_string)\n return dot_string",
"def create_dot(nodes, assocs, hierarchy):\n def field_names(fields):\n return ' | '.join(sorted(fields))\n out = StringIO()\n print >> out, \"digraph phemi_class_diagram {\"\n print >> out, \" node[shape=record];\"\n for clazz, fields in nodes.iteritems():\n print >> out, ' \"%s\" [label=\"{%s | %s}\"];' % (\n fullname(clazz), clazz.__name__, field_names(fields)\n )\n for edgemap in [assocs, hierarchy]:\n for clazz, edges in edgemap.iteritems():\n for edge in edges:\n print >> out, ' \"%s\" -> \"%s\" %s' % (\n fullname(clazz), fullname(edge.dst), edge.style\n )\n print >> out, \"}\"\n return out.getvalue()"
]
| [
"0.75486964",
"0.7548447",
"0.7353784",
"0.7292023",
"0.7183512",
"0.7085536",
"0.70306456",
"0.70009273",
"0.70009273",
"0.70002353",
"0.70002353",
"0.69679123",
"0.6958223",
"0.6934876",
"0.6925822",
"0.6865341",
"0.6827346",
"0.6814584",
"0.6760694",
"0.66928124",
"0.66715807",
"0.66604006",
"0.6653036",
"0.6637309",
"0.66133463",
"0.6578472",
"0.65621984",
"0.65244055",
"0.6518025",
"0.65133935"
]
| 0.7643195 | 0 |
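
build_dot_str above only returns DOT source; turning it into an image is a separate step. A minimal sketch follows, assuming the Graphviz `dot` command-line tool is installed (the same tool invoked in several of the snippets above); the nfa object and output file name are placeholders.

import subprocess

def render_dot(dot_text: str, out_path: str = "nfa.svg") -> None:
    # Pipe the DOT source to `dot` on stdin and write an SVG file.
    subprocess.run(
        ["dot", "-Tsvg", "-o", out_path],
        input=dot_text.encode("utf-8"),
        check=True,
    )

# Usage (hypothetical nfa object exposing build_dot_str):
# render_dot(nfa.build_dot_str())
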
Return a sequence that partitions the space of accepted strings. Returns a sequence, result, that is not necessarily accepted by nfa and that partitions the space of all sequences of length max_len or less, lexicographically between lo and hi, that nfa accepts, such that approximately target_ratio (to within tolerance_ratio) of those sequences are lexicographically less than result. | def find_partition_seq(
nfa: NFA,
max_len: int,
target_ratio=fractions.Fraction(1, 2),
low: Iterable[Text] = (),
high: Optional[Iterable[Text]] = None,
tolerance_ratio: float = 0.0,
) -> Tuple[Text, ...]:
max_letter = max(nfa.alphabet)
lo: int = seq_to_num(low, nfa.inverse_alphabet, max_len)
hi: int = seq_to_num((max_letter for _ in range(max_len)) if high is None else high, nfa.inverse_alphabet, max_len)
lo_num_accepts = nfa.num_accepts_ge(max_len, tuple(num_to_seq(lo, nfa.alphabet, max_len)))
hi_num_accepts = nfa.num_accepts_ge(max_len, tuple(num_to_seq(hi, nfa.alphabet, max_len)))
total = lo_num_accepts - hi_num_accepts
assert total >= 0
target = lo_num_accepts - int(total * target_ratio)
try:
tolerance = int(total * tolerance_ratio)
except OverflowError:
tolerance = int(total * fractions.Fraction(tolerance_ratio))
for j in itertools.cycle((0, 1)):
if j == 0 and hi_num_accepts != lo_num_accepts:
mid: int = lo + (hi - lo) * (lo_num_accepts - target) // (lo_num_accepts - hi_num_accepts)
assert mid >= lo
assert mid <= hi
else:
mid = (lo + hi) // 2
mid_str = tuple(num_to_seq(mid, nfa.alphabet, max_len))
mid_num_accepts = nfa.num_accepts_ge(max_len, mid_str)
if lo >= hi or mid_num_accepts == target or abs(lo_num_accepts - hi_num_accepts) <= tolerance:
break
elif mid_num_accepts < target:
hi = mid + 1
hi_num_accepts = mid_num_accepts
elif mid_num_accepts > target:
lo = mid
lo_num_accepts = mid_num_accepts
return tuple(num_to_seq(mid, nfa.alphabet, max_len)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:\n max_hi = num_seqs_with_max_len(len(self.alphabet), max_len)\n desired_num_accepted = self.num_accepts_ge(max_len, sequence) - self.accepts(sequence)\n lo = seq_to_num(sequence, self.inverse_alphabet, max_len) + 1\n\n # We don't have an upper bound on where the next_accepted string is, so we look at an exponentially increasing\n # gap above low until we find at least one accepted string.\n diff = 1\n while True:\n hi = lo + diff\n if hi >= max_hi:\n hi = max_hi - 1\n break\n hi_seq = \"\".join(num_to_seq(hi, self.alphabet, max_len))\n hi_num_accepted = self.num_accepts_ge(max_len, hi_seq)\n if hi_num_accepted < desired_num_accepted:\n break\n lo = hi\n diff *= 2\n\n # Now we know that the string we're looking for is [lo, hi]. The upper bound is inclusive!\n while lo <= hi:\n mid = (lo + hi) // 2\n mid_seq = \"\".join(num_to_seq(mid, self.alphabet, max_len))\n mid_num_accepted = self.num_accepts_ge(max_len, mid_seq)\n if mid_num_accepted < desired_num_accepted:\n hi = mid - 1\n elif mid_num_accepted == desired_num_accepted and self.accepts(mid_seq):\n return mid_seq\n else: # elif mid_num_accepted > desired_num_accepted:\n lo = mid + 1\n return None # There is no next sequence!",
"def prev_accepted(self, sequence: Sequence[Text], max_len: int) -> Optional[Text]:\n desired_num_accepted = self.num_accepts_ge(max_len, sequence) + 1\n hi = seq_to_num(sequence, self.inverse_alphabet, max_len)\n\n # We don't have a lower bound on where the next_accepted string is, so we look at an exponentially increasing\n # gap above low until we find at least one accepted string.\n diff = 1\n while True:\n lo = hi - diff\n if lo <= 0:\n lo = 0\n break\n lo_seq = \"\".join(num_to_seq(lo, self.alphabet, max_len))\n lo_num_accepted = self.num_accepts_ge(max_len, lo_seq)\n if lo_num_accepted > desired_num_accepted:\n break\n diff *= 2\n\n while lo <= hi:\n mid = (lo + hi) // 2\n mid_seq = \"\".join(num_to_seq(mid, self.alphabet, max_len))\n mid_num_accepted = self.num_accepts_ge(max_len, mid_seq)\n if mid_num_accepted < desired_num_accepted:\n hi = mid - 1\n elif mid_num_accepted == desired_num_accepted and self.accepts(mid_seq):\n return mid_seq\n else: # elif mid_num_accepted > desired_num_accepted:\n lo = mid + 1\n return None # There is no previous sequence!",
"def shrink_seq(mrnaseq, mrna_frag, mrna_frag_target, total_length=50):\n # Prepare sequences with no gaps\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n if len(mrna_frag_nogap) < total_length:\n syserr(mrna_frag_nogap)\n syserr(mrnaseq)\n syserr(mrna_frag)\n syserr(mrna_frag_target)\n raise Exception(\n \"Check your sequences maybe you should extend, not shrink them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean(\n [i for i, x in enumerate(mrna_frag_target) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag_target))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the shrinkage of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length < total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = abs(total_length - length)\n quot = dif // 2 # this is explicit integer division\n l_ext = li + quot\n u_ext = ui - (dif - quot)\n if (u_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # trim left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li + dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li:ui - dif]",
"def get_sequence_slices(df, target_seq, model_context_len, start_idx=1, scoring_window=\"optimal\", indel_mode=False):\n len_target_seq = len(target_seq)\n num_mutants = len(df['mutated_sequence'])\n df=df.reset_index(drop=True)\n if scoring_window==\"optimal\":\n df['mutation_barycenter'] = df['mutant'].apply(lambda x: int(np.array([int(mutation[1:-1]) - start_idx for mutation in x.split(':')]).mean())) if not indel_mode else df['mutated_sequence'].apply(lambda x: len(x)//2)\n df['scoring_optimal_window'] = df['mutation_barycenter'].apply(lambda x: get_optimal_window(x, len_target_seq, model_context_len)) if not indel_mode else df['mutated_sequence'].apply(lambda x: (0,len(x)))\n df['sliced_mutated_sequence'] = [df['mutated_sequence'][index][df['scoring_optimal_window'][index][0]:df['scoring_optimal_window'][index][1]] for index in range(num_mutants)]\n df['window_start'] = df['scoring_optimal_window'].map(lambda x: x[0]) \n df['window_end'] = df['scoring_optimal_window'].map(lambda x: x[1])\n del df['scoring_optimal_window'], df['mutation_barycenter']\n if 'mutant' in df: del df['mutant']\n df_wt=df.copy()\n df_wt['mutated_sequence'] = [target_seq] * num_mutants\n if indel_mode: # For indels, we set the wild type reference to be always the same (full length) sequence. We assume here that the length is lower than model context size (otherwise \"Sliding\" mode should be used)\n df_wt['window_end'] = df_wt['mutated_sequence'].map(lambda x:len(x))\n df_wt['sliced_mutated_sequence'] = [target_seq[df_wt['window_start'][index]:df_wt['window_end'][index]] for index in range(num_mutants)]\n df = pd.concat([df,df_wt], axis=0)\n df = df.drop_duplicates()\n elif scoring_window==\"sliding\":\n num_windows = 1 + int( len_target_seq / model_context_len)\n df_list=[]\n start=0\n for window_index in range(1, num_windows+1):\n df_sliced = df.copy()\n df_sliced['sliced_mutated_sequence'] = df_sliced['mutated_sequence'].map(lambda x: x[start:start+model_context_len]) \n df_sliced['window_start'] = [start] * num_mutants \n df_sliced['window_end'] = df_sliced['mutated_sequence'].map(lambda x: min(len(x), start+model_context_len)) \n df_sliced_wt = df_sliced.copy()\n df_sliced_wt['mutated_sequence'] = [target_seq] * num_mutants\n df_sliced_wt['sliced_mutated_sequence'] = df_sliced_wt['mutated_sequence'].map(lambda x: x[start:start+model_context_len])\n df_sliced_wt['window_end'] = df_sliced_wt['mutated_sequence'].map(lambda x: min(len(x), start+model_context_len)) #Need to adjust end index if WT and sequence are not same full length\n df_list.append(df_sliced)\n df_list.append(df_sliced_wt)\n start += model_context_len\n df_final = pd.concat(df_list,axis=0)\n if 'mutant' in df_final: del df_final['mutant']\n df = df_final.drop_duplicates()\n return df.reset_index(drop=True)",
"def rgenerate(allele1, allele2, min_length, max_length):\n from .models import Primer\n\n if len(allele1) != len(allele2):\n raise ValueError('Aligned alleles must be the same length.')\n\n allele1 = str(allele1)\n allele2 = str(allele2)\n candidates = []\n\n for size in range(min_length, max_length+1):\n allele1_span = (0, size)\n allele2_span = (0, size)\n\n for i in range(len(allele1)-size):\n if allele1[i:i+size] == allele2[i:i+size]:\n candidates.append(Primer(sequence=allele1[i:i+size],\n allele1_span=allele1_span,\n allele2_span=allele2_span,\n strand=1))\n\n if allele1[i] != '-':\n allele1_span = (allele1_span[0]+1, allele1_span[1]+1)\n\n if allele2[i] != '-':\n allele2_span = (allele2_span[0]+1, allele2_span[1]+1)\n\n return candidates",
"def inferSpaces(s):\n\n # Find the best match for the i first characters, assuming cost has\n # been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i - maxword):i]))\n return min((c + wordcost.get(s[i - k - 1:i], 9e999), k + 1) for k, c in candidates)\n\n # Build the cost array.\n cost = [0]\n for i in range(1, len(s) + 1):\n c, k = best_match(i)\n cost.append(c)\n\n # Backtrack to recover the minimal-cost string.\n out = []\n i = len(s)\n while i > 0:\n c, k = best_match(i)\n assert c == cost[i]\n out.append(s[i - k:i])\n i -= k\n\n return formatSymbols(\" \".join(reversed(out)))\n #return list(reversed(out))",
"def abbreviate_target_ids(arr):\r\n split_keys = [tuple(a.split('.')) for a in arr]\r\n\r\n split_keys_by_subseq = {}\r\n\r\n def subseq_map(arr, subseq_fn=None, result_cmp_fn=None):\r\n def subseq_map_rec(remaining_arr, subseq, indent=''):\r\n if not remaining_arr:\r\n if subseq_fn:\r\n subseq_fn(arr, subseq)\r\n return subseq\r\n\r\n next_segment = remaining_arr.pop()\r\n next_subseq = tuple([next_segment] + list(subseq))\r\n\r\n skip_value = subseq_map_rec(remaining_arr, subseq, indent + '\\t')\r\n\r\n add_value = subseq_map_rec(remaining_arr, next_subseq, indent + '\\t')\r\n\r\n remaining_arr.append(next_segment)\r\n\r\n if result_cmp_fn:\r\n if not subseq:\r\n # Empty subsequence should always lose.\r\n return add_value\r\n if result_cmp_fn(skip_value, add_value):\r\n return skip_value\r\n return add_value\r\n\r\n return None\r\n\r\n val = subseq_map_rec(list(arr), tuple())\r\n return val\r\n\r\n def add_subseq(arr, subseq):\r\n if subseq not in split_keys_by_subseq:\r\n split_keys_by_subseq[subseq] = set()\r\n if split_key not in split_keys_by_subseq[subseq]:\r\n split_keys_by_subseq[subseq].add(arr)\r\n\r\n for split_key in split_keys:\r\n subseq_map(split_key, add_subseq)\r\n\r\n def return_min_subseqs(subseq1, subseq2):\r\n collisions1 = split_keys_by_subseq[subseq1]\r\n collisions2 = split_keys_by_subseq[subseq2]\r\n return (len(collisions1) < len(collisions2)\r\n or (len(collisions1) == len(collisions2)\r\n and len(subseq1) <= len(subseq2)))\r\n\r\n min_subseq_by_key = {}\r\n\r\n for split_key in split_keys:\r\n min_subseq = subseq_map(split_key, result_cmp_fn=return_min_subseqs)\r\n if not min_subseq:\r\n raise Exception(\"No min subseq found for %s: %s\" % (str(split_key), str(min_subseq)))\r\n min_subseq_by_key['.'.join(str(segment) for segment in split_key)] = '.'.join(min_subseq)\r\n\r\n return min_subseq_by_key",
"def get_strings(max_len: int = 0, limit: int = 0) -> Generator:\n return _limit_helper(_fuzzdb_get_strings(max_len), limit)",
"def solve(chars, length):\n return generate_greedy(generate_string_list(length, chars))",
"def test_quality_filter_sequence_fail_w_N(self):\r\n\r\n # 'N' in sequence causes failure\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n expected = (2,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\")\r\n self.assertEqual(actual, expected)\r\n\r\n # increasing max N rescues sequence\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=1,\r\n filter_bad_illumina_qual_digit=True)\r\n\r\n expected = (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\")\r\n self.assertEqual(actual, expected)\r\n\r\n # truncation of N rescues sequence (sequence is truncated when\r\n # the quality hits B, and the truncated sequence is above the\r\n # length threshold and no longer contains an N)\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTN\"\r\n quality = \\\r\n _ascii_to_phred64(\"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^B`\")\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=50,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n\r\n expected = (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTG\",\r\n _ascii_to_phred64(\"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^\"))\r\n np.testing.assert_equal(actual, expected)",
"def palindrome_search(sequence, min_len, max_len, alphabet, prob_cutoff=None):\n # get the sequence complement\n trans_table = str.maketrans('ACGT', 'TGCA')\n seq_complement = sequence.translate(trans_table)\n # gets the base composition\n nucs = base_stats(sequence, alphabet, False, True)\n # define maches bases\n matches = ['AT', 'TA', 'GC', 'CG']\n # probability of a match according tho the background\n p_match = 0\n # iterates tohrough the bases matches\n for b in matches:\n # calculate the probabilities\n p_match += nucs[b[0]] * nucs[b[1]]\n # checks if the results matches\n assert p_match == sum([nucs[b[0]] * nucs[b[1]] for b in matches])\n # initialize the container of possible probability using length and mismatches\n # as the indexes\n prob_dict = defaultdict(float)\n # iterates through the range of lengths\n for length in range(min_len, max_len):\n # iterates throught the half of the sequence\n for mismatches in range(0, (length // 2) + 1):\n # get the probabilities and number the mismatches\n p = probability(length, mismatches, p_match)\n prob_dict[(length, mismatches)] = prob_dict.get((length, mismatches), 0.0) + p\n # create an container for the results\n palindromes = []\n # iterates through the range of lengths\n for length in range(min_len, max_len):\n # defined mismatch threshold\n half_l = length // 2\n mismatches_cutoff = 0.5 * half_l\n half_list = range(half_l)\n # iterates throught to find the starts\n for start in range(0, (len(sequence) - length + 1)):\n # gets the putative palindromes\n seq = sequence[start:start + length]\n # gets the complement\n seq_comp = seq_complement[start:start + length]\n mismatches = 0\n # iterates throught the half lengths\n for i in half_list:\n # check for mismatches and increment the counts\n if seq[i] != seq_comp[-i - 1]:\n mismatches += 1\n # check if the number of mismatches is allowed\n if mismatches <= mismatches_cutoff:\n # look up the probability,\n pr = prob_dict[(length, mismatches)]\n # if it passes the cutoff\n if pr <= prob_cutoff:\n # add the results into the container\n # count the number of the palindrome in the sequence\n cnt_pal = get_pattern_count(sequence, seq)\n palindromes += [[length, start, pr, mismatches, cnt_pal, seq]]\n return palindromes",
"def get_exhaustive_text_correction_proposal(self, input_text):\n arr = []\n self.complexity=20\n prev = self.alpha\n self.alpha = 0.95 # temporary\n arr_i = 0\n\n with torch.no_grad():\n for text_chunk in tqdm(self._string_to_chunks(input_text)):\n self.oryginal_input_text = text_chunk\n self.input_text = text_chunk\n self._compute_exhaustive_outputs()\n\n for ix in range(self.input_size):\n token_id = self.input_ids[0][ix]\n token_obj = {}\n token_obj[\"name\"] = self.tokenizer.decode(token_id.item())\n token_obj[\"probability\"] = self.normalized_token_prob[ix]\n token_obj[\"oddballness\"] = self._get_oddballness_proba(token_obj[\"probability\"], self.probs[ix],\n alpha=self.alpha).item()\n arr.append(token_obj)\n\n self.input_text = self.oryginal_input_text\n self._compute_outputs()\n for ix in range(self.input_size):\n self.sorted_probs, self.sorted_indices = torch.sort(self.probs[ix - 1], descending=True)\n _, correction_indices = self._get_best_tokens(5)\n\n arr[arr_i + ix][\"corrections\"] = [self.tokenizer.decode(token_id.item()) for token_id in correction_indices]\n arr_i += self.input_size\n\n arr.pop()\n arr.pop(0)\n self._trim_bpe_space_artifact(arr)\n self.token_array = arr\n self.alpha = prev # temporary\n return arr",
"def _get_sharded_ranges(\n begin,\n end,\n max_length,\n):\n if max_length <= 0:\n raise ValueError(\"max_length <= 0.\")\n length = end - begin\n if length <= max_length:\n return [(begin, end)]\n pivot = begin + length // 2\n return (_get_sharded_ranges(begin, pivot, max_length) +\n _get_sharded_ranges(pivot, end, max_length))",
"def infer_spaces(s):\n\n\t# Find the best match for the i first characters, assuming cost has\n\t# been built for the i-1 first characters.\n\t# Returns a pair (match_cost, match_length).\n\tdef best_match(i):\n\t\tcandidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n\t\treturn min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n\t# Build the cost array.\n\tcost = [0]\n\tfor i in range(1,len(s)+1):\n\t\tc,k = best_match(i)\n\t\tcost.append(c)\n\n\t# Backtrack to recover the minimal-cost string.\n\tout = []\n\ti = len(s)\n\twhile i>0:\n\t\tc,k = best_match(i)\n\t\tassert c == cost[i]\n\t\tout.append(s[i-k:i])\n\t\ti -= k\n\n\treturn \" \".join(reversed(out))",
"def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define 2x2 matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 0:\n result_str1 += '-'\n result_str2 += string2[j]\n j -=1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]",
"def search_trimmers(seq: str) -> str:\n return [seq[i:i+3] for i in range(len(seq)-2)]",
"def gen_passage(ngrams, start=None, min_length=100, max_sentence_length=10):\n counter = 0\n strn =\"\"\n while counter < max_sentence_length:\n if start == None:\n start = str(random.choice((list(ngrams.keys()))))\n k = random.choice(ngrams[start])\n strn += str.capitalize(start) +\" \" + \" \".join(k)+\" \"\n #last token/word of selected sequence is the new start token IFF it is a KEY!\n for i in range(min_length):\n start = k[-1]\n if start not in ngrams.keys() and start:\n if \".\" in start:\n start = None\n break\n else:\n strn+=\". \"\n start = None\n #make sure this completely breaks out...\n break\n else:\n k = random.choice(ngrams[start])\n strn+= \" \".join(k)\n counter+=1\n print(strn)\n return strn",
"def rabin_partition(longstr, avgchunk=16, minchunk=8, maxchunk=64, windowsize=16, windowslide=1):\n # ensure inputs meet minimum requirements\n from types import StringType\n assert type(longstr) is StringType # *longstr* must be a string\n assert len(longstr) >= windowsize # *windowsize* must be less than length of *longstr*\n assert windowsize >= windowslide # *windowsize* should be greater than sliding interval \n assert windowsize >= minchunk # *windowsize* should be greater than minimum chunk size\n assert avgchunk%2==0 # *avgchunk* must be a multiple of 2\n \n # limit range of min/max chunksize relative to avg chunk size\n minfactor,maxfactor = 1/10.,10.\n if float(minchunk)/avgchunk < minfactor: minchunk = int(minfactor*avgchunk)\n if float(maxchunk)/avgchunk > maxfactor: maxchunk = int(maxfactor*avgchunk)\n \n # base params\n len_longstr = len(longstr)\n ix_last = -1\n ix_breaks = []\n \n # review each sliding window, and based on hash modulo, determine if it is a chunk edge\n for ix in xrange(0, len_longstr - windowsize + windowslide, windowslide):\n # limit min chunk size \n # key optimization: ~15% speed-up by skipping review of windows below min chunk size\n if ix < ix_last + minchunk: \n continue\n\n # limit max chunk size\n if ix >= ix_last + maxchunk:\n ix_last = ix\n ix_breaks.append(ix)\n continue\n \n # calculate hash of local window and check if it's determine hash\n window = longstr[ix : min(ix+windowsize, len_longstr)]\n if hash(window) % avgchunk == 0:\n ix_breaks.append(ix) \n ix_last = ix\n \n # export longstr, segmented into chunks, based on Rabin fingerprint\n ix_breaks = [0] + ix_breaks + [len_longstr]\n return [longstr[i:j] for i,j in zip(ix_breaks[:-1],ix_breaks[1:])]",
"def get_split(text, maxlen=200, overlap=50):\n l_total = []\n l_partial = []\n den = maxlen - overlap\n if len(text.split()) // den > 0:\n n = len(text.split()) // den\n else:\n n = 1\n for w in range(n):\n if w == 0:\n l_partial = text.split()[:maxlen]\n l_total.append(\" \".join(l_partial))\n else:\n l_partial = text.split()[w * den : w * den + maxlen]\n l_total.append(\" \".join(l_partial))\n return l_total",
"def convert_single_example(text_a, text_b, max_seq_length, tokenize_fn):\n tokens_a = tokenize_fn(text_a)\n tokens_b = None\n if text_b:\n tokens_b = tokenize_fn(text_b)\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for two [SEP] & one [CLS] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for one [SEP] & one [CLS] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:max_seq_length - 2]\n\n tokens = []\n segment_ids = []\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(SEG_ID_A)\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_A)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(SEG_ID_B)\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_B)\n\n tokens.append(CLS_ID)\n segment_ids.append(SEG_ID_CLS)\n\n input_ids = tokens\n\n # The mask has 0 for real tokens and 1 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n if len(input_ids) < max_seq_length:\n delta_len = max_seq_length - len(input_ids)\n input_ids = [0] * delta_len + input_ids\n input_mask = [1] * delta_len + input_mask\n segment_ids = [SEG_ID_PAD] * delta_len + segment_ids\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n return (input_ids, input_mask, segment_ids)",
"def process_text(text: str, max_length: int, pad: bool = True) -> Tuple[List[str], List[str]]:\n input_unigrams = [c for c in text]\n text_bigrams = DatasetLSTM.compute_bigrams(text)\n input_bigrams = [c for c in text_bigrams]\n # cut to max len\n input_unigrams = input_unigrams[:max_length]\n input_bigrams = input_bigrams[:max_length]\n # pad sequences\n if pad and len(input_unigrams) < max_length:\n input_unigrams += [\"<PAD>\"] * (max_length - len(input_unigrams))\n if pad and len(input_bigrams) < max_length:\n input_bigrams += [\"<PAD>\"] * (max_length - len(input_bigrams))\n return input_unigrams, input_bigrams",
"def get_kmers_from_sequence(sequence, kmin, kmax):\n limits = range(kmin, kmax + 1)\n seq_range = len(sequence) - kmax + 1\n for i in range(0, seq_range):\n for j in limits:\n yield sequence[i:i + j]",
"def seq_to_list(s, max_length):\n t_str = s.lower()\n t_str = t_str.replace('-', ' ').replace('/', ' ')\n t_str = t_str.translate(str.maketrans('', '', string.punctuation))\n q_list = t_str.strip().split()\n return q_list[:max_length]",
"def find_best_answer_for_passage(start_probs, end_probs, passage_len=None, max_a_len=None):\n if passage_len is None:\n passage_len = len(start_probs)\n else:\n passage_len = min(len(start_probs), passage_len)\n best_start, best_end, max_prob = -1, -1, 0\n # 从头扫描passage\n for start_idx in range(passage_len):\n for ans_len in range(max_a_len):\n end_idx = start_idx + ans_len\n if end_idx >= passage_len:\n continue\n prob = start_probs[start_idx] * end_probs[end_idx]\n if prob > max_prob:\n best_start = start_idx\n best_end = end_idx\n max_prob = prob\n return [best_start, best_end], max_prob",
"def extract_phrase(self, src_text, tgt_text, alignment, max_phrase_len=0):\n def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len):\n \"\"\"Extract a set of possible phrase given the source, language ranges.\n\n \"\"\"\n # print(\"rages\", tgt_start, tgt_end, src_start, src_end)\n if tgt_end < 0:\n return \n # If `src_align_idx` out of the `src_start` and `src_target`.\n for src_align_idx, tgt_align_idx in alignment:\n # target align point\n # sorce align point out of range\n if ((tgt_start <= tgt_align_idx <= tgt_end) and \n (src_align_idx < src_start or src_align_idx > src_end)): \n return\n phrase_set = set()\n ts = tgt_start # For increment\n while True:\n te = min(tgt_end, ts+max_phrase_len-1) # For decrement\n # te = tgt_end \n while True:\n # Add phrase pair (src_start, src_end, tgt_start, tgt_end)\n src_phrase = \" \".join(src_sent[i] for i in range(src_start,src_end+1))\n tgt_phrase = \" \".join(tgt_sent[i] for i in range(ts,te+1))\n phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase))\n te+= 1\n # Add phrase until `te` aligned or out of range\n if te in tgt_aligned or te == tgt_len:\n break\n ts-=1\n # Add phrase until `te` aligned or out of range\n if ts in tgt_aligned or ts < 0:\n break\n \n return phrase_set\n\n # List of words\n src_sent = src_text.split()\n tgt_sent = tgt_text.split()\n \n # Set ot collect hrases\n phrase_set = set()\n \n # Length of sentences \n src_len = len(src_sent)\n tgt_len = len(tgt_sent)\n\n # Target language's align points\n tgt_aligned = [tgt_idx for _,tgt_idx in alignment ]\n max_phrase_len = max_phrase_len or max(src_len, tgt_len)\n\n\n ### Extraction ##### \n # Two steps:\n # (1) Loop all possible soruce language phrases matching minimal target language phrases\n # (2) By finding shortest target language phrases that includes \n # all the foreign counterparts for the source words.\n #\n ### Extraction #####\n # Go over each source substring starting from begin \n for src_start in range(src_len):\n # Set maximal length for phrase length \n max_idx = min(src_len, src_start+max_phrase_len)\n for src_end in range(src_start, max_idx):\n # print('src_start, end', src_start, src_end)\n # Find the minimal matching of foreign phrase\n tgt_start, tgt_end = tgt_len-1, -1\n for src_align_idx, tgt_align_idx in alignment:\n # print('alignment', src_align_idx, tgt_align_idx)\n # Length of phrase is greater or equal to one\n if src_start <= src_align_idx <= src_end:\n # print(tgt_align_idx, tgt_start, tgt_end)\n # Longest substring in target langage phrase\n tgt_start = min(tgt_align_idx, tgt_start)\n tgt_end = max(tgt_align_idx, tgt_end)\n # print(tgt_start, tgt_end, end='\\n\\n')\n # print(src_start, src_end)\n # print(tgt_start, tgt_end, end='\\n\\n')\n # Extract a set of phrases \n phrase = extract_from_range(tgt_start, tgt_end, src_start, src_end,max_phrase_len)\n if phrase:\n phrase_set.update(phrase)\n\n\n return phrase_set",
"def optimal_string_alignment_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(optimal_string_alignment_distance(s1, s2)) / max_cost",
"def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:\n lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1[frozenset(self.start_nodes)] = 1\n num_accepted_le = int(self.accepts(\"\"))\n num_accepted_gt = 0\n for c in itertools.islice(itertools.chain(bound, itertools.repeat(None)), 0, max_len):\n for nodes, count in lt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n lt2[next_nodes] += count\n for nodes, count in eq1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n if c is None or (element is not None and element > c):\n gt2[next_nodes] += count\n elif element == c:\n eq2[next_nodes] += count\n else:\n lt2[next_nodes] += count\n for nodes, count in gt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n gt2[next_nodes] += count\n num_accepted_le += self._sum_tables(eq2)\n num_accepted_le += self._sum_tables(lt2)\n num_accepted_gt += self._sum_tables(gt2)\n if not lt2 and not eq2 and not gt2:\n break # Exit early if we know this regex cannot accept anymore strings.\n lt1, lt2 = lt2, collections.defaultdict(int)\n eq1, eq2 = eq2, collections.defaultdict(int)\n gt1, gt2 = gt2, collections.defaultdict(int)\n num_accepted_eq = int(len(bound) <= max_len and self.accepts(bound))\n return num_accepted_le - num_accepted_eq, num_accepted_eq, num_accepted_gt",
"def partition_text(text):\n if len(text) < 3500:\n yield text\n else:\n text_list = text.split('\\n')\n l = 0 # length iterator of current block\n i = 0 # start position of block\n j = 0 # end position of block\n\n # j scans through list of lines from start position i l tracks length\n # of all characters in the current scan If length of everything from i\n # to j+1 > the limit, yield current block, joined into single string,\n # and shift the scanning position up to the start of the new block.\n for m in text_list:\n l += len(m)\n try:\n # if adding another line will breach the limit,\n # yield current block\n if l+len(text_list[j+1]) > 3500:\n indices = [i, j]\n yield '\\n'.join(\n [msg for k, msg in enumerate(text_list)\n if k in indices])\n # shift start position for the next block\n i = j+1\n l = 0\n j += 1\n except IndexError:\n yield text_list[i]",
"def generate_diverse_beam(\n self, src_enc, src_len, tgt_lang_id, beam_size, length_penalty, early_stopping,\n num_groups, diversity_strength, max_len=200, nbest=None, sample_temperature=None\n):\n\n # check inputs\n assert src_enc.size(0) == src_len.size(0)\n assert beam_size >= 1\n assert beam_size % num_groups == 0\n\n # batch size / number of words\n bs = len(src_len)\n n_words = self.n_words\n\n # expand to beam size the source latent representations / source lengths\n src_enc = src_enc.unsqueeze(1).expand((bs, beam_size) + src_enc.shape[1:]).contiguous().view(\n (bs * beam_size,) + src_enc.shape[1:])\n src_len = src_len.unsqueeze(1).expand(bs, beam_size).contiguous().view(-1)\n\n # generated sentences (batch with beam current hypotheses)\n generated = src_len.new(max_len, bs * beam_size) # upcoming output\n generated.fill_(self.pad_index) # fill upcoming ouput with <PAD>\n generated[0].fill_(self.eos_index) # we use <EOS> for <BOS> everywhere\n\n # generated hypotheses\n generated_hyps = [BeamHypotheses(beam_size, max_len, length_penalty, early_stopping) for _ in range(bs)]\n\n # positions\n positions = src_len.new(max_len).long()\n positions = torch.arange(max_len, out=positions).unsqueeze(1).expand_as(generated)\n\n # language IDs\n langs = positions.clone().fill_(tgt_lang_id)\n\n # scores for each sentence in the beam\n beam_scores = src_enc.new(bs, beam_size).fill_(0)\n beam_scores[:, 1:] = torch.tensor(-1e9).type_as(beam_scores)\n beam_scores = beam_scores.view(-1)\n\n # current position\n cur_len = 1\n\n # cache compute states\n cache = {'slen': 0}\n\n # done sentences\n done = [False for _ in range(bs)]\n\n while cur_len < max_len:\n\n # compute word scores\n tensor = self.forward(\n 'fwd',\n x=generated[:cur_len],\n lengths=src_len.new(bs * beam_size).fill_(cur_len),\n positions=positions[:cur_len],\n langs=langs[:cur_len],\n causal=True,\n src_enc=src_enc,\n src_len=src_len,\n cache=cache\n )\n assert tensor.size() == (1, bs * beam_size, self.dim)\n tensor = tensor.data[-1, :, :] # (bs * beam_size, dim)\n scores = self.pred_layer.get_scores(tensor) # (bs * beam_size, n_words)\n\n scores = F.log_softmax(scores, dim=-1) # (bs * beam_size, n_words)\n assert scores.size() == (bs * beam_size, n_words)\n\n assert sample_temperature is None or sample_temperature == 1.0, 'sample_temperature={} not support'.format(\n sample_temperature)\n\n # select next words with scores\n _scores = scores + beam_scores[:, None].expand_as(scores) # (bs * beam_size, n_words)\n _scores = _scores.view(bs, beam_size * n_words) # (bs, beam_size * n_words)\n\n next_scores, next_words = torch.topk(_scores, 2 * beam_size, dim=1, largest=True, sorted=True)\n assert next_scores.size() == next_words.size() == (bs, 2 * beam_size)\n\n # next batch beam content\n # list of (bs * beam_size) tuple(next hypothesis score, next word, current position in the batch)\n next_batch_beam = []\n\n # for each sentence\n for sent_id in range(bs):\n\n # if we are done with this sentence\n done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(next_scores[sent_id].max().item())\n if done[sent_id]:\n next_batch_beam.extend([(0, self.pad_index, 0)] * beam_size) # pad the batch\n continue\n\n # next sentence beam content\n next_sent_beam = []\n\n # next words for this sentence\n for idx, value in zip(next_words[sent_id], next_scores[sent_id]):\n\n # get beam and word IDs\n beam_id = idx // n_words\n word_id = idx % n_words\n\n # end of sentence, or next word\n if word_id == self.eos_index or cur_len + 1 == max_len:\n 
generated_hyps[sent_id].add(generated[:cur_len, sent_id * beam_size + beam_id].clone(),\n value.item())\n else:\n next_sent_beam.append((value, word_id, sent_id * beam_size + beam_id))\n\n # the beam for next step is full\n if len(next_sent_beam) == beam_size:\n break\n\n # update next beam content\n assert len(next_sent_beam) == 0 if cur_len + 1 == max_len else beam_size\n if len(next_sent_beam) == 0:\n next_sent_beam = [(0, self.pad_index, 0)] * beam_size # pad the batch\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == beam_size * (sent_id + 1)\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == bs * beam_size\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_words = generated.new([x[1] for x in next_batch_beam])\n beam_idx = src_len.new([x[2] for x in next_batch_beam])\n\n # re-order batch and internal states\n generated = generated[:, beam_idx]\n generated[cur_len] = beam_words\n for k in cache.keys():\n if k != 'slen':\n cache[k] = (cache[k][0][beam_idx], cache[k][1][beam_idx])\n\n # update current length\n cur_len = cur_len + 1\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n return compute_final_decoded(generated_hyps, bs, src_len, self.pad_index, self.eos_index, beam_size, nbest)",
"def test_sentence_output(self, words, max_overlap_ratio, max_overlap_total):\n # Reject large chunks of similarity\n overlap_ratio = round(max_overlap_ratio * len(words))\n overlap_max = min(max_overlap_total, overlap_ratio)\n overlap_over = overlap_max + 1\n gram_count = max((len(words) - overlap_max), 1)\n grams = [words[i : i + overlap_over] for i in range(gram_count)]\n for g in grams:\n gram_joined = self.word_join(g)\n if gram_joined in self.rejoined_text:\n return False\n return True"
]
| [
"0.5758776",
"0.54950756",
"0.5426463",
"0.53324264",
"0.5252544",
"0.51077604",
"0.5059399",
"0.5043758",
"0.5004417",
"0.4975387",
"0.49677187",
"0.49565747",
"0.49563187",
"0.49299663",
"0.48925182",
"0.4888343",
"0.48725554",
"0.48663878",
"0.48532745",
"0.48340026",
"0.48289838",
"0.4818096",
"0.48105198",
"0.4797679",
"0.47904426",
"0.4780943",
"0.47782513",
"0.47759348",
"0.47747833",
"0.47636375"
]
| 0.79418826 | 0 |
Create should fail with a StratisCliNameConflictError when trying to create a new pool with the same devices and the same name as a previous pool. | def test_create_same_devices(self):
command_line = self._MENU + [self._POOLNAME] + self.devices
self.check_error(StratisCliNameConflictError, command_line, _ERROR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_different_devices(self):\n command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME_2] + self._DEVICES\n self.check_error(StratisCliInUseSameTierError, command_line, _ERROR)",
"def pool_create(self, pool_name):\n self.core.api.os.shell.cmd('{0} add apppool /name:\"{1}\"'.format(\n self.APP_CMD, pool_name\n ))",
"def create_device_pool(pool_name, project_arn):\n\n new_device_pool = device_farm.create_device_pool(\n projectArn=project_arn,\n name=pool_name,\n description='it is edX device pool',\n maxDevices=1,\n rules=[\n {\n \"attribute\": \"PLATFORM\",\n \"operator\": \"EQUALS\",\n \"value\": '\"ANDROID\"'\n },\n {\n \"attribute\": \"OS_VERSION\",\n \"operator\": \"GREATER_THAN_OR_EQUALS\",\n \"value\": '\"9\"'\n },\n {\n \"attribute\": \"MANUFACTURER\",\n \"operator\": \"EQUALS\",\n \"value\": '\"Google\"'\n },\n {\n \"attribute\": \"AVAILABILITY\",\n \"operator\": \"EQUALS\",\n \"value\": '\"HIGHLY_AVAILABLE\"'\n },\n {\n \"attribute\": \"FLEET_TYPE\",\n \"operator\": \"EQUALS\",\n \"value\": '\"PUBLIC\"'\n }\n ]\n )\n if new_device_pool is not None:\n new_pool_name = new_device_pool['devicePool']['name']\n new_pool_arn = new_device_pool['devicePool']['arn']\n print('{} is created successfully'.format(pool_name))\n return new_pool_arn\n else:\n print('Problem creating {} device pool'.format(project_name))",
"def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool",
"def create_device_pool(projectArn=None, name=None, description=None, rules=None):\n pass",
"def _create_volume_pool(self, pool_name):\n osd_map = self._rados_command('osd dump', {})\n\n existing_id = self._get_pool_id(osd_map, pool_name)\n if existing_id is not None:\n log.info(\"Pool {0} already exists\".format(pool_name))\n return existing_id\n\n osd_count = len(osd_map['osds'])\n\n # We can't query the actual cluster config remotely, but since this is\n # just a heuristic we'll assume that the ceph.conf we have locally reflects\n # that in use in the rest of the cluster.\n pg_warn_max_per_osd = int(self.rados.conf_get('mon_max_pg_per_osd'))\n\n other_pgs = 0\n for pool in osd_map['pools']:\n if not pool['pool_name'].startswith(self.POOL_PREFIX):\n other_pgs += pool['pg_num']\n\n # A basic heuristic for picking pg_num: work out the max number of\n # PGs we can have without tripping a warning, then subtract the number\n # of PGs already created by non-manila pools, then divide by ten. That'll\n # give you a reasonable result on a system where you have \"a few\" manila\n # shares.\n pg_num = ((pg_warn_max_per_osd * osd_count) - other_pgs) // 10\n # TODO Alternatively, respect an override set by the user.\n\n self._rados_command(\n 'osd pool create',\n {\n 'pool': pool_name,\n 'pg_num': int(pg_num),\n }\n )\n\n osd_map = self._rados_command('osd dump', {})\n pool_id = self._get_pool_id(osd_map, pool_name)\n\n if pool_id is None:\n # If the pool isn't there, that's either a ceph bug, or it's some outside influence\n # removing it right after we created it.\n log.error(\"OSD map doesn't contain expected pool '{0}':\\n{1}\".format(\n pool_name, json.dumps(osd_map, indent=2)\n ))\n raise RuntimeError(\"Pool '{0}' not present in map after creation\".format(pool_name))\n else:\n return pool_id",
"def setup_device_pool(project_arn, device_pool_name):\n\n target_device_pool_arn = ''\n is_device_pool_exists = False\n for device_pool in device_farm.list_device_pools(arn=project_arn)[\n 'devicePools']:\n pool_name = device_pool['name']\n if pool_name == device_pool_name:\n print('{} already exists'.format(pool_name))\n target_device_pool_arn = device_pool['arn']\n is_device_pool_exists = True\n break\n else:\n is_device_pool_exists = False\n\n if not is_device_pool_exists:\n target_device_pool_arn = create_device_pool(\n device_pool_name, project_arn)\n\n return target_device_pool_arn\n\n raise KeyError('Problem finding device pool %r' % device_pool_name)",
"def _create(self, name):\n command = [\n 'ipset create -exist ' + name + ' hash:net family inet maxelem 536870912',\n ]\n self.__run(command)",
"def _ValidateUniqueNames(pools):\n used_names = set()\n for pool in pools:\n name = pool.nodePool\n if name in used_names:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Pool name \"%s\" used more than once.' % name)\n used_names.add(name)",
"def test_create_pool_with_mandatory_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--lb-method', lb_method,\r\n '--name', name,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id]\r\n position_names = ['admin_state_up', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id']\r\n position_values = [True, lb_method, name,\r\n protocol, subnet_id, tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)",
"def csAddPool(self,pool,creatorid,nas=None):\n\n logger.debug(\"Attempting to create pool '\"+pool+\"'.\")\n\n localpath = \"/.\"+pool\n\n url = self.csurl + \"/polcentral/v1_0/pools/\"\n\n if nas == None:\n logger.debug(\"No NAS object provided, will create pool '\"+pool+\"' type 'MW'.\")\n pooltype = 'MW'\n subscribedevices = True\n deviceid = ''\n pathinpool = '/'\n servername = ''\n sharename = ''\n sharepath = ''\n creditname = ''\n overridewarnings = True\n else:\n logger.debug(\"NAS object provided, will create pool '\"+pool+\"' type 'PS'.\")\n pooltype = 'PS'\n subscribedevices = True\n deviceid = ''\n pathinpool = '/'\n servername = ''\n sharename = ''\n sharepath = ''\n creditname = ''\n overridewarnings = True\n\n payload = {\n \"name\": pool,\n \"description\":\"Pool added by testbot\",\n \"creatorid\": {\"$id\": creatorid},\n \"type\":pooltype,\n \"allowpiggybacks\":True,\n \"localpoolpath\": localpath\n }\n \"\"\"\n payload = {\n \"name\": poolname,\n \"description\":\"Pool added by testbot\",\n \"creatorid\": {\"$id\": creatorid},\n \"type\":pooltype,\n \"allowpiggybacks\":True,\n \"localpoolpath\": localpath,\n \"subscribedevices\":subscribedevices,\n \"deviceid\": deviceid,\n \"pathinpool\": pathinpool,\n \"servername\": servername,\n \"sharename\": sharename,\n \"sharepath\": sharepath,\n \"credsetname\": creditname,\n \"overridewarnings\": overridewarnings\n }\n \"\"\"\n\n try:\n r = requests.post(url, data=json.dumps(payload))\n except Exception:\n logger.error(\"Exception during api call to add pool.\")\n return 'Error'\n\n if r.status_code == 200:\n logger.debug(\"Pool '\"+pool+\"' was successfully created.\")\n poolid = r.json()['_id']\n return poolid['$id']\n else:\n logger.error(\"Pool '\"+pool+\"' was not created. Error code is \"+str(r.status_code)+\".\")\n return 'Error'",
"def _create_pool_vm(args):\n # check storage pool name unicity\n conn = libvirt.open(None)\n _sps = list()\n if conn:\n _sps = [sp for sp in conn.listAllStoragePools() if sp.name() == args.name]\n conn.close()\n else:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n\n if len(_sps) != 0:\n print(\"Storage pool with name [%s] already exists\" % args.name, file=sys.stderr)\n return 1\n\n if args.disk and args.netfshost:\n print(\"--disk and --host option are exclusive\", file=sys.stderr)\n return 1\n\n if not args.disk and not args.netfshost:\n print(\"Either --disk or --host must be specified.\", file=sys.stderr)\n return 1\n\n if args.netfshost and not args.path:\n print(\"Must specify the remote resource path with the --path option\", file=sys.stderr)\n return 1\n\n _pool_name = args.name\n if args.disk:\n return oci_utils.kvm.virt.create_fs_pool(args.disk, _pool_name)\n if args.netfshost:\n return oci_utils.kvm.virt.create_netfs_pool(args.netfshost, args.path, _pool_name)",
"def test_create_with_clevis_1(self):\n command_line = [\n \"pool\",\n \"create\",\n \"--clevis=tang\",\n \"--tang-url=url\",\n \"--thumbprint=jkj\",\n \"--trust-url\",\n \"pn\",\n \"/dev/n\",\n ]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def test_duplicate_names_fail(self):\n name = 'some_name'\n instance_types.create(name, 256, 1, 120, 200, 'flavor1')\n self.assertRaises(exception.InstanceTypeExists,\n instance_types.create,\n name, 256, 1, 120, 200, 'flavor2')",
"def test_duplicate_name_error_validation():\n template_name = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_name.create()\n with error.expected('Name has already been taken'):\n template_name.create()\n template_name.delete(cancel=False)",
"def test_create_pool_with_all_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n provider = 'lbaas'\r\n args = ['--admin-state-down',\r\n '--description', description,\r\n '--lb-method', lb_method,\r\n '--name', name,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n '--provider', provider]\r\n position_names = ['admin_state_up', 'description', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id', 'provider']\r\n position_values = [False, description, lb_method, name,\r\n protocol, subnet_id, tenant_id, provider]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)",
"def create_pool(self, service, bigips):\n pool = self.service_adapter.get_pool(service)\n error = None\n\n for bigip in bigips:\n try:\n self.pool_helper.create(bigip, pool)\n except HTTPError as err:\n if err.response.status_code == 409:\n LOG.debug(\"Pool already exists...updating\")\n try:\n self.pool_helper.update(bigip, pool)\n except Exception as err:\n error = f5_ex.PoolUpdateException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n else:\n error = f5_ex.PoolCreationException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n except Exception as err:\n error = f5_ex.PoolCreationException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n\n return error",
"def test_create_with_only_name(self):\n with OrionState() as cfg:\n name = \"bm00001\"\n with pytest.raises(NoConfigurationError) as exc:\n get_or_create_benchmark(cfg.storage, name).close()\n\n assert f\"Benchmark {name} does not exist in DB\" in str(exc.value)",
"def test_cannot_create_with_same_category_and_name(self):\n # Create an initial service\n self.project.services.create(name = \"service1\", category = self.category)\n # Then try to create the same service using the serializer\n serializer = ServiceSerializer(\n data = dict(name = \"service1\", category = self.category.pk),\n context = dict(project = self.project)\n )\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['name'][0].code, 'unique')",
"def test_networking_project_network_tag_create(self):\n pass",
"def add_pool(ctx, pool_name, global_ip_range, global_port_range):\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n # Verify the ip address range and format\n ip_address = global_ip_range.split(\"-\")\n if len(ip_address) > 2:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n elif len(ip_address) == 2:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n\n if is_valid_ipv4_address(ip_address[1]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[1]))\n\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))\n if ipLowLimit >= ipHighLimit:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n else:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))\n\n # Verify the port address range and format\n if global_port_range is not None: \n port_address = global_port_range.split(\"-\")\n\n if len(port_address) > 2:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n elif len(port_address) == 2:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n\n if is_valid_port_address(port_address[1]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[1]))\n\n portLowLimit = int(port_address[0])\n portHighLimit = int(port_address[1])\n if portLowLimit >= portHighLimit:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n else:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. 
Please enter a valid port value !!\".format(port_address[0]))\n else:\n global_port_range = \"NULL\"\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n dataKey1 = 'nat_ip'\n dataKey2 = 'nat_port'\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range:\n click.echo(\"Trying to add pool, which is already present.\")\n entryFound = True\n\n pool_dict = config_db.get_table(table) \n if len(pool_dict) == 16:\n click.echo(\"Failed to add pool, as already reached maximum pool limit 16.\")\n entryFound = True\n\n # Verify the Ip address is overlapping with any Static NAT entry\n if entryFound == False:\n static_dict = config_db.get_table('STATIC_NAT')\n if static_dict:\n for staticKey, staticValues in static_dict.items():\n global_ip = \"---\"\n local_ip = \"---\"\n nat_type = \"dnat\"\n\n if isinstance(staticKey, str) is True:\n global_ip = staticKey\n else:\n continue\n\n local_ip = staticValues[\"local_ip\"]\n\n if \"nat_type\" in staticValues:\n nat_type = staticValues[\"nat_type\"]\n\n if nat_type == \"snat\":\n global_ip = local_ip\n\n ipAddress = int(ipaddress.IPv4Address(global_ip))\n if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):\n ctx.fail(\"Given Ip address entry is overlapping with existing Static NAT entry !!\")\n\n if entryFound == False:\n config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range})",
"def test_networking_project_network_create(self):\n pass",
"def test_create_with_clevis_2(self):\n command_line = [\n \"--propagate\",\n \"pool\",\n \"create\",\n \"--clevis=tang\",\n \"--tang-url=url\",\n \"pn\",\n \"/dev/n\",\n ]\n self.check_error(StratisCliMissingClevisThumbprintError, command_line, 1)",
"def test_create_with_clevis_1(self):\n command_line = [\n \"--propagate\",\n \"pool\",\n \"create\",\n \"--clevis=tang\",\n \"pn\",\n \"/dev/n\",\n ]\n self.check_error(StratisCliMissingClevisTangURLError, command_line, 1)",
"def storage_pools_create(context, storage_pools):\n session = get_session()\n storage_pool_refs = []\n with session.begin():\n\n for storage_pool in storage_pools:\n LOG.debug('adding new storage_pool for native_storage_pool_id {0}:'\n .format(storage_pool.get('native_storage_pool_id')))\n if not storage_pool.get('id'):\n storage_pool['id'] = uuidutils.generate_uuid()\n\n storage_pool_ref = models.StoragePool()\n storage_pool_ref.update(storage_pool)\n storage_pool_refs.append(storage_pool_ref)\n\n session.add_all(storage_pool_refs)\n\n return storage_pool_refs",
"def pool_create_from_dict(self, parameters: dict):\n pool_name = parameters[KnownParameters.SITE_NAME.value]\n parameters[KnownParameters.POOL_NAME.value] = pool_name\n for pool in self.get_app_pool_list():\n if pool.name.lower() == pool_name.lower():\n return\n return self.pool_create(pool_name)",
"def test_autocreate_licensepool(self):\n identifier = self._identifier()\n assert [] == identifier.licensed_through\n provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection\n )\n pool = provider.license_pool(identifier)\n assert [pool] == identifier.licensed_through\n assert pool.data_source == provider.data_source\n assert pool.identifier == identifier\n assert pool.collection == provider.collection\n\n # Calling license_pool again finds the same LicensePool\n # as before.\n pool2 = provider.license_pool(identifier)\n assert pool == pool2\n\n # It's possible for a CollectionCoverageProvider to create a\n # LicensePool for a different DataSource than the one\n # associated with the Collection. Only the metadata wrangler\n # needs to do this -- it's so a CoverageProvider for a\n # third-party DataSource can create an 'Internal Processing'\n # LicensePool when some other part of the metadata wrangler\n # failed to do this earlier.\n\n # If a working pool already exists, it's returned and no new\n # pool is created.\n same_pool = provider.license_pool(\n identifier, DataSource.INTERNAL_PROCESSING\n )\n assert same_pool == pool2\n assert provider.data_source == same_pool.data_source\n\n # A new pool is only created if no working pool can be found.\n identifier2 = self._identifier()\n new_pool = provider.license_pool(\n identifier2, DataSource.INTERNAL_PROCESSING\n )\n assert new_pool.data_source.name == DataSource.INTERNAL_PROCESSING\n assert new_pool.identifier == identifier2\n assert new_pool.collection == provider.collection",
"def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')",
"def create_pool(self, context, pool):\n LOG.info(\"Received request 'Create Pool' for Pool:%(pool_id)s \",\n {'pool_id': pool['id']})\n arg_dict = {'context': context,\n lb_const.POOL: pool\n }\n # REVISIT(jiahao) M:N pool is not yet implemented.\n self._send_event(lb_const.EVENT_CREATE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])"
]
| [
"0.6936273",
"0.668377",
"0.65317404",
"0.6379177",
"0.6103705",
"0.6079796",
"0.60304755",
"0.6021698",
"0.60045826",
"0.59128916",
"0.59012294",
"0.58715385",
"0.57743937",
"0.5755864",
"0.57389754",
"0.57138234",
"0.57114106",
"0.57097745",
"0.5707323",
"0.5629869",
"0.5587077",
"0.5568605",
"0.54494905",
"0.54298705",
"0.54284614",
"0.54162014",
"0.5415392",
"0.5410742",
"0.54069895",
"0.53999686"
]
| 0.7493981 | 0 |
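The stratis-cli record above exercises a single invariant: `pool create` must refuse a name that is already registered, even when the block-device list is identical to the original call. The snippet below is a minimal, self-contained sketch of that invariant written in the same unittest style as the positive document; `PoolRegistry`, `NameConflictError`, and `create_pool` are hypothetical stand-ins introduced here for illustration and are not the stratis-cli implementation or its test harness.

import unittest


class NameConflictError(Exception):
    """Hypothetical stand-in for StratisCliNameConflictError."""


class PoolRegistry:
    """Toy in-memory registry that enforces unique pool names."""

    def __init__(self):
        self._pools = {}

    def create_pool(self, name, devices):
        # The name check happens first, so the device list is irrelevant here.
        if name in self._pools:
            raise NameConflictError(f"pool {name!r} already exists")
        self._pools[name] = list(devices)


class TestCreateSameDevices(unittest.TestCase):
    _POOLNAME = "pool1"
    _DEVICES = ["/dev/vdb", "/dev/vdc"]

    def test_create_same_devices(self):
        registry = PoolRegistry()
        registry.create_pool(self._POOLNAME, self._DEVICES)
        # Re-creating with the same name and the same devices must fail.
        with self.assertRaises(NameConflictError):
            registry.create_pool(self._POOLNAME, self._DEVICES)


if __name__ == "__main__":
    unittest.main()

Running the file with `python -m unittest` gives the same pass/fail shape that the original `check_error` assertion encodes: the second create attempt is expected to raise, and the test fails if it silently succeeds.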
Create should fail with a StratisCliNameConflictError when trying to create a new pool with different devices and the same name as a previous pool. | def test_create_different_devices(self):
command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()
self.check_error(StratisCliNameConflictError, command_line, _ERROR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME] + self.devices\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME_2] + self._DEVICES\n self.check_error(StratisCliInUseSameTierError, command_line, _ERROR)",
"def pool_create(self, pool_name):\n self.core.api.os.shell.cmd('{0} add apppool /name:\"{1}\"'.format(\n self.APP_CMD, pool_name\n ))",
"def create_device_pool(pool_name, project_arn):\n\n new_device_pool = device_farm.create_device_pool(\n projectArn=project_arn,\n name=pool_name,\n description='it is edX device pool',\n maxDevices=1,\n rules=[\n {\n \"attribute\": \"PLATFORM\",\n \"operator\": \"EQUALS\",\n \"value\": '\"ANDROID\"'\n },\n {\n \"attribute\": \"OS_VERSION\",\n \"operator\": \"GREATER_THAN_OR_EQUALS\",\n \"value\": '\"9\"'\n },\n {\n \"attribute\": \"MANUFACTURER\",\n \"operator\": \"EQUALS\",\n \"value\": '\"Google\"'\n },\n {\n \"attribute\": \"AVAILABILITY\",\n \"operator\": \"EQUALS\",\n \"value\": '\"HIGHLY_AVAILABLE\"'\n },\n {\n \"attribute\": \"FLEET_TYPE\",\n \"operator\": \"EQUALS\",\n \"value\": '\"PUBLIC\"'\n }\n ]\n )\n if new_device_pool is not None:\n new_pool_name = new_device_pool['devicePool']['name']\n new_pool_arn = new_device_pool['devicePool']['arn']\n print('{} is created successfully'.format(pool_name))\n return new_pool_arn\n else:\n print('Problem creating {} device pool'.format(project_name))",
"def create_pool(self, device, tier, poolname):\n print \"Adding pool %s...\" % poolname\n pool = device.findRemoteStoragePool(StoragePoolPredicates.name(poolname))\n pool.setTier(tier)\n pool.save()\n return pool",
"def create_device_pool(projectArn=None, name=None, description=None, rules=None):\n pass",
"def test_create_pool_with_mandatory_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--lb-method', lb_method,\r\n '--name', name,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id]\r\n position_names = ['admin_state_up', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id']\r\n position_values = [True, lb_method, name,\r\n protocol, subnet_id, tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)",
"def setup_device_pool(project_arn, device_pool_name):\n\n target_device_pool_arn = ''\n is_device_pool_exists = False\n for device_pool in device_farm.list_device_pools(arn=project_arn)[\n 'devicePools']:\n pool_name = device_pool['name']\n if pool_name == device_pool_name:\n print('{} already exists'.format(pool_name))\n target_device_pool_arn = device_pool['arn']\n is_device_pool_exists = True\n break\n else:\n is_device_pool_exists = False\n\n if not is_device_pool_exists:\n target_device_pool_arn = create_device_pool(\n device_pool_name, project_arn)\n\n return target_device_pool_arn\n\n raise KeyError('Problem finding device pool %r' % device_pool_name)",
"def _create_volume_pool(self, pool_name):\n osd_map = self._rados_command('osd dump', {})\n\n existing_id = self._get_pool_id(osd_map, pool_name)\n if existing_id is not None:\n log.info(\"Pool {0} already exists\".format(pool_name))\n return existing_id\n\n osd_count = len(osd_map['osds'])\n\n # We can't query the actual cluster config remotely, but since this is\n # just a heuristic we'll assume that the ceph.conf we have locally reflects\n # that in use in the rest of the cluster.\n pg_warn_max_per_osd = int(self.rados.conf_get('mon_max_pg_per_osd'))\n\n other_pgs = 0\n for pool in osd_map['pools']:\n if not pool['pool_name'].startswith(self.POOL_PREFIX):\n other_pgs += pool['pg_num']\n\n # A basic heuristic for picking pg_num: work out the max number of\n # PGs we can have without tripping a warning, then subtract the number\n # of PGs already created by non-manila pools, then divide by ten. That'll\n # give you a reasonable result on a system where you have \"a few\" manila\n # shares.\n pg_num = ((pg_warn_max_per_osd * osd_count) - other_pgs) // 10\n # TODO Alternatively, respect an override set by the user.\n\n self._rados_command(\n 'osd pool create',\n {\n 'pool': pool_name,\n 'pg_num': int(pg_num),\n }\n )\n\n osd_map = self._rados_command('osd dump', {})\n pool_id = self._get_pool_id(osd_map, pool_name)\n\n if pool_id is None:\n # If the pool isn't there, that's either a ceph bug, or it's some outside influence\n # removing it right after we created it.\n log.error(\"OSD map doesn't contain expected pool '{0}':\\n{1}\".format(\n pool_name, json.dumps(osd_map, indent=2)\n ))\n raise RuntimeError(\"Pool '{0}' not present in map after creation\".format(pool_name))\n else:\n return pool_id",
"def _create(self, name):\n command = [\n 'ipset create -exist ' + name + ' hash:net family inet maxelem 536870912',\n ]\n self.__run(command)",
"def csAddPool(self,pool,creatorid,nas=None):\n\n logger.debug(\"Attempting to create pool '\"+pool+\"'.\")\n\n localpath = \"/.\"+pool\n\n url = self.csurl + \"/polcentral/v1_0/pools/\"\n\n if nas == None:\n logger.debug(\"No NAS object provided, will create pool '\"+pool+\"' type 'MW'.\")\n pooltype = 'MW'\n subscribedevices = True\n deviceid = ''\n pathinpool = '/'\n servername = ''\n sharename = ''\n sharepath = ''\n creditname = ''\n overridewarnings = True\n else:\n logger.debug(\"NAS object provided, will create pool '\"+pool+\"' type 'PS'.\")\n pooltype = 'PS'\n subscribedevices = True\n deviceid = ''\n pathinpool = '/'\n servername = ''\n sharename = ''\n sharepath = ''\n creditname = ''\n overridewarnings = True\n\n payload = {\n \"name\": pool,\n \"description\":\"Pool added by testbot\",\n \"creatorid\": {\"$id\": creatorid},\n \"type\":pooltype,\n \"allowpiggybacks\":True,\n \"localpoolpath\": localpath\n }\n \"\"\"\n payload = {\n \"name\": poolname,\n \"description\":\"Pool added by testbot\",\n \"creatorid\": {\"$id\": creatorid},\n \"type\":pooltype,\n \"allowpiggybacks\":True,\n \"localpoolpath\": localpath,\n \"subscribedevices\":subscribedevices,\n \"deviceid\": deviceid,\n \"pathinpool\": pathinpool,\n \"servername\": servername,\n \"sharename\": sharename,\n \"sharepath\": sharepath,\n \"credsetname\": creditname,\n \"overridewarnings\": overridewarnings\n }\n \"\"\"\n\n try:\n r = requests.post(url, data=json.dumps(payload))\n except Exception:\n logger.error(\"Exception during api call to add pool.\")\n return 'Error'\n\n if r.status_code == 200:\n logger.debug(\"Pool '\"+pool+\"' was successfully created.\")\n poolid = r.json()['_id']\n return poolid['$id']\n else:\n logger.error(\"Pool '\"+pool+\"' was not created. Error code is \"+str(r.status_code)+\".\")\n return 'Error'",
"def test_create_with_clevis_1(self):\n command_line = [\n \"pool\",\n \"create\",\n \"--clevis=tang\",\n \"--tang-url=url\",\n \"--thumbprint=jkj\",\n \"--trust-url\",\n \"pn\",\n \"/dev/n\",\n ]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def _create_pool_vm(args):\n # check storage pool name unicity\n conn = libvirt.open(None)\n _sps = list()\n if conn:\n _sps = [sp for sp in conn.listAllStoragePools() if sp.name() == args.name]\n conn.close()\n else:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n\n if len(_sps) != 0:\n print(\"Storage pool with name [%s] already exists\" % args.name, file=sys.stderr)\n return 1\n\n if args.disk and args.netfshost:\n print(\"--disk and --host option are exclusive\", file=sys.stderr)\n return 1\n\n if not args.disk and not args.netfshost:\n print(\"Either --disk or --host must be specified.\", file=sys.stderr)\n return 1\n\n if args.netfshost and not args.path:\n print(\"Must specify the remote resource path with the --path option\", file=sys.stderr)\n return 1\n\n _pool_name = args.name\n if args.disk:\n return oci_utils.kvm.virt.create_fs_pool(args.disk, _pool_name)\n if args.netfshost:\n return oci_utils.kvm.virt.create_netfs_pool(args.netfshost, args.path, _pool_name)",
"def test_create_pool_with_all_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n provider = 'lbaas'\r\n args = ['--admin-state-down',\r\n '--description', description,\r\n '--lb-method', lb_method,\r\n '--name', name,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n '--provider', provider]\r\n position_names = ['admin_state_up', 'description', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id', 'provider']\r\n position_values = [False, description, lb_method, name,\r\n protocol, subnet_id, tenant_id, provider]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)",
"def test_duplicate_names_fail(self):\n name = 'some_name'\n instance_types.create(name, 256, 1, 120, 200, 'flavor1')\n self.assertRaises(exception.InstanceTypeExists,\n instance_types.create,\n name, 256, 1, 120, 200, 'flavor2')",
"def _ValidateUniqueNames(pools):\n used_names = set()\n for pool in pools:\n name = pool.nodePool\n if name in used_names:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Pool name \"%s\" used more than once.' % name)\n used_names.add(name)",
"def test_create_with_only_name(self):\n with OrionState() as cfg:\n name = \"bm00001\"\n with pytest.raises(NoConfigurationError) as exc:\n get_or_create_benchmark(cfg.storage, name).close()\n\n assert f\"Benchmark {name} does not exist in DB\" in str(exc.value)",
"def test_networking_project_network_tag_create(self):\n pass",
"def test_duplicate_name_error_validation():\n template_name = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_name.create()\n with error.expected('Name has already been taken'):\n template_name.create()\n template_name.delete(cancel=False)",
"def create_pool(self, service, bigips):\n pool = self.service_adapter.get_pool(service)\n error = None\n\n for bigip in bigips:\n try:\n self.pool_helper.create(bigip, pool)\n except HTTPError as err:\n if err.response.status_code == 409:\n LOG.debug(\"Pool already exists...updating\")\n try:\n self.pool_helper.update(bigip, pool)\n except Exception as err:\n error = f5_ex.PoolUpdateException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n else:\n error = f5_ex.PoolCreationException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n except Exception as err:\n error = f5_ex.PoolCreationException(err.message)\n LOG.error(\"Failed to assure pool %s on %s: %s\",\n pool['name'], bigip, error.message)\n\n return error",
"def test_cannot_create_with_same_category_and_name(self):\n # Create an initial service\n self.project.services.create(name = \"service1\", category = self.category)\n # Then try to create the same service using the serializer\n serializer = ServiceSerializer(\n data = dict(name = \"service1\", category = self.category.pk),\n context = dict(project = self.project)\n )\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['name'][0].code, 'unique')",
"def test_networking_project_network_create(self):\n pass",
"def test_create_with_clevis_1(self):\n command_line = [\n \"--propagate\",\n \"pool\",\n \"create\",\n \"--clevis=tang\",\n \"pn\",\n \"/dev/n\",\n ]\n self.check_error(StratisCliMissingClevisTangURLError, command_line, 1)",
"def test_create_with_clevis_2(self):\n command_line = [\n \"--propagate\",\n \"pool\",\n \"create\",\n \"--clevis=tang\",\n \"--tang-url=url\",\n \"pn\",\n \"/dev/n\",\n ]\n self.check_error(StratisCliMissingClevisThumbprintError, command_line, 1)",
"def add_pool(ctx, pool_name, global_ip_range, global_port_range):\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n # Verify the ip address range and format\n ip_address = global_ip_range.split(\"-\")\n if len(ip_address) > 2:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n elif len(ip_address) == 2:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n\n if is_valid_ipv4_address(ip_address[1]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[1]))\n\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))\n if ipLowLimit >= ipHighLimit:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n else:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))\n\n # Verify the port address range and format\n if global_port_range is not None: \n port_address = global_port_range.split(\"-\")\n\n if len(port_address) > 2:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n elif len(port_address) == 2:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n\n if is_valid_port_address(port_address[1]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[1]))\n\n portLowLimit = int(port_address[0])\n portHighLimit = int(port_address[1])\n if portLowLimit >= portHighLimit:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n else:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. 
Please enter a valid port value !!\".format(port_address[0]))\n else:\n global_port_range = \"NULL\"\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n dataKey1 = 'nat_ip'\n dataKey2 = 'nat_port'\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range:\n click.echo(\"Trying to add pool, which is already present.\")\n entryFound = True\n\n pool_dict = config_db.get_table(table) \n if len(pool_dict) == 16:\n click.echo(\"Failed to add pool, as already reached maximum pool limit 16.\")\n entryFound = True\n\n # Verify the Ip address is overlapping with any Static NAT entry\n if entryFound == False:\n static_dict = config_db.get_table('STATIC_NAT')\n if static_dict:\n for staticKey, staticValues in static_dict.items():\n global_ip = \"---\"\n local_ip = \"---\"\n nat_type = \"dnat\"\n\n if isinstance(staticKey, str) is True:\n global_ip = staticKey\n else:\n continue\n\n local_ip = staticValues[\"local_ip\"]\n\n if \"nat_type\" in staticValues:\n nat_type = staticValues[\"nat_type\"]\n\n if nat_type == \"snat\":\n global_ip = local_ip\n\n ipAddress = int(ipaddress.IPv4Address(global_ip))\n if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):\n ctx.fail(\"Given Ip address entry is overlapping with existing Static NAT entry !!\")\n\n if entryFound == False:\n config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range})",
"def test_instance_naming_creation(os_info):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n\n instance_names = os_info.nodes_names\n for i in range(len(instance_names)):\n assert instance_names[i] == 'test-node-{}'.format(i + 1)",
"def test_bad_uuid_pool(self):\n command_line = [\"pool\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')",
"def create_pool(self, context, pool):\n LOG.info(\"Received request 'Create Pool' for Pool:%(pool_id)s \",\n {'pool_id': pool['id']})\n arg_dict = {'context': context,\n lb_const.POOL: pool\n }\n # REVISIT(jiahao) M:N pool is not yet implemented.\n self._send_event(lb_const.EVENT_CREATE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])",
"def test_create_device1(self):\n pass"
]
| [
"0.7578934",
"0.6852764",
"0.6572364",
"0.649063",
"0.6186363",
"0.61459994",
"0.6054562",
"0.60405385",
"0.60289097",
"0.6028448",
"0.5937939",
"0.5918405",
"0.58632976",
"0.5842114",
"0.58106863",
"0.57899123",
"0.5784483",
"0.5784168",
"0.57270914",
"0.56444716",
"0.56397796",
"0.56301177",
"0.5617183",
"0.5616734",
"0.56070036",
"0.55121064",
"0.55071634",
"0.5477233",
"0.5471233",
"0.5463193"
]
| 0.71884704 | 1 |
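The record above makes the complementary point: the conflict is keyed on the pool name alone, so passing a different device list does not avoid StratisCliNameConflictError. A companion sketch under the same assumptions as before (toy registry and hypothetical class names, not the real stratis-cli code):

import unittest


class NameConflictError(Exception):
    """Hypothetical stand-in for StratisCliNameConflictError."""


class PoolRegistry:
    """Toy registry: the name is checked before the device list is even inspected."""

    def __init__(self):
        self._pools = {}

    def create_pool(self, name, devices):
        if name in self._pools:
            raise NameConflictError(f"pool {name!r} already exists")
        self._pools[name] = list(devices)


class TestCreateDifferentDevices(unittest.TestCase):
    def test_create_different_devices(self):
        registry = PoolRegistry()
        registry.create_pool("pool1", ["/dev/vdb"])
        # A fresh device list does not help: the name is already taken.
        with self.assertRaises(NameConflictError):
            registry.create_pool("pool1", ["/dev/vdd", "/dev/vde"])


if __name__ == "__main__":
    unittest.main()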
Test that creating two pools with different names and the same devices raises a StratisCliInUseSameTierError exception. | def test_create_same_devices(self):
command_line = self._MENU + [self._POOLNAME_2] + self._DEVICES
self.check_error(StratisCliInUseSameTierError, command_line, _ERROR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_same_devices(self):\n command_line = self._MENU + [self._POOLNAME] + self.devices\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"def test_create_different_devices(self):\n command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY()\n self.check_error(StratisCliNameConflictError, command_line, _ERROR)",
"async def test_change_pools(self, setup, trusted_and_fee, self_hostname):\n trusted, fee = trusted_and_fee\n full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup\n our_ph = receive_address[0]\n pool_a_ph = receive_address[1]\n wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]\n pool_b_ph = await wallets[1].get_new_puzzlehash()\n full_node_api = full_nodes[0]\n\n if trusted:\n wallet_nodes[0].config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n else:\n wallet_nodes[0].config[\"trusted_peers\"] = {}\n\n await wallet_nodes[0].server.start_client(\n PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None\n )\n\n WAIT_SECS = 200\n try:\n assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0\n\n async def have_chia():\n await farm_blocks(full_node_api, our_ph, 1)\n return (await wallets[0].get_confirmed_balance()) > 0\n\n await time_out_assert(timeout=WAIT_SECS, function=have_chia)\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n creation_tx: TransactionRecord = await client.create_new_pool_wallet(\n pool_a_ph, \"https://pool-a.org\", 5, f\"{self_hostname}:5000\", \"new\", \"FARMING_TO_POOL\", fee\n )\n\n await time_out_assert(\n 10,\n full_node_api.full_node.mempool_manager.get_spendbundle,\n creation_tx.spend_bundle,\n creation_tx.name,\n )\n\n await farm_blocks(full_node_api, our_ph, 6)\n assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None\n\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n summaries_response = await client.get_wallets(WalletType.POOLING_WALLET)\n assert len(summaries_response) == 1\n wallet_id: int = summaries_response[0][\"id\"]\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n assert status.target is None\n\n async def status_is_farming_to_pool():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n assert pw_info.current.pool_url == \"https://pool-a.org\"\n assert pw_info.current.relative_lock_height == 5\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n join_pool_tx: TransactionRecord = (\n await client.pw_join_pool(\n wallet_id,\n pool_b_ph,\n \"https://pool-b.org\",\n 10,\n fee,\n )\n )[\"transaction\"]\n assert join_pool_tx is not None\n\n async def status_is_leaving():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving)\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n assert pw_info.current.pool_url == \"https://pool-b.org\"\n assert pw_info.current.relative_lock_height == 10\n assert len(await wallets[0].wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0\n\n finally:\n client.close()\n await 
client.await_closed()\n await rpc_cleanup()",
"def _ValidateUniqueNames(pools):\n used_names = set()\n for pool in pools:\n name = pool.nodePool\n if name in used_names:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Pool name \"%s\" used more than once.' % name)\n used_names.add(name)",
"def test_temp_create_sg_multinode(iam_client_stub, ec2_client_stub):\n\n # Generate a config of the desired form.\n subnet_id = DEFAULT_SUBNET[\"SubnetId\"]\n # head and worker stuff:\n head_and_worker_kludge = {\n \"head_node\": {\n \"SubnetIds\": [subnet_id]\n },\n \"worker_nodes\": {\n \"SubnetIds\": [subnet_id]\n }\n }\n # security group info to go in provider field\n provider_data = helpers.load_aws_example_config_file(\n \"example-security-group.yaml\")[\"provider\"]\n\n # a multi-node-type config -- will add head/worker stuff and security group\n # info to this.\n base_config = helpers.load_aws_example_config_file(\"example-full.yaml\")\n\n config = copy.deepcopy(base_config)\n # Add security group data\n config[\"provider\"] = provider_data\n # Add head and worker fields.\n config.update(head_and_worker_kludge)\n\n # Generate stubs\n stubs.configure_iam_role_default(iam_client_stub)\n stubs.configure_key_pair_default(ec2_client_stub)\n\n # Only one of these (the one specified in head_node / worker_nodes)\n # is in the correct vpc.\n # This list of subnets is generated by the ec2.subnets.all() call\n # and then ignored, since head_node and worker_nodes already specify\n # subnet_ids.\n stubs.describe_a_thousand_subnets_in_different_vpcs(ec2_client_stub)\n\n # The rest of the stubbing logic is copied from\n # test_create_sg_with_custom_inbound_rules_and_name.\n\n # expect to describe the head subnet ID\n stubs.describe_subnets_echo(ec2_client_stub, DEFAULT_SUBNET)\n # given no existing security groups within the VPC...\n stubs.describe_no_security_groups(ec2_client_stub)\n # expect to create a security group on the head node VPC\n stubs.create_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_NAME)\n # expect new head security group details to be retrieved after creation\n stubs.describe_sgs_on_vpc(\n ec2_client_stub,\n [DEFAULT_SUBNET[\"VpcId\"]],\n [DEFAULT_SG_WITH_NAME],\n )\n\n # given custom existing default head security group inbound rules...\n # expect to authorize both default and custom inbound rules\n stubs.authorize_sg_ingress(\n ec2_client_stub,\n DEFAULT_SG_WITH_NAME_AND_RULES,\n )\n\n # given the prior modification to the head security group...\n # expect the next read of a head security group property to reload it\n stubs.describe_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_NAME_AND_RULES)\n\n _get_vpc_id_or_die.cache_clear()\n\n # given our mocks and the config as input...\n # expect the config to be validated and bootstrapped successfully\n bootstrapped_config = helpers.bootstrap_aws_config(config)\n\n # expect the bootstrapped config to have the custom security group...\n # name and in bound rules\n assert bootstrapped_config[\"provider\"][\"security_group\"][\n \"GroupName\"] == DEFAULT_SG_WITH_NAME_AND_RULES[\"GroupName\"]\n assert config[\"provider\"][\"security_group\"][\n \"IpPermissions\"] == CUSTOM_IN_BOUND_RULES\n\n # Confirming boostrap config does not currently touch available node types.\n assert bootstrapped_config[\"available_node_types\"] == config[\n \"available_node_types\"]\n\n # Confirming head and worker subnet_ids are untouched\n assert bootstrapped_config[\"head_node\"][\"SubnetIds\"] ==\\\n config[\"head_node\"][\"SubnetIds\"] ==\\\n config[\"worker_nodes\"][\"SubnetIds\"] ==\\\n bootstrapped_config[\"worker_nodes\"][\"SubnetIds\"] ==\\\n [DEFAULT_SUBNET[\"SubnetId\"]]\n\n # Confirming correct security group got filled for head and workers\n sg_id = DEFAULT_SG[\"GroupId\"]\n assert bootstrapped_config[\"head_node\"][\"SecurityGroupIds\"] == [sg_id]\n 
assert bootstrapped_config[\"worker_nodes\"][\"SecurityGroupIds\"] == [sg_id]\n\n # Confirm security group is in the right VPC.\n # (Doesn't really confirm anything except for the structure of this test\n # data.)\n assert DEFAULT_SG[\"VpcId\"] == DEFAULT_SUBNET[\"VpcId\"]\n assert DEFAULT_SUBNET[\"SubnetId\"] ==\\\n bootstrapped_config[\"head_node\"][\"SubnetIds\"][0]\n\n # expect no pending responses left in IAM or EC2 client stub queues\n iam_client_stub.assert_no_pending_responses()\n ec2_client_stub.assert_no_pending_responses()",
"def test_create_pool_with_all_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n provider = 'lbaas'\r\n args = ['--admin-state-down',\r\n '--description', description,\r\n '--lb-method', lb_method,\r\n '--name', name,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n '--provider', provider]\r\n position_names = ['admin_state_up', 'description', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id', 'provider']\r\n position_values = [False, description, lb_method, name,\r\n protocol, subnet_id, tenant_id, provider]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)",
"def test_index_nas_shares_by_pool(self):\n pass",
"def test_instance_naming_creation(os_info):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n\n instance_names = os_info.nodes_names\n for i in range(len(instance_names)):\n assert instance_names[i] == 'test-node-{}'.format(i + 1)",
"def _ValidatePoolsHaveSameLocation(pools):\n if not pools:\n return\n initial_locations = None\n for pool in pools:\n if pool.nodePoolConfig is not None:\n locations = pool.nodePoolConfig.locations\n if initial_locations is None:\n initial_locations = locations\n continue\n elif initial_locations != locations:\n raise exceptions.InvalidArgumentException(\n '--pools', 'All pools must have identical locations.')",
"def test_bad_uuid_pool(self):\n command_line = [\"pool\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def test_update_nas_share_by_pool(self):\n pass",
"def testAsBackendOfTargetPool(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_instance_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n target_pool_name = 'end-to-end-test-target-pool'\n self.test_resource_creator.create_target_pool_with_health_check(\n 'sample_target_pool_with_no_instance.json',\n target_pool_name,\n [],\n [instance_selfLink_1],\n health_check_selfLink=None)\n\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute, instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n )\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration is successful\n new_instance_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertTrue(\n resource_config_is_unchanged_except_for_network(new_instance_config,\n original_instance_config))\n # network changed\n self.assertTrue(check_instance_network(new_instance_config,\n self.test_resource_creator.network_selfLink,\n self.test_resource_creator.subnetwork_selfLink))\n\n print('Pass the current test')",
"def test_change_subnet(self):\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.11.12.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n ippool2 = NetworkIpPool.objects.create(\n network='10.10.11.0/24',\n kind=NetworkIpPoolKind.NETWORK_KIND_INTERNET,\n description='test',\n ip_start='10.10.11.2',\n ip_end='10.10.11.254',\n gateway='10.10.11.1',\n is_dynamic=True\n )\n self.ippool.groups.remove(self.group)\n ippool2.groups.add(self.group)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:7',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.3')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)",
"def test_duplicate_names_fail(self):\n name = 'some_name'\n instance_types.create(name, 256, 1, 120, 200, 'flavor1')\n self.assertRaises(exception.InstanceTypeExists,\n instance_types.create,\n name, 256, 1, 120, 200, 'flavor2')",
"async def test_change_pools_reorg(self, setup, trusted_and_fee, bt, self_hostname):\n trusted, fee = trusted_and_fee\n full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup\n our_ph = receive_address[0]\n pool_a_ph = receive_address[1]\n wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]\n pool_b_ph = await wallets[1].get_new_puzzlehash()\n full_node_api = full_nodes[0]\n WAIT_SECS = 30\n if trusted:\n wallet_nodes[0].config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n else:\n wallet_nodes[0].config[\"trusted_peers\"] = {}\n\n await wallet_nodes[0].server.start_client(\n PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None\n )\n\n try:\n assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0\n\n async def have_chia():\n await farm_blocks(full_node_api, our_ph, 1)\n return (await wallets[0].get_confirmed_balance()) > 0\n\n await time_out_assert(timeout=WAIT_SECS, function=have_chia)\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n creation_tx: TransactionRecord = await client.create_new_pool_wallet(\n pool_a_ph, \"https://pool-a.org\", 5, f\"{self_hostname}:5000\", \"new\", \"FARMING_TO_POOL\", fee\n )\n\n await time_out_assert(\n 10,\n full_node_api.full_node.mempool_manager.get_spendbundle,\n creation_tx.spend_bundle,\n creation_tx.name,\n )\n\n await farm_blocks(full_node_api, our_ph, 6)\n assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None\n\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n summaries_response = await client.get_wallets(WalletType.POOLING_WALLET)\n assert len(summaries_response) == 1\n wallet_id: int = summaries_response[0][\"id\"]\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n assert status.target is None\n\n async def status_is_farming_to_pool():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n\n pw_info: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n assert pw_info.current.pool_url == \"https://pool-a.org\"\n assert pw_info.current.relative_lock_height == 5\n\n join_pool_tx: TransactionRecord = (\n await client.pw_join_pool(\n wallet_id,\n pool_b_ph,\n \"https://pool-b.org\",\n 10,\n fee,\n )\n )[\"transaction\"]\n assert join_pool_tx is not None\n await time_out_assert(\n 10,\n full_node_api.full_node.mempool_manager.get_spendbundle,\n join_pool_tx.spend_bundle,\n join_pool_tx.name,\n )\n await farm_blocks(full_node_api, our_ph, 1)\n\n async def status_is_leaving_no_blocks():\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value\n\n async def status_is_farming_to_pool_no_blocks():\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n\n await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving_no_blocks)\n\n current_blocks = await full_node_api.get_all_full_blocks()\n more_blocks = full_node_api.bt.get_consecutive_blocks(\n 3,\n farmer_reward_puzzle_hash=pool_a_ph,\n 
pool_reward_puzzle_hash=pool_b_ph,\n block_list_input=current_blocks[:-1],\n force_overflow=True,\n guarantee_transaction_block=True,\n seed=32 * b\"4\",\n transaction_data=join_pool_tx.spend_bundle,\n )\n\n for block in more_blocks[-3:]:\n await full_node_api.full_node.respond_block(RespondBlock(block))\n\n await asyncio.sleep(5)\n await time_out_assert(timeout=WAIT_SECS, function=status_is_leaving_no_blocks)\n\n # Eventually, leaves pool\n await time_out_assert(timeout=WAIT_SECS, function=status_is_farming_to_pool)\n\n finally:\n client.close()\n await client.await_closed()\n await rpc_cleanup()",
"def test_create_pool_with_mandatory_params(self):\r\n resource = 'pool'\r\n cmd = pool.CreatePool(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n lb_method = 'ROUND_ROBIN'\r\n protocol = 'HTTP'\r\n subnet_id = 'subnet-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--lb-method', lb_method,\r\n '--name', name,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id]\r\n position_names = ['admin_state_up', 'lb_method', 'name',\r\n 'protocol', 'subnet_id', 'tenant_id']\r\n position_values = [True, lb_method, name,\r\n protocol, subnet_id, tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)",
"async def test_distributed_paillier_exception(pool_http: Tuple[Pool, ...]) -> None:\n max_corruption_threshold = math.ceil(len(pool_http) / 2) - 1\n corruption_threshold = max_corruption_threshold + 1\n key_length = 64\n prime_threshold = 200\n correct_param_biprime = 20\n stat_sec_shamir = 20\n with pytest.raises(ValueError):\n _distributed_schemes = await asyncio.gather(\n *[\n DistributedPaillier.from_security_parameter(\n pool_http[i],\n corruption_threshold,\n key_length,\n prime_threshold,\n correct_param_biprime,\n stat_sec_shamir,\n distributed=False,\n )\n for i in range(len(pool_http))\n ]\n )",
"def testClientShouldNotBeAbleToConnectToNodesNodeStack(pool):\n\n async def go(ctx):\n for n in ctx.nodeset:\n n.nodestack.keep.auto = AutoMode.never\n\n nodestacksVersion = {k: v.ha for k, v in ctx.nodeset.nodeReg.items()}\n client1, _ = genTestClient(nodeReg=nodestacksVersion, tmpdir=ctx.tmpdir)\n ctx.looper.add(client1)\n with pytest.raises(NotConnectedToAny):\n await client1.ensureConnectedToNodes()\n\n pool.run(go)",
"def test_bad_uuid_blockdev_2(self):\n command_line = [\"pool\", \"extend-data\", \"poolname\", \"--device-uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def test_different_sizes(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/16\",\n subnets=[\"10.0.0.0/25\"],\n requests=[24, 25],\n expected=[\"10.0.0.128/24\", \"10.0.1.128/25\"],\n )",
"def test_autocreate_licensepool(self):\n identifier = self._identifier()\n assert [] == identifier.licensed_through\n provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection\n )\n pool = provider.license_pool(identifier)\n assert [pool] == identifier.licensed_through\n assert pool.data_source == provider.data_source\n assert pool.identifier == identifier\n assert pool.collection == provider.collection\n\n # Calling license_pool again finds the same LicensePool\n # as before.\n pool2 = provider.license_pool(identifier)\n assert pool == pool2\n\n # It's possible for a CollectionCoverageProvider to create a\n # LicensePool for a different DataSource than the one\n # associated with the Collection. Only the metadata wrangler\n # needs to do this -- it's so a CoverageProvider for a\n # third-party DataSource can create an 'Internal Processing'\n # LicensePool when some other part of the metadata wrangler\n # failed to do this earlier.\n\n # If a working pool already exists, it's returned and no new\n # pool is created.\n same_pool = provider.license_pool(\n identifier, DataSource.INTERNAL_PROCESSING\n )\n assert same_pool == pool2\n assert provider.data_source == same_pool.data_source\n\n # A new pool is only created if no working pool can be found.\n identifier2 = self._identifier()\n new_pool = provider.license_pool(\n identifier2, DataSource.INTERNAL_PROCESSING\n )\n assert new_pool.data_source.name == DataSource.INTERNAL_PROCESSING\n assert new_pool.identifier == identifier2\n assert new_pool.collection == provider.collection",
"def test_show_nas_share_by_pool(self):\n pass",
"def test_create_resource_with_invalid_target_bucket_rpc(\n self, mcg_obj, mcg_connection_factory\n ):\n connection_name = mcg_connection_factory()\n for target_bucket in (\"\", \" \", \"/*-#$%@^\"):\n response = mcg_obj.send_rpc_query(\n \"pool_api\",\n \"create_namespace_resource\",\n {\n \"name\": \"invalid_resource\",\n \"connection\": connection_name,\n \"target_bucket\": target_bucket,\n },\n )\n assert \"error\" in response.json()",
"def test_resource_combinations_rpc(\n self, ns_resource_factory, bucket_factory, platform1, platform2\n ):\n # Create the namespace resources and verify health\n ns_resource_name1 = ns_resource_factory(platform=platform1)[1]\n ns_resource_name2 = ns_resource_factory(platform=platform2)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name1,\n read_ns_resources=[ns_resource_name1, ns_resource_name2],\n )",
"def test_instance_naming_with_illegal_chars(self):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n conn = MagicMock()\n config_bad = copy.deepcopy(CONFIG)\n config_bad['cluster-name'] = \"illegal:)chars\"\n\n conn.network.networks.return_value = {\"name\": \"ext01\"}\n\n info = OSClusterInfo(NOVA, NEUTRON, CINDER, config_bad, conn)\n with self.assertRaises(SystemExit):\n # assert this raises system exit\n info.nodes_names",
"def test_two_multihops_same_intermediate_rse(rse_factory, did_factory, root_account, core_config_mock, caches_mock):\n # +------+ +------+ +------+ +------+ +------+\n # | | | | | | | | | |\n # | RSE1 +--->| RSE2 +--->| RSE3 +--->| RSE4 +--->| RSE5 |\n # | | | | | | | | | |\n # +------+ +------+ +---+--+ +------+ +------+\n # |\n # | +------+ +------+\n # | | | | |\n # +------>| RSE6 +--->| RSE7 |\n # | | | |\n # +------+ +------+\n _, _, reaper_cache_region = caches_mock\n rse1, rse1_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse2, rse2_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse3, rse3_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse4, rse4_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse5, rse5_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse6, rse6_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse7, rse7_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n all_rses = [rse1_id, rse2_id, rse3_id, rse4_id, rse5_id, rse6_id, rse7_id]\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n rse_core.set_rse_limits(rse_id=rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=rse_id, source='storage', used=1, free=0)\n distance_core.add_distance(rse1_id, rse2_id, distance=10)\n distance_core.add_distance(rse2_id, rse3_id, distance=10)\n distance_core.add_distance(rse3_id, rse4_id, distance=10)\n distance_core.add_distance(rse4_id, rse5_id, distance=10)\n distance_core.add_distance(rse3_id, rse6_id, distance=10)\n distance_core.add_distance(rse6_id, rse7_id, distance=10)\n\n did = did_factory.upload_test_file(rse1)\n rule_core.add_rule(dids=[did], account=root_account, copies=2, rse_expression=f'{rse5}|{rse7}', grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_submit(file):\n # Simulate using the mock gfal plugin a transfer failure\n file['sources'] = [set_query_parameters(s_url, {'errno': 2}) for s_url in file['sources']]\n\n # Submit the first time, but force a failure to verify that retries are correctly handled\n with patch('rucio.core.transfer.TRANSFERTOOL_CLASSES_BY_NAME', new={'fts3': _FTSWrapper}):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the transfer without simulating a failure. Everything should go as normal starting now.\n for _ in range(4):\n # for multihop, finisher works one hop at a time. 
4 is the maximum number of hops in this test graph\n finisher(once=True, partition_wait_time=0)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # one request must be submitted, but the second will only be queued\n if request_core.get_request_by_did(rse_id=rse5_id, **did)['state'] == RequestState.QUEUED:\n rse_id_second_to_last_queued, rse_id_queued = rse4_id, rse5_id\n rse_id_second_to_last_submit, rse_id_submitted = rse6_id, rse7_id\n else:\n rse_id_second_to_last_queued, rse_id_queued = rse6_id, rse7_id\n rse_id_second_to_last_submit, rse_id_submitted = rse4_id, rse5_id\n request = request_core.get_request_by_did(rse_id=rse_id_queued, **did)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse_id_submitted, **did)\n assert request['state'] == RequestState.SUBMITTED\n\n # Calling submitter again will not unblock the queued requests\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_submitted, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n request = request_core.get_request_by_did(rse_id=rse_id_queued, **did)\n assert request['state'] == RequestState.QUEUED\n\n # Once the submitted transfer is done, the submission will continue for second request (one hop at a time)\n # First of the remaining two hops submitted\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_second_to_last_queued, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # One of the intermediate replicas is eligible for deletion. Others are blocked by entries in source table\n reaper_cache_region.invalidate()\n reaper(once=True, rses=[], include_rses='|'.join([rse2, rse3, rse4, rse6]), exclude_rses=None)\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=rse_id_second_to_last_submit, **did)\n for rse_id in [rse2_id, rse3_id, rse_id_second_to_last_queued]:\n replica_core.get_replica(rse_id=rse_id, **did)\n\n # Final hop\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_queued, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # All intermediate replicas can be deleted\n reaper_cache_region.invalidate()\n reaper(once=True, rses=[], include_rses='|'.join([rse2, rse3, rse4, rse6]), exclude_rses=None)\n for rse_id in [rse2_id, rse3_id, rse4_id, rse6_id]:\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=rse_id, **did)",
"def test_pool_dependence(self):\n ok_(id(CSVarPool.pool) == id(VarPool.pool), 'mem address of pools should be the same')\n\n var = 'foo'\n app = 'test'\n VarPool(app=app).set(var, 'bar')\n eq_(CSVarPool.get(var, app=app), 'bar')\n CSVarPool(app=app).set(var, 'foo')\n eq_(VarPool.get(var, app=app), 'foo')",
"def test_destroy_nas_share_by_pool(self):\n pass",
"def test_mount_status_nas_share_by_pool(self):\n pass",
"async def test_leave_pool(self, setup, trusted_and_fee, self_hostname):\n trusted, fee = trusted_and_fee\n full_nodes, wallet_nodes, receive_address, client, rpc_cleanup = setup\n our_ph = receive_address[0]\n wallets = [wallet_n.wallet_state_manager.main_wallet for wallet_n in wallet_nodes]\n pool_ph = receive_address[1]\n full_node_api = full_nodes[0]\n if trusted:\n wallet_nodes[0].config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n else:\n wallet_nodes[0].config[\"trusted_peers\"] = {}\n\n await wallet_nodes[0].server.start_client(\n PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None\n )\n\n try:\n assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0\n\n async def have_chia():\n await farm_blocks(full_node_api, our_ph, 1)\n return (await wallets[0].get_confirmed_balance()) > 0\n\n await time_out_assert(timeout=MAX_WAIT_SECS, function=have_chia)\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n creation_tx: TransactionRecord = await client.create_new_pool_wallet(\n our_ph, \"\", 0, f\"{self_hostname}:5000\", \"new\", \"SELF_POOLING\", fee\n )\n\n await time_out_assert(\n 10,\n full_node_api.full_node.mempool_manager.get_spendbundle,\n creation_tx.spend_bundle,\n creation_tx.name,\n )\n\n await farm_blocks(full_node_api, our_ph, 6)\n assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None\n\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n summaries_response = await client.get_wallets(WalletType.POOLING_WALLET)\n assert len(summaries_response) == 1\n wallet_id: int = summaries_response[0][\"id\"]\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n assert status.current.state == PoolSingletonState.SELF_POOLING.value\n assert status.target is None\n\n join_pool_tx: TransactionRecord = (\n await client.pw_join_pool(\n wallet_id,\n pool_ph,\n \"https://pool.example.com\",\n 5,\n fee,\n )\n )[\"transaction\"]\n assert join_pool_tx is not None\n\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n assert status.current.state == PoolSingletonState.SELF_POOLING.value\n assert status.current.pool_url == \"\"\n assert status.current.relative_lock_height == 0\n assert status.current.state == 1\n assert status.current.version == 1\n\n assert status.target\n assert status.target.pool_url == \"https://pool.example.com\"\n assert status.target.relative_lock_height == 5\n assert status.target.state == 3\n assert status.target.version == 1\n\n async def status_is_farming_to_pool():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.FARMING_TO_POOL.value\n\n await time_out_assert(timeout=MAX_WAIT_SECS, function=status_is_farming_to_pool)\n\n await time_out_assert(20, wallet_is_synced, True, wallet_nodes[0], full_node_api)\n\n status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n\n leave_pool_tx: Dict[str, Any] = await client.pw_self_pool(wallet_id, fee)\n assert leave_pool_tx[\"transaction\"].wallet_id == wallet_id\n assert leave_pool_tx[\"transaction\"].amount == 1\n\n async def status_is_leaving():\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.LEAVING_POOL.value\n\n await 
time_out_assert(timeout=MAX_WAIT_SECS, function=status_is_leaving)\n\n async def status_is_self_pooling():\n # Farm enough blocks to wait for relative_lock_height\n await farm_blocks(full_node_api, our_ph, 1)\n pw_status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]\n return pw_status.current.state == PoolSingletonState.SELF_POOLING.value\n\n await time_out_assert(timeout=MAX_WAIT_SECS, function=status_is_self_pooling)\n assert len(await wallets[0].wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0\n\n finally:\n client.close()\n await client.await_closed()\n await rpc_cleanup()"
]
| [
"0.69769305",
"0.685628",
"0.6208406",
"0.6059924",
"0.60429776",
"0.5966177",
"0.59638995",
"0.59604377",
"0.5955759",
"0.5941339",
"0.59009176",
"0.5895814",
"0.5869945",
"0.58545804",
"0.58469856",
"0.5839096",
"0.582636",
"0.58109653",
"0.5797299",
"0.57923716",
"0.576472",
"0.57628304",
"0.57412934",
"0.57357836",
"0.57255805",
"0.571289",
"0.5711829",
"0.5689486",
"0.5657721",
"0.561516"
]
| 0.78499734 | 0 |
Test that creating with tpm2 does something reasonable. | def test_create_tpm(self):
command_line = self._MENU + [self._POOLNAME] + self._DEVICES + ["--clevis=tpm2"]
TEST_RUNNER(command_line) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_system_entire(self):\n pass",
"def test_create_tang_1(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--trust-url\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)",
"def test_create_tang_2(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--thumbprint=print\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)",
"def test_create_unexpected_problem(self):\n pass",
"def test_create(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.create(TOOLNAME)",
"def test_create(self):\n pass",
"def test_create_identity(self):\n pass",
"def test_create_run(self):\n pass",
"def test_create_device1(self):\n pass",
"def test_creation(sqlite_db):\n new_pass = \"TheNewPassword\"\n site = \"www.example.com\"\n response = smm.create_passwd(site, new_pass)\n assert response\n # Make sure we can't create twice.\n bad_response = smm.create_passwd(site, new_pass)\n assert not bad_response",
"def test_create_device_template(self):\n pass",
"def test_create10(self):\n pass",
"def test_create_device(self):\n pass",
"def test_create_device(self):\n pass",
"def test_T2():",
"def test_T2():",
"def test_create_key():\n\n assert symmetric.create_key() != \"\"",
"def test_simple_create(self):\n md5 = PersistableMD5()",
"def test_create_bios_policy(self):\n pass",
"def test_create_warranty(self):\n pass",
"def test_change_provisioned_throughput_usual_case():",
"def test_create_get(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n self.assertEqual(\"hello\\n\", self.output.getvalue())",
"def test_create_device_data(self):\n pass",
"def test_create_template_subsciption(self):\n pass",
"def test_0_0_create(self):\n\n self.assertTrue(self.b1)",
"def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)",
"def test_r1t2(capsys):\n helper(\n capsys=capsys,\n terminal_input=['createacct', 'login', 'atm', 'logout', 'n'],\n intput_valid_accounts=['1234568'],\n expected_tail_of_terminal_output=[\"Please enter 'yes'/'y' if you would like to start another session or 'no'/'n' if not: Thank you for using Quinterac, have a nice day!\"],\n expected_output_transactions=['EOS 0000000 000 0000000 ***']\n )",
"def test_client_verification_create(self):\n pass",
"def test_create_virtual_account_transfer(self):\n pass",
"def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")"
]
| [
"0.6962036",
"0.67384905",
"0.66820294",
"0.6649081",
"0.6562297",
"0.6489998",
"0.6451913",
"0.6450422",
"0.644342",
"0.6434589",
"0.64143276",
"0.63866884",
"0.6297511",
"0.6297511",
"0.6280332",
"0.6280332",
"0.6259234",
"0.6234679",
"0.61640173",
"0.61496794",
"0.6148087",
"0.6134859",
"0.6105317",
"0.60643595",
"0.6059986",
"0.60579115",
"0.6040331",
"0.6029623",
"0.6005352",
"0.5991445"
]
| 0.79857373 | 0 |
Tests whether an exception is raised when a mandatory attribute does not belong to the product model definition. | def test_missing_mandatory_attributes():
model_definition = {'source': {'type': 'list',
'required': True,
'persisted': True},
'resources.title': {'type': 'text',
'required': True,
'persisted': True}}
# missing language in the model
_ = ProductModelFactory(model_definition) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _clean_standalone(self):\n if not self.title:\n raise ValidationError(_(\"Your product must have a title.\"))\n if not self.product_class:\n raise ValidationError(_(\"Your product must have a product class.\"))\n if self.parent_id:\n raise ValidationError(_(\"Only child products can have a parent.\"))",
"def _must_skip(self):\n if not self.magento_record :\n return \"Product attribute can not imported because it is not importable.\"\n apply_to = self.magento_record.get('apply_to')\n if apply_to and len(apply_to) > 0 and 'simple' not in apply_to:\n return \"Product attribute can not imported because it not for simple product.\"\n return",
"def test_alright_when_non_required_field_is_missing():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': False,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.",
"def _has_valid_mandatory_properties(self):\n for prop in self.mandatory_properties:\n if not hasattr(self, prop):\n logger.error(\n \"Skipping %s: could not find information about '%s'\",\n self, prop)\n return False\n return True",
"def validate(self, attrs):\n if attrs['product_mrp'] <= 0:\n raise serializers.ValidationError(\"Price Cannot Be Zero or Negative.\")\n return attrs",
"def __mandatory_is_not_given(self):\n\n strTestName = 'Mandatory parameter must be given (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mandatory_parameter', 'Mandatory parameter')\n\n self.__parametersCheck_error(RxCSObject, ParameterMissingError, strTestName)",
"def test_lacking_required_field(self):\n\n filename = 'datapackage_schema_missing_required.json'\n self.config['datapackage_file'] = os.path.join('tests', 'fixtures', filename)\n checker = tasks.check_datapackage.DataPackageChecker(self.config)\n default_datapkg = utilities.get_default_datapackage()\n self.assertRaisesRegexp(ValueError, 'miss', checker.check_resource_schema,\n default_datapkg.resources[0], checker.datapackage.resources[0])",
"def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()",
"def test_raise_if_no_attr(self):\n self.assertRaises(AttributeError, self.Model.set_primary_key, 'asdf')",
"def test_create_a_recommendation_missing_data(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=None, relationship=Type.UP_SELL)\n self.assertRaises(DataValidationError,recommendation.create)",
"def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)",
"def _validate_item_required_attrs(self, item):\n if not getattr(item, \"title\", None):\n raise ValueError(\"Required attribute title is not set.\")\n\n if not getattr(item, \"published\", None):\n raise ValueError(\"Required attribute published is not set.\")\n\n if not getattr(item, \"link\", None):\n raise ValueError(\"Required attribute link is not set.\")\n\n return True",
"def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def testRequired(self):\n prop = make_prop()\n with self.assertRaises(ValueError):\n prop.interpret(recipe_api.PROPERTY_SENTINEL, {})",
"def test_model_formfield_doesnt_raise(self):\n try:\n fields_for_model(Color())\n except AttributeError:\n self.fail(\"Raised Attribute Error\")",
"def test_fails_if_required_attrs_not_included(self):\n\n with vcr.use_cassette('test/vcr_cassettes/badge_retrieval.yaml'):\n with self.assertRaises(exceptions.RequiredAttributesMissingError):\n # We need more attrs than just created_at\n Badge({'created_at': '2019-09-04T19:03:24Z'})",
"def check_properties(self):\r\n for prop in self.mandatory_properties:\r\n if not hasattr(self, prop):\r\n raise NameError(prop)",
"def test_attribute_not_found(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `non_existing_attribute` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute\",\n \"value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def enforce_required_fields(self, attrs):\n if self.instance is not None:\n return\n # missing_items = {\n # field_name: self.missing_message\n # for field_name in self.fields\n # if field_name not in attrs\n # }\n # if missing_items:\n # raise ValidationError(missing_items, code='required')",
"def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)",
"def _check_missing(self, key: str, value: Any):\n required = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"required\", True)\n\n if required and value is None:\n raise Exception(f\"Value for '{key}' is empty but a value is required\")",
"def validate(self, attrs):\n exception_body = []\n for orderline in attrs.get('orderlines', []):\n product = orderline['product']\n\n # If orderline has less units than available, all good.\n if orderline['units'] <= product.units:\n continue\n\n # else error is accumulated\n if product.units > 0:\n exception_body.append({product.name: 'Only {0} units available.'.format(str(product.units))})\n else:\n exception_body.append({product.name: 'Out of stock'})\n\n # If any orderline has problem, reject order.\n if exception_body:\n raise exceptions.PermissionDenied({'errors': exception_body})\n\n return attrs",
"def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")",
"def test_required():\n schema = Schema({Required('q'): 1})\n # Can't use nose's raises (because we need to access the raised\n # exception, nor assert_raises which fails with Python 2.6.9.\n try:\n schema({})\n except Invalid as e:\n assert_equal(str(e), \"required key not provided @ data['q']\")\n else:\n assert False, \"Did not raise Invalid\"",
"def _check_required_fields(self):\n assert self.title\n assert self.format",
"def test_negative_pricing(self):\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -1.00)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, -0.01)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0)\n with self.assertRaises(InvalidProductPriceException):\n Product(self.test_product_name, 0.00)\n try:\n Product(self.test_product_name, 1.00)\n Product(self.test_product_name, 0.01)\n except InvalidProductPriceException:\n self.fail(\"InvalidProductPriceException raised for positive value unexpectedly\")",
"def check_class_definition(cls):\n super().check_class_definition()\n\n if not cls.model:\n cls.definition_error('Must provide \"model\" attribute.')",
"def hasRequiredAttributes(self):\n return _libsbml.GeneProduct_hasRequiredAttributes(self)",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)"
]
| [
"0.66832054",
"0.6592669",
"0.6539923",
"0.65391064",
"0.6538347",
"0.6508393",
"0.6435435",
"0.6404906",
"0.6383828",
"0.6370085",
"0.6338571",
"0.6333693",
"0.6308506",
"0.6305229",
"0.62869805",
"0.62426364",
"0.6228736",
"0.6196969",
"0.61729705",
"0.6161835",
"0.6150827",
"0.61398816",
"0.6107066",
"0.6100368",
"0.6097649",
"0.60687774",
"0.60647964",
"0.60450006",
"0.60413504",
"0.6037162"
]
| 0.69952655 | 0 |
Tests whether the factory successfully validates a model when a nonrequired attribute is missing from the product model. | def test_alright_when_non_required_field_is_missing():
model_definition = {'language': {'type': 'fixed',
'required': True,
'persisted': True},
'source': {'type': 'list',
'required': False,
'persisted': True},
'resources.title': {'type': 'text',
'required': False,
'persisted': True}}
product1 = {'language': 'english'}
factory = ProductModelFactory(model_definition)
factory.build('product1', product1)
# Ok. No exceptions were raised. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)",
"def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_object_is_not_created_without_required_fields(self):\n data1 = self.data.copy()\n del data1[\"title\"]\n\n serializer = ProductSerializer(data=data1)\n\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"title\")[0], self.error_message)\n\n data2 = self.data.copy()\n del data2[\"description\"]\n\n serializer = ProductSerializer(data=data2)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"description\")[0], self.error_message)\n\n data3 = self.data.copy()\n del data3[\"price\"]\n\n serializer = ProductSerializer(data=data3)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"price\")[0], self.error_message)",
"def test_required_field_values_are_present():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)",
"def test_is_valid_return_only_good_products(self):\n self.assertTrue(ProductValidator().is_valid(self.good_product))\n self.assertFalse(ProductValidator().is_valid(self.bad_product))",
"def test_create_enforces_required_fields(self):\n serializer = ServiceSerializer(data = {}, context = dict(project = self.project))\n self.assertFalse(serializer.is_valid())\n required_fields = {'name', 'category'}\n self.assertCountEqual(serializer.errors.keys(), required_fields)\n for name in required_fields:\n self.assertEqual(serializer.errors[name][0].code, 'required')",
"def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()",
"def test_create_invalid_product_blank_name(self):\n product_name = \"\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.",
"def _clean_standalone(self):\n if not self.title:\n raise ValidationError(_(\"Your product must have a title.\"))\n if not self.product_class:\n raise ValidationError(_(\"Your product must have a product class.\"))\n if self.parent_id:\n raise ValidationError(_(\"Only child products can have a parent.\"))",
"def test_lacking_required_field(self):\n\n filename = 'datapackage_schema_missing_required.json'\n self.config['datapackage_file'] = os.path.join('tests', 'fixtures', filename)\n checker = tasks.check_datapackage.DataPackageChecker(self.config)\n default_datapkg = utilities.get_default_datapackage()\n self.assertRaisesRegexp(ValueError, 'miss', checker.check_resource_schema,\n default_datapkg.resources[0], checker.datapackage.resources[0])",
"def test_model_formfield_doesnt_raise(self):\n try:\n fields_for_model(Color())\n except AttributeError:\n self.fail(\"Raised Attribute Error\")",
"def test_create_a_recommendation_missing_data(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=None, relationship=Type.UP_SELL)\n self.assertRaises(DataValidationError,recommendation.create)",
"def test_create_valid_product(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def enforce_required_fields(self, attrs):\n if self.instance is not None:\n return\n # missing_items = {\n # field_name: self.missing_message\n # for field_name in self.fields\n # if field_name not in attrs\n # }\n # if missing_items:\n # raise ValidationError(missing_items, code='required')",
"def test_create_invalid_price_three_dp(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.123\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_no_user(self):\n form = self._get_form()\n self.assertTrue(self._validate_form(form), form.errors)\n self.assertRaises(IntegrityError, form.save)",
"def test_deserialize_missing_data(self):\n data = {\"product_id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)",
"def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)",
"def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_fails_if_required_attrs_not_included(self):\n\n with vcr.use_cassette('test/vcr_cassettes/badge_retrieval.yaml'):\n with self.assertRaises(exceptions.RequiredAttributesMissingError):\n # We need more attrs than just created_at\n Badge({'created_at': '2019-09-04T19:03:24Z'})",
"def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. \"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()",
"def test_invalid_model(self):\n self.assertRaises(ModelNotFoundError, lambda: ModelContainer('web', 'model').model_cls)",
"def test_001_validate_with_bad_properties(self):\n m = schematics_flexible.BaseFlexible(\n {'code': '06',\n 'properties': {\"a\": \"this is test\"}},\n store_handler=get_mock())\n try:\n m.validate()\n except schematicsValidationError:\n pass\n else:\n self.assertTrue(False,\n 'Model must raise exception when validate raise')",
"def test_no_errors(self):\n try:\n field_name_validator('good_field_name')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')",
"def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"async def test_create_missing_field(self):\n # the \"value\" field is missing\n data = {'id': 'foo'}\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.create(data)\n self.assertEqual(\n 'Error: \"value\": Required', str(cm.exception))",
"def _check_required_fields(self):\n assert self.title\n assert self.format",
"def test_manufacturer_bulk_import_invalid(self):\n form = ManufacturerBulkImportForm(data={\"pk\": \"\"})\n\n self.assertFalse(form.is_valid())",
"def validate(self, attrs):\n if attrs['product_mrp'] <= 0:\n raise serializers.ValidationError(\"Price Cannot Be Zero or Negative.\")\n return attrs"
]
| [
"0.7707438",
"0.702368",
"0.7003859",
"0.68904054",
"0.6742272",
"0.6715958",
"0.6712021",
"0.668092",
"0.662",
"0.65714717",
"0.6567007",
"0.6544035",
"0.6535288",
"0.6520894",
"0.65018946",
"0.64207494",
"0.6395853",
"0.6385905",
"0.6313941",
"0.6300126",
"0.6299455",
"0.6260325",
"0.61974555",
"0.6190295",
"0.618872",
"0.61834913",
"0.61777735",
"0.61760825",
"0.6156753",
"0.61525685"
]
| 0.76279354 | 1 |
Tests whether the factory successfully validates a model when a required attribute is missing from the product model, but a default value is given. | def test_alright_when_required_field_is_missing_but_default_is_given():
model_definition = {'language': {'type': 'fixed',
'required': True,
'persisted': True,
'default': 'portuguese'},
'source': {'type': 'list',
'required': False,
'persisted': True}}
product1 = {'source': ['Whatever']}
factory = ProductModelFactory(model_definition)
factory.build('product1', product1)
# Ok. No exceptions were raised. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)",
"def test_alright_when_non_required_field_is_missing():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': False,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.",
"def test_required_field_values_are_present():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)",
"def is_required(self):\r\n return self.default == self.NotSpecified",
"def has_default(model_field: DataclassCreationFields) -> bool:\n return (model_field.field.default is not dataclasses.MISSING) or (\n model_field.field.default_factory is not dataclasses.MISSING\n )",
"def test_lacking_required_field(self):\n\n filename = 'datapackage_schema_missing_required.json'\n self.config['datapackage_file'] = os.path.join('tests', 'fixtures', filename)\n checker = tasks.check_datapackage.DataPackageChecker(self.config)\n default_datapkg = utilities.get_default_datapackage()\n self.assertRaisesRegexp(ValueError, 'miss', checker.check_resource_schema,\n default_datapkg.resources[0], checker.datapackage.resources[0])",
"def testRequired(self):\n prop = make_prop()\n with self.assertRaises(ValueError):\n prop.interpret(recipe_api.PROPERTY_SENTINEL, {})",
"def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def _model_definition_validate(self):\n try:\n assert isinstance(self.__class__.MODEL_TYPE, str)\n assert (isinstance(self.__class__.PRIMARY_KEY, str) or\n self.__class__.PRIMARY_KEY is None)\n assert isinstance(self.__class__.PRIORITY, int)\n for key in self.__class__.MODEL:\n assert re.match(\"^\" + KEY_RE_CONSTRAINT + \"$\", key)\n assert 'name' in self.__class__.MODEL\n except:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable\" % (\n self.__class__.MODEL_TYPE))\n\n if self.__class__.PRIMARY_KEY and self.__class__.PRIMARY_KEY != 'name':\n if self.__class__.PRIMARY_KEY not in self.__class__.MODEL:\n raise ModelInvalidException(\n \"Model %s primary key %s does not exists\" % (\n self.__class__.MODEL_TYPE,\n self.__class__.PRIMARY_KEY))\n\n if not self.__class__.MODEL[self.__class__.PRIMARY_KEY][2]:\n raise ModelInvalidException(\n \"Model %s primary key %s should be mandatory\" % (\n self.__class__.MODEL_TYPE,\n self.__class__.PRIMARY_KEY))\n\n for constraints in self.__class__.MODEL.values():\n if len(constraints) != 6:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable \"\n \"(missing field)\" % (\n self.__class__.MODEL_TYPE))\n\n try:\n # Be sure default values are of the declared type\n # make some others validation on default value\n for key, constraints in self.__class__.MODEL.items():\n # Only act on non-mandatory keys as default\n # is provided. Skip 'name' checking.\n if not constraints[2] and key != 'name':\n # Validate default value type\n assert isinstance(constraints[3],\n constraints[0])\n # Validate default value match the regexp\n # if str type\n if constraints[0] is str:\n assert re.match(constraints[1],\n constraints[3])\n # Validate list default values match the regexp\n # if list type\n if isinstance(constraints[0], list):\n assert all([re.match(constraints[1], c) for\n c in constraints[3]]) is True\n except:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable \"\n \"(Wrong default value according to the type \"\n \"or regex)\" % (\n self.__class__.MODEL_TYPE))\n\n # Validate the callbacks of the inherited model\n try:\n # Be sure we have only the authorized callbacks\n assert len(set(AUTHORIZED_CALLBACKS).symmetric_difference(\n set(self.__class__.CALLBACKS))) is 0\n # Be sure the callbacks are callable or NotImplemented\n for key, callback in self.__class__.CALLBACKS.items():\n if (not callable(callback)\n and callback is not NotImplementedError):\n raise Exception\n except:\n raise ModelInvalidException(\n \"Model %s callbacks are invalid, model is not usable\" % (\n self.__class__.MODEL_TYPE))",
"def test_create_invalid_product_blank_name(self):\n product_name = \"\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def validate_default_value(self):\n if self.has_default_value:\n if not self.is_valid_value(self.default):\n raise AttributeSchemaError(\n \"Default value '%s' is not compliant with the schema\"\n )",
"def test_validate_has_default(self, args, value):\n sch = scheme.Scheme(*args)\n sch.validate(value)",
"def required(self) -> bool:\n return self._default is None",
"def check_required(self):\n for argument in self.arguments:\n if argument.required:\n raise ArgumentRequiredError(argument, self.tagname)\n else:\n self.kwargs[argument.name] = argument.get_default()",
"def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()",
"def test_create_enforces_required_fields(self):\n serializer = ServiceSerializer(data = {}, context = dict(project = self.project))\n self.assertFalse(serializer.is_valid())\n required_fields = {'name', 'category'}\n self.assertCountEqual(serializer.errors.keys(), required_fields)\n for name in required_fields:\n self.assertEqual(serializer.errors[name][0].code, 'required')",
"def test_data_model_is_not_none_by_default():\n # Given\n context = DataContext()\n\n # Then\n assert context.photo_model is not None",
"def test_schema_default_missing_validator_combinations(test_case):\n evaluate_test_cases([test_case])",
"def _validate_usage_of_optional(self) -> None:\n # Because None can be the default value, None cannot be used to to indicate no default. This is why we need the optional field. This check prevents users of InputSpec from setting these two values to an inconsistent state, forcing users of InputSpec to be explicit about optionality.\n if self.optional is False and self.default is not None:\n raise ValueError(\n f'`optional` argument to {self.__class__.__name__} must be True if `default` is not None.'\n )",
"def test_basemodel_basic_instance_none(self):\n with self.assertRaises(TypeError):\n BaseModel(None)",
"def test_default_required(self):\n schema = yaml.load(self.yaml_multiple_term, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n\n document = {'abundance': 'many'}\n val.validate(document)\n self.assertEqual(val.errors, {'eventDate': ['required field']})\n\n document = {'eventDate': '2018-01-01'}\n val.validate(document)\n self.assertEqual(val.errors, {})",
"def is_required(self, field):\n return field.scheme.is_required and not field.scheme.is_pk",
"def test_create_with_default_attributes(self):\n\n x = NotRequiredModel()\n x.tr_title = \"DEFAULT_TRANS_TITLE\"\n\n self.assertNumQueries(2, lambda: x.save()) # master and translation object created\n self.assertEqual(sorted(x.get_available_languages()), [self.conf_fallback])",
"def test_default(self):\n self.assertEqual(self.model.frozen(), False)",
"def test_create_valid_product(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_create_a_recommendation_missing_data(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=None, relationship=Type.UP_SELL)\n self.assertRaises(DataValidationError,recommendation.create)",
"def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)",
"def _validate_required_field(field_name, field_value, prefix='', **kwargs):\n if prefix:\n field_name = prefix + '__' + field_name\n\n if not field_value:\n raise AssertionError(\n \"Missing required Job Definition field: {0}\".format(field_name)\n )",
"def test_custom_required(self):\n for data in ({}, {'payment_amount': ''}):\n form = DonationAmountForm(data=data)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertTrue('payment_amount' in errors)\n self.assertEqual('required', errors['payment_amount'][0].code)",
"def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)"
]
| [
"0.7500169",
"0.74960834",
"0.7320718",
"0.6709409",
"0.64118165",
"0.6393792",
"0.63357896",
"0.63171214",
"0.62877655",
"0.62474984",
"0.62145287",
"0.6192724",
"0.61926067",
"0.61849076",
"0.6179737",
"0.61769146",
"0.6107873",
"0.61037236",
"0.60997593",
"0.6051574",
"0.60071164",
"0.60024846",
"0.5993906",
"0.5988448",
"0.598559",
"0.59744203",
"0.5971893",
"0.5957633",
"0.5933431",
"0.59191006"
]
| 0.7979957 | 0 |
Tests the calculation of the similarity of two products based on a 'numeric' attribute. | def test_similarity_numeric():
similarity = pm.compute_similarity_for_numeric(900, 800)
nose.tools.ok_(abs(similarity - 8/9) < tests.FLOAT_DELTA, "Wrong numeric similarity") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def similarity(self, e1, e2):\n\t\tpass",
"def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)",
"def similarity_score(self, lhs, rhs):\n pass",
"def text_proximity(str_1: str, str_2: str) -> float:\n tokens_1 = Counter(str_1.split(' '))\n tokens_2 = Counter(str_2.split(' '))\n return _normalized_scalar_product(tokens_1, tokens_2)",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def test_product(self):\n self.assertEqual(functions.product(2, 2), 4)\n self.assertEqual(functions.product(2, -2), -4)",
"def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim",
"def similarity(pair: Tuple[Text, Text]) -> float:\n (a, b) = pair\n missing = (\n True\n if any(symbol not in Metrics.realine.feature_matrix for symbol in pair)\n else False\n )\n return 0.0 if missing else 1 - Metrics.realine.delta(a, b)",
"def test_similarity_list():\n list1 = [\"a\", \"b\", \"c\"]\n list2 = [\"b\", \"c\", \"d\", \"e\"]\n similarity = pm.compute_similarity_for_list(list1, list2)\n nose.tools.ok_(abs(similarity - 2/3) < tests.FLOAT_DELTA, \"Wrong list similarity\")\n similarity = pm.compute_similarity_for_list(list2, list1) # intentionally asymmetric\n nose.tools.ok_(abs(similarity - 1/2) < tests.FLOAT_DELTA, \"Wrong list similarity\")",
"def test_scalar_multiplication(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n a2 = a1 * 3.5\n a3 = a1 * 0.5\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 3.5, -7, 10.5, -14))\n self.assertEqual(a3,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))",
"def test_mul(x, y):\n\n assert mul(x, y) == mul(y, x)",
"def test_cosine_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def get_similarity(user1: Rating, user2: Rating) -> float:\n shared = 0.0\n for m_id in user1:\n if m_id in user2:\n shared += user1[m_id] * user2[m_id]\n norm1 = 0.0\n for m_id in user1:\n norm1 = norm1 + user1[m_id] ** 2\n norm2 = 0.0\n for m_id in user2:\n norm2 = norm2 + user2[m_id] ** 2\n return (shared * shared) / (norm1 * norm2)",
"def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)",
"def test_dice_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity.py_func(vector1, vector1)\n score12 = dice_similarity.py_func(vector1, vector2)\n score22 = dice_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)",
"def inner_product_similarity(a: torch.Tensor, b: torch.Tensor, dim=1) -> torch.Tensor:\n outputs = (a * b).sum(dim=dim)\n return outputs",
"def support(self, *mass_functions):\n result = 0\n for mass_function in mass_functions:\n result += self.similarity(mass_function)\n return round(result, 6)",
"def test_sad_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n sad = sad_similarity_measure(patch1, patch2)\n\n assert np.isclose(sad, 3.6, atol=1e-2)",
"def test_cosine_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def wordSimilarityRatio(sent_1,sent_2):",
"def similarity_function(x, y):\n\n def safe_get(field, row, default_value):\n # Safely get a value from the Row. If the value is None, get the\n # default value.\n return row[field] if row[field] is not None else default_value\n\n # Extract the values for the categorical and continuous features for both\n # the x and y samples. Use an empty string as the default value for missing\n # categorical fields and 0 for the continuous ones.\n x_categorical_features = [safe_get(k, x, \"\") for k in CATEGORICAL_FEATURES]\n x_continuous_features = [safe_get(k, x, 0) for k in CONTINUOUS_FEATURES]\n y_categorical_features = [safe_get(k, y, \"\") for k in CATEGORICAL_FEATURES]\n y_continuous_features = [safe_get(k, y, 0) for k in CONTINUOUS_FEATURES]\n\n # Here a larger distance indicates a poorer match between categorical variables.\n j_d = distance.hamming(x_categorical_features, y_categorical_features)\n j_c = distance.canberra(x_continuous_features, y_continuous_features)\n\n # Take the product of similarities to attain a univariate similarity score.\n # Add a minimal constant to prevent zero values from categorical features.\n # Note: since both the distance function return a Numpy type, we need to\n # call the |item| function to get the underlying Python type. If we don't\n # do that this job will fail when performing KDE due to SPARK-20803 on\n # Spark 2.2.0.\n return abs((j_c + 0.001) * j_d).item()",
"def calc_similarity(lhs, rhs):\n lhs_decomp = decompose(lhs)\n rhs_decomp = decompose(rhs)\n dist = editdistance.eval(lhs_decomp, rhs_decomp)\n max_len = max(len(lhs_decomp), len(rhs_decomp))\n sim = float(max_len - dist) / float(max_len)\n logging.debug('SIM: [%s] vs [%s] ==> %d / %d = %f', lhs.encode('UTF-8'), rhs.encode('UTF-8'),\n max_len - dist, max_len, sim)\n return sim",
"def test_dice_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def __mul__(self, other):\r\n return self.prod(other)",
"def test_observable_mult(obs_a, obs_b, result):\n o = _observable_mult(obs_a, obs_b)\n assert o.compare(result)",
"def test_mul():\n assert_equal(Vector(3, 1) * 2, Vector(6, 2))\n assert_equal(2 * Vector(3, 1), Vector(6, 2))",
"def __getSimilarityScore(expected, actual):\n return SequenceMatcher(None, expected, actual).ratio()"
]
| [
"0.67872584",
"0.67387223",
"0.65631735",
"0.63529235",
"0.63501847",
"0.6342312",
"0.6342312",
"0.63209295",
"0.623091",
"0.62212586",
"0.6199165",
"0.61860114",
"0.61807173",
"0.6159763",
"0.6153235",
"0.61523235",
"0.61315036",
"0.60996103",
"0.60927147",
"0.60767853",
"0.6055979",
"0.60400146",
"0.6008313",
"0.60081506",
"0.60039365",
"0.59888095",
"0.59750974",
"0.5973111",
"0.59380823",
"0.58945954"
]
| 0.72086656 | 0 |
Tests the calculation of the similarity of two products based on a 'date' attribute. | def test_similarity_date():
date1 = dt.datetime(2000, 11, 24, 10, 0)
date2 = dt.datetime(2000, 11, 26, 10, 0)
similarity = pm.compute_similarity_for_date(date1, date2, halflife=2)
nose.tools.ok_(abs(similarity - 0.5) < tests.FLOAT_DELTA, "Wrong date similarity") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def similarity(self, e1, e2):\n\t\tpass",
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def compare_state_demand(\n a: pd.DataFrame, b: pd.DataFrame, scaled: bool = True\n) -> pd.DataFrame:\n if not a[\"utc_datetime\"].equals(b[\"utc_datetime\"]):\n raise ValueError(\"Datetime columns do not match\")\n field = \"scaled_demand_mwh\" if scaled else \"demand_mwh\"\n df = pd.DataFrame(\n {\n \"year\": a[\"utc_datetime\"].dt.year,\n \"diff\": a[field] - b[\"demand_mwh\"],\n }\n )\n return df.groupby([\"year\"], as_index=False)[\"diff\"].agg(\n {\n \"rmse\": lambda x: np.sqrt(np.sum(x**2) / x.size),\n \"mae\": lambda x: np.sum(np.abs(x)) / x.size,\n }\n )",
"def cmpArtworkByDateAcquired(artwork1, artwork2):\n\n strdateArt1= artwork1['DateAcquired']\n if len(strdateArt1) == 0:\n return False\n year1=int(strdateArt1[0]+strdateArt1[1]+strdateArt1[2]+strdateArt1[3])\n month1=int(strdateArt1[5]+strdateArt1[6])\n day1=int(strdateArt1[8]+strdateArt1[9])\n dateArt1=datetime.datetime(year1,month1,day1)\n\n strdateArt2= artwork2['DateAcquired']\n if len(strdateArt2) == 0:\n return True\n year2=int(strdateArt2[0]+strdateArt2[1]+strdateArt2[2]+strdateArt2[3])\n month2=int(strdateArt2[5]+strdateArt2[6])\n day2=int(strdateArt2[8]+strdateArt2[9])\n dateArt2=datetime.datetime(year2,month2,day2)\n\n if dateArt1 < dateArt2:\n return True\n else:\n return False",
"def cmpArtworkByDate(artwork1, artwork2):\n return (lt.firstElement(artwork1)['Date'] < lt.firstElement(artwork2)['Date'])",
"def compute_user_similarity(d1, d2, ave_rat1, ave_rat2):\n \n movie_1=set()\n movie_2=set()\n for movie,value in d1.items():\n movie_1.add(movie) \n for movie, value in d2.items():\n movie_2.add(movie) \n common_movie_rated=movie_1.intersection(movie_2)\n if (len(common_movie_rated)==0):\n return 0.0\n num = 0\n den1 = 0\n den2 = 0\n for common_movie in common_movie_rated:\n num += (float(d1[common_movie])-ave_rat1)*(float(d2[common_movie])-ave_rat2)\n den1 += ((float(d1[common_movie])-ave_rat1)**2)\n den2 += ((float(d2[common_movie]) - ave_rat2) ** 2)\n # When the user gives same rating to all the movies\n try:\n user_sim = num/((den1*den2)**0.5)\n except ZeroDivisionError:\n return 0.0\n return user_sim",
"def test_date1_equal_date2(self):\n date1 = datetime.date(2014, 11, 29)\n date2 = datetime.date(2014, 11, 29)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))",
"def test_similarity_for_request(self):\n request1 = factories.RequestFactory(audit_id=self.audit.id)\n request2 = factories.RequestFactory(audit_id=self.audit.id)\n\n self.make_relationships(request1, [self.control, self.regulation])\n\n requests_by_request = Request.get_similar_objects_query(\n id_=request1.id,\n types=[\"Request\"],\n threshold=0,\n ).all()\n\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in requests_by_request},\n {(\"Request\", request2.id, 5)},\n )\n\n requests_by_assessment = Assessment.get_similar_objects_query(\n id_=self.assessment.id,\n types=[\"Request\"],\n threshold=0,\n ).all()\n\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in requests_by_assessment},\n {(\"Request\", request1.id, 18),\n (\"Request\", request2.id, 5)},\n )\n\n assessments_by_request = Request.get_similar_objects_query(\n id_=request1.id,\n types=[\"Assessment\"],\n threshold=0,\n ).all()\n\n other_assessments = {\n (\"Assessment\", assessment.id, self.id_weight_map[assessment.id])\n for assessment in self.other_assessments\n }\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in assessments_by_request},\n {(\"Assessment\", self.assessment.id, 18)}.union(other_assessments),\n )",
"def test_observable_mult(obs_a, obs_b, result):\n o = _observable_mult(obs_a, obs_b)\n assert o.compare(result)",
"def test_similarity_list():\n list1 = [\"a\", \"b\", \"c\"]\n list2 = [\"b\", \"c\", \"d\", \"e\"]\n similarity = pm.compute_similarity_for_list(list1, list2)\n nose.tools.ok_(abs(similarity - 2/3) < tests.FLOAT_DELTA, \"Wrong list similarity\")\n similarity = pm.compute_similarity_for_list(list2, list1) # intentionally asymmetric\n nose.tools.ok_(abs(similarity - 1/2) < tests.FLOAT_DELTA, \"Wrong list similarity\")",
"def calculate_similarity( self, rest1, rest2 ):\n\n\t\t# obtain the number of common (same) reviewers \n\t\trest1_reviewers = self.df[ self.df['business_id'] == rest1 ]['user_id'].unique()\n\t\trest2_reviewers = self.df[ self.df['business_id'] == rest2 ]['user_id'].unique()\n\t\tcommon_reviewers = set(rest1_reviewers).intersection(rest2_reviewers)\n\t\tn_common = len(common_reviewers)\n\n\t\t# obtain the sub-dataframe of the common reviewer's reviews\n\t\t# and calculate the pearson similiarity \n\t\trest1_reviews = self.get_restaurant_reviews( restaurant_id = rest1, \n\t\t\t\t\t\t\t\t\t\t\t\t\t set_of_users = common_reviewers )\n\t\trest2_reviews = self.get_restaurant_reviews( restaurant_id = rest2, \n\t\t\t\t\t\t\t\t\t\t\t\t\t set_of_users = common_reviewers )\n\t\tsim = self.pearson_sim( n_common = n_common, \n\t\t\t\t\t\t\t\trest1_reviews = rest1_reviews, \n\t\t\t\t\t\t\t\trest2_reviews = rest2_reviews )\n\t\treturn sim, n_common",
"def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date = set([])\n for year in self.years:\n for my_date in self.dates:\n input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n diff = expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))",
"def test_dates_and_Datetimes(self):\n if self.skip_tests:\n return\n recipe = (\n self.recipe()\n .dimensions(\"year_by_format\")\n .metrics(\"count\")\n .order_by(\"year_by_format\")\n )\n self.assertRecipeCSV(\n recipe,\n \"\"\"\n year_by_format,count,year_by_format_id\n 2005-01-01 00:00:00,1,2005-01-01 00:00:00\n 2013-01-01 00:00:00,1,2013-01-01 00:00:00\n \"\"\",\n )\n recipe = (\n self.recipe()\n .dimensions(\"year_by_format\")\n .metrics(\"count\")\n .order_by(\"-year_by_format\")\n )\n self.assertRecipeCSV(\n recipe,\n \"\"\"\n year_by_format,count,year_by_format_id\n 2013-01-01 00:00:00,1,2013-01-01 00:00:00\n 2005-01-01 00:00:00,1,2005-01-01 00:00:00\n \"\"\",\n )\n\n # Test a month() conversion\n recipe = (\n self.recipe()\n .dimensions(\"test_month\")\n .metrics(\"age\", \"count\")\n .order_by(\"-test_month\")\n )\n self.assertRecipeCSV(\n recipe,\n \"\"\"\n test_month,age,count,test_month_id\n 2015-05-01,10,1,2015-05-01\n 2015-01-01,5,1,2015-01-01\n \"\"\",\n )",
"def volume_similarity_pd(pd1,pd2):\n\tvolume_similarity = {}\n\n\t# print(\"aaaaa\")\n\n\t# union = vtk.vtkBooleanOperationPolyDataFilter()\n\t# union.SetOperationToDifference()\n\t# union.SetInputData(0,pd1)\n\t# union.SetInputData(1,pd2)\n\t# union.Update()\n\t# u = union.GetOutput()\n\n\t# massUnion = vtk.vtkMassProperties()\n\t# massUnion.SetInputData(u)\n\n\t# intersection = vtk.vtkBooleanOperationPolyDataFilter()\n\t# intersection.SetOperationToIntersection()\n\t# intersection.SetInputData(0,pd1)\n\t# intersection.SetInputData(1,pd2)\n\t# intersection.Update()\n\t# i = intersection.GetOutput()\n\t# massIntersection = vtk.vtkMassProperties()\n\t# massIntersection.SetInputData(i)\n\n\t# # metrics\n\t# tqdm.write(\"intersection vol: {:.2f}\".format(massIntersection.GetVolume()))\n\t# tqdm.write(\"union vol: {:.2f}\".format(massUnion.GetVolume()))\n\n\t# volume_similarity[\"jaccard\"] = 1 - massIntersection.GetVolume()/massUnion.GetVolume()\n\n\t# tqdm.write(\"Jaccard distance: {:.2f}\".format(volume_similarity[\"jaccard\"]))\n\n\thausdorffDistFilter = vtk.vtkHausdorffDistancePointSetFilter()\n\thausdorffDistFilter.SetInputData(0, pd1)\n\thausdorffDistFilter.SetInputData(1, pd2)\n\thausdorffDistFilter.Update()\n\n\tvolume_similarity[\"hausdorff\"] = hausdorffDistFilter.GetHausdorffDistance()\n\tvolume_similarity[\"relative0\"] = hausdorffDistFilter.GetRelativeDistance()[0]\n\tvolume_similarity[\"relative1\"] = hausdorffDistFilter.GetRelativeDistance()[1]\n\ttqdm.write(\"Hausdorff distance: {:.2f} mm\".format(volume_similarity[\"hausdorff\"]))\n\ttqdm.write(\"Relative distance from pd1 to pd2: {:.2f} mm\".format(volume_similarity[\"relative0\"]))\n\ttqdm.write(\"Relative distance from pd2 to pd1: {:.2f} mm\".format(volume_similarity[\"relative1\"]))\n\n\treturn volume_similarity, hausdorffDistFilter.GetOutput(0), hausdorffDistFilter.GetOutput(1)",
"def recommendations_similarity(aData, needed_param, user, products, n = 10, simfunc = sim_cosine):\n table_CF = preproc.make_CF_table(aData, needed_param)\n sim_measures_table = simfunc(table_CF) \n \n scores = sim_measures_table.dot(table_CF)\n mean_scores = np.array(np.sum(sim_measures_table, axis=1).T)\n mean_scores = pd.DataFrame(np.tile(mean_scores, (scores.shape[1],1))).T\n predicted_ratings = np.divide(scores, np.absolute(mean_scores))\n \n ratings = predicted_ratings[user].order(ascending= False)\n ratings = ratings[0:n]\n \n return (ratings.index[ratings.index.isin(products)==False])",
"def assertDateEqual(self, date1, date2):\n date1 = date1.replace(microsecond=0)\n date2 = date2.replace(microsecond=0)\n self.assertEqual(date1, date2)",
"def compute_price(self, date = None):\n\t\tif date is None:\n\t\t\tdate = datetime.now()\n\t\tself.price = 0\n\t\t# Getting list of product in cart\n\t\tcontent = self.cart.cart_content_set.all()\n\t\t# Dictionnary in order to compute minimum state of multi promotion\n\t\tstate = {\n\t\t\t'products':{},\n\t\t\t'promotions':{}\n\t\t}\n\t\trequirements = {}\n\n\t\tfor element in content:\n\t\t\tproduct = element.product\n\t\t\tquantity = element.quantity\n\n\t\t\t# First look for promotion\n\t\t\tsimple_promotions = product.promotion_set.filter(end__gte = date, type = 's').distinct('reference', 'end').order_by('-end', 'reference')\n\t\t\tmulti_promotions = product.promotion_set.filter(end__gte = date, type = 'm').distinct('reference', 'end').order_by('-end', 'reference')\n\t\t\tif len(simple_promotions)>0:\n\t\t\t\tpromotion = simple_promotions[0]\n\t\t\t\tself.price = self.price + quantity*promotion.after\n\t\t\t\n\t\t\telif len(multi_promotions)>0:\n\t\t\t\tfor promotion in multi_promotions:\n\t\t\t\t\tprice_before = promotion.before\n\t\t\t\t\tprice_after = promotion.after\n\t\t\t\t\tcontent = [ (p, 1) for p in promotion.content.all()]\n\t\t\t\t\tfound, requirement = self.get_promotion_requirement(content, price_before)\n\t\t\t\t\tif found and requirement is not None:\n\t\t\t\t\t\trequirements[promotion.id] = { p.id:q for p, q in requirement} # updating promotion multi requirements\n\n\t\t\t\t\t# Updating promotion multi state\n\t\t\t\t\tprod, price = self.get_simple_price([{'product':product, 'quantity':1}], date)[0]\n\t\t\t\t\t# print quantity\n\t\t\t\t\tstate['products'][product.id] = {'price': price, 'qte':quantity}\n\t\t\t\t\t# print state['products'][product.id]\n\t\t\t\t\tstate['promotions'][promotion.id] = {'price': price_after, 'qte':0}\n\t\t\telse:\n\t\t\t\thistory = product.history_set.filter(created__gte = date-timedelta(hours = 24)).order_by('-created')\n\t\t\t\tif len(history)>0:\n\t\t\t\t\tself.price = self.price + quantity*history[0].price\n\t\t\t\telse:\n\t\t\t\t\thistory = product.history_set.all().order_by('-created')\n\t\t\t\t\tif len(history)>0:\n\t\t\t\t\t\tself.price = self.price + quantity*history[0].price\n\n\t\t# Dealing with multi promotion:\n\t\tmin_state, min_price = self.get_min_state(state, requirements)\n\t\tself.price = self.price + min_price\n\n\t\tself.save()\n\n\t\treturn self.price",
"def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)",
"def test_get_rate_plan_by_product_and_rate_plan(self):\n pass",
"def cmpArtworkByDateAcquired(artwork1, artwork2):\n return artwork1['DateAcquired'] < artwork2['DateAcquired']",
"def test_same_distances(self):\n \n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tprint(distances)",
"def test_dice_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity.py_func(vector1, vector1)\n score12 = dice_similarity.py_func(vector1, vector2)\n score22 = dice_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n\n for element in d1:\n total += d1[element]\n\n for item in d2:\n if item in d1:\n score += math.log(d1[item]/total) * (d2[item])\n else:\n score += math.log(0.5/total) * (d2[item])\n return score",
"def test_dice_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n for key in d1:\n total += d1[key]\n for item in d2:\n if item in d1:\n score += d2[item] * math.log(d1[item]/total)\n else:\n score += d2[item] * math.log(0.5/total)\n return score",
"def compare_results_data(result1, result2):\n def average(r1, r2, delta):\n return (r2 - r1) / delta\n\n if (result1 and result2) and (len(result1) and len(result2)):\n results = result1.copy()\n\n time_delta = round(float(result2['timestamp']) - float(result1['timestamp']))\n results['time_delta'] = time_delta\n\n for key in result1['data'].keys():\n if key in result2['data']:\n results['data'][key]['value'] = round(\n average(float(result1['data'][key]['value']), float(result2['data'][key]['value']), time_delta),\n 2)\n\n results['timestamp'] = time.time()\n\n return results\n \n return {}",
"def test_expression_dates(self):\n import datetime\n import time\n time1 = datetime.datetime.now()\n time.sleep(0.01)\n time2 = datetime.datetime.now()\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")",
"def test_computeViewReturnsCorrectDataAccordingToTheDate(self):\n tle = TLE.objects.findByCatalogEntryAndTime(\n CatalogEntry.objects.first(),\n format_inline_time('20170825200000')\n )\n sc = SatelliteComputation(tle=tle)\n\n sc.observer.date = '2017/8/25 20:00:00'\n expected_data_1 = sc.compute()\n\n sc.observer.date = '2017/8/25 20:00:01'\n expected_data_2 = sc.compute()\n\n response = self.client.get('/api/v1/compute/25544/?time=20170825200000')\n content = response.content.decode('utf8')\n json_data_1 = json.loads(content)\n del json_data_1['tle']\n\n response = self.client.get('/api/v1/compute/25544/?time=20170825200001')\n content = response.content.decode('utf8')\n json_data_2 = json.loads(content)\n del json_data_2['tle']\n\n self.assertEquals(json_data_1, expected_data_1)\n self.assertEquals(json_data_2, expected_data_2)",
"def compare_series(series_a, series_b):\n return {\n 'rmse': ((series_a - series_b) ** 2).mean() ** 0.5,\n 'mbe': (series_b - series_a).mean(),\n 'mae': abs(series_b - series_a).mean(),\n 'rsqr': stats.linregress(series_a, series_b).rvalue ** 2,\n }",
"def compare_dictionaries(d1, d2):\r\n score = 0\r\n gef = 0\r\n for z in d1:\r\n gef += d1[z]\r\n total = gef\r\n \r\n for x in d2:\r\n if x in d1:\r\n score += math.log(d1[x] / total) * d2[x] \r\n else:\r\n score += math.log(0.5/total) * d2[x]\r\n return score"
]
| [
"0.6140461",
"0.58180696",
"0.5778696",
"0.57648724",
"0.56924784",
"0.56461555",
"0.559752",
"0.55854833",
"0.55672175",
"0.55280447",
"0.55000603",
"0.54928476",
"0.54652286",
"0.5449586",
"0.5445474",
"0.5440284",
"0.5421769",
"0.54209423",
"0.54195374",
"0.54045564",
"0.53903526",
"0.53605855",
"0.5347944",
"0.53443766",
"0.5344166",
"0.53430104",
"0.53387177",
"0.5335523",
"0.53157496",
"0.53000176"
]
| 0.77693075 | 0 |
Tests the calculation of the similarity of two products based on a 'fixed' attribute. | def test_similarity_fixed():
similarity = pm.compute_similarity_for_fixed("Rio de Janeiro", "São Paulo")
nose.tools.eq_(similarity, 0, "Wrong fixed similarity")
similarity = pm.compute_similarity_for_fixed("Rio de Janeiro", "Rio de Janeiro")
nose.tools.eq_(similarity, 1, "Wrong fixed similarity") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def similarity(pair: Tuple[Text, Text]) -> float:\n (a, b) = pair\n missing = (\n True\n if any(symbol not in Metrics.realine.feature_matrix for symbol in pair)\n else False\n )\n return 0.0 if missing else 1 - Metrics.realine.delta(a, b)",
"def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)",
"def similarity(self, e1, e2):\n\t\tpass",
"def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)",
"def test_sad_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n sad = sad_similarity_measure(patch1, patch2)\n\n assert np.isclose(sad, 3.6, atol=1e-2)",
"def test_similarity_measure_size_compatibility():\n\n patch1 = torch.randn(size=(4, 6, 2))\n patch2 = torch.randn(size=(4, 6, 2))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(4, 3))\n patch2 = torch.randn(size=(4, 3))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(5,))\n patch2 = torch.randn(size=(5,))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successfull\n\n patch1 = torch.randn(size=(3, 7, 2, 4))\n patch2 = torch.randn(size=(3, 7, 2, 4))\n\n ssd_similarity_measure(patch1, patch2)\n sad_similarity_measure(patch1, patch2)\n assert True # just check if the ssd calculation was successful",
"def test_poincare_distance(self):\n vector_1 = self.vectors['dog.n.01']\n vector_2 = self.vectors['mammal.n.01']\n\n distance = self.vectors.vector_distance(vector_1, vector_2)\n self.assertTrue(np.allclose(distance, 4.5278745))\n\n distance = self.vectors.vector_distance(vector_1, vector_1)\n self.assertTrue(np.allclose(distance, 0))",
"def test_dice_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity.py_func(vector1, vector1)\n score12 = dice_similarity.py_func(vector1, vector2)\n score22 = dice_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def test_similarity_list():\n list1 = [\"a\", \"b\", \"c\"]\n list2 = [\"b\", \"c\", \"d\", \"e\"]\n similarity = pm.compute_similarity_for_list(list1, list2)\n nose.tools.ok_(abs(similarity - 2/3) < tests.FLOAT_DELTA, \"Wrong list similarity\")\n similarity = pm.compute_similarity_for_list(list2, list1) # intentionally asymmetric\n nose.tools.ok_(abs(similarity - 1/2) < tests.FLOAT_DELTA, \"Wrong list similarity\")",
"def test_dice_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity_function(x, y):\n\n def safe_get(field, row, default_value):\n # Safely get a value from the Row. If the value is None, get the\n # default value.\n return row[field] if row[field] is not None else default_value\n\n # Extract the values for the categorical and continuous features for both\n # the x and y samples. Use an empty string as the default value for missing\n # categorical fields and 0 for the continuous ones.\n x_categorical_features = [safe_get(k, x, \"\") for k in CATEGORICAL_FEATURES]\n x_continuous_features = [safe_get(k, x, 0) for k in CONTINUOUS_FEATURES]\n y_categorical_features = [safe_get(k, y, \"\") for k in CATEGORICAL_FEATURES]\n y_continuous_features = [safe_get(k, y, 0) for k in CONTINUOUS_FEATURES]\n\n # Here a larger distance indicates a poorer match between categorical variables.\n j_d = distance.hamming(x_categorical_features, y_categorical_features)\n j_c = distance.canberra(x_continuous_features, y_continuous_features)\n\n # Take the product of similarities to attain a univariate similarity score.\n # Add a minimal constant to prevent zero values from categorical features.\n # Note: since both the distance function return a Numpy type, we need to\n # call the |item| function to get the underlying Python type. If we don't\n # do that this job will fail when performing KDE due to SPARK-20803 on\n # Spark 2.2.0.\n return abs((j_c + 0.001) * j_d).item()",
"def partial_match(self, other_product: Product):\n if self.product_id and other_product.product_id and self.product_id == other_product.product_id:\n return True\n if self.quote_currency and other_product.quote_currency and self.quote_currency == other_product.quote_currency:\n return True\n if self.base_currency and other_product.base_currency and self.base_currency == other_product.base_currency:\n return True\n return False",
"def pair(self, reference: Spectrum, query: Spectrum) -> float:\n binned_reference = self.model.spectrum_binner.transform([reference])[0]\n binned_query = self.model.spectrum_binner.transform([query])[0]\n reference_vector = self.model.base.predict(self._create_input_vector(binned_reference))\n query_vector = self.model.base.predict(self._create_input_vector(binned_query))\n\n return cosine_similarity(reference_vector[0, :], query_vector[0, :])",
"def compute_similarity(site_a, site_b):\n return np.linalg.norm(site_a - site_b)",
"def calc_similarity(lhs, rhs):\n lhs_decomp = decompose(lhs)\n rhs_decomp = decompose(rhs)\n dist = editdistance.eval(lhs_decomp, rhs_decomp)\n max_len = max(len(lhs_decomp), len(rhs_decomp))\n sim = float(max_len - dist) / float(max_len)\n logging.debug('SIM: [%s] vs [%s] ==> %d / %d = %f', lhs.encode('UTF-8'), rhs.encode('UTF-8'),\n max_len - dist, max_len, sim)\n return sim",
"def check_call_similarity(self):\r\n \r\n if self.old and not self.new:\r\n self.similarity = \"LOSS\"\r\n elif not self.old and self.new:\r\n self.similarity = \"GAIN\"\r\n else:\r\n if not self.old.is_variant and self.new.is_variant:\r\n self.similarity = \"GAIN\" \r\n elif self.old.is_variant and not self.new.is_variant:\r\n self.similarity = \"LOSS\" \r\n\r\n else:\r\n self.similarity = \"SAME\"",
"def test_cosine_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def test_similarity_numeric():\n similarity = pm.compute_similarity_for_numeric(900, 800)\n nose.tools.ok_(abs(similarity - 8/9) < tests.FLOAT_DELTA, \"Wrong numeric similarity\")",
"def do(self, a, b):\n raise SkipTest\n u, s, vt = gula.svd(a, 0)\n assert_almost_equal(a, dot(multiply(u, s), vt))",
"def test_cosine_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"",
"def __getSimilarityScore(expected, actual):\n return SequenceMatcher(None, expected, actual).ratio()",
"def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity",
"def test_similarity_for_request(self):\n request1 = factories.RequestFactory(audit_id=self.audit.id)\n request2 = factories.RequestFactory(audit_id=self.audit.id)\n\n self.make_relationships(request1, [self.control, self.regulation])\n\n requests_by_request = Request.get_similar_objects_query(\n id_=request1.id,\n types=[\"Request\"],\n threshold=0,\n ).all()\n\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in requests_by_request},\n {(\"Request\", request2.id, 5)},\n )\n\n requests_by_assessment = Assessment.get_similar_objects_query(\n id_=self.assessment.id,\n types=[\"Request\"],\n threshold=0,\n ).all()\n\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in requests_by_assessment},\n {(\"Request\", request1.id, 18),\n (\"Request\", request2.id, 5)},\n )\n\n assessments_by_request = Request.get_similar_objects_query(\n id_=request1.id,\n types=[\"Assessment\"],\n threshold=0,\n ).all()\n\n other_assessments = {\n (\"Assessment\", assessment.id, self.id_weight_map[assessment.id])\n for assessment in self.other_assessments\n }\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in assessments_by_request},\n {(\"Assessment\", self.assessment.id, 18)}.union(other_assessments),\n )",
"def check_similarity(pair):\n user, business = pair['user_id'], pair['business_id']\n similarity = -1\n try:\n user_text = eval(user_profile[user])\n business_text = eval(business_profile[business])\n similarity = cosine_similarity(user_text, business_text)\n except:\n pass\n return similarity if similarity != -1 else 0",
"def wup_measure(self,a, b, similarity_threshold = 0.925, debug = False):\n if debug: print('Original', a, b)\n #if word_pair_dict.has_key(a+','+b):\n if a+','+b in self.word_pair_dict.keys():\n return self.word_pair_dict[a+','+b]\n\n def get_semantic_field(a):\n return wn.synsets(a, pos=wn.NOUN)\n\n if a == b: return 1.0\n\n interp_a = get_semantic_field(a)\n interp_b = get_semantic_field(b)\n if debug: print(interp_a)\n\n if interp_a == [] or interp_b == []:\n return 0.0\n\n if debug: print('Stem', a, b)\n global_max=0.0\n for x in interp_a:\n for y in interp_b:\n local_score=x.wup_similarity(y)\n if debug: print('Local', local_score)\n if local_score > global_max:\n global_max=local_score\n if debug: print('Global', global_max)\n\n # we need to use the semantic fields and therefore we downweight\n # unless the score is high which indicates both are synonyms\n if global_max < similarity_threshold:\n interp_weight = 0.1\n else:\n interp_weight = 1.0\n\n final_score = global_max * interp_weight\n self.word_pair_dict[a+','+b] = final_score\n return final_score",
"def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim"
]
| [
"0.66236335",
"0.6590189",
"0.6483876",
"0.6461456",
"0.6403854",
"0.62277865",
"0.6196094",
"0.6050941",
"0.6039867",
"0.6006546",
"0.5983247",
"0.59342825",
"0.5914346",
"0.5914346",
"0.5882225",
"0.5880784",
"0.58777213",
"0.5872836",
"0.58536917",
"0.5853683",
"0.5847085",
"0.5844217",
"0.58251834",
"0.58247834",
"0.5812907",
"0.5807516",
"0.57950693",
"0.57928467",
"0.576109",
"0.5754671"
]
| 0.6943262 | 0 |
Tests the calculation of the similarity of two products based on a 'list' attribute. | def test_similarity_list():
list1 = ["a", "b", "c"]
list2 = ["b", "c", "d", "e"]
similarity = pm.compute_similarity_for_list(list1, list2)
nose.tools.ok_(abs(similarity - 2/3) < tests.FLOAT_DELTA, "Wrong list similarity")
similarity = pm.compute_similarity_for_list(list2, list1) # intentionally asymmetric
nose.tools.ok_(abs(similarity - 1/2) < tests.FLOAT_DELTA, "Wrong list similarity") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def get_similar_products(list):\n #initialize cart with random ASIN\n params = {\"Item.1.ASIN\":'B000DLB2FI', 'Item.1.Quantity':1}\n cart = amazon.CartCreate(**params)\n root = objectify.fromstring(cart)\n cartid = _safe_get_element_text('Cart.CartId', root)\n hmac = _safe_get_element_text('Cart.HMAC', root)\n\n #create empty list of similar products\n sblist = []\n \n count = 0 #testing\n\n #iterate through list of original ASINs and retrieve also bought products\n print 'Retrieving \\\"Also Bought\\\" Products!' #testing\n for item in list:\n #add to cart\n amazon.CartClear(CartId=cartid, HMAC=hmac)\n params = {\"Item.1.ASIN\":item, 'Item.1.Quantity':1, 'CartId':cartid, 'HMAC':hmac, 'ResponseGroup':'Cart,CartSimilarities'}\n cart = amazon.CartAdd(**params)\n root = objectify.fromstring(cart)\n \n count +=1 #testing\n print count #testing\n \n #iterate through each similar product and add to list\n if \"SimilarProduct\" in cart:# HOW TO ACCOUNT FOR NO SIMILAR PRODUCTS\n for item2 in root.Cart.SimilarProducts.SimilarProduct:\n if _safe_get_element_text('Title', item2) is not None:\n sblist.append({'Original ASIN' : item,\n 'Associated ASIN' : item2.ASIN,\n 'Title' : item2.Title,\n 'Price' : None,\n 'Currency Code' : None,\n 'Relationship' : \"Also Bought\"})\n\n print 'Total # of \\\"Also Bought\\\" Products: ' + str(len(sblist)) #for testing\n count = 0 #testing\n \n #iterate through each similar prodcut and obtain highest price\n print 'Retrieving prices!' #testing\n for item in sblist:\n if item['Title'] is not None:\n title = filter(lambda x: x in string.printable, item['Title'].text) #remove non-ascii\n item['Title'] = title\n \n count+=1 #testing\n print count #testing\n\n pricelist = amazon.ItemLookup(ItemId=item['Associated ASIN'],ResponseGroup=\"OfferSummary,VariationSummary\")\n priceroot = objectify.fromstring(pricelist)\n #conditionals to check if parent or child ASIN or OOS\n if _safe_get_element_text(\"Items.Item.VariationSummary.HighestPrice.FormattedPrice\", priceroot) is not None: #Parent ASIN\n item['Price'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', priceroot)\n item['Currency Code'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.CurrencyCode', priceroot)\n elif _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.FormattedPrice\", priceroot) is not None: #Child ASIN\n #save price and currency in case no other sellers\n price = _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.FormattedPrice\", priceroot)\n currencycode = _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.CurrencyCode\", priceroot)\n amazon.CartClear(CartId=cartid, HMAC=hmac)\n params = {\"Item.1.ASIN\":item['Associated ASIN'], 'Item.1.Quantity':1, 'CartId':cartid, 'HMAC':hmac}\n cart = amazon.CartAdd(**params)\n rootcart = objectify.fromstring(cart)\n parentASIN = _safe_get_element_text(\"Cart.ParentASIN\",rootcart) #get Parent ASIN\n parentproduct = amazon.ItemLookup(ItemId=parentASIN, ResponseGroup=\"OfferSummary,VariationSummary\")\n rootparent = objectify.fromstring(parentproduct)\n #No way to obtain highest price without through VariationSummary\n if _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', rootparent) is not None:\n item['Price'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', rootparent)\n item['Currency Code'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.CurrencyCode', rootparent)\n else:\n 
item['Price'] = price\n item['Currency Code'] = currencycode\n\n return sblist",
"def similarity(self, e1, e2):\n\t\tpass",
"def compare_lists(l1, l2):\n score = 0\n total = len(l1)\n weight = 110\n\n for item in range(len(l2)):\n if item in range(len(l1)):\n score += math.log(weight/total) * (weight)\n else:\n score += math.log(0.5/total) * (1)\n weight -= 10\n return score",
"def products_match(self, product_names: Iterable[str]) -> bool:\n return tuple(product_names) == self.product_names",
"def similarities(index_name, base_product, base_osm):\n\t\t# First get all similarities form tfidf products\n\t\t# building args dictionnary to apply to filter, you gotta love Python :D\n\t\tkwargs ={\n\t\t\t'query_name': base_osm,\n\t\t\t'index_name': index_name,\n\t\t\tbase_osm+'_product': base_product,\n\t\t\t# index_name+'_product__brand__brandmatch__dalliz_brand__in': base_product.brand.brandmatch_set.all(),\n\t\t\tindex_name+'_product__dalliz_category__in': base_product.dalliz_category.all(),\n\t\t}\n\t\tbase_tags = base_product.tag.all() # Base products tags\n\t\tbase_brand = [ bm.dalliz_brand for bm in base_product.brand.brandmatch_set.all()]\n\t\tsims = base_product.productsimilarity_set.filter(**kwargs).distinct(index_name+'_product') # Getting similarities\n\t\t\n\t\t# Computing scores\n\t\tscores = [ \n\t\t\t\t( \n\t\t\t\t\tgetattr(sim, index_name+'_product'),\n\t\t\t\t\t10*sum([ 1 for tag in getattr(sim,index_name+'_product').tag.all() if tag in base_tags ]) # Tags score\n\t\t\t\t\t+sum([2*sum([ sum([2*(bm.dalliz_brand == dalliz_brand) + 1*( (bm.dalliz_brand != dalliz_brand) and bm.dalliz_brand.is_mdd == dalliz_brand.is_mdd) for dalliz_brand in base_brand]) for bm in brand.brandmatch_set.all()]) for brand in [getattr(sim,index_name+'_product').brand] if brand is not None ]) # brand score\n\t\t\t\t\t+ sim.score\n\t\t\t\t) \n\n\t\tfor sim in sims]\n\n\t\treturn sorted((scores), key=lambda item: -item[1])",
"def test_get_similarity():\n for similarity_enum in SimilarityEnum:\n similarity = get_similarity(similarity=similarity_enum)",
"def object_similarity(obj_1, obj_2):\n w_list = []\n obj_1_bag_size = sum(obj_1['bag_of_words'].values())\n obj_2_bag_size = sum(obj_2['bag_of_words'].values())\n obj_1_set = obj_1['set_of_words']\n obj_2_set = obj_2['set_of_words']\n obj_1_diff_2_set = obj_1_set - obj_2_set\n obj_2_diff_1_set = obj_2_set - obj_1_set\n w_list.append(weight_calculator(obj_1_bag_size, obj_2_bag_size))\n w_list.append(weight_calculator(len(obj_1_set), len(obj_2_set)))\n w_list.append(weight_calculator(len(obj_1_diff_2_set),\n len(obj_2_diff_1_set)))\n if 'total_lines' in obj_1.keys() and 'total_lines' in obj_2.keys():\n w_list.append(weight_calculator(obj_1['total_lines'],\n obj_2['total_lines']))\n if 'total_conversations' in obj_1.keys() and 'total_conversations' in obj_2.keys():\n w_list.append(weight_calculator(obj_1['total_conversations'],\n obj_2['total_conversations']))\n # Added as observations of genre -> rating relations\n if 'metadata' in obj_1.keys() and 'metadata' in obj_2.keys():\n w_list.append(weight_calculator(eval(obj_1['metadata']['genres']),\n eval(obj_2['metadata']['genres'])))\n return mean(w_list)",
"def sim_distance(p1, p2):\n # Get the list of shared_items\n #print '-- sim_distance', p1, p2\n si = [item for item in p1 if item in p2]\n\n if len(si) != 0:\n squares = [pow(p1[item] - p2[item], 2) for item in si]\n # Add up the squares of all the differences\n sum_of_squares = sum(squares)\n return 1 / (1 + np.sqrt(sum_of_squares))\n return 0",
"def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))",
"def recommendations_similarity(aData, needed_param, user, products, n = 10, simfunc = sim_cosine):\n table_CF = preproc.make_CF_table(aData, needed_param)\n sim_measures_table = simfunc(table_CF) \n \n scores = sim_measures_table.dot(table_CF)\n mean_scores = np.array(np.sum(sim_measures_table, axis=1).T)\n mean_scores = pd.DataFrame(np.tile(mean_scores, (scores.shape[1],1))).T\n predicted_ratings = np.divide(scores, np.absolute(mean_scores))\n \n ratings = predicted_ratings[user].order(ascending= False)\n ratings = ratings[0:n]\n \n return (ratings.index[ratings.index.isin(products)==False])",
"def test_sim(vec_x, vec_y, feature_list, func):\n feature_map_x = create_feature_map(vec_x, feature_list)\n feature_map_y = create_feature_map(vec_y, feature_list)\n\n if func == 0:\n return cosine_similarity(feature_map_x, feature_map_y)\n\n return minmax(feature_map_x, feature_map_y)",
"def test_intersection_list_single_double():\n first_list = [1, 2, 3, 4, 5]\n second_list = [4, 5, 6, 7, 8]\n intersection_single = main.compute_intersection_list_single(first_list, second_list)\n intersection_double = main.compute_intersection_list_double(first_list, second_list)\n assert len(intersection_single) == 2\n assert len(intersection_double) == 2\n assert intersection_single == intersection_double",
"def compare_lists(self, list1, list2):\n matching_items = []\n\n list1 = list1.copy()\n list2 = list2.copy()\n\n for item in list1:\n if item in list2:\n matching_items.append(item)\n\n for m in matching_items:\n for c in range(list1.count(m)):\n list1.remove(m)\n for c in range(list2.count(m)):\n list2.remove(m)\n if list1 or list2:\n tmp_match = False\n else:\n tmp_match = True\n return tmp_match, list1, list2",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity_scores(self, other):\n word_score = compare_dictionaries(other.words, self.words)\n word_length_score = compare_dictionaries(other.word_lengths, self.word_lengths)\n sentence_length_score = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n stem_score = compare_dictionaries(other.stems, self.stems)\n comma_score = compare_dictionaries(other.commas_per_sentence, self.commas_per_sentence)\n list_scores = [word_score, word_length_score, sentence_length_score, stem_score, comma_score]\n return list_scores",
"def similarities (self, listOfWords):\n \n # building the query dictionary\n queryDict = collections.defaultdict(int)\n for w in listOfWords:\n queryDict [w] += + 1.0\n \n # normalizing the query\n length = float (len (listOfWords))\n for k in queryDict:\n queryDict [k] /= length\n \n # computing the list of similarities\n sims = []\n for doc in self.documents:\n score = 0.0\n docDict = doc [1]\n for k in queryDict:\n if docDict.has_key (k):\n score += (queryDict [k] / self.corpusDict [k]) + (docDict [k] / self.corpusDict [k])\n sims.append ([doc [0], score])\n \n return sims",
"def support(self, *mass_functions):\n result = 0\n for mass_function in mass_functions:\n result += self.similarity(mass_function)\n return round(result, 6)",
"def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim",
"def test_multi_same(nothing_list):\n result = multi_same_list(nothing_list)\n assert result[1][2] == 0\n assert result[0][2] == 0",
"def __multiply(self, listA, listB):\n if len(listA) != len(listB):\n raise ValueError('List should be of same lengths')\n\n return [listA[i]*listB[i] for i in range(len(listA))]",
"def test_simtk_list_of_quantities_to_pint():\n list_of_quantities = [val * omm_unit.meter for val in range(10)]\n quantity_list = omm_unit.meter * [val for val in range(10)]\n\n assert list_of_quantities != quantity_list\n assert all(simtk_to_pint(list_of_quantities) == simtk_to_pint(quantity_list))",
"def match(self, product):\n\n raise NotImplementedError, 'need impletent match method'",
"def test_list_and_tuples(self, tol):\n\n shapes = expected_shapes(1, 2)\n weights = [np.random.random(shape) for shape in shapes]\n\n dev = DummyDevice(wires=2)\n\n circuit = qml.QNode(circuit_template, dev)\n circuit2 = qml.QNode(circuit_decomposed, dev)\n\n res = circuit(*weights)\n res2 = circuit2(*weights)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n weights_tuple = tuple(w for w in weights)\n res = circuit(*weights_tuple)\n res2 = circuit2(*weights_tuple)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)",
"def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)",
"def test_similarity_for_request(self):\n request1 = factories.RequestFactory(audit_id=self.audit.id)\n request2 = factories.RequestFactory(audit_id=self.audit.id)\n\n self.make_relationships(request1, [self.control, self.regulation])\n\n requests_by_request = Request.get_similar_objects_query(\n id_=request1.id,\n types=[\"Request\"],\n threshold=0,\n ).all()\n\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in requests_by_request},\n {(\"Request\", request2.id, 5)},\n )\n\n requests_by_assessment = Assessment.get_similar_objects_query(\n id_=self.assessment.id,\n types=[\"Request\"],\n threshold=0,\n ).all()\n\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in requests_by_assessment},\n {(\"Request\", request1.id, 18),\n (\"Request\", request2.id, 5)},\n )\n\n assessments_by_request = Request.get_similar_objects_query(\n id_=request1.id,\n types=[\"Assessment\"],\n threshold=0,\n ).all()\n\n other_assessments = {\n (\"Assessment\", assessment.id, self.id_weight_map[assessment.id])\n for assessment in self.other_assessments\n }\n self.assertSetEqual(\n {(obj.type, obj.id, obj.weight) for obj in assessments_by_request},\n {(\"Assessment\", self.assessment.id, 18)}.union(other_assessments),\n )",
"def test_product(self):\n self.assertEqual(functions.product(2, 2), 4)\n self.assertEqual(functions.product(2, -2), -4)",
"def compare():\n data = request.get_json()\n res_sim = audio_featurizer.compare_two_features_sets(data['features_1'], data['features_2'])\n result = dict(similarity=res_sim)\n return jsonify(result)",
"def calculate_similarity(self, tfidf_matrix, test_tfidf):\n\n with open(DATASET.fold_root / 'tags_order.json') as file:\n tags_order = json.load(file)\n\n min_max_scaler = MinMaxScaler()\n\n n_clus = 2\n simis = []\n for test_q in test_tfidf:\n s = cosine_similarity(tfidf_matrix, test_q)\n\n # Sorting and getting indices of sorted similarities\n simi = s.transpose()[0]\n simi_values = np.sort(simi)[::-1][:200]\n simi_indices = simi.argsort()[::-1]\n\n breaks = jenkspy.jenks_breaks(simi_values, n_clus)\n simi_count = len(simi_values[breaks[-2] <= simi_values])\n\n q_tags = [self.train_set[i].tags for i in simi_indices][:simi_count]\n\n tags_votes = Counter(chain(*q_tags))\n all_count = sum(tags_votes.values())\n tags_likelihood = [tags_votes.get(\n tag, 0) / all_count for tag in tags_order]\n\n lh = np.array([float(x)\n for x in tags_likelihood]).reshape(-1, 1)\n normalized_lh = np.concatenate(\n min_max_scaler.fit_transform(lh)\n ).tolist()\n\n simis.append(normalized_lh)\n\n return simis"
]
| [
"0.6316317",
"0.63050604",
"0.6177503",
"0.61020786",
"0.6076241",
"0.60168993",
"0.5944124",
"0.5928959",
"0.59269273",
"0.5791538",
"0.57839596",
"0.5783629",
"0.578113",
"0.57527703",
"0.5746568",
"0.5746568",
"0.5725273",
"0.57178694",
"0.57150406",
"0.56814325",
"0.564408",
"0.5622237",
"0.56106716",
"0.5603427",
"0.55722183",
"0.55718005",
"0.5563298",
"0.5563122",
"0.55612147",
"0.55599463"
]
| 0.7762865 | 0 |
Tests conversion from a dict to a ProductModel instance. | def test_conversion_from_dict():
model_definition = {
'language': {'type': 'fixed', 'default': 'english'},
'a': {'type': 'fixed', 'persisted': True},
'b.c': {'type': 'fixed', 'persisted': True},
'b.d.e': {'type': 'text', 'persisted': True},
'b.d.f': {'type': 'numeric', 'persisted': True}
}
factory = ProductModelFactory(model_definition)
stemmed = text.parse_text_to_stems('english', 'a value that should be stemmed')
model_dict = {
'a': 'test',
'b': {
'c': 'foo',
'd': {
'e': stemmed,
'f': 54321
}
}
}
product = pm.ProductModel.from_dict('test_product', model_dict, factory)
nose.tools.eq_(product.get_attribute('a'), model_dict['a'], 'Attribute does not match')
nose.tools.eq_(product.get_attribute('b.c'), model_dict['b']['c'], 'Attribute does not match')
nose.tools.assert_list_equal(product.get_attribute('b.d.e'),
model_dict['b']['d']['e'], 'Attribute does not match')
nose.tools.eq_(product.get_attribute('b.d.f'), model_dict['b']['d']['f'], 'Attribute does not match') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_from_dict(self):\n b1 = BaseModel()\n b1.name = \"Holberton\"\n b1.my_number = 89\n my_model_json = b1.to_dict()\n b2 = BaseModel(**my_model_json)\n self.assertEqual(b1.my_number, b2.my_number)\n self.assertEqual(b1.id, b2.id)\n self.assertEqual(b1.name, b2.name)\n self.assertEqual(b1.created_at, b2.created_at)\n self.assertEqual(b1.updated_at, b2.updated_at)\n self.assertNotEqual(b1, b2)",
"def test_products_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Product))\n self.assertEqual(str(data), 'django beginners')",
"def from_dict(cls, dikt) -> 'Price':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Failure':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'ModelClass':\n return util.deserialize_model(dikt, cls)",
"def test_dict_to_instance(self):\n r = Review()\n r_dictionary = r.to_dict()\n r2 = Review(**r_dictionary)\n self.assertEqual(type(r), type(r2))",
"def test_conversion_to_dict():\n model_definition = {\n 'language': {'type': 'fixed', 'default': 'english'},\n 'a': {'type': 'fixed', 'persisted': True},\n 'b.c': {'type': 'fixed', 'persisted': True},\n 'b.d.e': {'type': 'text', 'persisted': True},\n 'b.d.f': {'type': 'numeric', 'persisted': True}\n }\n factory = ProductModelFactory(model_definition)\n raw_product = {\n 'a': 'foo',\n 'b': {\n 'c': 'bar',\n 'd': {\n 'e': 'some nested stuff',\n 'f': 12345\n }\n }\n }\n stemmed = text.parse_text_to_stems('english', raw_product['b']['d']['e'])\n model = factory.build('test_product', raw_product)\n model_dict = model.to_dict()\n nose.tools.eq_(model_dict['a'], raw_product['a'], 'Attribute does not match')\n nose.tools.eq_(model_dict['b']['c'], raw_product['b']['c'], 'Attribute does not match')\n nose.tools.assert_list_equal(model_dict['b']['d']['e'], stemmed, 'Attribute does not match')\n nose.tools.eq_(model_dict['b']['d']['f'], raw_product['b']['d']['f'], 'Attribute does not match')",
"def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n is_valid = dictionary.get('IsValid')\n browse_node_lookup_request = awsecommerceservice.models.browse_node_lookup_request.BrowseNodeLookupRequest.from_dictionary(dictionary.get('BrowseNodeLookupRequest')) if dictionary.get('BrowseNodeLookupRequest') else None\n item_search_request = awsecommerceservice.models.item_search_request.ItemSearchRequest.from_dictionary(dictionary.get('ItemSearchRequest')) if dictionary.get('ItemSearchRequest') else None\n item_lookup_request = awsecommerceservice.models.item_lookup_request.ItemLookupRequest.from_dictionary(dictionary.get('ItemLookupRequest')) if dictionary.get('ItemLookupRequest') else None\n similarity_lookup_request = awsecommerceservice.models.similarity_lookup_request.SimilarityLookupRequest.from_dictionary(dictionary.get('SimilarityLookupRequest')) if dictionary.get('SimilarityLookupRequest') else None\n cart_get_request = awsecommerceservice.models.cart_get_request.CartGetRequest.from_dictionary(dictionary.get('CartGetRequest')) if dictionary.get('CartGetRequest') else None\n cart_add_request = awsecommerceservice.models.cart_add_request.CartAddRequest.from_dictionary(dictionary.get('CartAddRequest')) if dictionary.get('CartAddRequest') else None\n cart_create_request = awsecommerceservice.models.cart_create_request.CartCreateRequest.from_dictionary(dictionary.get('CartCreateRequest')) if dictionary.get('CartCreateRequest') else None\n cart_modify_request = awsecommerceservice.models.cart_modify_request.CartModifyRequest.from_dictionary(dictionary.get('CartModifyRequest')) if dictionary.get('CartModifyRequest') else None\n cart_clear_request = awsecommerceservice.models.cart_clear_request.CartClearRequest.from_dictionary(dictionary.get('CartClearRequest')) if dictionary.get('CartClearRequest') else None\n errors = awsecommerceservice.models.errors.Errors.from_dictionary(dictionary.get('Errors')) if dictionary.get('Errors') else None\n\n # Return an object of this model\n return cls(is_valid,\n browse_node_lookup_request,\n item_search_request,\n item_lookup_request,\n similarity_lookup_request,\n cart_get_request,\n cart_add_request,\n cart_create_request,\n cart_modify_request,\n cart_clear_request,\n errors)",
"def test_from_dict(self):\n\n class Person(Model):\n name = StringField()\n age = IntegralField(bounds = (0, None))\n siblings = ListField(of = StringField())\n\n # In the normal case where all the data coincides with fields\n # correctly.\n person1 = Person.from_dict({\n \"name\": \"Joe Shmoe\",\n \"age\": 21,\n \"siblings\": [\"Dick Shmoe\", \"Jane Shmoe\"]\n })\n assert person1.name == \"Joe Shmoe\"\n assert person1.age == 21\n assert person1.siblings == [\"Dick Shmoe\", \"Jane Shmoe\"]\n\n # In the less normal case where the data does not coincide with fields\n person2 = Person.from_dict({\n \"notaname\": 2,\n \"age\": \"lots\"\n })\n assert person2.notaname == 2\n assert person2.age == \"lots\"\n assert person2.name is None\n assert person2.siblings is None\n\n # In the even less normal case where no data exists at all\n person3 = Person.from_dict({})\n assert person3.name is None\n assert person3.age is None\n assert person3.siblings is None",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])",
"def test_create_obj_by_type_from_dict(self):\n test_obj = {}\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIsInstance(returned_obj, self.tested_class)",
"def from_dict(cls, obj):\r\n raise NotImplementedError",
"def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n formatted_price = dictionary.get('FormattedPrice')\n amount = dictionary.get('Amount')\n currency_code = dictionary.get('CurrencyCode')\n\n # Return an object of this model\n return cls(formatted_price,\n amount,\n currency_code)",
"def from_dict(cls, dikt) -> 'VultrExtra':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'SkillPropertyModel':\n return util.deserialize_model(dikt, cls)",
"def _fromResponse(product_search, res):\n # Make sure that product_labels is in dictionary format\n productLabels = {x.key: x.value for x in res.product_labels}\n return ProductSearch.Product(product_search,\n res.name.split('/')[-1],\n res.product_category,\n res.display_name,\n productLabels,\n res.name)",
"def from_dict(cls, dikt) -> 'ResultFeedback':\n return util.deserialize_model(dikt, cls)",
"def test_object_creation(self):\n serializer = ProductSerializer(data=self.data)\n self.assertTrue(serializer.is_valid())\n product = serializer.save()\n\n self.assertEqual(product.title, self.title)\n self.assertEqual(product.description, self.description)\n self.assertEqual(product.price, self.price)\n self.assertTrue(product.is_active)\n self.assertTrue(product.available)",
"def test_products_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Recipe))\n self.assertEqual(str(data), 'django beginners')",
"def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n lowest_new_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestNewPrice')) if dictionary.get('LowestNewPrice') else None\n lowest_used_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestUsedPrice')) if dictionary.get('LowestUsedPrice') else None\n lowest_collectible_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestCollectiblePrice')) if dictionary.get('LowestCollectiblePrice') else None\n lowest_refurbished_price = awsecommerceservice.models.price.Price.from_dictionary(dictionary.get('LowestRefurbishedPrice')) if dictionary.get('LowestRefurbishedPrice') else None\n total_new = dictionary.get('TotalNew')\n total_used = dictionary.get('TotalUsed')\n total_collectible = dictionary.get('TotalCollectible')\n total_refurbished = dictionary.get('TotalRefurbished')\n\n # Return an object of this model\n return cls(lowest_new_price,\n lowest_used_price,\n lowest_collectible_price,\n lowest_refurbished_price,\n total_new,\n total_used,\n total_collectible,\n total_refurbished)",
"def from_dict(cls, dikt) -> 'ProductionFlowItem':\n return util.deserialize_model(dikt, cls)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)"
]
| [
"0.6527579",
"0.64977103",
"0.62672186",
"0.610359",
"0.60274994",
"0.6020002",
"0.5937814",
"0.59310555",
"0.5926014",
"0.5909174",
"0.5909174",
"0.5909174",
"0.5909174",
"0.5909174",
"0.5909174",
"0.59031826",
"0.5890253",
"0.58854175",
"0.58797616",
"0.5858172",
"0.58544725",
"0.58535534",
"0.5846499",
"0.5846152",
"0.5836464",
"0.5829695",
"0.5825786",
"0.5817312",
"0.5817312",
"0.5817312"
]
| 0.6796946 | 0 |
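
A minimal standalone sketch of the dotted-path lookup that the assertions above rely on (e.g. 'b.d.e' resolving to model_dict['b']['d']['e']). The get_by_path helper is hypothetical and not part of the ProductModel/ProductModelFactory API; it only illustrates the pattern.

from functools import reduce

def get_by_path(data: dict, dotted_path: str):
    # Resolve a dotted key path such as 'b.d.f' against a nested dict.
    return reduce(lambda node, key: node[key], dotted_path.split("."), data)

model_dict = {"a": "test", "b": {"c": "foo", "d": {"e": ["stem"], "f": 54321}}}

assert get_by_path(model_dict, "a") == "test"
assert get_by_path(model_dict, "b.c") == "foo"
assert get_by_path(model_dict, "b.d.f") == 54321
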
Return category details url | def category_details_url(id):
return reverse('category:category-detail', args=[id]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_absolute_url(self):\n return reverse('market:category-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('category-detail', args=[str(self.categoryId)])",
"def _get_url(self, category):\n query = []\n for key,value in self._params.iteritems():\n query.append(\"{key}={value}\".format(key=key,value=value))\n return \"{base}/{category}?{query}\".format(base = self._base_url, category = category, query = \"&\".join(query))",
"def getCategory():",
"def category2url(cat):\n return remove_diacritics(cat).replace(\" \", \"_\")",
"def get_category(self) -> str:\n return self.category",
"def category(request, slug):\n categry = get_object_or_404(Category,slug=slug)\n story_list = Story.objects.filter(category=category)\n heading = \"Category: %s\" % category.label\n return render_to_response('cms/story_list.html', locals())",
"def category(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])",
"def cli(ctx, category_id):\n return ctx.ti.categories.show_category(category_id)",
"def get_category_info(category_id):\n uri = 'categories/' + category_id\n return self.make_request(uri)",
"def category(self) -> str:\n return pulumi.get(self, \"category\")",
"def __str__(self):\n return self.category_name",
"def __repr__(self):\n return f\"Category=(id={self.id},category_name={self.category_name},category_slug={self.category_slug})\"",
"def get(self, category_id):\n path = 'urlCategories/{}'.format(category_id)\n return self._session.get(path)",
"def get_absolute_url(self):\n return urls.reverse('blog:article', args=[self.category.slug, self.slug])",
"def view_category(cat_id):\n session['target'] = url_for('view_category', cat_id=cat_id)\n sqlsession = SQLSESSION()\n category = sqlsession.query(Category).filter_by(id=cat_id).first()\n categories = sqlsession.query(Category).all()\n items = sqlsession.query(Item).filter_by(category_id=cat_id).all()\n return render_template(\"view_category.html\",\n category=category,\n categories=categories,\n items=items,\n item_title=category.name + \" Items\")",
"def show_cat(slug):\n cat = Category.query.filter_by(slug=slug).first()\n return redirect(url_for('articles.show_all') + '?c=' + str(cat.id))",
"def get_detail_URL(recipe_id):\n return reverse('recipeapp:recipe-detail', args=[recipe_id])",
"def get_disease_category_url():\n base_url = 'http://www.mayoclinic.org' \n task = load_jt('task.json') \n spider = Crawler()\n \n for url in task:\n html = spider.html(url)\n if html:\n soup = BS4(html)\n div = soup.find_all('div', id = 'main_0_left1_0_tertiarynav')[0]\n for a in div.find_all('a'):\n task[url].setdefault(base_url + a['href'], {'data': {'category': a.text.strip()}})\n dump_jt(task, 'task.json', replace = True)",
"def showCategoryDetails(cat_id):\n\n category = Category.query.get(cat_id)\n # get all the poses under that category\n all_poses = db.session.query(Pose).join(PoseCategory).filter(PoseCategory.cat_id==cat_id).all()\n\n return render_template(\"category-details.html\", all_poses=all_poses, category=category)",
"def goto_category_by_title(self,category):\n\n return self.catbrowser.goto_category_by_title(category)",
"def url(self):\r\n return BASE_URL + \"/courses/\" + self.course_id + \"/\" + self.url_path",
"def detail_url(recipe_id):\n return reverse('recipe:recipe-detail',args=[recipe_id])",
"def _get_absolute_url(self, parent_slug=None):\n return reverse(\n \"catalogue:category\",\n kwargs={\n \"category_slug\": self.get_full_slug(parent_slug=parent_slug),\n \"pk\": self.pk,\n },\n )",
"def get_category(soup):\n category = soup.ul.find_all(\"a\")[-1].text\n return category",
"def get_absolute_url(self):\n return reverse('food-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('brand-detail', args=[str(self.id)]) #View on Site (front-end)",
"def get_absolute_url(self):\n return reverse('library-detail', kwargs={'slug': self.slug})",
"def category(request):\n\n return render(request, \"core/category_list.html\", {\n \"category_list\": Category.objects.all()\n })",
"def existing_url(module):\n # Build the format dictionary\n url_base = \"/axapi/v3/web-category/category-list/{name}\"\n\n f_dict = {}\n if '/' in str(module.params[\"name\"]):\n f_dict[\"name\"] = module.params[\"name\"].replace(\"/\", \"%2F\")\n else:\n f_dict[\"name\"] = module.params[\"name\"]\n\n return url_base.format(**f_dict)"
]
| [
"0.76737565",
"0.7562042",
"0.6749167",
"0.65350366",
"0.6471964",
"0.6314211",
"0.6271113",
"0.625414",
"0.6219947",
"0.62155586",
"0.6212428",
"0.6178922",
"0.61175007",
"0.6104493",
"0.60992265",
"0.60960597",
"0.60801506",
"0.6054124",
"0.6013464",
"0.5993353",
"0.5972542",
"0.59637964",
"0.5960707",
"0.59440607",
"0.59430563",
"0.59429294",
"0.5941946",
"0.5939129",
"0.59388316",
"0.5930128"
]
| 0.8083309 | 0 |
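
A self-contained sketch (assuming Django is installed) of how reverse() builds such a detail URL. The 'category-detail' route and URL pattern below are stand-ins; in the project, the namespaced name 'category:category-detail' comes from its own URLConf, typically a DRF router registration.

import django
from django.conf import settings
from django.urls import path, reverse

def category_detail(request, pk):
    # Placeholder view; never actually called in this sketch.
    ...

urlpatterns = [
    path("categories/<int:pk>/", category_detail, name="category-detail"),
]

settings.configure(ROOT_URLCONF=__name__)
django.setup()

print(reverse("category-detail", args=[42]))  # -> /categories/42/
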
Create and return a sample category | def sample_category(name='place'):
return Category.objects.create(name=name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category",
"def test_create_category(self):\n pass",
"def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')",
"def make_test_category(self):\n\n c = Category(slug='test')\n\n return c",
"def getCategory():",
"def create(data):\n \n # create category\n return Category(\n category_id = data['id'],\n name = data['name'])",
"def test_add_category(self):\n self.add_success(self.test_data['pants'])",
"def create_category(name):\n return Category.objects.create(name=name)",
"def test_create_category(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))",
"def create_category(self, category):\n\n super().new_entry()\n\n return Categories.objects.create(\n name=category['id'].split(':')[1],\n name_fr=category['name'],\n url=category['url']\n )",
"def create_category(self): # , conf_dir, title):\n category_file_path = self.event_dir / 'category.json'\n category_data = {\n 'title': self.title,\n }\n category_data_text = json.dumps(category_data, **\n JSON_FORMAT_KWARGS) + '\\n'\n save_file(category_file_path, category_data_text)\n logger.debug('File {} created', category_file_path)",
"def test_create_cat_object():\n from .scripts.initializedb import create_cat_object\n cat_object = create_cat_object(\"a\", \"b\", \"c\", \"c\")\n assert isinstance(cat_object, Category)",
"def cc_category(save=True, **kwargs):\n responses = kwargs.pop('responses', [])\n save = save or responses # Adding responses forces save.\n defaults = {'title': str(datetime.now()),\n 'weight': random.choice(range(50)),\n 'locale': settings.LANGUAGE_CODE}\n defaults.update(kwargs)\n\n category = models.CannedCategory(**defaults)\n if save:\n category.save()\n # Add responses to this category.\n for response, weight in responses:\n models.CategoryMembership.objects.create(\n category=category, response=response, weight=weight)\n\n return category",
"def test_create_category(self):\n payload = {\n 'name': 'Houses',\n }\n res = self.client.post(CATEGORY_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n category = Category.objects.get(id=res.data['id'])\n serializer = CategorySerializer(category)\n self.assertEqual(serializer.data['name'], payload['name'])",
"def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)",
"def add_Category(title,image):\n newCategory=Category.objects.create(title=title, image=image)\n return newCategory",
"def create_category(category_name, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Category.objects.create(category_name=category_name, pub_date=time)",
"def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)",
"def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)",
"def test_add_category_to_asset(self):\n pass",
"def test_create_category_with_existing_name(self):\n sample_category()\n res = self.client.post(CATEGORY_URL, {\"name\": \"place\"})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')",
"def get_rand_cat(self):\n return randint(0,GAConfig[\"num_categories\"]-1)",
"def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n session.add(c)\n session.commit()\n print 'Category \"' + name + '\" created.'\n return c",
"def test_new_category_data(db_session):\n new_cat = Category(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_cat)\n category = db_session.query(Category).all()\n assert category[0].label == \"test_label\"\n assert category[0].desc == \"test_desc\"",
"def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n db_session.add(c)\n db_session.commit()\n return c",
"def create(self, category):\n path = 'urlCategories'\n return self._session.post(path, category)",
"def test_category(self):\n\n # Test empty categories\n self.assertFalse(self.colorspace.hasCategory('ocio'))\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n with self.assertRaises(IndexError):\n self.colorspace.getCategories()[0]\n\n # Test with defined TEST_CATEGORIES.\n for i, y in enumerate(TEST_CATEGORIES):\n self.assertEqual(len(self.colorspace.getCategories()), i)\n self.colorspace.addCategory(y)\n self.assertTrue(self.colorspace.hasCategory(y))\n\n # Test the output list is equal to TEST_CATEGORIES.\n self.assertListEqual(\n list(self.colorspace.getCategories()), TEST_CATEGORIES)\n\n # Test the length of list is equal to the length of TEST_CATEGORIES.\n self.assertEqual(len(self.colorspace.getCategories()),\n len(TEST_CATEGORIES))\n\n iterator = self.colorspace.getCategories()\n for a in TEST_CATEGORIES:\n self.assertEqual(a, next(iterator))\n\n # Test the length of categories is zero after clearCategories()\n self.colorspace.clearCategories()\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n\n # Testing individually adding and removing a category.\n self.colorspace.addCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 1)\n self.colorspace.removeCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 0)",
"def gen_categ(low=0, up=0):\n share_final = raw.copy()\n if low == 0:\n time = pd.Categorical(share_final.time)\n share_final = share_final.set_index([\"mergeid\", \"time\"])\n share_final[\"time\"] = time\n\n country = pd.Categorical(share_final.country)\n share_final[\"country\"] = country\n return share_final\n else:\n a = raw.loc[(raw[\"yrbirth\"] >= low) & (raw[\"yrbirth\"] <= up)]\n time = pd.Categorical(a.time)\n a = a.set_index([\"mergeid\", \"time\"])\n a[\"time\"] = time\n\n country = pd.Categorical(a.country)\n a[\"country\"] = country\n\n subsample = a.copy()\n\n return subsample",
"def test_get_category_value_to_sample_ids(self):\r\n test_data = get_test_data()\r\n actual = get_category_value_to_sample_ids(\r\n test_data['map'],\r\n 'SampleType')\r\n expected = {'feces': ['f1', 'f2', 'f3', 'f4', 'f5', 'f6'],\r\n 'L_palm': ['p1', 'p2'],\r\n 'Tongue': ['t1', 't2'],\r\n 'Other': ['not16S.1']}\r\n self.assertEqual(actual, expected)\r\n\r\n actual = get_category_value_to_sample_ids(test_data['map'], 'year')\r\n expected = {'2008': ['f1', 'f2', 'f3', 'f4', 'f5', 'f6',\r\n 'p1', 'p2', 't1', 't2', 'not16S.1']}\r\n self.assertEqual(actual, expected)\r\n\r\n self.assertRaises(ValueError,\r\n get_category_value_to_sample_ids,\r\n test_data['map'],\r\n 'not.a.real.category')",
"def test_extract_categories():\n pass"
]
| [
"0.7426105",
"0.73442113",
"0.7260034",
"0.706049",
"0.68367004",
"0.68239397",
"0.6787772",
"0.67004144",
"0.6686712",
"0.6656232",
"0.6518061",
"0.64919376",
"0.6432085",
"0.6426642",
"0.64018744",
"0.63784826",
"0.63733375",
"0.6366401",
"0.63178706",
"0.63171357",
"0.6289764",
"0.62624943",
"0.62345356",
"0.6229226",
"0.62068063",
"0.61615807",
"0.6136288",
"0.6095388",
"0.6091692",
"0.6089733"
]
| 0.8123947 | 0 |
test viewing category details | def test_get_category_details(self):
category = sample_category()
url = category_details_url(category.id)
res = self.client.get(url)
serializer = CategorySerializer(category)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_view_categories(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/categories/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('Stews', str(res.data))",
"def test_get_categories(self):\n pass",
"def test_view_category_by_id(self):\n rv = self.client().post('/categories/', data=self.category)\n self.assertEqual(rv.status_code, 201)\n result_in_json = json.loads(rv.data.decode('utf-8').replace(\"'\", \"\\\"\"))\n result = self.client().get(\n '/categories/{}'.format(result_in_json['category_name']))\n self.assertEqual(result.status_code, 200)\n self.assertIn('Stews', str(result.data))",
"def test_view_categories(self):\n Perms.objects.create(user=self.user, access_level=4).save()\n self.client.login(username='hodor', password='hodor')\n response = self.client.get('/categories/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'view_categories.html')",
"def test_index_view_with_categories(self):\n add_cat('test',1,1)\n add_cat('temp',1,1)\n add_cat('tmp',1,1)\n add_cat('tmp test temp',1,1)\n\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"tmp test temp\")\n\n num_cats =len(response.context['categories'])\n self.assertEqual(num_cats , 4)",
"def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)",
"def test_get_a_category(self):\n self.test_add_category_success()\n response = self.client.get('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())",
"def test_product_category_view(self):\n\n response = self.client.get(reverse('website:product_categories'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Check that the rendered context contains 2 product types\n self.assertEqual(len(response.context['product_categories']),2)\n\n # Product title appears in HTML response content\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n self.assertIn('<h4 class=\"card-title\">Test Product Type <p class=\"badge badge-primary ml-2\">1</p></h4>'.encode(), response.content)\n self.assertIn('<h4 class=\"card-title\">Test Product Type2 <p class=\"badge badge-primary ml-2\">1</p></h4>'.encode(), response.content)",
"def test_category_url(self):\n data = self.data1\n # response = self.client.post(\n # reverse('recipe:category_list', args=[data.slug]))\n # self.assertEqual(response.status_code, 200)\n self.assertTrue(isinstance(data, Category))",
"def test_Categories_getter(self):\r\n expected = ['Treatment', 'DOB']\r\n observed = self.cs_overview.Categories\r\n self.assertEqual(observed, expected)",
"def test_get_category(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': self.category.sodar_uuid},\n )\n response = self.request_knox(url)\n\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n expected = {\n 'title': self.category.title,\n 'type': self.category.type,\n 'parent': None,\n 'description': self.category.description,\n 'readme': '',\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.owner_as_cat.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': False,\n 'sodar_uuid': str(self.owner_as_cat.sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(response_data, expected)",
"def test_create_category(self):\n pass",
"def test_edit_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('Breakfast')\n self.dashboard()\n rv = self.edit_category('JunkFood')\n self.assertIn(b'Category successfully updated', rv.data)",
"def test_get(self, init_db, category):\n assert Category.get(category.id) == category",
"def test_get_categories(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n self.assertTrue(data['total_categories'])",
"def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)",
"def test_get_category_search(self):\n self.test_add_category_success()\n response = self.client.get('/categories?q=a',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())",
"def test_category_has_access_to_model_data():\n category = Category()\n category_data = category.get_category_data()\n\n assert type(category_data) is list\n assert len(category_data) > 1",
"def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)",
"def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)",
"def test_category_lowercase(self):\n self.assertEqual(self.category.category, \"test\")",
"def testviewlist(self):\n rv = self.app.get('/viewcategory')\n self.assertEqual(rv.status_code, 302, \"viewlist page should not load unless signed in\")",
"def test_get_categories(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(len(data['categories']))",
"def test_extract_categories():\n pass",
"def test_get_categories(self):\n res = self.client().get('/api/categories')\n res_body = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(res_body['success'])\n self.assertTrue(res_body['categories'])",
"def test_get_categories_success(self):\n self.test_add_category_success()\n response = self.client.get('/categories',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())",
"def test_edit_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n rv = self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n self.assertIn(b'Recipe successfully updated', rv.data)",
"def test_get_categories(self):\n res = self.client().get('/api/categories')\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertTrue(data)",
"def test_questions_by_category(self):\n\n response = self.client().get('/categories/1/questions')\n body = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['success'], True)\n self.assertNotEqual(len(body['questions']), 0)\n self.assertNotEqual(len(body['category']), 'Science')",
"def test_get_categories(self):\n\n res = self.client().get('/categories')\n\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(len(data['categories']), 6)"
]
| [
"0.7814517",
"0.75773275",
"0.75567085",
"0.7504459",
"0.7501404",
"0.7409347",
"0.7393505",
"0.7224864",
"0.7210572",
"0.72028196",
"0.7201158",
"0.71600854",
"0.7142944",
"0.7045864",
"0.69958264",
"0.695585",
"0.69526976",
"0.692939",
"0.6918037",
"0.6918037",
"0.6916416",
"0.6912314",
"0.6892911",
"0.68671674",
"0.68579704",
"0.6852733",
"0.6851188",
"0.6839804",
"0.68199897",
"0.68198085"
]
| 0.76808107 | 1 |
Test creating a category with invalid details | def test_create_category_with_invalid_details_fails(self):
res = self.client.post(CATEGORY_URL, {})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
res.data['errors']['name'][0],
'This field is required.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_category(self):\n pass",
"def test_cannot_create_with_invalid_category(self):\n serializer = ServiceSerializer(\n data = dict(name = \"service1\", category = 10),\n context = dict(project = self.project)\n )\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['category'][0].code, 'does_not_exist')",
"def test_add_category_missing_fields(self):\n category = json.dumps({\n 'desc': \"Jamaican\",\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Check the keys and try again', response.data.decode())",
"def test_create_category(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))",
"def test_create_category_with_existing_name(self):\n sample_category()\n res = self.client.post(CATEGORY_URL, {\"name\": \"place\"})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')",
"def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)",
"def test_create(self):\n self.assertTrue(Category.objects.exists())",
"def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')",
"def test_add_category_empty_name(self):\n category = json.dumps({\n 'name': \"\",\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing name', response.data.decode())",
"def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)",
"def test_create_category(self):\n payload = {\n 'name': 'Houses',\n }\n res = self.client.post(CATEGORY_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n category = Category.objects.get(id=res.data['id'])\n serializer = CategorySerializer(category)\n self.assertEqual(serializer.data['name'], payload['name'])",
"def test_blank_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n rv = self.category('')\n self.assertIn(b'Field must be between 1 and 50 characters long.', rv.data)",
"def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)",
"def test_category_invalid(self):\n # wiki and questions\n ques = QuestionFactory(title=u'q1 audio')\n ques.tags.add(u'desktop')\n ans = AnswerFactory(question=ques)\n AnswerVoteFactory(answer=ans, helpful=True)\n\n d1 = DocumentFactory(\n title=u'd1 audio',\n locale=u'en-US',\n category=10,\n is_archived=False,\n tags=[u'desktop'])\n ApprovedRevisionFactory(document=d1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(2, json.loads(response.content)['total'])",
"def test_warning_on_duplicate_category(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')\n response2 = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertContains(response2, \"already exists\")",
"def test_crud_category_when_not_logged_in(self):\n with self.client:\n response = self.register_user(\n \"Patrick\", \"Walukagga\", \n \"[email protected]\", \"telnetcmd123\"\n )\n # invalid token\n headers=dict(Authorization='Bearer ')\n response = self.create_category(\"Breakfast\", \n \"How to make breakfast\", \n headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))\n category_data = json.dumps({\"name\": \"Lunchfast\", \n \"description\": \n \"How to make lunchfast\"})\n response = self.client.put('/recipe_category/1', \n headers=headers,\n data=category_data)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))\n response = self.client.delete('/recipe_category/1', \n headers=headers, \n data=category_data)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))\n # delete recipe category not in database\n response = self.client.delete('/recipe_category/3', \n headers=headers, \n data=category_data)\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token is missing', str(response.data))",
"def test_creation_when_missing_service_category(self):\n self.data = {\n \"service_name\": \"Live at the yard\",\n \"service_price\": \"5000\",\n \"service_description\": \"See Kendrick perform live at the yard\",\n \"service_category\": \"\",\n \"service_subcategory\": \"Live\",\n \"service_attributes\": {\n \"duration\": \"as long \",\n \"width\": \"20\",\n \"length\": \"20\",\n \"height\": \"20\"\n }\n }\n\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.data),\n headers=self.my_header)\n self.assertEqual(response2.status, \"400 BAD REQUEST\")\n self.assertIn(\"Error. Missing Service Category\", str(response2.data))",
"def test_cannot_create_with_same_category_and_name(self):\n # Create an initial service\n self.project.services.create(name = \"service1\", category = self.category)\n # Then try to create the same service using the serializer\n serializer = ServiceSerializer(\n data = dict(name = \"service1\", category = self.category.pk),\n context = dict(project = self.project)\n )\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['name'][0].code, 'unique')",
"def test_add_missing_field(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[1]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))",
"def test_add_category(self):\n self.add_success(self.test_data['pants'])",
"def test_delete_category(self):\n pass",
"def test_add_category_missing_token(self):\n category = json.dumps({\n 'name': \"Jamaican\",\n })\n token = \"\"\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": token})\n self.assertEqual(response.status_code, 401)\n self.assertIn('Token not found', response.data.decode())",
"def test_add_category_existing_name(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 409)\n self.assertIn('Name Asian exists', response.data.decode())",
"def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')",
"def test_add_category_success(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 201)\n self.assertIn('asian', response.data.decode())",
"def test_add_same_category(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 409)\n self.assertIn('category with name already exist',\n str(response.data))",
"def test_category_post(self):\r\n admin = UserFactory.create()\r\n user = UserFactory.create()\r\n name = u'Category'\r\n category = dict(\r\n name=name,\r\n short_name='category',\r\n description=u'description')\r\n data = json.dumps(category)\r\n # no api-key\r\n url = '/api/category'\r\n res = self.app.post(url, data=data)\r\n err = json.loads(res.data)\r\n err_msg = 'Should not be allowed to create'\r\n assert res.status_code == 401, err_msg\r\n assert err['action'] == 'POST', err_msg\r\n assert err['exception_cls'] == 'Unauthorized', err_msg\r\n\r\n # now a real user but not admin\r\n res = self.app.post(url + '?api_key=' + user.api_key, data=data)\r\n err = json.loads(res.data)\r\n err_msg = 'Should not be allowed to create'\r\n assert res.status_code == 403, err_msg\r\n assert err['action'] == 'POST', err_msg\r\n assert err['exception_cls'] == 'Forbidden', err_msg\r\n\r\n # now as an admin\r\n res = self.app.post(url + '?api_key=' + admin.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n err_msg = 'Admin should be able to create a Category'\r\n assert res.status_code == 200, err_msg\r\n cat = db.session.query(Category)\\\r\n .filter_by(short_name=category['short_name']).first()\r\n assert err['id'] == cat.id, err_msg\r\n assert err['name'] == category['name'], err_msg\r\n assert err['short_name'] == category['short_name'], err_msg\r\n assert err['description'] == category['description'], err_msg\r\n\r\n # test re-create should fail\r\n res = self.app.post(url + '?api_key=' + admin.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'POST', err\r\n assert err['exception_cls'] == \"IntegrityError\", err\r\n\r\n # test create with non-allowed fields should fail\r\n data = dict(name='fail', short_name='fail', wrong=15)\r\n res = self.app.post(url + '?api_key=' + admin.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n err_msg = \"ValueError exception should be raised\"\r\n assert res.status_code == 415, err\r\n assert err['action'] == 'POST', err\r\n assert err['status'] == 'failed', err\r\n assert err['exception_cls'] == \"ValueError\", err_msg\r\n # Now with a JSON object but not valid\r\n data = json.dumps(data)\r\n res = self.app.post(url + '?api_key=' + user.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n err_msg = \"TypeError exception should be raised\"\r\n assert err['action'] == 'POST', err_msg\r\n assert err['status'] == 'failed', err_msg\r\n assert err['exception_cls'] == \"TypeError\", err_msg\r\n assert res.status_code == 415, err_msg\r\n\r\n # test update\r\n data = {'name': 'My New Title'}\r\n datajson = json.dumps(data)\r\n ## anonymous\r\n res = self.app.put(url + '/%s' % cat.id,\r\n data=data)\r\n error_msg = 'Anonymous should not be allowed to update'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'PUT', error\r\n assert error['exception_cls'] == 'Unauthorized', error\r\n\r\n ### real user but not allowed as not admin!\r\n url = '/api/category/%s?api_key=%s' % (cat.id, user.api_key)\r\n res = self.app.put(url, data=datajson)\r\n error_msg = 'Should not be able to update apps of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'PUT', error\r\n assert error['exception_cls'] == 'Forbidden', 
error\r\n\r\n # Now as an admin\r\n res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),\r\n data=datajson)\r\n assert_equal(res.status, '200 OK', res.data)\r\n out2 = db.session.query(Category).get(cat.id)\r\n assert_equal(out2.name, data['name'])\r\n out = json.loads(res.data)\r\n assert out.get('status') is None, error\r\n assert out.get('id') == cat.id, error\r\n\r\n # With fake data\r\n data['algo'] = 13\r\n datajson = json.dumps(data)\r\n res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),\r\n data=datajson)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'TypeError', err\r\n\r\n # With not JSON data\r\n datajson = data\r\n res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),\r\n data=datajson)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'ValueError', err\r\n\r\n # With wrong args in the URL\r\n data = dict(\r\n name='Category3',\r\n short_name='category3',\r\n description=u'description3')\r\n\r\n datajson = json.dumps(data)\r\n res = self.app.put('/api/category/%s?api_key=%s&search=select1' % (cat.id, admin.api_key),\r\n data=datajson)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # test delete\r\n ## anonymous\r\n res = self.app.delete(url + '/%s' % cat.id, data=data)\r\n error_msg = 'Anonymous should not be allowed to delete'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'DELETE', error\r\n assert error['target'] == 'category', error\r\n ### real user but not admin\r\n url = '/api/category/%s?api_key=%s' % (cat.id, user.api_key)\r\n res = self.app.delete(url, data=datajson)\r\n error_msg = 'Should not be able to delete apps of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'DELETE', error\r\n assert error['target'] == 'category', error\r\n\r\n # As admin\r\n url = '/api/category/%s?api_key=%s' % (cat.id, admin.api_key)\r\n res = self.app.delete(url, data=datajson)\r\n\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n\r\n # delete a category that does not exist\r\n url = '/api/category/5000?api_key=%s' % admin.api_key\r\n res = self.app.delete(url, data=datajson)\r\n error = json.loads(res.data)\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'DELETE', error\r\n assert error['target'] == 'category', error\r\n assert error['exception_cls'] == 'NotFound', error\r\n\r\n # delete a category that does not exist\r\n url = '/api/category/?api_key=%s' % admin.api_key\r\n res = self.app.delete(url, data=datajson)\r\n assert res.status_code == 404, error",
"def test_23_admin_add_category(self):\r\n self.create()\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n # Anonymous user\r\n url = '/admin/categories'\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be added\"\r\n assert \"Category added\" in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category form validation should work\"\r\n assert \"Please correct the errors\" in res.data, err_msg",
"def test_422_invalid_category_payload(self): \n data = {\n 'question':'Test Question',\n 'answer':'Test Answer',\n 'category':'10',\n 'difficulty':'1'\n } \n res = self.client().post('/questions/add', \n data=json.dumps(data),\n content_type='application/json')\n self.data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n json_res = json.loads(res.get_data(as_text=False))",
"def test_empty_category_dashboard(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1995')\n self.assertIn(b'Create a Recipe Category', rv.data)"
]
| [
"0.82101303",
"0.8041852",
"0.791494",
"0.78192854",
"0.7700955",
"0.76209366",
"0.7575074",
"0.7443271",
"0.74368286",
"0.7422457",
"0.7414787",
"0.74138415",
"0.73887557",
"0.73368895",
"0.7318645",
"0.7282309",
"0.72419256",
"0.722112",
"0.71900594",
"0.71621287",
"0.7139944",
"0.7135251",
"0.7132124",
"0.71202475",
"0.70949095",
"0.70688313",
"0.706103",
"0.70484084",
"0.7014317",
"0.6961515"
]
| 0.87169325 | 0 |
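
A standalone sketch (assuming Django and djangorestframework are installed) of where the 'This field is required.' message originates. The serializer below is a stand-in rather than the project's CategorySerializer, and the extra res.data['errors'] wrapper seen above would come from the project's own exception handling, not from DRF defaults.

import django
from django.conf import settings

settings.configure()  # minimal settings so DRF fields can be used standalone
django.setup()

from rest_framework import serializers

class CategoryInput(serializers.Serializer):
    name = serializers.CharField()  # required=True by default

ser = CategoryInput(data={})
assert not ser.is_valid()
assert ser.errors["name"][0] == "This field is required."
print(ser.errors)
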
Test create category with existing same name fails | def test_create_category_with_existing_name(self):
sample_category()
res = self.client.post(CATEGORY_URL, {"name": "place"})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
res.data['errors']['name'][0],
'This field must be unique.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_category(self):\n pass",
"def test_add_same_category(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 409)\n self.assertIn('category with name already exist',\n str(response.data))",
"def test_add_category_existing_name(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 409)\n self.assertIn('Name Asian exists', response.data.decode())",
"def test_create_category(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))",
"def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')",
"def test_create(self):\n self.assertTrue(Category.objects.exists())",
"def test_update_category(self):\n pass",
"def test_create_category(self):\n payload = {\n 'name': 'Houses',\n }\n res = self.client.post(CATEGORY_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n category = Category.objects.get(id=res.data['id'])\n serializer = CategorySerializer(category)\n self.assertEqual(serializer.data['name'], payload['name'])",
"def test_cannot_create_with_same_category_and_name(self):\n # Create an initial service\n self.project.services.create(name = \"service1\", category = self.category)\n # Then try to create the same service using the serializer\n serializer = ServiceSerializer(\n data = dict(name = \"service1\", category = self.category.pk),\n context = dict(project = self.project)\n )\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors['name'][0].code, 'unique')",
"def test_add_category(self):\n self.add_success(self.test_data['pants'])",
"def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)",
"def test_delete_category(self):\n pass",
"def test_warning_on_duplicate_category(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')\n response2 = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertContains(response2, \"already exists\")",
"def test_add_category_to_asset(self):\n pass",
"def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')",
"def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)",
"def sample_category(name='place'):\n return Category.objects.create(name=name)",
"def test_add_category_success(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 201)\n self.assertIn('asian', response.data.decode())",
"def test_edit_with_duplicate_name(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n # Edit the category\n response = self.client.post('/categories/edit/%s' % self.category1.id, {'categoryType': 'AnotherTestCategoryType'})\n self.assertContains(response, \"already exists\")",
"def test_category_addition_twice(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['message'], 'Category already exists')\n self.assertEqual(res.status_code, 409)",
"def test_can_create_job_category(self):\n\t\tself.job_category.save()\n\t\tjob_category_instance = JobCategory.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.category,\n\t\t\tjob_category_instance.category,\n\t\t\t\"Job categories don't match.\"\n\t\t)",
"def create_category(self, name):\n logger.info('CategoryOfProduct category create initiated')\n newname = name\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n categories = self.Category.search([('name', '=', newname), ('parent', '=', 'Ingredients')])\n parent = self.Category.search(['name', '=', 'Ingredients'])\n if categories:\n return False\n category = self.Category()\n if parent:\n category.parent = parent[-1]\n category.name = newname\n category.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False",
"def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)",
"def create_category(name):\n return Category.objects.create(name=name)",
"def test_create_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': '',\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n # Assert object content\n new_category = Project.objects.get(title=NEW_CATEGORY_TITLE)\n model_dict = model_to_dict(new_category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_category.pk,\n 'title': new_category.title,\n 'type': new_category.type,\n 'parent': None,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': new_category.title,\n 'has_public_children': False,\n 'sodar_uuid': new_category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n # Assert role assignment\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_category, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n # Assert API response\n expected = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': str(new_category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)",
"def test_add_category_empty_name(self):\n category = json.dumps({\n 'name': \"\",\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing name', response.data.decode())",
"def test_create_category_nested(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n new_category = Project.objects.get(title=NEW_CATEGORY_TITLE)\n model_dict = model_to_dict(new_category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_category.pk,\n 'title': new_category.title,\n 'type': new_category.type,\n 'parent': self.category.pk,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + new_category.title,\n 'has_public_children': False,\n 'sodar_uuid': new_category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_category, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n expected = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': str(self.category.sodar_uuid),\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': str(new_category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)",
"def test_create_cat_object():\n from .scripts.initializedb import create_cat_object\n cat_object = create_cat_object(\"a\", \"b\", \"c\", \"c\")\n assert isinstance(cat_object, Category)",
"def test_category_mixed(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')",
"def test_add_category_space_name(self):\n category = json.dumps({\n 'name': \" \",\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('A space is not a name', response.data.decode())"
]
| [
"0.86326134",
"0.81314254",
"0.79928046",
"0.785919",
"0.78289926",
"0.77278626",
"0.7714236",
"0.7689189",
"0.76465833",
"0.7560281",
"0.7554323",
"0.7551021",
"0.7518037",
"0.74838006",
"0.7428445",
"0.7390729",
"0.73875856",
"0.73158264",
"0.7306888",
"0.72817504",
"0.727735",
"0.7245144",
"0.7208353",
"0.72067004",
"0.72007775",
"0.7180684",
"0.7157226",
"0.71561295",
"0.7123744",
"0.71041495"
]
| 0.83984613 | 1 |
Test updating a category to existing name fails | def test_update_category_to_existing_name(self):
sample_category()
category = sample_category(name='House')
url = category_details_url(category.id)
res = self.client.put(url, {"name": "place"})
category.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
res.data['errors']['name'][0],
'This field must be unique.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_category(self):\n pass",
"def test_update(self, init_db, category):\n category_name = fake.alphanumeric()\n category.update(name=category_name)\n assert category.name == category_name",
"def test_update_category(self):\n category = sample_category()\n url = category_details_url(category.id)\n self.client.put(url, {\"name\": \"school\"})\n category.refresh_from_db()\n self.assertEqual(category.name, 'school')",
"def test_update_category(self):\n self.update_success(self.test_data['pants'], self.test_data['shirts'])",
"def test_edit_category(self):\n rv = self.client().post(\n '/categories/',\n data={'category_name': 'Sauces'})\n self.assertEqual(rv.status_code, 201)\n rv = self.client().put(\n '/categories/1',\n data={\n \"name\": \"Soups and Sauces\"\n })\n #self.assertEqual(rv.status_code, 200)\n results = self.client().get('/categories/1')\n #self.assertIn('Soups and', str(results.data))",
"def test_edit_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('Breakfast')\n self.dashboard()\n rv = self.edit_category('JunkFood')\n self.assertIn(b'Category successfully updated', rv.data)",
"def test_edit_category(self):\n response = self.client.put('/api/v1/category/1',\n data=json.dumps(category[3]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 201)\n self.assertIn('Apparels', str(response.data))",
"def test_category_update(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.put('/api/v2/categories/1',\n data=json.dumps(self.pdata),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Updated!')\n self.assertEqual(res.status_code, 200)",
"def test_edit_with_duplicate_name(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n # Edit the category\n response = self.client.post('/categories/edit/%s' % self.category1.id, {'categoryType': 'AnotherTestCategoryType'})\n self.assertContains(response, \"already exists\")",
"def test_add_category_existing_name(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 409)\n self.assertIn('Name Asian exists', response.data.decode())",
"def test_update_category(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n self.tester.update_category('country', {\"foo\": \"bar\"})\n\n with self.assertRaises(QiitaDBColumnError):\n self.tester.update_category('missing column',\n {'1.SKM7.640188': 'stuff'})\n\n negtest = self.tester['1.SKM7.640188']['country']\n\n mapping = {'1.SKB1.640202': \"1\",\n '1.SKB5.640181': \"2\",\n '1.SKD6.640190': \"3\"}\n\n self.tester.update_category('country', mapping)\n\n self.assertEqual(self.tester['1.SKB1.640202']['country'], \"1\")\n self.assertEqual(self.tester['1.SKB5.640181']['country'], \"2\")\n self.assertEqual(self.tester['1.SKD6.640190']['country'], \"3\")\n self.assertEqual(self.tester['1.SKM7.640188']['country'], negtest)\n\n # test updating a required_sample_info\n mapping = {'1.SKB1.640202': \"1\",\n '1.SKB5.640181': \"2\",\n '1.SKD6.640190': \"3\"}\n self.tester.update_category('required_sample_info_status_id', mapping)\n self.assertEqual(\n self.tester['1.SKB1.640202']['required_sample_info_status'],\n \"received\")\n self.assertEqual(\n self.tester['1.SKB5.640181']['required_sample_info_status'],\n \"in_preparation\")\n self.assertEqual(\n self.tester['1.SKD6.640190']['required_sample_info_status'],\n \"running\")\n self.assertEqual(\n self.tester['1.SKM7.640188']['required_sample_info_status'],\n \"completed\")\n\n # testing that if fails when trying to change an int column value\n # to str\n st = SampleTemplate.create(self.metadata, self.new_study)\n mapping = {'2.Sample1': \"no_value\"}\n with self.assertRaises(ValueError):\n st.update_category('int_column', mapping)",
"def test_create_category_with_existing_name(self):\n sample_category()\n res = self.client.post(CATEGORY_URL, {\"name\": \"place\"})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')",
"def test_put_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n put_data = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': '',\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.category.refresh_from_db()\n model_dict = model_to_dict(self.category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.category.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': False,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)",
"def test_edit_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n rv = self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n self.assertIn(b'Recipe successfully updated', rv.data)",
"def test_update_child_category(self):\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')",
"def test_add_same_category(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 409)\n self.assertIn('category with name already exist',\n str(response.data))",
"def test_patch_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {\n 'title': UPDATED_TITLE,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n }\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.category.refresh_from_db()\n model_dict = model_to_dict(self.category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.category.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(self.category.get_owner().user, self.user_owner_cat)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': False,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)",
"def test_update_task_invalid_category_error(self):\n task_id = util.MOCK_UUID_4\n category = \"unk\"\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"category\": category,\n },\n )\n result = rv.json()\n valid_str = \"DEFAULT,DATASETS,DESCRIPTIVE_STATISTICS,FEATURE_ENGINEERING,PREDICTOR,COMPUTER_VISION,NLP,MONITORING\"\n expected = {\n \"message\": f\"Invalid category. Choose any of {valid_str}\",\n \"code\": \"InvalidCategory\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)",
"def test_update_category_slug_special(self):\n rv = self.app.post(self.get_category_url('add'), data=dict(\n name='Pants', slug='pants'\n ), follow_redirects=True)\n\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(\n name='Polo Shirts', slug='polo shirts'\n ), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(\n name=':)', slug=':)'\n ), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(\n name='Add', slug='add'\n ), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug \"add\" is not allowed' in rv.data",
"def test_edit_non_existing_item(self):\n response = self.client.put('/api/v1/category/200',\n data=json.dumps(category[3]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id 200 does not exist',\n str(response.data))",
"def test_update_skills_category_when_name_is_already_taken(\n self,\n mock_skills_category_repo_get,\n mock_skills_category_controller_request_params,\n mock_skills_category_repo_find_first,\n ):\n # Arrange\n with self.app.app_context():\n mock_skills_category_repo_get.return_value = self.mock_skills_category\n mock_skills_category_repo_find_first.return_value = (\n self.mock_skills_category\n )\n mock_skills_category_controller_request_params.return_value = (\n \"Mock name\",\n \"Mock help\",\n 1,\n )\n skills_category_controller = SkillCategoryController(self.request_context)\n\n # Act\n result = skills_category_controller.update_skills_category(1)\n\n # Assert\n assert result.status_code == 400\n assert (\n result.get_json()[\"msg\"] == \"Skills Category with this name\"\n \" already exists\"\n )",
"def test_delete_category(self):\n pass",
"def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')",
"async def update_recipe_category(category: str, new_category: CategoryIn, session: Session = Depends(generate_session)):\n\n try:\n return db.categories.update(session, category, new_category.dict())\n except Exception:\n raise HTTPException(status.HTTP_400_BAD_REQUEST)",
"def test_create_category(self):\n pass",
"def test_update_preferences_by_category(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass",
"def test_category_mixed_on_edit(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 1)\n self.go200('minus_edit', [self.superuser, minus.id])\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')",
"def test_24_admin_update_category(self):\r\n self.create()\r\n obj = db.session.query(Category).get(1)\r\n _name = obj.name\r\n category = obj.dictize()\r\n\r\n # Anonymous user GET\r\n url = '/admin/categories/update/%s' % obj.id\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n # Anonymous user POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin GET\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n # Authenticated user but not admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin GET\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Category should be listed for admin user\"\r\n assert _name in res.data, err_msg\r\n # Check 404\r\n url_404 = '/admin/categories/update/5000'\r\n res = self.app.get(url_404, follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n # Admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be updated\"\r\n assert \"Category updated\" in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n updated_category = db.session.query(Category).get(obj.id)\r\n assert updated_category.name == obj.name, err_msg\r\n # With not valid form\r\n category['name'] = None\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n assert \"Please correct the errors\" in res.data, err_msg",
"def test_warning_on_duplicate_category(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')\n response2 = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertContains(response2, \"already exists\")",
"def test_update_notification_category(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_category_name_one = 'Error 1'\n post_response_one = create_notification_category(\n client,\n new_notification_category_name_one)\n assert post_response_one.status_code == HttpStatus.created_201.value\n\n post_response_data_one = json.loads(post_response_one.get_data(as_text=True))\n new_notification_category_url = post_response_data_one['url']\n new_notification_category_name_two = 'Error 2'\n data = {'name': new_notification_category_name_two}\n patch_response = client.patch(\n new_notification_category_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS),\n data=json.dumps(data))\n assert patch_response.status_code == HttpStatus.ok_200.value\n\n get_response = client.get(\n new_notification_category_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['name'] == new_notification_category_name_two"
]
| [
"0.8705725",
"0.86055994",
"0.8067761",
"0.79751843",
"0.7777514",
"0.7758227",
"0.7564963",
"0.7562341",
"0.7552205",
"0.7283096",
"0.72652644",
"0.72646534",
"0.7263289",
"0.72263676",
"0.7210747",
"0.7179932",
"0.7075194",
"0.7023446",
"0.7023264",
"0.69821405",
"0.6903937",
"0.6862581",
"0.6853384",
"0.68030244",
"0.6761968",
"0.67434186",
"0.6690238",
"0.6617635",
"0.66076523",
"0.65751714"
]
| 0.86579794 | 1 |
Extract the images into a numpy array. | def _extract_images(image_paths):
num_images = len(image_paths)
data = np.zeros((num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS))
for i in range(num_images):
image_path = image_paths[i]
print('Extracting images from: ', image_path)
image = imageio.imread(image_path)
data[i] = image
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extract_images(self, filename):\n log.info('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = self._read32(bytestream)\n rows = self._read32(bytestream)\n cols = self._read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data",
"def extract(self, images):\n if images.shape[0] > self.ram_size:\n self.logger.warning(f'Number of inputs on RAM is larger than '\n f'{self.ram_size}. Please use '\n f'`self.get_batch_inputs()` to split the inputs! '\n f'Otherwise, it may encounter OOM problem!')\n\n results = []\n for batch_images in self.get_batch_inputs(images):\n results.append(self._extract(batch_images))\n\n return np.concatenate(results, axis=0)",
"def extract_images(filename,lx):\n print('Extracting', filename,'aaaaaa')\n \n data=numpy.loadtxt(filename,dtype='int64')\n dim=data.shape[0]\n data=data.reshape(dim, lx, lx, 1) \n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n data = data.reshape(data.shape[0],\n data.shape[1] * data.shape[2])\n # Convert from [0, 255] -> [0.0, 1.0].\n data = data.astype(numpy.float64)\n # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables\n data = numpy.multiply(data, 1.0 ) # multiply by one, instead\n print(data.shape)\n return data",
"def create_image_array(files_list):\n im_array = np.array([np.array(cv2.imread(file)) for file in files_list])\n return im_array",
"def features_to_np_array(self, images):\n \n images = list(images)\n \n images = np.stack(images, axis=0)\n \n return images",
"def get_images(self) -> Sequence[Optional[np.ndarray]]:\n raise NotImplementedError",
"def extract_images(filename):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, filename))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data",
"def getimgs():",
"def _listOfImagesToNumpy(self, images):\n # build image data array, y_labels\n for i in range(0, len(images)):\n if self.image_size is not None:\n img = images[i].resize(self.image_size)\n else:\n img = images[i]\n img_arr = img_to_array(img)\n if i == 0:\n dims = [len(images)] + list(img_arr.shape)\n X_data = np.zeros(shape=dims)\n X_data[i, :, :, :] = img_arr\n\n return X_data",
"def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data",
"def get_data(path):\n all_images_as_array=[]\n label=[]\n for filename in os.listdir(path):\n try:\n if re.match(r'positive',filename):\n label.append(1)\n else:\n label.append(0)\n img=cv2.imread(path + filename)\n (b, g, r)=cv2.split(img)\n img=cv2.merge([r,g,b])\n np_array = np.asarray(img)\n l,b,c = np_array.shape\n np_array = np_array.reshape(l*b*c,)\n all_images_as_array.append(np_array)\n except:\n continue\n return np.array(all_images_as_array), np.array(label)",
"def extract_images(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = _read32(bytestream)[0]\n rows = _read32(bytestream)[0]\n cols = _read32(bytestream)[0]\n #print('check', magic, num_images, rows, cols, rows * cols * num_images)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data",
"def _images(path):\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255",
"def array_from_img(image):\n return np.array(image)",
"def get_images(self):\n \n return self.img_lst",
"def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images",
"def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs",
"def _extract_array(tiffs: list[np.ndarray], idx: int, shape: tuple[int, ...], dtype: type | np.dtype) -> np.ndarray:\n feature_arrays = (np.atleast_3d(img)[..., idx] for img in tiffs)\n return np.asarray(list(feature_arrays), dtype=dtype).reshape(*shape, 1)",
"def extract_images(f):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2051:\n\t\t\traise ValueError('Invalid magic number %d in MNIST image file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_images = _read32(bytestream)\n\t\trows = _read32(bytestream)\n\t\tcols = _read32(bytestream)\n\t\tbuf = bytestream.read(rows * cols * num_images)\n\t\tdata = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tdata = data.reshape(num_images, rows, cols, 1)\n\t\treturn data",
"def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass",
"def read_image(images_root):\n im_array = np.load(images_root)\n return im_array",
"def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)",
"def get_array(self) -> numpy.array:\r\n \r\n return self.pic_array",
"def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data",
"def extract_data(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH\n data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)\n return data",
"def _extract(self, images):\n if (images.ndim != 4 or images.shape[0] <= 0 or\n images.shape[0] > self.batch_size or images.shape[1] not in [1, 3]):\n raise ValueError(f'Input images should be with shape [batch_size, '\n f'channel, height, width], where '\n f'`batch_size` no larger than {self.batch_size}, '\n f'`channel` equals to 1 or 3!\\n'\n f'But {images.shape} is received!')\n if images.shape[1] == 1:\n images = np.tile(images, (1, 1, 1, 3))\n if images.shape[1] != self.image_channels:\n raise ValueError(f'Number of channels of input image, which is '\n f'{images.shape[1]}, is not supported by the current '\n f'perceptual model, which requires '\n f'{self.image_channels} channels!')\n x = torch.from_numpy(images).type(torch.FloatTensor).to(self.run_device)\n f = self.net(x)\n return f.to(self.cpu_device).detach().numpy()",
"def __readImages(self, filename):\n print 'Reading images from %s ...' % filename\n images = []\n with open(filename, 'rb') as infile:\n infile.read(4) # ignore magic number\n count = struct.unpack('>i', infile.read(4))[0]\n rows = struct.unpack('>i', infile.read(4))[0]\n columns = struct.unpack('>i', infile.read(4))[0]\n\n for i in xrange(count):\n data = infile.read(rows*columns)\n image = np.fromstring(data, dtype=np.uint8)\n image = image.reshape((rows, columns))\n image = 255 - image # now black digit on white background\n images.append(image)\n return images",
"def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data",
"def extract_images(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data",
"def get_img_array(myzipfile, imgid, shape=(299,299)):\n img_arr = np.zeros(shape=(512, 512, 3), dtype=np.float32)\n img_green = Image.open(myzipfile.open(f'{imgid}_green.png'))\n img_blue = Image.open(myzipfile.open(f'{imgid}_blue.png'))\n img_red = Image.open(myzipfile.open(f'{imgid}_red.png'))\n img_yellow = Image.open(myzipfile.open(f'{imgid}_yellow.png'))\n img_arr[:,:,0] = np.divide(np.array(img_green), 255)\n img_arr[:,:,1] = np.divide(np.array(img_blue), 255)/2 + np.divide(np.array(img_yellow), 255)/2\n img_arr[:,:,2] = np.divide(np.array(img_red), 255)/2 + np.divide(np.array(img_red), 255)/2\n img_arr = cv2.resize(img_arr, shape)\n return img_arr"
]
| [
"0.72095007",
"0.7152573",
"0.70705694",
"0.7013361",
"0.6969041",
"0.6916433",
"0.68338263",
"0.683087",
"0.67723185",
"0.67375094",
"0.67287177",
"0.67180645",
"0.67137927",
"0.6679731",
"0.6623957",
"0.6600087",
"0.658568",
"0.6582823",
"0.6553165",
"0.65411156",
"0.653439",
"0.65142107",
"0.6511361",
"0.6504597",
"0.6484486",
"0.6478932",
"0.64787346",
"0.6478409",
"0.6472726",
"0.64695203"
]
| 0.77781427 | 0 |
Aggregate metric value across towers. | def _aggregate_across_towers(metrics_collections, metric_value_fn, *args):
def fn(distribution, *a):
"""Call `metric_value_fn` in the correct control flow context."""
if hasattr(distribution, '_outer_control_flow_context'):
# If there was an outer context captured before this method was called,
# then we enter that context to create the metric value op. If the
      # captured context is `None`, ops.control_dependencies(None) gives the
# desired behavior. Else we use `Enter` and `Exit` to enter and exit the
# captured context.
# This special handling is needed because sometimes the metric is created
# inside a while_loop (and perhaps a TPU rewrite context). But we don't
# want the value op to be evaluated every step or on the TPU. So we
# create it outside so that it can be evaluated at the end on the host,
      # once the update ops have been evaluated.
# pylint: disable=protected-access
if distribution._outer_control_flow_context is None:
with tf.control_dependencies(None):
metric_value = metric_value_fn(distribution, *a)
else:
distribution._outer_control_flow_context.Enter()
metric_value = metric_value_fn(distribution, *a)
distribution._outer_control_flow_context.Exit()
# pylint: enable=protected-access
else:
metric_value = metric_value_fn(distribution, *a)
if metrics_collections:
tf.add_to_collections(metrics_collections, metric_value)
return metric_value
return distribution_strategy_context.get_tower_context().merge_call(fn, *args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _aggregate_across_towers(metrics_collections, metric_value_fn, *args):\n def fn(distribution, *a):\n \"\"\"Call `metric_value_fn` in the correct control flow context.\"\"\"\n if hasattr(distribution, '_outer_control_flow_context'):\n # If there was an outer context captured before this method was called,\n # then we enter that context to create the metric value op. If the\n # caputred context is `None`, ops.control_dependencies(None) gives the\n # desired behavior. Else we use `Enter` and `Exit` to enter and exit the\n # captured context.\n # This special handling is needed because sometimes the metric is created\n # inside a while_loop (and perhaps a TPU rewrite context). But we don't\n # want the value op to be evaluated every step or on the TPU. So we\n # create it outside so that it can be evaluated at the end on the host,\n # once the update ops have been evaluted.\n\n # pylint: disable=protected-access\n if distribution._outer_control_flow_context is None:\n with ops.control_dependencies(None):\n metric_value = metric_value_fn(distribution, *a)\n else:\n distribution._outer_control_flow_context.Enter()\n metric_value = metric_value_fn(distribution, *a)\n distribution._outer_control_flow_context.Exit()\n # pylint: enable=protected-access\n else:\n metric_value = metric_value_fn(distribution, *a)\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric_value)\n return metric_value\n\n return distribute_lib.get_tower_context().merge_call(fn, *args)",
"def compute_metrics(self):\n pass",
"def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics",
"def weighted_metrics(self):\n return None",
"def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted",
"def to_metric(self):\r\n if self.units != 'metric':\r\n self.units = 'metric'\r\n for statement in self.statements:\r\n statement.to_metric()\r\n for tool in iter(self.tools.values()):\r\n tool.to_metric()\r\n for primitive in self.primitives:\r\n primitive.to_metric()\r\n for hit in self.hits:\r\n hit.to_metric()",
"def sum(self, key, value):\n self._metrics[key] += value",
"def metrics_group():",
"def generate_aggregate_data(self):\n output = np.zeros((self.number_cycles, self.number_towers))\n\n for cycle in range(self.number_cycles):\n for user in range(self.number_users):\n for tower in range(self.number_towers):\n output[cycle][tower] += self.traces[user][cycle] == tower\n\n return output",
"def aggregate_metrics(metrics):\n if len(metrics) == 1:\n return metrics[0]\n else:\n agg_metrics = metrics[0]\n for metric in agg_metrics.keys():\n vals = [x[metric] for x in metrics]\n agg_metrics[metric] = [np.mean(vals), np.std(vals)]\n return agg_metrics",
"def get_metric(self, data_row: pd.Series) -> float:",
"def sum(self):\n\n return time_stat(self, stat=\"sum\")",
"def aggregate_rating(self) -> object:\n return self._aggregate_rating",
"def _aggregation_target(self):\n ...",
"def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)",
"def add_aggregate_temp(self, value: float) -> float:\n # Check if aggregate samples are too old.\n if self.last_sample_time is not None:\n last_sample_time2 = datetime.fromtimestamp(self.last_sample_time)\n now = datetime.now()\n threshold_time = now - timedelta(hours=1)\n if last_sample_time2 < threshold_time:\n # Too old, clear samples.\n self.samples = []\n\n self.samples.append(value)\n self.samples = self.samples[-self.sample_size:]\n agg_value = reduce(\n lambda a, b: a + b,\n self.samples\n ) / len(self.samples)\n self.last_sample_time = datetime.now().timestamp()\n return agg_value",
"def _aggregate(self, *params): \n serialized_params = np.array([self._serialize(client) for client in params])\n serialized_aggregation = self._aggregate(*serialized_params)\n aggregated_weights = self._deserialize(serialized_aggregation)\n \n return aggregated_weights",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def calc_single_metric(trains, metric, tau):\n logger.info(\"Calculating metric %s for time_scale %s.\" % (metric, str(tau)))\n return metrics[metric](trains, tau)",
"def compute_metrics(self, results: list) -> dict:",
"def metric(self):\n return self.__metric",
"def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))",
"def calc_stat_values(self):",
"def total_tr(self):\r\n return sum(map(lambda x: self.times[x]['tr'], self.times))",
"def with_sum_mean_reduction(self):\n return self.with_reduction(lambda x: x.sum(1).mean(0))",
"def sum_values(self):\n raise NotImplementedError",
"def aggregate(global_params, running_aggregate, aggregation_result):\n running_ref = running_aggregate.get_ref('values')\n agg_ref = aggregation_result.get_ref('values')\n for i in range(global_params.dims):\n running_ref[i] += agg_ref[i]\n return running_aggregate"
]
| [
"0.6526208",
"0.61006385",
"0.60297525",
"0.59245074",
"0.584058",
"0.5800489",
"0.56997293",
"0.56921005",
"0.56350535",
"0.5630873",
"0.56030244",
"0.5590685",
"0.5583606",
"0.55782723",
"0.5570165",
"0.5558878",
"0.55482966",
"0.5541951",
"0.5541951",
"0.55373263",
"0.553548",
"0.55235434",
"0.5480568",
"0.5464475",
"0.5456113",
"0.5441247",
"0.5435816",
"0.54234546",
"0.5416521",
"0.541534"
]
| 0.6474678 | 1 |
Call `metric_value_fn` in the correct control flow context. | def fn(distribution, *a):
if hasattr(distribution, '_outer_control_flow_context'):
# If there was an outer context captured before this method was called,
# then we enter that context to create the metric value op. If the
    # captured context is `None`, ops.control_dependencies(None) gives the
# desired behavior. Else we use `Enter` and `Exit` to enter and exit the
# captured context.
# This special handling is needed because sometimes the metric is created
# inside a while_loop (and perhaps a TPU rewrite context). But we don't
# want the value op to be evaluated every step or on the TPU. So we
# create it outside so that it can be evaluated at the end on the host,
    # once the update ops have been evaluated.
# pylint: disable=protected-access
if distribution._outer_control_flow_context is None:
with tf.control_dependencies(None):
metric_value = metric_value_fn(distribution, *a)
else:
distribution._outer_control_flow_context.Enter()
metric_value = metric_value_fn(distribution, *a)
distribution._outer_control_flow_context.Exit()
# pylint: enable=protected-access
else:
metric_value = metric_value_fn(distribution, *a)
if metrics_collections:
tf.add_to_collections(metrics_collections, metric_value)
return metric_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fn(distribution, *a):\n if hasattr(distribution, '_outer_control_flow_context'):\n # If there was an outer context captured before this method was called,\n # then we enter that context to create the metric value op. If the\n # caputred context is `None`, ops.control_dependencies(None) gives the\n # desired behavior. Else we use `Enter` and `Exit` to enter and exit the\n # captured context.\n # This special handling is needed because sometimes the metric is created\n # inside a while_loop (and perhaps a TPU rewrite context). But we don't\n # want the value op to be evaluated every step or on the TPU. So we\n # create it outside so that it can be evaluated at the end on the host,\n # once the update ops have been evaluted.\n\n # pylint: disable=protected-access\n if distribution._outer_control_flow_context is None:\n with ops.control_dependencies(None):\n metric_value = metric_value_fn(distribution, *a)\n else:\n distribution._outer_control_flow_context.Enter()\n metric_value = metric_value_fn(distribution, *a)\n distribution._outer_control_flow_context.Exit()\n # pylint: enable=protected-access\n else:\n metric_value = metric_value_fn(distribution, *a)\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric_value)\n return metric_value",
"def compute_value(callback, graph):\n return callback(graph)",
"def lambda_handler(event, context):\n get_other_metrics(event)",
"def compute_value(self, *args, **kwargs):\n\n return None",
"def doFunc(self):\n val = self.func(self.value())\n if val:\n self.setValue(val)",
"def applyFuncOnValues(self, func):\r\n self._value = func(self._value)",
"def _aggregate_across_towers(metrics_collections, metric_value_fn, *args):\n def fn(distribution, *a):\n \"\"\"Call `metric_value_fn` in the correct control flow context.\"\"\"\n if hasattr(distribution, '_outer_control_flow_context'):\n # If there was an outer context captured before this method was called,\n # then we enter that context to create the metric value op. If the\n # caputred context is `None`, ops.control_dependencies(None) gives the\n # desired behavior. Else we use `Enter` and `Exit` to enter and exit the\n # captured context.\n # This special handling is needed because sometimes the metric is created\n # inside a while_loop (and perhaps a TPU rewrite context). But we don't\n # want the value op to be evaluated every step or on the TPU. So we\n # create it outside so that it can be evaluated at the end on the host,\n # once the update ops have been evaluted.\n\n # pylint: disable=protected-access\n if distribution._outer_control_flow_context is None:\n with ops.control_dependencies(None):\n metric_value = metric_value_fn(distribution, *a)\n else:\n distribution._outer_control_flow_context.Enter()\n metric_value = metric_value_fn(distribution, *a)\n distribution._outer_control_flow_context.Exit()\n # pylint: enable=protected-access\n else:\n metric_value = metric_value_fn(distribution, *a)\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric_value)\n return metric_value\n\n return distribute_lib.get_tower_context().merge_call(fn, *args)",
"def do(self, fun):\n with self.mutex:\n self.value = fun(self.value)\n return self.value",
"def execute(self, event, context):\n # pylint: disable=no-self-use\n logger = logging.getLogger(__name__)\n try:\n opts = produce_options(True)\n if opts.log_level:\n logging.basicConfig(level=logging.getLevelName(opts.log_level))\n controller = FunctionController(opts)\n result = controller.handle(event, context)\n return result.response\n except ValueError as e:\n logger.error(\"Value error: %s\", e)\n exit()",
"def _aggregate_across_towers(metrics_collections, metric_value_fn, *args):\n def fn(distribution, *a):\n \"\"\"Call `metric_value_fn` in the correct control flow context.\"\"\"\n if hasattr(distribution, '_outer_control_flow_context'):\n # If there was an outer context captured before this method was called,\n # then we enter that context to create the metric value op. If the\n # caputred context is `None`, ops.control_dependencies(None) gives the\n # desired behavior. Else we use `Enter` and `Exit` to enter and exit the\n # captured context.\n # This special handling is needed because sometimes the metric is created\n # inside a while_loop (and perhaps a TPU rewrite context). But we don't\n # want the value op to be evaluated every step or on the TPU. So we\n # create it outside so that it can be evaluated at the end on the host,\n # once the update ops have been evaluted.\n\n # pylint: disable=protected-access\n if distribution._outer_control_flow_context is None:\n with tf.control_dependencies(None):\n metric_value = metric_value_fn(distribution, *a)\n else:\n distribution._outer_control_flow_context.Enter()\n metric_value = metric_value_fn(distribution, *a)\n distribution._outer_control_flow_context.Exit()\n # pylint: enable=protected-access\n else:\n metric_value = metric_value_fn(distribution, *a)\n if metrics_collections:\n tf.add_to_collections(metrics_collections, metric_value)\n return metric_value\n\n return distribution_strategy_context.get_tower_context().merge_call(fn, *args)",
"def evaluate(self, metric, resource):\n\n # Extract values from JSON path result\n self._values = ContextUtils.manage_values_from_json(metric.value)\n\n # Create new metric with extracted values\n new_metric = ContextUtils.replace_metric_value(metric,\n len(self._values))\n\n # Call parent evaluate method with new updated metric\n return super().evaluate(new_metric, resource)",
"def evaluate():\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func()\n\t\t\t\treturn evaluate.value",
"def dispatch_value(metric, value, type):\n log_verbose('Sending metric: %s=%s as type %s' % (metric, value,type))\n\n val = collectd.Values(plugin='redis_metrics')\n val.type = type\n val.type_instance = metric\n val.values = [value]\n val.dispatch()",
"def apply(self, value):\n raise NotImplementedError",
"def call_metric_function(metric_fn,\n y_true,\n y_pred=None,\n weights=None,\n mask=None):\n if mask is not None:\n mask = math_ops.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n weights = mask\n else:\n # Update dimensions of weights to match with mask.\n weights = math_ops.cast(weights, dtype=y_pred.dtype)\n mask, _, weights = losses_utils.squeeze_or_expand_dimensions(\n mask, sample_weight=weights)\n weights *= mask\n\n if y_pred is not None:\n return metric_fn(y_true, y_pred, sample_weight=weights)\n # `Mean` metric only takes a single value.\n return metric_fn(y_true, sample_weight=weights)",
"def critic_compute_vvalue(dict_states, val_func):\n\n values = val_func.predict(dict_states['states'])\n dict_states['values'] = values",
"def _computeValueFunction(self, nbDims, low, high, retstep=False):\n # algorithms performing in discrete space will have a discrete\n # value function that cannot be evaluated at any point - only on the\n # ones for which they have been setup based on the problem it has been\n # setup to solve\n def __round(vec):\n return tuple(int(x) for x in vec)\n\n def __notround(vec):\n return vec\n\n _round = __notround\n if self._algo.DOMAIN['state'] == Spaces.Discrete:\n _round = __round\n\n allParams, stepSizes = self._discretizer.discretize(retstep=True)\n\n allActions = self._problem.getActionsList()\n reducer = max if self.reducer == 'max' else mean\n\n # returns a list\n data = [\n utils.extends({\n key: state[k]\n for k, key in enumerate(self.getKeys(nbDims))\n }, z=reducer([\n self._algo.actionValue(_round(state), action)\n for action in allActions]))\n for state in allParams\n ]\n if retstep:\n return data, stepSizes\n return data",
"def sample_event_key_evaluator(response, payload, value):\n try:\n if value != \"\":\n exec (\"global value1; value1 = \" + value)\n value = value1\n return value\n except Exception as _:\n return value",
"def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)",
"def process(self, value, context=None):\n return value",
"def update_action_value(self, state, action, value):\n self.value_function[to_table_index(state, action)] = value",
"def _evaluate_fn(model, dataset):\n # Reset the local variables so that the returned metrics are computed using\n # the given data. Similar to the `reset_states` method of `tf.metrics.Metric`.\n for var in model.local_variables:\n if var.initial_value is not None:\n var.assign(var.initial_value)\n else:\n var.assign(tf.zeros_like(var))\n\n def eval_fn(dummy_state, batch):\n \"\"\"Evaluates the model on a batch.\"\"\"\n model.forward_pass(batch, training=False)\n return dummy_state\n\n # Evaluate on the dataset.\n dataset.reduce(initial_state=0, reduce_func=eval_fn)\n\n # Obtain the metrics.\n results = collections.OrderedDict()\n local_outputs = model.report_local_outputs()\n for name, metric in local_outputs.items():\n if isinstance(metric, list) and (len(metric) == 2):\n # Some metrics returned by `report_local_outputs()` can have two scalars:\n # one represents `sum`, and the other represents `count`. Ideally, we want\n # to return a single scalar for each metric.\n results[name] = metric[0] / metric[1]\n else:\n results[name] = metric[0] if isinstance(metric, list) else metric\n return results",
"def __call__(self, output, target, *args, **kwargs):\n _, y_pred = output.topk(1, 1, True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm",
"def function_to_execute(event, context):\r\n action = base64.b64decode(event['data']).decode('utf-8')\r\n\r\n if (action == \"calculate_exceedances_for_last_28_days\"):\r\n calculate_exceedances_for_last_28_days()",
"def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def test_value(self, missing_mock):\n missing_mock.return_value = False\n self._MetricSourceAgeMetricTest__metric_source.datetime = MagicMock(return_value=datetime.datetime.now())\n\n result = self.__metric.value()\n\n self.assertTrue(missing_mock.called)\n self.assertTrue(self._MetricSourceAgeMetricTest__metric_source.datetime.assert_called_once)\n self.assertEqual(0, result)",
"def wrappedFn(*args, **kw):\n setCurrent(context)\n fn(*args, **kw)",
"def __call__(self, vals, style, caller, threshold, return_lines=False):\n\n self._eval_args(vals, style, threshold, return_lines)\n\n handler = self._handlers.get(type(vals), self._default)\n return handler(vals, style, caller, threshold, return_lines)",
"def evaluateFunction(self):\n self.functionValue = np.round(self.function.evaluate(self.position[0], self.position[1]), 2)",
"def attribute_error_handler(value_to_return):\n def inner(func):\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except AttributeError:\n return value_to_return\n return wrapper\n return inner"
]
| [
"0.6583132",
"0.58839256",
"0.5767293",
"0.574908",
"0.55788815",
"0.5566902",
"0.548517",
"0.5422161",
"0.537603",
"0.5365945",
"0.52946395",
"0.52842236",
"0.51841676",
"0.5158378",
"0.515165",
"0.51359975",
"0.51274526",
"0.5114945",
"0.5100913",
"0.50856",
"0.5031937",
"0.50305986",
"0.50181776",
"0.50095695",
"0.4997479",
"0.49778244",
"0.49419314",
"0.49299756",
"0.49269888",
"0.49190697"
]
| 0.6464017 | 1 |
Initializes to count the number of null lists in a specific feature path. When required_paths is also passed, rows which are null for all of the required paths will not be counted as missing. | def __init__(self,
path: types.FeaturePath,
required_paths: Optional[Iterable[types.FeaturePath]] = None):
self._path = path
if required_paths:
self._required_paths = tuple(sorted(required_paths))
else:
self._required_paths = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_null(self):\n null = 0\n for x in range(0, self.tot_col):\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x].lower() == 'null':\n null += 1\n print('Total number of null fields: ' + str(null))\n results.append('Total number of null fields: ' + str(null))",
"def count_null(self): \n print('Null Counts:', self.X.isnull().sum()[self.X.isnull().sum() > 0])",
"def test_null_count(self):\n\n ld = Lambdata(self.df)\n num_nulls = ld.null_count()\n self.assertEqual(num_nulls, 3)",
"def _fields_num(self):\n return len(self.paths)",
"def test_count_ways_null():\n assert f.count_ways(0) == 1",
"def count_paths_with_zero_intervals(self):\n zeros = []\n for path in self.paths:\n # print(\"Checking path {}\".format(path))\n has_zero = 0\n for arc in path:\n # lb = self.arc_info[arc][\"lower_bound\"]\n # ub = self.arc_info[arc][\"upper_bound\"]\n # print(\"{} {} interval\".format(lb,ub))\n if (self.arc_info[arc][\"upper_bound\"] -\n self.arc_info[arc][\"lower_bound\"]) == 0:\n has_zero = 1\n zeros.append(has_zero)\n print(zeros)\n return(sum(zeros))",
"def count_interests(rows: List[Row]) -> int:\n return len([row for row in rows if row[\"interest\"] is not None])",
"def test_number_of_nulls(self):\n self.assertEqual(em.number_of_nulls(self.test_df), 3)",
"def data_flow_null_count(self) -> int:\n return self.graph_count - int(\n self.graph_tuple_stats.data_flow_steps_count or 0\n )",
"def null_count(df):\n return df.isnull().sum().sum()",
"def reportnulls(self):\n self.null_counts = self.df.isnull().sum().sort_values(ascending=False)\n\n # return count of null values\n return self.null_counts",
"def number_of_pathsets(pathset_paths_df):\n return pathset_paths_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM].nunique()",
"def key(\n cls,\n path: types.FeaturePath,\n required_paths: Optional[Iterable[types.FeaturePath]] = None\n ) -> Tuple[Union[Text, types.FeaturePath], ...]:\n key_tuple = ('CountMissingGenerator', path)\n if required_paths:\n key_tuple += tuple(sorted(required_paths))\n return key_tuple",
"def __len__(self):\n return len(self.paths)",
"def __len__(self):\n return len(self.paths)",
"def __len__(self):\n return len(self.A_paths)",
"def __len__(self):\n return len(self.A_paths)",
"def __len__(self):\n return len(self.A_paths)",
"def __len__(self):\n return len(self.A_paths)",
"def n_good_features_(self):\n return np.sum(self.important_features_)",
"def numPaths(self):\n if self.numpaths > -1:\n return self.numpaths\n\n if self.jolt == 0:\n return 1\n\n paths = 0\n for parent in self.parents:\n paths += parent.numPaths()\n \n return paths",
"def features_size(self) -> int:\n return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None",
"def calc_empty(self):\n empty = 0\n for x in range(0, self.tot_col):\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x] == '':\n empty += 1\n #print(csv_list[y][x] + ' %s %s' % (x, y))\n return empty",
"def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count",
"def __len__(self):\n if self.path_is_string:\n if self.path:\n return 1\n else:\n return 0\n else:\n if self.path_type in (list, tuple):\n if not any(item for item in self.path):\n return 0\n return len(self.path)",
"def count_examples(filepaths):\n n = 0\n for f in filepaths:\n for r in tf.python_io.tf_record_iterator(f):\n n += 1\n return n",
"def extract_path_count(self, metapaths=None, start_nodes=None, end_nodes=None, verbose=False, n_jobs=1,\n return_sparse=False, sparse_df=True):\n\n return self._extract_metapath_feaures(metapaths=metapaths, start_nodes=start_nodes, end_nodes=end_nodes,\n verbose=verbose, n_jobs=n_jobs, return_sparse=return_sparse,\n sparse_df=sparse_df, func=mt.count_paths, mats=self.adj_matrices,\n message='Path Count')",
"def none_count(d):\n return six.moves.reduce(lambda x, y: x + 1 if y == None else x, d.values(), 0)",
"def test_count_when_data_is_not_present(self):\n\n temp_data = []\n\n tt = TemperatureTracker()\n result = tt.count_from(temp_data)\n self.assertEqual(result, 0)",
"def test_get_contig_lengths_null_contigs(self):\n ret = self.getImpl().get_contig_lengths(self.ctx, self.obj_ref, None)\n self.assertEqual(ret[0], {})"
]
| [
"0.57643074",
"0.5636303",
"0.56160426",
"0.5423167",
"0.53524774",
"0.5331745",
"0.5315419",
"0.5221362",
"0.5192976",
"0.5125796",
"0.5100069",
"0.5041495",
"0.5023727",
"0.49956",
"0.49956",
"0.49719247",
"0.49719247",
"0.49719247",
"0.49719247",
"0.49430317",
"0.4916951",
"0.48947763",
"0.4886706",
"0.48424935",
"0.48411456",
"0.4834308",
"0.4831059",
"0.48145342",
"0.4807547",
"0.4800117"
]
| 0.57114744 | 1 |
r""" Calculates the sum over all dimensions, except the first (batch dimension), and excluding the last n_dims. This function will ignore the first dimension and it will not aggregate over the last n_dims dimensions. | def sum_over_all_but_batch_and_last_n(
tensor: torch.Tensor, n_dims: int
) -> torch.Tensor:
if tensor.dim() == n_dims + 1:
return tensor
else:
dims = list(range(1, tensor.dim() - n_dims))
return tensor.sum(dim=dims) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum_except_batch(x, num_dims=1):\n return x.reshape(*x.shape[:num_dims], -1).sum(-1)",
"def dim_zero_sum(x: Tensor) ->Tensor:\n return torch.sum(x, dim=0)",
"def sum(self, axis=None, keepdims=False):\n return F.Sum.apply(self, axis, keepdims)",
"def sum_to_0d(x):\n assert_equal(x.ndim, 1)\n return np.squeeze(np.sum(x, keepdims=True))",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def acumsum (a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if type(dimension) in [ListType, TupleType, N.ndarray]:\r\n dimension = list(dimension)\r\n dimension.sort()\r\n dimension.reverse()\r\n for d in dimension:\r\n a = N.add.accumulate(a,d)\r\n return a\r\n else:\r\n return N.add.accumulate(a,dimension)",
"def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(tensor, axis=None):\n raise NotImplementedError",
"def my_sum(a, axis, count):\n if a.shape[axis] == count:\n return a.sum(axis)\n elif a.shape[axis] == 1:\n return count * a.sum(axis)\n else:\n raise IndexError('Cannot be broadcast: a.shape=%s, axis=%d, count=%d' % (a.shape, axis, count))",
"def sum(self, dim=None):\n if dim is None:\n x = self.flatten()\n else:\n x = self.transpose(0, dim)\n\n # Add all BinarySharedTensors\n while x.size(0) > 1:\n extra = None\n if x.size(0) % 2 == 1:\n extra = x[0]\n x = x[1:]\n x0 = x[: (x.size(0) // 2)]\n x1 = x[(x.size(0) // 2) :]\n x = x0 + x1\n if extra is not None:\n x.share = torch_cat([x.share, extra.share.unsqueeze(0)])\n\n if dim is None:\n x = x.squeeze()\n else:\n x = x.transpose(0, dim).squeeze(dim)\n return x",
"def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):\r\n\r\n out = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(self, axis: int = 0):\r\n self.values = self.values.sum(axis=axis)\r\n self.layers = [None]\r\n return self.copy()",
"def asum (a, dimension=None,keepdims=0):\r\n if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:\r\n a = a.astype(N.float_)\r\n if dimension == None:\r\n s = N.sum(N.ravel(a))\r\n elif type(dimension) in [IntType,FloatType]:\r\n s = N.add.reduce(a, dimension)\r\n if keepdims == 1:\r\n shp = list(a.shape)\r\n shp[dimension] = 1\r\n s = N.reshape(s,shp)\r\n else: # must be a SEQUENCE of dims to sum over\r\n dims = list(dimension)\r\n dims.sort()\r\n dims.reverse()\r\n s = a *1.0\r\n for dim in dims:\r\n s = N.add.reduce(s,dim)\r\n if keepdims == 1:\r\n shp = list(a.shape)\r\n for dim in dims:\r\n shp[dim] = 1\r\n s = N.reshape(s,shp)\r\n return s",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def conv_reduce_sum(x, result_shape, padding, strides):\n if len(result_shape) == 3:\n return conv2d_reduce_sum(x, result_shape[0], result_shape[1],\n padding, strides)\n elif len(result_shape) == 2:\n return conv1d_reduce_sum(x, result_shape[0], padding, strides[0])\n else:\n raise ValueError()",
"def conv1d_reduce_sum(x, input_length, padding, stride):\n # Sum over the output channels.\n lam_sum = tf.reduce_sum(x, axis=3)\n\n num_classes = x.shape[0].value\n batch_size = tf.shape(x)[1]\n kernel_length = x.shape[4].value\n input_channels = x.shape[5].value\n\n # Temporarily combine the (num_classes, batch_size, in_layer_channels) dims\n # while applying a transpose convolution.\n # Also use (kernel_length) as the channels\n # as we'll apply the transpose convolution to each kernel point separately.\n lam_squeezed = tf.transpose(lam_sum, perm=[0, 1, 4, 2, 3])\n lam_squeezed = tf.reshape(lam_squeezed, shape=(\n [num_classes * batch_size * input_channels] +\n x.shape[2:3].as_list() +\n [kernel_length]))\n\n # De-convolve each elementary (i.e. one-hot) filter with the corresponding\n # slice of lambda.\n diagonal_kernel = tf.reshape(\n tf.eye(kernel_length, dtype=x.dtype),\n shape=[kernel_length, 1, kernel_length])\n lam_deconv = tf.nn.conv1d_transpose(\n lam_squeezed,\n diagonal_kernel,\n output_shape=([num_classes * batch_size * input_channels] +\n [input_length, 1]),\n padding=padding,\n strides=stride)\n\n # The resulting de-convolution has shape\n # (num_classes*batch_size*in_layer_channels,\n # in_layer_length, 1).\n # Make it match mu_in.\n result = tf.reshape(lam_deconv, shape=(\n [num_classes, batch_size, input_channels] +\n lam_deconv.shape[1:2].as_list()))\n return tf.transpose(result, perm=[0, 1, 3, 2])",
"def all_reduce_sum_gradients(grads_and_vars):\n grads_and_vars = list(grads_and_vars)\n filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n if filtered_grads_and_vars:\n if strategy_supports_no_merge_call():\n grads = [pair[0] for pair in filtered_grads_and_vars]\n reduced = distribute_lib.get_strategy().extended._replica_ctx_all_reduce( # pylint: disable=protected-access\n ds_reduce_util.ReduceOp.SUM, grads)\n else:\n # TODO(b/183257003): Remove this branch\n reduced = distribute_lib.get_replica_context().merge_call(\n _all_reduce_sum_fn, args=(filtered_grads_and_vars,))\n else:\n reduced = []\n # Copy 'reduced' but add None gradients back in\n reduced_with_nones = []\n reduced_pos = 0\n for g, v in grads_and_vars:\n if g is None:\n reduced_with_nones.append((None, v))\n else:\n reduced_with_nones.append((reduced[reduced_pos], v))\n reduced_pos += 1\n assert reduced_pos == len(reduced), \"Failed to add all gradients\"\n return reduced_with_nones",
"def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)",
"def cumsum(tensor, axis=None):\n raise NotImplementedError",
"def _sum_grad(x, axis, dout):\n # input_shape = [2, 3] axis = [1]\n input_shape = shape_op(x)\n # output_shape_kept_dims = [2, 1]\n output_shape_kept_dims = reduced_shape(input_shape, axis)\n # tile_scaling = [1, 3]\n tile_scaling = tuple_div(input_shape, output_shape_kept_dims)\n grad = reshape(dout, output_shape_kept_dims)\n return tile(grad, tile_scaling)",
"def flat_dim(self):\n return np.sum([c.flat_dim for c in self.spaces])",
"def local_sum(a,tshape, padval):\n\n # zero-padding\n a = ndpad(a,tshape, padval)\n\n # difference between shifted copies of an array along a given dimension\n def shiftdiff(a,tshape,shiftdim):\n ind1 = [slice(None,None),]*a.ndim\n ind2 = [slice(None,None),]*a.ndim\n ind1[shiftdim] = slice(tshape[shiftdim],a.shape[shiftdim]-1)\n ind2[shiftdim] = slice(0,a.shape[shiftdim]-tshape[shiftdim]-1)\n return a[ind1] - a[ind2]\n\n # take the cumsum along each dimension and subtracting a shifted version\n # from itself. this reduces the number of computations to 2*N additions\n # and 2*N subtractions for an N-dimensional array, independent of its\n # size.\n #\n # See:\n # <http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html>\n for dd in xrange(a.ndim):\n a = np.cumsum(a,dd)\n a = shiftdiff(a,tshape,dd)\n return a",
"def conv2d_reduce_sum(x, input_height, input_width, padding, strides):\n # Sum over the output channels.\n lam_sum = tf.reduce_sum(x, axis=4)\n\n num_classes = x.shape[0].value\n batch_size = tf.shape(x)[1]\n kernel_height = x.shape[5].value\n kernel_width = x.shape[6].value\n input_channels = x.shape[7].value\n\n # Temporarily combine the (num_classes, batch_size, in_layer_channels) dims\n # while applying a transpose convolution.\n # Also combine (kernel_height, kernel_width), using them as the channels\n # as we'll apply the transpose convolution to each kernel point separately.\n lam_squeezed = tf.transpose(lam_sum, perm=[0, 1, 6, 2, 3, 4, 5])\n lam_squeezed = tf.reshape(lam_squeezed, shape=(\n [num_classes * batch_size * input_channels] +\n x.shape[2:4].as_list() +\n [kernel_height * kernel_width]))\n\n # De-convolve each elementary (i.e. one-hot) filter with the corresponding\n # slice of lambda.\n diagonal_kernel = tf.reshape(\n tf.eye(kernel_height * kernel_width, dtype=x.dtype),\n shape=[kernel_height, kernel_width, 1, kernel_height * kernel_width])\n lam_deconv = tf.nn.conv2d_transpose(\n lam_squeezed, diagonal_kernel, output_shape=(\n [num_classes * batch_size * input_channels] +\n [input_height, input_width, 1]),\n padding=padding, strides=([1] + list(strides) + [1]))\n\n # The resulting de-convolution has shape\n # (num_classes*batch_size*in_layer_channels,\n # in_layer_height, in_layer_width, 1).\n # Make it match mu_in.\n result = tf.reshape(lam_deconv, shape=(\n [num_classes, batch_size, input_channels] +\n lam_deconv.shape[1:3].as_list()))\n return tf.transpose(result, perm=[0, 1, 3, 4, 2])",
"def normalize_to_sum_one(in_tensor, tensor_rank, sum_one_indices_cnt=0):\n if sum_one_indices_cnt == 0:\n total_sum = tf.reduce_sum(in_tensor)\n return in_tensor / total_sum\n\n tensor_shape = tf.shape(in_tensor)\n sum_tensor = tf.reduce_sum(in_tensor, reduction_indices=range(sum_one_indices_cnt, tensor_rank), keep_dims=True)\n denominator = tf.tile(sum_tensor, tf.concat(0, [tf.ones([sum_one_indices_cnt], dtype=dtypes.int32),\n tensor_shape[sum_one_indices_cnt:]]))\n return in_tensor / denominator",
"def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)",
"def sum_right_most(x, ndim):\n if ndim == 0:\n return x\n axes = list(range(-ndim, 0))\n return x.sum(axes)"
]
| [
"0.7724387",
"0.63933766",
"0.623022",
"0.617324",
"0.6025804",
"0.59841615",
"0.5975488",
"0.59079736",
"0.58502185",
"0.58405554",
"0.5831169",
"0.5810228",
"0.5778869",
"0.5778869",
"0.5761769",
"0.57385844",
"0.57123697",
"0.57123697",
"0.56297374",
"0.561928",
"0.5616455",
"0.55859786",
"0.5561574",
"0.55499923",
"0.55329186",
"0.5519495",
"0.54935724",
"0.54900336",
"0.54832447",
"0.5483044"
]
| 0.798387 | 0 |
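A minimal usage sketch of sum_over_all_but_batch_and_last_n above, assuming PyTorch and example shapes chosen only for illustration:

# Per-sample tensor of shape (batch, extra, extra, last); n_dims=1 keeps the
# batch dimension and the last dimension, and sums out everything in between.
import torch

def sum_over_all_but_batch_and_last_n(tensor: torch.Tensor, n_dims: int) -> torch.Tensor:
    if tensor.dim() == n_dims + 1:
        return tensor
    dims = list(range(1, tensor.dim() - n_dims))
    return tensor.sum(dim=dims)

grads = torch.randn(8, 4, 16, 3)
out = sum_over_all_but_batch_and_last_n(grads, n_dims=1)
print(out.shape)  # torch.Size([8, 3])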
Returns an ndb.Model entity that the urlsafe key points to. Checks that the entity returned is of the correct kind. Raises an error if the key string is malformed or the entity is of the incorrect kind. | def get_by_urlsafe(urlsafe, model):
try:
key = ndb.Key(urlsafe=urlsafe)
except TypeError:
raise endpoints.BadRequestException('Invalid Key')
except Exception as e:
if e.__class__.__name__ == 'ProtocolBufferDecodeError':
raise endpoints.BadRequestException('Invalid Key')
else:
raise
entity = key.get()
if not entity:
return None
if not isinstance(entity, model):
raise ValueError('Incorrect Kind')
return entity | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _getEntityByWebsafeKey(websafeKey, kind):\n # Ensure that the websafe key is valid\n key = _raiseIfWebsafeKeyNotValid(websafeKey, kind)\n # Get the entity\n entity = key.get()\n if not entity:\n raise endpoints.NotFoundException(\n \"No '%s' entity found using websafe key: %s\" %\n (kind, websafeKey))\n # If all is well, return the entity\n return entity",
"def get_or_create(cls, key, urlsafe=False, **kwargs):\n if urlsafe:\n key = ndb.Key(urlsafe=key)\n ent = key.get()\n if ent is not None:\n return (ent, False) # False meaning \"not created\"\n ent = cls(**kwargs)\n ent.key = key\n ent.put()\n return (ent, True) # True meaning \"created\"",
"def test4ValidEntity(id):\n object = None\n try:\n object = ndb.Key(urlsafe=id).get()\n except Exception, e:\n if e.__class__.__name__ == ProtocolBufferDecodeError:\n object = None\n return object",
"def _raiseIfWebsafeKeyNotValid(websafeKey, kind):\n # Check that websafeKey is not None\n if not websafeKey:\n raise endpoints.BadRequestException(\n \"Websafe key not provided for '%s'\" % kind)\n # Try to decode the websafe key into a real key\n try:\n key = ndb.Key(urlsafe=websafeKey)\n except:\n raise endpoints.BadRequestException(\n \"Websafe key provided for '%s' could not be decoded: %s\" %\n (kind, websafeKey))\n # Ensure that the key is of the desired kind\n if key.kind() != kind:\n raise endpoints.BadRequestException(\n \"Websafe key is not of the '%s' kind: %s\" % (kind, websafeKey))\n # If all is well, return the key\n return key",
"def get_album_key_by_keystr(keystr):\n attr_err = 'Keystrings must be an instance of base string, recieved: %s' % keystr\n kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.'\n if not keystr or not isinstance(keystr, basestring):\n raise RuntimeError(attr_err)\n\n key = ndb.Key(urlsafe=keystr)\n if not key.kind() == PHOTOALBUM_KIND:\n raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind()))\n\n return key",
"def get_entity_by_key(cls, key):\n db_key = \"entity:\" + str(key)\n result = cls.db.hgetall(db_key)\n return (Entity.build(result) if type(result) is dict else None)",
"def _getNDBKey(websafe_key_to_get):\n return ndb.Key(urlsafe=websafe_key_to_get)",
"def _get_dict_model(cls, key, model, spec):\n try:\n return model[key]\n except KeyError:\n raise ObjectNotFoundError(path=spec[\"full_path\"])",
"def __getitem__(self, key):\n if isinstance(key, str):\n phone = EntityDatabase.parse_phone(key)\n if phone:\n return self._phone_id[phone]\n else:\n key = key.lstrip('@').lower()\n return self._entities[self._username_id[key]]\n\n if isinstance(key, int):\n return self._entities[key] # normal IDs are assumed users\n\n if isinstance(key, TLObject):\n sc = type(key).SUBCLASS_OF_ID\n if sc == 0x2d45687:\n # Subclass of \"Peer\"\n return self._entities[utils.get_peer_id(key, add_mark=True)]\n elif sc in {0x2da17977, 0xc5af5d94, 0x6d44b7db}:\n # Subclass of \"User\", \"Chat\" or \"Channel\"\n return key\n\n raise KeyError(key)",
"def get(self, key):\n result = self.search({\n \"field\": \"identity.key\",\n \"operator\": \"=\",\n \"value\": key})\n if len(result) > 1:\n raise SarasvatiException(\"Entity is not unique {}\".format(key))\n return result[0] if len(result) > 0 else None",
"def _load_entity(client, entity_type, entity_id, parent_key=None):\n\n key = _load_key(client, entity_type, entity_id, parent_key)\n entity = client.get(key)\n log('retrieved entity: ' + entity_type + ' for ID: ' + str(entity_id))\n return entity",
"def get_by_id(self, model, key_name):\n return model.get_by_id(key_name)",
"def test_key_kind(self):\r\n parent = ParentKind.objects.create(pk=1)\r\n child = ChildKind.objects.create(\r\n pk=2, parent=parent, parents=[parent.pk])\r\n self.assertEqual(child.parent.pk, parent.pk)\r\n self.assertEqual(child.parents[0], parent.pk)\r\n\r\n from google.appengine.api.datastore import Get\r\n from google.appengine.api.datastore_types import Key\r\n parent_key = Key.from_path(parent._meta.db_table, 1)\r\n child_key = Key.from_path(child._meta.db_table, 2)\r\n parent_entity = Get(parent_key)\r\n child_entity = Get(child_key)\r\n parent_column = child._meta.get_field('parent').column\r\n parents_column = child._meta.get_field('parents').column\r\n self.assertEqual(child_entity[parent_column], parent_key)\r\n self.assertEqual(child_entity[parents_column][0], parent_key)",
"def _load_key(client, entity_type, entity_id=None, parent_key=None):\n\n key = None\n if entity_id:\n key = client.key(entity_type, entity_id, parent=parent_key)\n else:\n # this will generate an ID\n key = client.key(entity_type)\n return key",
"def get_entity(endpoint):\n _entity, _id = parser_endpoint(endpoint)\n\n return _entity",
"def get_datastore_key(model, pk):\n\n kind = get_top_concrete_parent(model)._meta.db_table\n return Key.from_path(kind, pk)",
"def get_by_unique_key(self, unique_key, name, datastore=None, item_type=None):\n storage = self.storage(datastore)\n model = storage.get_by_unique_key(unique_key, name, item_type=item_type)\n # unless forcing ES datastore, check write storage if not found in read\n # if datastore == 'database' and storage is self.read:\n # Old is above - See C4-30\n # if not specifically specifying datastore=elasticsearch, always fall back to DB\n if not datastore == 'elasticsearch':\n if model is None:\n return self.write.get_by_unique_key(unique_key, name) # no need to pass item_type since it's write\n return model",
"def _get_raw_entity_kind(cls, entity_kind_or_model_classpath):\n return entity_kind_or_model_classpath",
"def GetEntityViaMemcache(entity_key):\n entity = memcache.get(entity_key)\n if entity is not None:\n return entity\n key = ndb.Key(urlsafe=entity_key)\n entity = key.get()\n if entity is not None:\n memcache.set(entity_key, entity)\n return entity",
"def url_for_object(self, key: typing.Optional[str]=None) -> str:\n ...",
"def _GetCompleteKeyOrError(arg):\n if isinstance(arg, Key):\n key = arg\n elif isinstance(arg, basestring):\n key = Key(arg)\n elif isinstance(arg, Entity):\n key = arg.key()\n elif not isinstance(arg, Key):\n raise datastore_errors.BadArgumentError(\n 'Expects argument to be an Entity or Key; received %s (a %s).' %\n (arg, typename(arg)))\n assert isinstance(key, Key)\n\n if not key.has_id_or_name():\n raise datastore_errors.BadKeyError('Key %r is not complete.' % key)\n\n return key",
"def read(cls, key, mode = FetchMode.All):\n assert isinstance(key, (basestring, Key))\n namespace, kind, member = Schema.Get(cls)\n if isinstance(key, Key):\n assert kind == key.kind, \"Mismatched Model, reading a %s with %s\" % (kind, key.kind)\n return Lisa.read(key, mode)\n else: \n key = Key(namespace, kind, key)\n return Lisa.read(key, mode)",
"def get_model(self, key: str = None, **kwargs) -> Dict:\n raise NotImplementedError",
"def Get(cls, model):\n try:\n model = model if isinstance(model, type) else model.__class__\n return cls.keys[id(model)]\n except KeyError:\n raise BadModelError(\"Class: %s is not a valid Model; \",(model))",
"def validate(self, entity_id):\n\n safe_entity = self.find_by_id(entity_id)\n if not safe_entity:\n raise EntityNotFound(\"The param_id = {} is not valid.\".format(entity_id))\n\n return safe_entity",
"def decode_model_instance(hashid):\n try:\n return get_encoded_object_or_404(hashid).obj\n except Http404: # pragma: no cover\n return None",
"def get_object_or_404(klass, key):\n obj = klass.get(key)\n if not obj:\n return '', 404\n return obj",
"def fetch_entity(endpoint, values):\n values['entity'] = Entity.objects.get_or_404(name=values['entity'])",
"def test_fromDictEntitiesURL(self):\n data = {\n \"urls\": [\n {\n \"url\": \"http://t.co/0JG5Mcq\",\n \"display_url\": u\"blog.twitter.com/2011/05/twitte\\xe2\",\n \"expanded_url\": \"http://blog.twitter.com/2011/05/twitter-for-mac-update.html\",\n \"indices\": [\n 84,\n 103\n ]\n }\n ],\n }\n entities = platform.Entities.fromDict(data)\n self.assertEquals('http://t.co/0JG5Mcq', entities.urls[0].url)",
"def get_kind(klass, obj):\n if isinstance(obj, basestring):\n return str(obj.split('_')[0])\n elif isinstance(obj, Model):\n return obj.__class__.__name__\n elif isinstance(obj, ndb.Key):\n return obj.kind()\n else:\n raise Exception('Model.get_kind() invalid input: {} ({}).'\n .format(obj, str(type(obj))))"
]
| [
"0.73048353",
"0.65032816",
"0.64167804",
"0.6294501",
"0.62330955",
"0.5972219",
"0.59239554",
"0.5729749",
"0.5718736",
"0.5496142",
"0.54275656",
"0.54200107",
"0.5398956",
"0.5366968",
"0.53621364",
"0.5358711",
"0.5357161",
"0.53120214",
"0.53034353",
"0.53009325",
"0.52477145",
"0.52318895",
"0.52059",
"0.51799953",
"0.5165578",
"0.51567996",
"0.51567006",
"0.51411796",
"0.51335937",
"0.5107501"
]
| 0.8238842 | 0 |
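A hedged usage sketch for get_by_urlsafe above, assuming an App Engine environment; the Game kind and the get_game caller are hypothetical and only illustrate the call pattern:

# Hypothetical kind and caller: Game and get_game are not defined in the source;
# ndb and endpoints are the standard App Engine / Cloud Endpoints packages.
from google.appengine.ext import ndb
import endpoints

class Game(ndb.Model):
    name = ndb.StringProperty()

def get_game(urlsafe_game_key):
    game = get_by_urlsafe(urlsafe_game_key, Game)  # raises on a malformed key or wrong kind
    if not game:
        raise endpoints.NotFoundException('Game not found!')
    return game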
Returns a Player (ndb.Model) entity for a given username and game. First verifies that the username and the game entity are valid, raising an error if not. Then returns the Player entity for the given user in the game. Raises an error if a Player is not found. | def get_player_by_game(username, game):
# check to make sure User exists
if not check_user_exists(username):
raise endpoints.NotFoundException(
'{} does not exist!'.format(username))
# check to see if game is a valid Game entity
if not isinstance(game, Game):
raise endpoints.NotFoundException('Game not found')
# check to see if Player is in this game
player = Player.query(
ancestor=game.key).filter(
Player.name == username.title()).get()
if not player:
raise endpoints.NotFoundException(
'{} is not in this game'.format(username))
else:
return player | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_game(username):\n user = User.objects.get(username=username)\n if user.two_player_game_id != None:\n return TwoPlayerGame.objects.get(\n game_id=user.two_player_game_id), \"two\"\n if user.four_player_game_id != None:\n return FourPlayerGame.objects.get(\n game_id=user.four_player_game_id), \"four\"\n return None, None",
"def get_player(self, player):\n return self._db.Players.find_one({'Name' :\n re.compile(player, re.IGNORECASE)})",
"def get_game_playing(username=None):\n\tchannel_data = get_info(username, use_fallback=False)\n\tif not channel_data or not channel_data['live']:\n\t\treturn None\n\tif channel_data.get('game_id'):\n\t\treturn Game(int(channel_data['game_id']), channel_data['game_name'])\n\treturn None",
"def get_player(playerName):\n return players_col.find_one({\"name\": playerName})",
"def get_game(user_id):\n data = g.db.find_one({'_id': user_id})\n if data:\n return Game(data['game'])\n return None",
"def search_player_by_username(database, username, check_exist_only=False):\n if database not in constants.VALID_DATABASES:\n raise ValueError('database is invalid')\n\n username = username.upper()\n\n params = {\n 'db': database,\n 'search': username\n }\n\n html_content = _call(\n players_base_url + 'view_player.php',\n parser='html',\n params=params\n )\n\n node = html_content.xpath('(//table/tr[position() = 2])[1]')\n\n if check_exist_only:\n return False if not node else True\n else:\n if not node:\n return None\n\n return Player.load(database, node[0], alternative=True)",
"def get_player(self, user_id):\n players = [p for p in self.players if p.id() == user_id]\n return players[0] if players else None",
"def get_game_player_stats(self, user):\n try:\n return GamePlayer.objects.get(game=self, player=user)\n except GamePlayer.DoesNotExist:\n return None",
"def _get_player(self, player_name):\n return self._collection.find_one({'name': player_name})",
"def get_player(self, player_name, player_type):\n\n # Return correct player class\n if player_type == \"human\":\n return Player(player_name)\n if player_type == \"computer\":\n return ComputerPlayer(player_name)",
"def get_player(self, player_id):\r\n\r\n for player in self.players:\r\n if player.player_id == player_id:\r\n return player\r\n\r\n raise Exception(\"A player with the given id {0} was not found in the player list\".format(player_id))",
"def retrieve_existing_player(self, player_id: str) -> Player:\n if player_id in self.player_df[\"player_id\"].tolist():\n player = self.player_df.loc[self.player_df[\"player_id\"] == player_id, \"player\"].tolist()[0]\n return player\n else:\n raise ValueError(f\"no player found with ID {player_id}\")",
"async def get_game_by_player(player_id):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT gameid FROM blackjack.games WHERE player1 = $1 OR player2 = $1\", player_id))",
"async def get_by_username(db: Session, username: str) -> User:\n return db.query(User).filter(User.username == username).first()",
"def player(self, name):\n\n self.name = name\n q = Query()\n data = TinyDB('app/data/db_player.json').table('players')\n\n self.search_result = data.search(\n (q.name == self.name) |\n (q.surname == self.name)\n )\n\n if len(self.search_result) == 0:\n v_menu.View().search('player_none')\n return 'None'\n\n elif len(self.search_result) == 1:\n v_menu.View().search_players(\n 'find_player',\n self.search_result[0]['name'],\n self.search_result[0]['surname'],\n self.search_result[0]['birthday'],\n self.search_result[0]['rank']\n )\n return self.search_result[0]['id']\n\n elif len(self.search_result) >= 2:\n for i in range(len(self.search_result)):\n v_menu.View().search_players(\n 'find_players',\n self.search_result[i]['name'],\n self.search_result[i]['surname'],\n self.search_result[i]['birthday'],\n self.search_result[i]['rank'], i+1\n )\n\n self.player_number = c_input.Input().select_menu_number(\n len(self.search_result))\n\n return self.search_result[self.player_number-1]['id']",
"def get_player(self, name: str, platform: ALPlatform, skip_tracker_rank=False) -> ALPlayer:\n basic_player_stats: list = self.basic_player_stats(name, platform, skip_tracker_rank)\n assert len(basic_player_stats) == 1\n event_info: list = self.events(\n player_name=name,\n platform=platform,\n action=ALAction.INFO\n )\n events: list = list()\n tracked_player: dict\n for tracked_player in event_info[0].get('data'):\n if name == tracked_player.get('name') and \\\n platform.value == tracked_player.get('platform'):\n events = self.events(\n player_name=name,\n platform=platform,\n action=ALAction.GET\n )\n return ALPlayer(basic_player_stats_data=basic_player_stats[0], events=events)",
"def get_user_by_username(\n self, username: str, session: Session = None\n ) -> Optional[User]:\n session = session or self.get_session\n return (\n session.query(self.user_model)\n .filter(self.user_model.username == username)\n .one_or_none()\n )",
"def find_user_by_username(username: str) -> User:\n\n # Find user with this username, or None if there isn't any\n return User.query.filter_by(username=username).first()",
"def find_by_username(cls, username):\n ## Setup Connection & Cursor\n connection, cursor = Database.connect_to_db()\n\n ## Find the user\n query = \"SELECT * FROM {table} WHERE username=?\".format(table=cls.TABLE_NAME)\n result = cursor.execute(query, (username,)) ## Parameter must always be a tuple\n row = result.fetchone() ## Returns None if no results\n\n ## Create User object if we get data back\n if row:\n user = cls(*row)\n else:\n user = None\n\n ## Close Connection\n connection.close()\n\n return user",
"def find_or_add_player(firstname, lastname, owner, **kwargs):\n res = find_player(firstname, lastname)\n if res.exists():\n return res.first()\n else:\n obj = Object(owner=owner)\n obj.save()\n player = Player(\n object=obj,\n firstname=firstname,\n lastname=lastname,\n **kwargs\n )\n player.save()\n return player",
"def get_user(username):\n users = get_users()\n for user in users:\n if user['username'] == username:\n return user\n\n raise UserNotFound",
"def user_by_name(username):\n user = User.query.filter(User.username == username).one_or_none()\n return user",
"def players(self, game: str) -> Response:\n\n endpoint = '/api/players'\n query = f'?game={game}'\n return self.fetch(endpoint, query)",
"def get_player(self, num):\n\n name = input(f\"What is the name for player number {num}? \")\n player = Player(name)\n return player",
"def _get_profile(self, season, player):\n try:\n try:\n player = int(player)\n except ValueError:\n player = player.lower()\n player_list = season.get_season_data()[\"proPlayers\"]\n for p in player_list:\n if p[\"id\"] == player:\n return p\n if p[\"name\"].lower() == player:\n return p\n except Exception as e:\n error_msg = (\"Failed to retrieve player profile data: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)",
"def _findUser(username):\r\n user = None\r\n try:\r\n user = PongUser.objects.get(username=username)\r\n except User.DoesNotExist:\r\n user = None\r\n finally:\r\n return user",
"def get(cls, username, server, bucket=None):\n\t\tusername = cls._clean_username(username)\n\t\tif not username:\n\t\t\traise IDMException(\"you must provide a username\")\n\t\t\n\t\tres = cls.find_on({'type': 'user', 'username': username}, server, bucket)\n\t\tif res and len(res) > 0:\n\t\t\treturn res[0]\n\t\traise IDMException(\"no user with the given username\", 404)",
"def get_object_with_player(self, search_string):\n search_string = utils.to_unicode(search_string).lstrip('*') \n dbref = self.dbref(search_string)\n if not dbref: \n # not a dbref. Search by name.\n player_matches = User.objects.filter(username__iexact=search_string)\n if player_matches:\n dbref = player_matches[0].id\n # use the id to find the player\n return self.get_object_with_user(dbref)",
"def getPlayer(self, playerName, team=None):\n if team is None:\n teams = self.players.keys()\n elif team.lower() in self.players.keys():\n teams = [team.lower()]\n else:\n return None\n \n for team in teams:\n for player in self.players[team]:\n if playerName == player.name:\n return player\n return None",
"def get(self, id=None):\n if not id:\n position = request.args.get(\"position\")\n logger.info(\n f\"Retrieving all players, optionally filtered by position={position}\"\n )\n\n return self._get_all_players(position), 200\n\n logger.info(f\"Retrieving player by id {id}\")\n\n try:\n return self._get_player_by_id(id), 200\n except NoResultFound:\n abort(404, message=\"Player not found\")"
]
| [
"0.65232056",
"0.63248277",
"0.6170685",
"0.60794777",
"0.59741324",
"0.59616375",
"0.5943671",
"0.5908297",
"0.5905852",
"0.5870364",
"0.5857468",
"0.5836295",
"0.5746015",
"0.5728419",
"0.57253647",
"0.5602715",
"0.55801475",
"0.5562985",
"0.55489415",
"0.55399925",
"0.5517415",
"0.55160636",
"0.55148125",
"0.55110735",
"0.550645",
"0.5484051",
"0.54782766",
"0.5472387",
"0.54723424",
"0.546825"
]
| 0.8235693 | 0 |
If there is a start position, then add a default-tag item up to that start position and add a found-tag item from start with length n. | def add_item(items, coder, tag, start, n):
if start is not None:
# close opened items
add_zero_item(items, coder, tag, start) # default tag
items[tag][coder].append(item(b=start, l=n-start, v=1)) # found tag | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extend_pos(self, start: int, end: int) -> None:",
"def __init__(self, start_index: int, tag: str):\n self.start_index = start_index\n self.limit = 10\n self.tag = tag.lower()",
"def handle_starttag(self, tag, attrs):\n if tag == \"a\":\n curr_tag = Tag(tag)\n for attribute in attrs:\n curr_tag.add_attribute(attribute[0], attribute[1])\n self.current_tags.append(curr_tag)\n elif tag == \"img\":\n curr_tag = Tag(tag)\n for attribute in attrs:\n curr_tag.add_attribute(attribute[0], attribute[1])\n self.images.append(curr_tag)",
"def insert(self, index, *elements, **kw):\n # add support for tagged_text on input!\n if PyVers_f >= 3.4:\n index_i = super().index(index)\n super().insert(index, *elements)\n lb_elements = super().get(index_i, index_i + len(elements) - 1)\n for x, elem in enumerate(lb_elements, index_i):\n _, attrs, text = split_chunk(elem)\n if attrs:\n opts, _, case = parse_tag_attrs(attrs, {}, {}, **kw)\n if case:\n text = getattr(text, case)()\n super().insert(x, text)\n super().itemconfig(x, **opts)\n super().delete(x + 1)\n else:\n # bug in earlier Py versions causes above to fail on 1st elem\n elems_to_process = elements[:: 1 if index == tk.END else -1]\n for elem in elems_to_process: # elements[::-1]:\n if type(elem) in (list, tuple):\n elem1 = [\n e.replace(\"{\", r\"\\{\").replace(\"}\", r\"\\}\") for e in elem\n ]\n elem2 = [\"{%s}\" % e if \" \" in e else e for e in elem1]\n elem = \" \".join(elem2)\n _, attrs, text = split_chunk(elem)\n if attrs:\n opts, _, case = parse_tag_attrs(attrs, {}, {}, **kw)\n if case:\n text = getattr(text, case)()\n super().insert(index, text)\n super().itemconfig(index, **opts)\n else:\n super().insert(index, text)",
"def add_nodes_from(pos, node):\n if node.name is not None:\n # I wanna take care of the children of the content node\n children_list = []\n root = Content(pos=pos, tag=node.name)\n content_list.append((root, []))\n pos += 1\n for child in node.children:\n children_list.append(pos)\n pos = add_nodes_from(pos, child)\n content_list[root.pos] = (root, children_list)\n return pos\n\n else:\n leaf = Content(pos=pos, content=str(node))\n content_list.append((leaf, []))\n return pos + 1",
"def add_zero_item(items, coder, tag, start):\n if items[tag][coder]:\n it = items[tag][coder][-1]\n zero_start = it.b + it.l\n else:\n zero_start = 0\n if start - zero_start:\n items[tag][coder].append(item(b=zero_start, l=start - zero_start, v=0))",
"def default_startpos(self) -> Dict[AtomKey, numpy.array]:\n ...",
"def _default_context(tokens, i):\n left = tokens[i - 1].lower() if i != 0 else \"*START*\"\n right = tokens[i + 1].lower() if i != len(tokens) - 1 else \"*END*\"\n return (left, right)",
"def assign_tags_to_content(doc, style_tag):\n header_para = [] # list with headers and paragraphs\n header_dict = {}\n first = True # boolean operator for first header\n previous_s = {} # previous span\n\n for page in doc:\n blocks = page.getText(\"dict\")[\"blocks\"]\n i=0\n for b in blocks: # iterate through the text blocks\n if b['type'] == 0: # this block contains text\n\n # REMEMBER: multiple fonts and sizes are possible IN one block\n\n block_string = \"\" # text found in block\n for l in b[\"lines\"]: # iterate through the text lines\n for s in l[\"spans\"]: # iterate through the text spans\n if s['text'].strip(): # removing whitespaces:\n if first:\n previous_s = s\n first = False\n s_key = (int(s['text'].isupper()), int('bold' in s['font'].lower()), float(s['size']))\n block_string = style_tag[s_key] + s['text']\n else:\n s_key = (int(s['text'].isupper()), int('bold' in s['font'].lower()), float(s['size']))\n previous_key = (int(previous_s['text'].isupper()), int('bold' in previous_s['font'].lower()), float(previous_s['size']))\n if s_key == previous_key:\n\n if block_string and all((c == \"|\") for c in block_string):\n # block_string only contains pipes\n block_string = style_tag[s_key] + s['text']\n if block_string == \"\":\n # new block has started, so append size tag\n block_string = style_tag[s_key] + s['text']\n else: # in the same block, so concatenate strings\n block_string += \" \" + s['text']\n\n else:\n header_para.append(block_string)\n if block_string.startswith(\"<h\"):\n if style_tag[previous_key] in header_dict:\n header_dict[style_tag[previous_key]].append(block_string[block_string.index(\">\")+1:])\n else:\n header_dict[style_tag[previous_key]] = [block_string[block_string.index(\">\")+1:]]\n block_string = style_tag[s_key] + s['text']\n\n previous_s = s\n\n # new block started, indicating with a pipe\n block_string += \"|\"\n\n header_para.append(block_string)\n if block_string.startswith(\"<h\"):\n if style_tag[s_key] in header_dict:\n header_dict[style_tag[s_key]].append(block_string[block_string.index(\">\")+1:])\n else:\n header_dict[style_tag[s_key]] = [block_string[block_string.index(\">\")+1:]]\n return header_para, header_dict",
"def tagger():",
"def tag_bioes(tags, match_index, term_length):\n\n if term_length == 1:\n tags[match_index] = \"S\"\n else:\n for i in range(term_length):\n if i == 0:\n tags[match_index + i] = \"B\"\n elif i == term_length - 1:\n tags[match_index + i] = \"E\"\n else:\n tags[match_index + i] = \"I\"\n return tags",
"def extend(self):\n # -1 in the segments means that starts counting in the end of the list\n self.add_segment(self.segments[-1].position())",
"def __init__(self, tagged_sents, default_tag='nc0s000'):\n self._default_tag = default_tag",
"def GachaCraftNodeExcelStartTag_Vector(builder, numElems):\n return StartTag_Vector(builder, numElems)",
"def xml_tag(inp_tag, out_tag):\n tag_dict = OrderedDict()\n sub_tag = OrderedDict()\n for i in range(len(inp_tag)):\n if inp_tag[i] not in out_tag:\n tag_dict[inp_tag[i]] = [inp_tag[i-1]]\n return tag_dict",
"def _genPosTags(self, tagged):\n return [pos for (token, pos) in tagged]",
"def insert_relation_tags(tokenized_text, indices):\n \n # order tags by actual index in sentence\n indices = [i for ind in indices for i in ind]\n tags = [\"<e1>\", \"</e1>\", \"<e2>\", \"</e2>\"]\n order = np.argsort(indices)\n indices = [indices[i] for i in order]\n tags = [tags[i] for i in order]\n \n adjust = 0\n for ix, tag in zip(indices, tags):\n tokenized_text.insert(ix + adjust, tag)\n adjust += 1\n \n return tokenized_text",
"def getTags(number=None):",
"def _addPrefixes(data):\n prevTags = None\n newData = []\n\n for n, (token, tags) in enumerate(data):\n\n newTags = []\n\n for t in tags:\n p = \"B\" if ((prevTags is None) or (t not in prevTags)) else \"I\"\n newTags.append(\"%s-%s\" % (p, t))\n\n newData.append((token, newTags))\n prevTags = tags\n\n return newData",
"def get_items(self, start, stop, next_position=None):",
"def recipe12_2():\n from xml.sax.handler import ContentHandler\n import xml.sax\n class countHandler(ContentHandler):\n def __init__(self):\n self.tags={}\n def startElement(self,name,attr):\n self.tags[name]=self.tags.get(name,0)+1\n parser=xml.sax.make_parser()\n handler=countHandler()\n parser.setContentHandler(handler)\n try:\n parser.parse('sample.xml')\n except IOError:\n sys.stderr.write('File sample.xml not found\\n')\n tags=handler.tags.keys()\n tags.sort()\n for tag in tags:\n print tag,handler.tags[tag]",
"def NewStartingIndex(self) -> int:",
"def handle_starttag(self, tag, attrs):\n \n if self.intermediate_tags > 0:\n self.intermediate_tags += 1\n return\n \n self.filtering = self.typogrify._should_be_filtered(tag, attrs)\n self.intermediate_tags = 1 if not self.filtering else 0",
"def full_pos_tag(self, sentence):\n tags = self.pos_tag(sentence)\n for i in range(len( tags)):\n tags[i] = self.get_complete_tag(tags[i])\n return tags",
"def insert(self, word):\n now = self.tree\n for i in word:\n now[i] = now.setdefault(i,{})\n now = now[i]\n now['end']=True",
"def add_to_beginning(self, domino):\n self.chain.insert(0, domino)",
"def __init__(self, tagged_sents, default_tag='nc0s000'):\n \n self.default_tag = default_tag\n \n self.word_tags = defaultdict(lambda: defaultdict(int))\n \n for sent in list(tagged_sents):\n for word, tag in sent:\n self.word_tags[word][tag] += 1\n\n self.word_tags = dict(self.word_tags)",
"def __init__(self, text, tag, start ,end):\n\n self.text = six.text_type(text)\n self.tag = copy.copy(tag)\n self.end = end\n self.start = start",
"def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)",
"def addPrefix(self):\n closeRqd = None\n for item, next in map(None, self, self[1:]):\n lastSibling = not next or next.level != item.level\n if item.prefix and item.textLines and closeRqd == None:\n item.textLines[0] = item.prefix + item.textLines[0]\n closeRqd = item.suffix\n if closeRqd != None and (lastSibling or\n not item.equalPrefix(next)):\n if item.textLines:\n item.textLines[-1] = item.textLines[-1] + closeRqd\n else:\n item.textLines = [closeRqd]\n closeRqd = None"
]
| [
"0.57692724",
"0.57584",
"0.5506527",
"0.54777753",
"0.5397803",
"0.5319494",
"0.5314077",
"0.5247034",
"0.52080256",
"0.5207023",
"0.5185454",
"0.5167013",
"0.5088132",
"0.5079091",
"0.5039901",
"0.5026523",
"0.5013085",
"0.50089514",
"0.5003757",
"0.50031894",
"0.4975165",
"0.49651366",
"0.49550897",
"0.49548903",
"0.49272093",
"0.49269757",
"0.49233702",
"0.49119285",
"0.49109998",
"0.49080315"
]
| 0.6109693 | 0 |
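A hedged sketch of add_item in use, assuming item is a namedtuple with fields b (begin), l (length), v (value), that items is a nested dict of per-coder interval lists, and that the companion add_zero_item helper behaves as reproduced below:

# Illustration only: the helper definitions mirror the snippets above; the tag,
# coder and positions are made-up values.
from collections import namedtuple, defaultdict

item = namedtuple('item', ['b', 'l', 'v'])

def add_zero_item(items, coder, tag, start):
    # pad with a default-tag (v=0) interval from the end of the last item to start
    if items[tag][coder]:
        it = items[tag][coder][-1]
        zero_start = it.b + it.l
    else:
        zero_start = 0
    if start - zero_start:
        items[tag][coder].append(item(b=zero_start, l=start - zero_start, v=0))

def add_item(items, coder, tag, start, n):
    if start is not None:
        add_zero_item(items, coder, tag, start)                    # default tag
        items[tag][coder].append(item(b=start, l=n - start, v=1))  # found tag

items = defaultdict(lambda: defaultdict(list))
add_item(items, coder='c1', tag='PER', start=3, n=7)
print(items['PER']['c1'])  # [item(b=0, l=3, v=0), item(b=3, l=4, v=1)]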
Create the sign evaluation message. | def to_msg(self):
return SignEvaluationMsg(
self.position.to_geometry_msg(), self.desc, *self.evaluate()
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign(self,msg,s):\n # $y_s = E_k^{-1} \\left( y_{s+1} \\oplus \\dots E_k^{-1} \\left( y_n \\oplus E_k^{-1} \\left(z\\right)\\right)\\dots\\right) \\oplus E_k \\left( y_{s-1} \\oplus \\dots E_k \\left( y_1 \\oplus v \\right)\\dots\\right)$\n self.permut(msg)\n x,y = [],[]\n for i in range(len(self.kl)):\n if i != s:\n xi = random.randint(0,2**self.l-1)\n x.append(xi)\n y.append(self.trap(xi,self.kl[i].e,self.kl[i].n))\n vi = reduce (lambda vi,i:self.E(i^vi),y[s:],0)\n yc = reduce (lambda yc,i:self.E(i^yc),y[:s],vi)\n x.insert(s,self.trap(yc,self.kl[s].d,self.kl[s].n))\n return itob64(vi) + ' ' + ' '.join('%s'%itob64(xi) for xi in x)",
"def sign(sk: SecretKey, msgs: List[bytes]) -> Signature:\n assert(len(msgs) == len(sk.y))\n\n # pick generator\n h = G1.generator()\n exponent = sk.x + sum([y_i * Bn.from_binary(m_i)\n for (y_i, m_i) in zip(sk.y.values(), msgs)])\n\n return Signature(h, h**exponent) # type:ignore",
"def sign(self,msg,s):\n # $y_s = E_k^{-1} \\left( y_{s+1} \\oplus \\dots E_k^{-1} \\left( y_n \\oplus E_k^{-1} \\left(z\\right)\\right)\\dots\\right) \\oplus E_k \\left( y_{s-1} \\oplus \\dots E_k \\left( y_1 \\oplus v \\right)\\dots\\right)$\n root_link,limit = 1234567,56 # find a more secure root and an optimal limit!\n link = itob64(pow(root_link,self.kl[s].p + self.kl[s].q,self.kl[s].n)%(1<<limit))\n #link = itob64(1000000L) easy to tally !\n self.permut(msg + link)\n x,y = [],[]\n for i in range(len(self.kl)):\n if i != s:\n xi = random.randint(0,2**self.l-1)\n x.append(xi)\n y.append(self.trap(xi,self.kl[i].e,self.kl[i].n))\n vi = reduce (lambda vi,i:self.E(i^vi),y[s:],0)\n yc = reduce (lambda yc,i:self.E(i^yc),y[:s],vi)\n x.insert(s,self.trap(yc,self.kl[s].d,self.kl[s].n))\n return link + ' ' + itob64(vi) + ' ' + ' '.join('%s'%itob64(xi) for xi in x)",
"def sign(self, msg):\n z = int.from_bytes(helper.hash256(msg), \"big\")\n k = self.deterministic_k(z)\n k_inv = pow(k, N-2, N)\n r = (k*G).x.num\n s = (z + r * self.secret) * k_inv % N\n if s > N/2:\n s = N - s\n\n return Signature(r, s)",
"def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature",
"def build_tex(self,signs='-',request_parens=False):\n return self._build_tex(signs,request_parens)",
"def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id",
"def SIGN(self, signingKey, message, seqNum, cipher_encrypt):\n\t\treturn self.MAC(cipher_encrypt, signingKey, seqNum, message)",
"def sign(self, payload):\n raise NotImplementedError",
"def generate_signed_message(method, headers_dict, body_dict, access_key, secret_key):\r\n message = signing_format_message(method, headers_dict, body_dict)\r\n\r\n # hmac needs a byte string for it's starting key, can't be unicode.\r\n hashed = hmac.new(secret_key.encode('utf-8'), message, sha256)\r\n signature = binascii.b2a_base64(hashed.digest()).rstrip('\\n')\r\n authorization_header = \"SSI {}:{}\".format(access_key, signature)\r\n\r\n message += '\\n'\r\n return message, signature, authorization_header",
"def _create_msg(self, tr_id, payload, confirm, expire_time, encoding):\n tmp = [\"<SSAP_message><transaction_type>INSERT</transaction_type>\",\n \"<message_type>REQUEST</message_type>\"]\n tmp.extend([\"<transaction_id>\", str(tr_id), \"</transaction_id>\"])\n tmp.extend([\"<node_id>\", str(self.node_id), \"</node_id>\"])\n tmp.extend([\"<space_id>\", str(self.targetSS), \"</space_id>\"])\n tmp.extend(['<parameter name=\"insert_graph\" encoding=\"%s\">' % encoding.upper(),\n str(payload), \"</parameter>\"])\n tmp.extend(['<parameter name = \"confirm\">',\n str(confirm).upper(),\n \"</parameter>\",\n \"</SSAP_message>\"])\n return \"\".join(tmp)",
"def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)",
"def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q-1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)",
"def rsa_sign(message, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.sign)",
"def encode(msg: Message) -> bytes:\n msg = cast(SigningMessage, msg)\n signing_msg = signing_pb2.SigningMessage()\n signing_msg.message_id = msg.message_id\n dialogue_reference = msg.dialogue_reference\n signing_msg.dialogue_starter_reference = dialogue_reference[0]\n signing_msg.dialogue_responder_reference = dialogue_reference[1]\n signing_msg.target = msg.target\n\n performative_id = msg.performative\n if performative_id == SigningMessage.Performative.SIGN_TRANSACTION:\n performative = signing_pb2.SigningMessage.Sign_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_transaction = msg.raw_transaction\n RawTransaction.encode(performative.raw_transaction, raw_transaction)\n signing_msg.sign_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGN_MESSAGE:\n performative = signing_pb2.SigningMessage.Sign_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_message = msg.raw_message\n RawMessage.encode(performative.raw_message, raw_message)\n signing_msg.sign_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_TRANSACTION:\n performative = signing_pb2.SigningMessage.Signed_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n signed_transaction = msg.signed_transaction\n SignedTransaction.encode(\n performative.signed_transaction, signed_transaction\n )\n signing_msg.signed_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_MESSAGE:\n performative = signing_pb2.SigningMessage.Signed_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n signed_message = msg.signed_message\n SignedMessage.encode(performative.signed_message, signed_message)\n signing_msg.signed_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.ERROR:\n performative = signing_pb2.SigningMessage.Error_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n error_code = msg.error_code\n ErrorCode.encode(performative.error_code, error_code)\n signing_msg.error.CopyFrom(performative)\n else:\n raise ValueError(\"Performative not valid: {}\".format(performative_id))\n\n signing_bytes = signing_msg.SerializeToString()\n return signing_bytes",
"def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q - 1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)",
"def sign(self, message):\n return Signature(self._sk.sign(message))",
"def sign(self, message):\n\n assert len(message) == 32\n assert self.sec is not None\n r, s = do_ecdsa_sign(self.G, self.sec, message, self.optim)\n r0, s0 = r.binary(), s.binary()\n assert len(r0) <= 32 and len(s0) <= 32\n sig = pack(\"H32sH32s\", len(r0), r0, len(s0), s0)\n return sig",
"def _sign(self, data, salt):\r\n strBuffer = \"\"\r\n # print data.keys()\r\n for k in sorted(data.iterkeys()):\r\n\r\n # Handle the BOOL special case\r\n v = data[k]\r\n if type(v) == bool:\r\n if v:\r\n v = 1\r\n else:\r\n v = 0\r\n data[k] = v\r\n\r\n # Update buffer\r\n strBuffer += \"%s=%s\\n\" % (str(k).lower(), vmcp.myquote(str(v)))\r\n\r\n # Append salt\r\n strBuffer += salt\r\n return strBuffer",
"def sign(priv_key: rsa.RSAPrivateKey, msg: bytes) -> Signature:\n return priv_key.sign(msg, PADDING, HASH)",
"def _generate_signature(self, key, msg):\n key = to_bytes(key)\n msg = to_bytes(msg)\n\n hash_obj = hmac.new(key, msg=msg, digestmod=hashlib.sha256)\n digest = hash_obj.digest() # abstract\n\n signature = base64.b64encode(digest) # Signature\n return to_unicode(signature)",
"def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig",
"def GenSampleSignature(text):\r\n demo_keypair = ('RSA.mVgY8RN6URBTstndvmUUPb4UZTdwvwmddSKE5z_jvKUEK6yk1'\r\n 'u3rrC9yN8k6FilGj9K0eeUPe2hf4Pj-5CmHww=='\r\n '.AQAB'\r\n '.Lgy_yL3hsLBngkFdDw1Jy9TmSRMiH6yihYetQ8jy-jZXdsZXd8V5'\r\n 'ub3kuBHHk4M39i3TduIkcrjcsiWQb77D8Q==')\r\n\r\n signer = SignatureAlgRsaSha256(demo_keypair)\r\n return signer.Sign(text)",
"def sign(self, body, external_aad, private_key):",
"def sign(self, message, private_key):\n sk = private_key\n vk = sk.get_verifying_key()\n\n self.public_key = vk\n\n # This would be the Ed25519ph version (JavaScript ES7):\n # const message = crypto.createHash('sha512')\n # .update(Buffer.concat([this.messagePrefix, this.message]))\n # .digest()\n\n self.signature = sk.sign(message, encoding='bytes')",
"def sign(self) -> str:\n raise NotImplementedError(\"Please implement algorithm specific sign() method\")",
"def sign_plaintext(client, request):\n return plaintext_signature(client.client_secret, client.token_secret)",
"def build_and_sign(builder, dest_address, payment_amount, prioritizer_seed=None):\n builder.append_payment_op(dest_address, str(payment_amount))\n builder.sign(builder.keypair.seed().decode())\n\n # prioritize transaction by adding a prioritizer signature\n if prioritizer_seed:\n builder.sign(prioritizer_seed)\n\n return builder.hash_hex(), builder.gen_xdr().decode()",
"def create_signature(self, string_to_sign: str) -> str:\n begin_signature = hmac.new(key=base64.b64decode(self.secret),\n msg=string_to_sign.encode(),\n digestmod=hashlib.sha1)\n end_signature = begin_signature.digest()\n final_signature = base64.b64encode(end_signature).decode()\n return final_signature",
"def get_signature_xml() -> str:\n return render_to_string(\"saml/xml/signature.xml\", {})"
]
| [
"0.64031225",
"0.6378018",
"0.61032414",
"0.6040611",
"0.5963941",
"0.59369916",
"0.5801996",
"0.5800221",
"0.57777756",
"0.5704505",
"0.56650835",
"0.56564415",
"0.5642582",
"0.56367344",
"0.56246454",
"0.5612385",
"0.5597548",
"0.55866754",
"0.5567704",
"0.5535878",
"0.5520031",
"0.5512765",
"0.5508083",
"0.54909086",
"0.5480725",
"0.54577976",
"0.54277205",
"0.5392805",
"0.53868926",
"0.53777325"
]
| 0.73843765 | 0 |
Sum over the evaluations. | def sum_evaluations(evaluations):
def add_evaluations(e1, e2):
"""Add two evaluations.
If Signs do not have detections the distance is -1, therefore the distance needs
to be handled separately.
"""
true_positive = e1[0] + e2[0]
false_positive = e1[1] + e2[1]
distance = e1[2] + e2[2]
if e1[2] < 0 and e2[2] < 0:
distance = -1
elif e1[2] < 0:
distance = e2[2]
elif e2[2] < 0:
distance = e1[2]
return true_positive, false_positive, distance
length = len(evaluations)
tp, fp, d = functools.reduce(add_evaluations, evaluations)
return tp, fp, float(d) / float(length) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum(self):\n return sum(self.values)",
"def evaluate(self, solution, total = 0):\n for objective in self.objectives:\n total = total + objective(solution)\n return total",
"def sum(self) -> float:\n return sum(self.values)",
"def evaluate(self,**d):\r\n\t\t\r\n\t\t# evaluate terms\r\n\t\tv = [i.evaluate(**d) for i in self]\r\n\t\t\r\n\t\t# sum results\r\n\t\tc = Pa(v).sum()\r\n\t\t\r\n\t\treturn c",
"def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)",
"def summation(self):\n return sum(self.read_ints())",
"def sum(self):\n return sum(self.items())",
"def sum_values(self):\n raise NotImplementedError",
"def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())",
"def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s",
"def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()",
"def sum(self):\n return self.aggregate(np.sum)",
"def sum (self):\n return self.values.sum ()",
"def sum (self):\n return self.values.sum ()",
"def sum(self):\n return self.vsum",
"def sum(self):\n return sum(self._values.values())",
"def test_sum_expression(self):\n # The logic of SumExpression is checked in the above tests (which include\n # addition and subtraction). Here, we only check that constructing a\n # SumExpression flattens the list.\n structure_memoizer = {\n defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,\n defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)\n }\n\n term_values = [0, 1, 2, 3, 4]\n\n def create_dummy_expression(value):\n \"\"\"Creates an empty `Expression` with the given extra constraints.\"\"\"\n basic_expression_object = basic_expression.BasicExpression(\n [term.TensorTerm(value)])\n return expression.ExplicitExpression(basic_expression_object,\n basic_expression_object)\n\n expressions = [create_dummy_expression(value) for value in term_values]\n\n # Each of our Expressions contains exactly one term, so by checking its\n # value we can uniquely determine which subexpression is which.\n def term_value(expression_object):\n terms = expression_object.penalty_expression._terms\n self.assertEqual(1, len(terms))\n return terms[0].tensor(structure_memoizer)\n\n sum1 = expression.SumExpression([expressions[0], expressions[1]])\n sum2 = expression.SumExpression([expressions[2]])\n sum3 = expression.SumExpression([expressions[3]])\n sum4 = expression.SumExpression([expressions[4]])\n sum5 = expression.SumExpression([sum3, sum4])\n sum6 = expression.SumExpression([sum1, sum2, sum5])\n\n actual_expressions = sum6._expressions\n self.assertEqual(5, len(actual_expressions))\n for ii in xrange(5):\n self.assertEqual(ii, term_value(expressions[ii]))\n self.assertEqual(ii, term_value(actual_expressions[ii]))",
"def sum(self):\n return sum(self.times)",
"def sum(self):\n return sum(self.times)",
"def sum(self):\n return sum(self.times)",
"def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()",
"def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s",
"def eval_sum(parse_result):\r\n total = 0.0\r\n current_op = operator.add\r\n for token in parse_result:\r\n if token == '+':\r\n current_op = operator.add\r\n elif token == '-':\r\n current_op = operator.sub\r\n else:\r\n total = current_op(total, token)\r\n return total",
"def sum(self, values):\n return self.aggregate(values, \"sum\")",
"def sum(self):\n return self._summarize(lambda c: c.sum)",
"def val_sum(self, axis = None):\n f = self.to_Poly()\n return f.val_sum(axis).to_TaylorGrid(self.params)",
"def sum_values(values):\n return (sum(values))",
"def evaluate(self, seq, begin, end, *args):\n return reduce(operator.add, args, [])",
"def sum(self):\n return np.dot(self.data.T, self.weights)",
"def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n return self._lift(\"sum\")"
]
| [
"0.6946212",
"0.68665934",
"0.6806886",
"0.6783867",
"0.676219",
"0.6713398",
"0.6688774",
"0.66768426",
"0.6672747",
"0.6663228",
"0.66507614",
"0.6645578",
"0.66443914",
"0.66443914",
"0.66300213",
"0.6587658",
"0.6506838",
"0.6465377",
"0.6465377",
"0.6465377",
"0.64633495",
"0.64295685",
"0.63862044",
"0.6378994",
"0.63516474",
"0.6302918",
"0.6299806",
"0.6285926",
"0.6240757",
"0.6211265"
]
| 0.70826775 | 0 |
Add two evaluations. If Signs do not have detections the distance is -1, therefore the distance needs to be handled separately. | def add_evaluations(e1, e2):
true_positive = e1[0] + e2[0]
false_positive = e1[1] + e2[1]
distance = e1[2] + e2[2]
if e1[2] < 0 and e2[2] < 0:
distance = -1
elif e1[2] < 0:
distance = e2[2]
elif e2[2] < 0:
distance = e1[2]
return true_positive, false_positive, distance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum_evaluations(evaluations):\n\n def add_evaluations(e1, e2):\n \"\"\"Add two evaluations.\n\n If Signs do not have detections the distance is -1, therefore the distance needs\n to be handled separately.\n \"\"\"\n true_positive = e1[0] + e2[0]\n false_positive = e1[1] + e2[1]\n distance = e1[2] + e2[2]\n if e1[2] < 0 and e2[2] < 0:\n distance = -1\n elif e1[2] < 0:\n distance = e2[2]\n elif e2[2] < 0:\n distance = e1[2]\n return true_positive, false_positive, distance\n\n length = len(evaluations)\n tp, fp, d = functools.reduce(add_evaluations, evaluations)\n return tp, fp, float(d) / float(length)",
"def addAllNumericHas (self, other):\n \n if self.hasOutErrorPackets():\n if other.hasOutErrorPackets():\n self.outErrorPackets += other.outErrorPackets\n \n if self.hasInErrorPackets():\n if other.hasInErrorPackets():\n self.inErrorPackets += other.inErrorPackets\n \n if self.hasInDiscardPackets():\n if other.hasInDiscardPackets():\n self.inDiscardPackets += other.inDiscardPackets\n \n if self.hasOutUnicastPackets():\n if other.hasOutUnicastPackets():\n self.outUnicastPackets += other.outUnicastPackets\n \n if self.hasInMulticastPackets():\n if other.hasInMulticastPackets():\n self.inMulticastPackets += other.inMulticastPackets\n \n if self.hasOutBroadcastPackets():\n if other.hasOutBroadcastPackets():\n self.outBroadcastPackets += other.outBroadcastPackets\n \n if self.hasInBroadcastPackets():\n if other.hasInBroadcastPackets():\n self.inBroadcastPackets += other.inBroadcastPackets\n \n if self.hasOutMulticastPackets():\n if other.hasOutMulticastPackets():\n self.outMulticastPackets += other.outMulticastPackets\n \n if self.hasInUnknownProtocolPackets():\n if other.hasInUnknownProtocolPackets():\n self.inUnknownProtocolPackets += other.inUnknownProtocolPackets\n \n if self.hasOutDiscardPackets():\n if other.hasOutDiscardPackets():\n self.outDiscardPackets += other.outDiscardPackets\n \n if self.hasInUnicastPackets():\n if other.hasInUnicastPackets():\n self.inUnicastPackets += other.inUnicastPackets\n \n if self.hasOutOctets():\n if other.hasOutOctets():\n self.outOctets += other.outOctets\n \n if self.hasInOctets():\n if other.hasInOctets():\n self.inOctets += other.inOctets\n \n \n pass",
"def _distance_last_evaluations(self):\n if self.X.shape[0] < 2:\n # less than 2 evaluations\n return np.inf\n return np.sqrt(np.sum((self.X[-1, :] - self.X[-2, :]) ** 2))",
"def __add__(self, other):\n\t\tif isinstance(other, Value):\n\t\t\treturn Value(self.val + other.val, sqrt(self.error**2 + other.error**2))\n\t\telse:\n\t\t\treturn Value(self.val + other, self.error)",
"def _compute_results(self):\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n self.distance = self._compute_distance_betw_consecutive_x()",
"def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})",
"def similarity(self, e1, e2):\n\t\tpass",
"def _pairwise_dist(self,s1,s2):\n\n return 0.0",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def calc(operand_1, operand_2):\n\n return operand_1 + operand_2",
"def addAllNumericHas (self, other):\n \n if self.hasEpoch():\n if other.hasEpoch():\n self.epoch += other.epoch\n \n if self.hasUtcOffsetMinutes():\n if other.hasUtcOffsetMinutes():\n self.utcOffsetMinutes += other.utcOffsetMinutes\n \n \n pass",
"def __add__(self, other):\n if not self.stations[1] == other.stations[0]:\n if self.stations[0] == other.stations[1]:\n return other.__add__(self)\n return None\n if not (self.got and other.got):\n return None\n if self.t_cut != None and np.abs(self.t - other.t) > self.t_cut:\n if self.t > other.t:\n other.got = False\n else:\n self.got = False\n return 0\n if self.t >= other.t:\n self.rho = maps.dp_doub(self.t - other.t, other.T, *self.rho)\n else:\n other.rho = maps.dp_doub(other.t - self.t, self.T, *other.rho)\n \n rho = maps.swap(self.lam_BSM, *(self.rho + other.rho))\n t = max(self.t, other.t)\n N = max(self.N, other.N)\n return Link(self.left, other.right, self.c, self.T, N=N, t=t, rho_init=rho, got=True, direct=False, t_cut=self.t_cut, lam_BSM=self.lam_BSM, k = self.k)",
"def add(self, frames):\n score = self.calc_score(frames)\n self.scores.append(score)\n\n if len(self.scores) >= self.length and score > self.threshold:\n return score, True\n\n # This calc is slow and probably should be done\n # without deque -> numpy arrays if possible.\n mean, stdv = np.mean(self.scores), np.std(self.scores)\n self.threshold = mean + (3 * stdv)\n\n return score, False",
"def _add_criterion(cls, first, second, operator):\n assert isinstance(first, TargetingCriterion)\n assert isinstance(second, TargetingCriterion)\n assert operator in cls.OPERATOR\n\n op1, targets1 = first.get_data()\n op2, targets2 = second.get_data()\n\n first_is_extendable = op1 == operator or len(targets1) <= 1\n second_is_extendable = op2 == operator or len(targets2) <= 1\n # Are the targets of the same python type?\n same_types = type(targets1[0]) == type(targets2[0])\n\n if op1 == cls.OPERATOR.NOT or op2 == cls.OPERATOR.NOT:\n # If either operator is a not, do not\n # attempt to simplify the response\n new_target_list = [first, second]\n elif first_is_extendable and second_is_extendable and same_types:\n # If both summands are in agreement with new operator,\n # and their types match, then concatenate their targets\n new_target_list = targets1 + targets2\n else:\n new_target_list = [first, second]\n\n return TargetingCriterion(\n new_target_list,\n operator,\n )",
"def distance(self, other):\n ...",
"def __add__(self, other):\n new_measure = Measure()\n settings = [\"raw\", \"fil\"]\n\n for rf in settings:\n new_measure.hit1[rf] = (self.hit1[rf] + other.hit1[rf])\n new_measure.hit3[rf] = (self.hit3[rf] + other.hit3[rf])\n new_measure.hit10[rf] = (self.hit10[rf] + other.hit10[rf])\n new_measure.mrr[rf] = (self.mrr[rf] + other.mrr[rf])\n new_measure.mr[rf] = (self.mr[rf] + other.mr[rf])\n return new_measure",
"def add(value1, value2):\n return 1 / (1.0 / value1 + 1.0 / value2)",
"def __add__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n resultnumerator = self.numerator+other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues",
"def __iadd__(self,value):\n if isinstance(value,LiveStat):\n raise Exception(\"Cannot sum statistics\")\n if value.vcount < 1 or self.vcount < 1:\n raise Exception(\"Cannot sum empty statistics\")\n else:\n # sum of two considered pairwise: z_i = stat(x_i + y_i)\n #\n # data have different weights due to number of samples.. TODO\n self.vmin += value.vmin \n self.vmax += value.vmax\n self.vmean += value.vmean\n self.vsum += value.vsum\n # variance is sum of variance?\n self.vm2 += value.vm2\n # TODO vm3 vm4\n self.vcount = min(value.vcount,self.vcount)\n self.vcountsq = self.vcount**2\n self.dirty = True\n print (\"add Missing: M3 and M4\")\n else:\n # constant bias\n if self.vmin is not None:\n self.vmin += value\n self.vmax += value\n self.vmean += value\n self.vsum += self.vcount*value\n print (\"add Missing: M3 and M4\")\n self.dirty = True\n return self",
"def __add__(self,other):\n if isinstance(other, point):\n return self.add_points(other)\n else:\n return self.add_points_tuple(other)",
"def addition(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a + b",
"def add(self, other):\n\n def merge_dicts(d1, d2):\n \"\"\"\n Merge two dictionaries\n\n param d1: dictionary changed in place to have combined values\n type d1: dictionary(key -> set)\n param d2: dictioanry to be merged\n type d2: dictionary(key -> set)\n \"\"\"\n for key,value in d2.items():\n if key not in d1:\n d1[key] = value\n else:\n d1[key] |= value\n \n self.num_documents += other.num_documents\n self.num_expressions += other.num_expressions\n self.global_expressions += other.global_expressions\n self.expressions_with_e += other.expressions_with_e\n self.num_keywords += other.num_keywords\n merge_dicts(self.missing_tags, other.missing_tags)\n merge_dicts(self.problem_files, other.problem_files)",
"def distance(self, a, b):\n raise NotImplementedError()",
"def forward(self, triples):\n lhs = self.get_lhs(triples)\n lhs_bias = self.bias_head(triples[:, 0])\n rhs = self.get_rhs(triples)\n rhs_bias = self.bias_tail(triples[:, 2])\n\n sim_score, dist = self.similarity_score(lhs, rhs)\n return sim_score + lhs_bias + rhs_bias, dist",
"def __iadd__(self, other):\n\n if not isinstance(other, Unigram):\n return NotImplemented\n\n self.sort_by_tokens(other.dictionary.tokens)\n\n for i in range(len(other.dictionary)):\n other_count = other.Nx[i]\n self.Nx[i] += other_count\n\n self.N = sum(self.Nx)\n\n return self",
"def __iadd__(self, other: FunctionType):\n self.truths = self.truths | {other}\n return self",
"def __add__(self, other):\n assert isinstance(other, Solution)\n assert self.dim == other.dim\n _ = [self.tolist()] if len(self.shape) == 1 else self.tolist()\n __ = [other.tolist()] if len(other.shape) == 1 else other.tolist()\n return Solution(_ + __, self.fitness.tolist() + other.fitness.tolist(),\n self.n_eval.tolist() + other.n_eval.tolist(), \n var_name=self.var_name, verbose=self.verbose)",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def _add_op(value, sample_args, rationals_allowed):\n entropy, sample_args = sample_args.peel()\n if rationals_allowed and sample_args.count >= 3:\n x = number.integer_or_rational(entropy, True)\n else:\n x = number.integer(entropy, True)\n if random.choice([False, True]):\n op_args = [x, value - x]\n else:\n op_args = [value - x, x]\n return ops.Add, op_args, sample_args"
]
| [
"0.75192755",
"0.5600311",
"0.5589506",
"0.5509544",
"0.5471695",
"0.54424715",
"0.5433446",
"0.5314487",
"0.5303053",
"0.5303053",
"0.52639925",
"0.5242543",
"0.52394116",
"0.5227583",
"0.52234477",
"0.52131516",
"0.52124393",
"0.52061206",
"0.5189503",
"0.51505655",
"0.51423836",
"0.51397276",
"0.5131071",
"0.5125783",
"0.5125412",
"0.5113296",
"0.5102009",
"0.508177",
"0.50780374",
"0.50773597"
]
| 0.7382814 | 1 |
Calculate the evaluation for the given ids. | def get_evaluations(self, ids):
evaluations = [(self.signs[i].evaluate(), self.signs[i].desc) for i in ids]
descriptions = list({self.signs[i].desc for i in ids})
evaluations_per_sign = [
([e for e, desc in evaluations if desc == description], description)
for description in descriptions
]
summed_evaluations = [
Sign.sum_evaluations(per_sign) for per_sign, _ in evaluations_per_sign
]
return summed_evaluations, descriptions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self) -> Dict[str, Any]:\n kwargs = {\"ids\": self._ids}\n return {\n metric.value: self._metric_funcs[metric](\n self._targets, self._preds, **kwargs\n )\n for metric in self._metrics\n }",
"def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()",
"def evaluation_fn(\n state: tff.learning.templates.LearningAlgorithmState,\n sampled_client_ids: Tuple[List[str], List[str]]) -> OrderedDict[str, Any]:\n valid_client_ids, test_client_ids = sampled_client_ids\n raw_valid_metrics = valid_clients_eval_computation(\n training_process.get_model_weights(state), valid_client_ids)\n raw_test_metrics = test_clients_eval_computation(\n training_process.get_model_weights(state), test_client_ids)\n return collections.OrderedDict([\n (constants.VALID_CLIENTS_KEY, raw_valid_metrics),\n (constants.TEST_CLIENTS_KEY, raw_test_metrics)\n ])",
"def multiple_eval_for_loops_v1():",
"def multiple_eval_for_loops_v2():",
"def evaluate(self,**d):\r\n\t\t\r\n\t\t# evaluate terms\r\n\t\tv = [i.evaluate(**d) for i in self]\r\n\t\t\r\n\t\t# sum results\r\n\t\tc = Pa(v).sum()\r\n\t\t\r\n\t\treturn c",
"def build_eval_metrics(self, predict_ids, labels, nwords, params):\n raise NotImplementedError()",
"def evaluate(predictions, ids, label_identifiers):\n\n labels = []\n #For every prediction\n for i in range(len(predictions)):\n sentence_predictions = predictions[i]\n id_sequence = ids[i]\n sequence_labels = []\n counter = 0\n #For every predicted token\n for j in range(len(id_sequence)):\n word_prediction = sentence_predictions[j]\n id = id_sequence[j]\n #Take only the lemmas that have to be disambiguated\n if not id == '0':\n #Extract the identifiers of the sensekeys associated to the lemma\n indexes = label_identifiers[i][counter]\n new_predictions = []\n #Check if the identifier is a number \n for elem in indexes:\n try:\n index = int(elem)\n new_predictions.append(predictions[i][j][index])\n except ValueError:\n #If is not, MFS was applied\n new_predictions.append(elem)\n #Do the argmax on the extracted prediction indexes\n argmax = np.argmax(new_predictions)\n label = label_identifiers[i][counter][argmax]\n sequence_labels.append(label)\n counter += 1\n labels.append(sequence_labels)\n\n return labels",
"def eval(self, gt_paths, pred_paths):\n assert self.num_worker is not None, \"Parameter 'num_worker' is not assigned\"\n assert len(gt_paths) == len(pred_paths), \"Size must equal!\"\n \n dists = list()\n iterable = [(gt_paths[i], pred_paths[i] ) for i in range(len(gt_paths))]\n for result in tqdm.tqdm(self.pool.istarmap(self.eval_method.eval_fn, iterable), total=len(iterable)):\n dists.append(result)\n\n return dists",
"def query_set_sim(self, en_ids, weights):\n # fielded_weights = self.__get_weights(weights)\n scorer = ScorerMLM(econfig.LUCENE, self.query, {}) # {'field_weights': fielded_weights})\n\n p_t_theta_d = {}\n for t in set(self.query.split()):\n p_t_theta_d[t] = 0\n for en in en_ids:\n lucene_doc_id = scorer.lucene.get_lucene_document_id(en)\n p_t_theta_d[t] += scorer.get_mlm_term_prob(lucene_doc_id, weights, t)\n score = self.nllr(self.query, p_t_theta_d, weights)\n if score is None:\n return 0\n return math.exp(score)",
"def evaluator(self, candidates, args):\r\n raise NotImplementedError",
"def evaluator(self, candidates, args):\n\t\traise NotImplementedError",
"def evaluate(self, csls, evals, mode=\"csls\"):\n metrics = {}\n for eval_func in evals:\n assert hasattr(self, eval_func), \\\n \"Eval Function {0} not found\".format(eval_func)\n metrics = getattr(self, eval_func)(csls, metrics, mode=mode)\n return metrics",
"def evaluator(self, candidates, args):\r\n fitness = []\r\n if self._use_ants:\r\n for candidate in candidates:\r\n total = 0\r\n for c in candidate:\r\n total += c.value\r\n fitness.append(total)\r\n else:\r\n for candidate in candidates:\r\n total_value = 0\r\n total_weight = 0\r\n for c, i in zip(candidate, self.items):\r\n total_weight += c * i[0]\r\n total_value += c * i[1]\r\n if total_weight > self.capacity:\r\n fitness.append(self.capacity - total_weight)\r\n else:\r\n fitness.append(total_value)\r\n return fitness",
"def evaluate(self, data, label_indices, evaluator=np.argmax):\n\n assert(len(data) == len(label_indices))\n\n assert(len(data) == len(label_indices))\n assert(data[0].shape == (self.sizes[0], 1))\n for idx in label_indices:\n assert(idx >= 0 and idx < self.sizes[-1])\n\n results = [(evaluator(self.feedforward(x)), y)\n for (x, y) in zip(data, label_indices)]\n\n num = len(data)\n num_correct = sum(int(x == y) for (x, y) in results)\n num_incorrect = num - num_correct\n accuracy = num_correct / num\n\n return {'num_testing': len(data),\n 'num_correct': num_correct,\n 'num_incorrect': num_incorrect,\n 'accuracy': accuracy}",
"def evaluator(self, candidates, args):\r\n fitness = []\r\n if self._use_ants:\r\n for candidate in candidates:\r\n total = 0\r\n for c in candidate:\r\n total += self.weights[c.element[0]][c.element[1]]\r\n last = (candidate[-1].element[1], candidate[0].element[0])\r\n total += self.weights[last[0]][last[1]]\r\n fitness.append(1 / total)\r\n else:\r\n for candidate in candidates:\r\n total = 0\r\n for src, dst in zip(candidate, candidate[1:] + [candidate[0]]):\r\n total += self.weights[src][dst]\r\n fitness.append(1 / total)\r\n return fitness",
"def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy",
"def evaluate(self) -> int:",
"def evaluate_batch(self, pipelines):",
"def _evaluate(self,\n logits,\n predictions,\n data, evaluation_fn,\n max_eval_batches=None,\n calculate_scores=True,\n write_results=False):\n # counting the evaluation batches\n num_eval_batches = 0\n # logits and predictions from the model\n all_logits = []\n all_predictions = []\n # fetched data that led to the predictions\n # dictionary of {seq_1: [], seq_2: [], target: []}\n all_fetched_data = collections.defaultdict(list)\n try:\n while True:\n # sample predictions\n (fetched_logits,\n fetched_predictions,\n fetched_data) = self._fetch_data_batch(\n logits=logits, predictions=predictions, data=data)\n\n # Cache the data\n all_logits += fetched_logits\n all_predictions += fetched_predictions\n all_fetched_data[\"target\"] += fetched_data[\"target\"]\n\n # break the loop if max_eval_batches is set\n num_eval_batches += 1\n if (max_eval_batches and\n num_eval_batches >= max_eval_batches):\n break\n\n except tf.errors.OutOfRangeError:\n pass\n\n # Evaluate\n scores = None\n if calculate_scores:\n scores = evaluation_fn(\n all_predictions,\n all_fetched_data[\"seq_1\"], # Should be empty\n all_fetched_data[\"seq_2\"], # Should be empty\n all_fetched_data[\"target\"])\n\n if write_results:\n _write_results_to_csv(\n all_logits,\n all_predictions,\n all_fetched_data,\n output_dir=os.path.join(\n self._logdir, RESULTS_CSV_FNAME))\n\n return len(all_predictions), scores",
"def fit_and_eval(self):\n clf_numbers = range(len(self.clfs))\n _parallel_fit_eval_number = partial(_parallel_fit_eval, data=self.data, clfs=self.clfs,\n evaluators=self.evaluators, scoring=self.scoring)\n # ToDo do it parallel\n #pool = Pool()\n #fit_and_eval_results = pool.map(_parallel_fit_eval_number, clf_numbers)\n fit_and_eval_results = []\n for i in clf_numbers:\n fit_and_eval_results.append(_parallel_fit_eval_number(i))\n self._update_result(fit_and_eval_results, clf_numbers)",
"def evaluate(golds, preds):\n correct_words = 0\n correct_sentences = 0\n\n words_total = 0.0\n sentences_total = 0.0\n\n for gold, pred in zip(golds, preds):\n # check whether entire tag sequence was correct\n sentences_total += 1\n if pred == gold:\n correct_sentences += 1\n\n # check individual tags for correctness\n for predicted_tag, gold_tag in zip(pred, gold):\n words_total += 1\n if predicted_tag == gold_tag:\n correct_words += 1\n\n return (correct_sentences/sentences_total, correct_words/words_total)",
"def evaluate(self, triples):\n all_result = self.forward_eval(triples) # b x num_entities\n # uses tail as index to get target score\n target_result = all_result.gather(dim=-1, index=triples[:, 2].unsqueeze(-1))\n return all_result, target_result",
"def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics",
"def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tself.scores = self.tf_score()\r\n\t\treturn self.scores",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def compute_assignment_rules(self, simulation_result, species_ids):\n simulation_result_dict = {}\n for pos, id in enumerate(species_ids):\n simulation_result_dict[id] = simulation_result[:, pos]\n result = {}\n for ar in self.model.assignment_rules:\n result[ar] = eval(self.model.assignment_rules[ar][True],\n self.model.external_species_concentrations,\n simulation_result_dict)\n return result",
"def evaluate(self) :\n pass",
"def evaluate(self, inputs, targets):\n error = 0\n for input, target in zip(inputs, targets):\n output = self.feedforward(input)\n error += self.c(output, target)\n return error"
]
| [
"0.6405527",
"0.626387",
"0.614188",
"0.60892797",
"0.5997984",
"0.59179175",
"0.58075917",
"0.5800151",
"0.5796685",
"0.57785195",
"0.5775786",
"0.5748048",
"0.5720896",
"0.5712271",
"0.5695938",
"0.567937",
"0.5655204",
"0.5627428",
"0.56098115",
"0.55980873",
"0.5597552",
"0.55545",
"0.55392975",
"0.55262196",
"0.55206525",
"0.5519924",
"0.5519924",
"0.54994947",
"0.5491263",
"0.54866916"
]
| 0.7392089 | 0 |
Create the plots from the detected signs. | def create_plots(self):
shutil.rmtree(self.param.path, ignore_errors=True)
os.makedirs(self.param.path)
ids = list(range(len(self.signs)))
"""True positives"""
values, kinds = self.get_evaluations(ids)
plots.create_plot(
kinds,
[e[0] for e in values], # True positives
save_dir=self.param.path,
y_label="number_tp",
file_name="number_tp",
title="Amount of true positives",
)
# Only signs with at least one detection!
ids = [i for i, _ in enumerate(self.signs) if self.signs[i].evaluate()[2] > 0]
values, kinds = self.get_evaluations(ids)
"""Distance"""
plots.create_plot(
kinds,
values=[e[2] for e in values], # Distances
save_dir=self.param.path,
y_label="distance",
file_name="distance",
title="Distance",
)
"""Precision"""
plots.create_plot(
kinds,
        # Precision: only signs with at least one detection are used (e[0]+e[1] > 0)
values=[e[0] / (e[0] + e[1]) for e in values],
save_dir=self.param.path,
y_label="precision",
file_name="precision",
title="Precision",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_asimov_significance_plots(self):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n outdir = os.path.join(self.outdir, 'Significances')\n mkdir(outdir)\n maintitle = self.make_main_title(\n end='Asimov Analysis Significances',\n end_center=True\n )\n\n # Want to ensure the resulting y range can show all of the plots\n # Therefore find the max and min sig of the whole set of data_sets\n maxsig = None\n minsig = None\n # xrange is easier\n hrange = self.inj_param_vals[-1]-self.inj_param_vals[0]\n xlims = [self.inj_param_vals[0]-0.1*hrange,\n self.inj_param_vals[-1]+0.1*hrange]\n\n for i in range(len(self.data_sets)):\n\n significances = self.deltachi2_significance(\n wh_to_th_metrics=self.wh_to_th[i]['metrics'],\n th_to_wh_metrics=self.th_to_wh[i]['metrics']\n )\n\n truth = self.labels[i][\n self.labels.keys()[0]].dict['data_name'].split('_')[0]\n plotlabel = 'True %s'%self.tex_axis_label(truth)\n \n self.make_1d_graph(\n xvals=self.inj_param_vals,\n yvals=significances,\n xlabel=self.inj_param_name,\n xunits=self.inj_param_units,\n ylabel=None,\n yunits=None,\n marker=self.marker_style(truth),\n color=self.plot_colour(truth),\n plotlabel=plotlabel,\n xlims=xlims\n )\n\n if maxsig is None:\n maxsig = max(significances)\n else:\n maxsig = max(maxsig, max(significances))\n if minsig is None:\n minsig = min(significances)\n else:\n minsig = min(minsig, min(significances))\n\n # Give a more descriptive y-axis label if only one thing being plotted\n if len(self.data_sets) == 1:\n alt = self.labels[\n self.labels.keys()[0]].dict['%s_name'%(\n self.wh_to_th[0]['params']['altfit'])].split('_')[0]\n plt.ylabel(r'%s from %s Significance $\\left(\\sigma\\right)$'%(\n self.tex_axis_label(truth),\n self.tex_axis_label(alt)\n ))\n else:\n plt.ylabel(r'Significance $\\left(\\sigma\\right)$', fontsize=24)\n\n vrange = maxsig - minsig\n plt.ylim(minsig-0.1*vrange, maxsig+0.2*vrange)\n plt.title(maintitle, fontsize=16)\n plt.legend(loc='best')\n plt.tight_layout()\n save_end = \"%s_asimov_significances\"%(self.inj_param_name)\n self.save_plot(outdir=outdir, end=save_end, truth=truth)\n if self.extra_points is not None:\n yminextra, ymaxextra = self.add_extra_points()\n yminall = min(yminextra, minsig)\n ymaxall = max(ymaxextra, maxsig)\n vrange = ymaxall - yminall\n if yminall == 0:\n plt.ylim(yminall, ymaxall+0.2*vrange)\n else:\n plt.ylim(yminall-0.1*vrange, ymaxall+0.3*vrange)\n plt.legend(loc='upper left')\n save_end = \"%s_asimov_significances_w_extra_points\"%(\n self.inj_param_name)\n self.save_plot(outdir=outdir, end=save_end, truth=truth)\n plt.close()",
"def plot(self):\n\t\tself.plotOfSpect()",
"def plot_detections(self, draw_landmarks=True, draw_facelines=True, muscle=False, pose=False):\n\n from PIL import Image\n import matplotlib.pyplot as plt\n from matplotlib.patches import Rectangle\n import seaborn as sns\n from textwrap import wrap\n sns.set_context(\"paper\", font_scale=2.0)\n\n # check how many images.\n inputs = self.input().unique()\n all_axes = []\n for imagefile in inputs:\n f, axes = plt.subplots(1, 3, figsize=(15, 7))\n ax = axes[0]\n try:\n if os.path.exists(imagefile):\n color = \"w\"\n # draw base image\n im = Image.open(imagefile)\n ax.imshow(im)\n image_exists = True\n else:\n image_exists = False\n color = \"k\"\n except:\n color = \"k\"\n print(f\"Input image {imagefile} not found.\")\n image_exists = False\n\n sub_data = self.query(\"input==@imagefile\")\n for i in range(len(sub_data)):\n # draw landmarks\n row = sub_data.iloc[[i]]\n landmark = row.landmark().values[0]\n currx = landmark[:68]\n curry = landmark[68:]\n if draw_landmarks:\n if draw_facelines:\n draw_lineface(currx, curry, ax=ax, color=color, linewidth=3)\n else:\n draw_lineface(currx, curry, ax=ax, color=color, linewidth=0)\n # muscle\n if muscle:\n au20index = [\n f\"AU{str(i).zfill(2)}\"\n for i in [\n 1,\n 2,\n 4,\n 5,\n 6,\n 7,\n 9,\n 10,\n 12,\n 14,\n 15,\n 17,\n 18,\n 20,\n 23,\n 24,\n 25,\n 26,\n 28,\n 43,\n ]\n ]\n aus = row.aus().T.reindex(index=au20index).fillna(0).T.values[0]\n draw_muscles(currx, curry, au=aus, ax=ax, all=\"heatmap\")\n # facebox\n facebox = row.facebox().values[0]\n rect = Rectangle(\n (facebox[0], facebox[1]),\n facebox[2],\n facebox[3],\n linewidth=2,\n edgecolor=\"cyan\",\n fill=False,\n )\n ax.add_patch(rect)\n\n # facepose\n if pose:\n draw_facepose(pose=row.facepose().values[0], facebox=facebox, ax=ax)\n\n if image_exists:\n if sub_data.input().any():\n ax.set_title(\n \"\\n\".join(wrap(sub_data.input().unique()[0], 30)), loc=\"left\", wrap=True, fontsize=14\n )\n else:\n ax.set_title(\"\\n\".join(wrap(imagefile, 30)), loc=\"left\", wrap=True, fontsize=14)\n ax.set(ylim=ax.get_ylim()[::-1])\n ax.set_aspect(\"equal\", \"box\")\n\n # plot AUs\n sub_data.aus().T.plot(kind=\"barh\", ax=axes[1])\n axes[1].invert_yaxis()\n axes[1].get_legend().remove()\n axes[1].set(xlim=[0, 1.1], title=\"Action Units\")\n\n # plot emotions\n sub_data.emotions().T.plot(kind=\"barh\", ax=axes[2])\n axes[2].invert_yaxis()\n axes[2].get_legend().remove()\n axes[2].set(xlim=[0, 1.1], title=\"Emotions\")\n\n plt.tight_layout()\n plt.show()\n all_axes.append(axes)\n return axes",
"def main():\n colors = {\n 0: 'w',\n 1: 'g',\n 2: 'r',\n 3: 'c',\n 4: 'm',\n 5: 'y',\n 6: 'k',\n 7: 'b',\n UNKNOWN_EMOTION: '0.1'\n }\n\n plot_data = { emotion: ([], []) for emotion in EMOTIONS }\n\n subjects = get_subjects()\n for subject in subjects:\n image_sequences = get_image_sequences(subject)\n for image_sequence in image_sequences:\n emotion = read_emotion(subject, image_sequence)\n X, Y = read_peak_landmarks(subject, image_sequence)\n\n plot_data[emotion][0].append(X)\n plot_data[emotion][1].append(Y)\n\n for emotion in EMOTIONS:\n if emotion == UNKNOWN_EMOTION or len(plot_data[emotion][0]) == 0:\n continue\n\n X = np.concatenate(plot_data[emotion][0])\n Y = np.concatenate(plot_data[emotion][1])\n plt.scatter(X, Y, color=colors[emotion], alpha=0.5, s=20, lw=0, label=EMOTIONS[emotion])\n\n plt.xlabel('X pixel position of landmark.')\n plt.ylabel('Y pixel position of landmark.')\n plt.legend()\n plt.grid(True)\n plt.show()",
"def plot(self):\n\t\tself.plotOfSpect().plot()",
"def make_plot(counts):\n # YOUR CODE HERE\n posX=[]\n posY=[]\n negX=[]\n negY=[]\n\t\n count=1\n for i in counts:\n\tif len(i)!=0:\t\n\t\tposX.append(count)\n\t posY.append(i[0][1])\n\t\tnegX.append(count)\n\t negY.append(i[1][1])\n\t count=count+1\n\t\n line1, =plt.plot(posX,posY,marker=\"o\",label=\"Positive\",color=\"g\")\n line2, =plt.plot(negX,negY,marker=\"o\",label=\"Negative\",color=\"r\")\n plt.xlabel('Time Step')\n plt.ylabel('Word Count')\n plt.title('Basic Twitter Sentiment Analytics')\n plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})\n plt.show()",
"def plot(self):\n pass",
"def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n # Figure 1: Position\n fig = self.plot_kine_var(1, self.trial_name, ('X (mm)', 'Y (mm)', 'Z (mm)'), self.prev_filled[0],\n self.smoothed[0], self.filled[0], self.sfs[0])\n figs.append(fig)\n\n # Figure 2: Orientation\n fig = self.plot_kine_var(2, self.trial_name, ('Flex/Ext (deg)', 'Lat Flex (deg)', 'Axial (deg)'),\n self.prev_filled[1], self.smoothed[1], self.filled[1], self.sfs[1])\n figs.append(fig)\n\n return figs",
"def save_figs(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving plots...\")\n # 1. Generate the required PNG plots\n # 1.1 Truncation plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,2,figsize=(8,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n cycd=0.5*(cyct[1:]+cyct[:-1])\n ax[0].plot(cyct,self.samplesdata[:,i],'k.-',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r-',linewidth=1,label=\"Truncated\")\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n plt.legend(loc='upper left',frameon=False)\n # First derivative\n ax[1].plot(cycd,self.samplesdatadiff[:,i],'k.-',linewidth=0.5)\n ax[1].axvline(self._cutoffidx[i],color='r')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([self.samplesdatadiff.min()*1.1,self.samplesdatadiff.max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"dF/dCycle (a.u.)\")\n ax[1].set_title(\"Fluorescence rate\")\n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"01truncation\",fn)\n self.info['samples'][s]['Data truncation for fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close() \n # 1.2 Fitting plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,3,figsize=(12,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n ax[0].plot(cyct,self.samplesdata[:,i],'k:',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r.-',linewidth=0.5,label=\"Truncated\")\n #ax[0].plot(cycf,self.mak3fpre[s],'y-',linewidth=1,label=\"prefit\")\n ax[0].plot(cycf,self.mak3fluorescence[s],'g-',linewidth=1,label=\"MAK3 fit\")\n ax[0].axvline(self._cutoffidx[i],color='k')\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n ax[0].legend(loc='upper left',frameon=False)\n # DNA levels\n ax[1].plot(cycf,self.mak3concentration[s],'g-',linewidth=1,label=\"MAK3\")\n ax[1].axvline(self._cutoffidx[i],color='k')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([0,self.mak3concentration[s].max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"concentration (a.u.)\")\n ax[1].set_title(\"estimated cDNA levels\")\n # Efficiency\n ax[2].plot(cycf,self.mak3efficiency[s],'b-',linewidth=1,label=\"MAK3\")\n ax[2].axvline(self._cutoffidx[i],color='k')\n ax[2].set_xlim([0,self.nvalues-1])\n ax[2].set_ylim([0,1.1])\n ax[2].set_xlabel(\"Cycle\")\n ax[2].set_ylabel(\"Efficiency\")\n ax[2].set_title(\"Amplification efficiency\") \n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"02mak3\",fn)\n self.info['samples'][s]['MAK3 Fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 2 Initial concentrations\n figwdth=np.maximum(5,0.4*self.nsamples+1)\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.initialConcentration.values())\n k=list(self.initialConcentration.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00initialConcentration\")\n 
self.info['figname_initialConcentration']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 3 Fitting Error\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.fitting_error.values())\n k=list(self.fitting_error.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,1e-2])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fittingError\")\n self.info['figname_fittingError']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 4 kinetic constant\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.k.values())\n k=list(self.k.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00kineticConstant\")\n self.info['figname_k']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 5 background fluorescence\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.Fb.values())\n k=list(self.Fb.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00bkgFluorescence\")\n self.info['figname_Fb']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 6 slope\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.slope.values())\n k=list(self.slope.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,0.025])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fluorescenceSlope\")\n self.info['figname_slope']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))",
"def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))",
"def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot",
"def detect_sign(points: np.array, visualize: bool = False, min_ratio: float = 0.75) -> None:\n scene = io.SceneViewer(points) # Initialization of scene view\n sign_detector = alg.BiModal() # Initialization of bi modal detector\n\n if visualize:\n scene.show_cluster(points, False, title='Scene preview')\n scene.show_cluster(points, True, title='Scaled scene preview') # Cluster preview\n\n sign_detector.fit_kde(points[:, -1])\n dens, dens_x = sign_detector.produce_density_arr(points[:, -1], 100) # Generation of smooth density function\n mode_status = sign_detector.detect_modes(dens_x, dens) # Mode detection and threshold detection\n\n io.status_report(mode_status) # Stops the execution if not a sign\n\n points_plate, points_pole = sign_detector.separate_by_thresh(points) # Two sets of points - pole and plate\n\n if visualize:\n scene.show_cluster(points_plate, True, title='Plate preview') # Plate preview\n scene.show_cluster(points_pole, True, title='Pole preview') # Pole preview\n\n plate_plane = alg.Plate(min_ratio) # At least 75% of points have to form a plane\n plate_status = plate_plane.detect_plane_coefs(points_plate) # Detect the 3D plane coefficients and normal\n\n io.status_report(plate_status) # Stops the execution if not a sign\n\n if visualize:\n scene.show_cluster(np.concatenate((plate_plane.inliers, plate_plane.outliers), 0),\n True, title='Plate inliers vs outliers') # Inliers and outliers preview\n\n projected_points = plate_plane.project_to_plate(points_plate) # Project plate points to plate, including outliers\n\n if visualize:\n scene.show_cluster(projected_points, True, title='Projection preview') # Preview the projected points\n\n projected_points, img = plate_plane.rotate_plane(projected_points) # Align plate plane with xy plane\n\n if visualize:\n io.show_image(img, title='Points in pixel space') # Preview the image\n\n img = plate_plane.detect_shapes(img, visualize) # Check what shape suits the most to plate\n\n if visualize:\n io.show_image(img, title='Points bounded by sign estimated plate shape')",
"def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()",
"def create_graphic(X):\n plt.close('all')\n plt.figure(figsize=(12,6))\n sns.set(style='darkgrid', palette='bright')\n for i,j in enumerate(X): \n plt.subplot(2, 3, (i+1))\n plt.text(X[j], 0, X[j], color='black')\n plt.axvline(x=X[j], linestyle='--', c='red')\n sns.distplot(data[j].dropna(), bins=30, kde=False)\n plt.tight_layout()\n img = io.BytesIO()\n plt.savefig(img, format='png')\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n graph = 'data:image/png;base64,{}'.format(graph_url)\n return graph",
"def plot_array(self):\n locations = np.array([getattr(pulsar, 'location') for pulsar in self.pulsars])\n fig = plt.figure()\n ax = plt.subplot(111, projection=\"hammer\")\n for location in locations:\n plt.plot(location.ra, location.dec, '.', color='b')\n return fig",
"def make_figure(self, traces):\n pass",
"def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"",
"def make_plot(x,y):",
"def make_plot(counts):\n cn1 = []\n cn2 = []\n time = []\n\n for x in counts:\n y1 = x[0]\n cn1.append(y1[1])\n y2 = x[1]\n cn2.append(y2[1])\n\n for i in range(len(counts)):\n time.append(i)\n\n posLine = plt.plot(time, cn1,'bo-', label='Positive')\n negLine = plt.plot(time, cn2,'go-', label='Negative')\n plt.axis([0, len(counts), 0, max(max(cn1), max(cn2))+50])\n plt.xlabel('Time step')\n plt.ylabel('Word count')\n plt.legend(loc = 'upper left')\n plt.show()\n plt.savefig(\"plot.png\", format=\"png\")",
"def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()",
"def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = 
plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = 
matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names",
"def boot_induvidual_plot(self): # Setting up induvidual plots\n self.plot_traits = list([self.plt_0.subplot2grid((2, 5), (0, 0)), self.plt_0.subplot2grid((2, 5), (0, 1)),\n self.plt_0.subplot2grid((2, 5), (0, 2)), self.plt_0.subplot2grid((2, 5), (0, 3)),\n self.plt_0.subplot2grid((2, 5), (0, 4))])\n\n # creatng list of plot objects\n\n for x in range(len(self.X_transp)): # Iterating over each attributes patient\n\n present=self.plot_traits[x]\n # Selecting a particular plot object\n present.set_facecolor('orange')\n # setting face color\n present.scatter(self.np_0.arange(len(self.list_patient_names)),self.X_transp[x],c='blue')\n # drawing a scatter plot of this attribute\n\n present.xaxis.set_major_locator(plt.MultipleLocator(1))\n\n present.set_xlabel('Patient ID', fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(self.list_attributes[x], fontweight='bold')\n # setting Y-LABEL\n present.title.set_text(self.list_attributes[x]+\" Variation\")\n # setting Title\n\n present = self.plt_0.subplot2grid((2, 5), (1, 0), colspan=5)\n # to plot the present's status\n present.scatter(self.X_reduced_transp[0], self.X_reduced_transp[1], c='red')\n # plotting in the BOTTOM-PLOT\n\n present.set_xlabel(\"Principle Component -1\", fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(\"Principle Component -2\", fontweight='bold')\n # setting Y-LABEL\n\n for x in range(len(self.list_patient_names)): # Naming each patient with ID\n self.list_patient_names[x] = \"Patient \" + str(x)\n # Eg: Patient 0,Patient 1...\n for i, txt in enumerate(self.list_patient_names): # This is used to enumerate the scatter plots label\n present.annotate(txt, (self.X_reduced_transp[0][i] + 1, self.X_reduced_transp[1][i]), fontsize=10, c='black')\n # Coonecting with present",
"def plot(path, subjects):\n transformToXYZmm = np.array([[-3.125, 0, 0, 81.250], [0, 3.125, 0, -115.625], [0, 0, 6, -54.000], [0, 0, 0, 1.000]])\n data = data_load.load_data(path, subjects)\n dimx = int(data[0][\"meta\"][\"dimx\"][0])\n dimy = int(data[0][\"meta\"][\"dimy\"][0])\n dimz = int(data[0][\"meta\"][\"dimz\"][0])\n coordToCol = data[0][\"meta\"][\"coordToCol\"][0][0]\n images = {}\n max_val = 0\n voxels = np.load(\"data/general_selected_500_1.npy\")\n directory = os.listdir(\"data/input/\")\n bar = pyprind.ProgBar(len(directory), title='Info extraction and Image Building')\n bar2 = pyprind.ProgBar(len(images.keys()), title='Saving Pictures')\n for file in directory:\n file_name = \"data/input/{}\".format(file)\n fh = open(file_name)\n activation_values = np.asarray(list(map(lambda x: float(x), filter(lambda x: x != '', fh.read().split(\",\")))))\n fh.close()\n plot_matrix = np.zeros((dimx, dimy, dimz))\n for x in range(dimx):\n for y in range(dimy):\n for z in range(dimz):\n indice = coordToCol[x][y][z]\n if indice != 0:\n if indice in list(voxels):\n voxel_indice = list(voxels).index(indice)\n value = activation_values[voxel_indice]\n if abs(value) > max_val:\n max_val = abs(value)\n plot_matrix[x][y][z] = value\n image = nib.Nifti1Image(plot_matrix, transformToXYZmm)\n images[file_name] = image\n bar.update(force_flush=True)\n print(bar)\n for image in images:\n plotting.plot_glass_brain(images[image], display_mode='ortho', vmax=max_val, plot_abs=False, threshold=None, colorbar=True, output_file=\"{}-wom1.png\".format(image))\n bar2.update(force_flush=True)\n print(bar2)",
"def vis_detections(im, dets):\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n\n for det in dets:\n bbox = det[:4]\n score = det[-2]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=2)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(det[-1], score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('eye and pupil detections'), fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()",
"def plot_tsnes():\n # Two environments (for main paper figure. All for final figure)\n ENVS = [\n \"BipedalWalker-v3\",\n #\"LunarLander-v2\",\n #\"Pendulum-v0\"\n \"Acrobot-v1\",\n #\"CartPole-v1\"\n ]\n ALGO_TYPES = [\n \"stablebaselines\",\n \"stablebaselines\",\n \"wann\",\n \"wann\",\n ]\n ALGO_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMAES\",\n ]\n ALGO_PRETTY_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMA-ES\"\n ]\n\n REWARD_SCALES = {\n \"Pendulum-v0\": [-1600, -200],\n \"Acrobot-v1\": [-500, -100],\n \"LunarLander-v2\": [-230, 200],\n \"BipedalWalker-v3\": [-100, 300],\n \"CartPole-v1\": [0, 500]\n }\n\n figure, axs = pyplot.subplots(\n figsize=[6.4 * 2, 4.8],\n nrows=2,\n ncols=4,\n gridspec_kw={'hspace': 0, 'wspace': 0},\n )\n\n for plot_i in range(2):\n env = ENVS[plot_i]\n reward_scale = REWARD_SCALES[env]\n for algo_i in range(len(ALGO_TYPES)):\n column_idx = (algo_i % 2) + plot_i * 2\n row_idx = 0 if algo_i <= 1 else 1\n ax = axs[row_idx, column_idx]\n algo_type = ALGO_TYPES[algo_i]\n algo_name = ALGO_NAMES[algo_i]\n algo_pretty_name = ALGO_PRETTY_NAMES[algo_i]\n\n experiment_glob = \"experiments/{}_{}_{}_*\".format(algo_type, env, algo_name)\n experiment_paths = glob(experiment_glob)\n tsnes = []\n rewards = []\n for experiment_path in experiment_paths:\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n population_tsnes = []\n population_rewards = []\n for path in pivector_paths:\n data = np.load(path)\n population_tsnes.append(data[\"tsne\"])\n population_rewards.append(data[\"average_episodic_reward\"])\n data.close()\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n tsnes = np.concatenate(tsnes, axis=0)\n rewards = np.concatenate(rewards, axis=0)\n\n # Min-max normalization\n rewards = (rewards - reward_scale[0]) / (reward_scale[1] - reward_scale[0])\n\n scatter = ax.scatter(\n tsnes[:, 0],\n tsnes[:, 1],\n c=rewards,\n cmap=\"plasma\",\n s=1,\n vmin=0,\n vmax=1\n )\n\n ax.text(0.98, 0.98, algo_pretty_name, horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes)\n ax.set_xticks([])\n ax.set_yticks([])\n # Hide spines, the outer edges\n ax.spines[\"top\"].set_alpha(0.2)\n ax.spines[\"bottom\"].set_alpha(0.2)\n ax.spines[\"left\"].set_alpha(0.2)\n ax.spines[\"right\"].set_alpha(0.2)\n # Hide edge spines and bolden mid-spines\n if row_idx == 0:\n ax.spines[\"top\"].set_visible(False)\n else:\n ax.spines[\"bottom\"].set_visible(False)\n if column_idx == 0:\n ax.spines[\"left\"].set_visible(False)\n elif column_idx == 1:\n ax.spines[\"right\"].set_alpha(1.0)\n elif column_idx == 2:\n ax.spines[\"left\"].set_alpha(1.0)\n elif column_idx == 3:\n ax.spines[\"right\"].set_visible(False)\n\n # Add titles\n if row_idx == 0 and (column_idx == 0 or column_idx == 2):\n ax.set_title(env.split(\"-\")[0], x=1.0)\n\n cbaxes = figure.add_axes([0.4, 0.94, 0.2, 0.02])\n cbar = figure.colorbar(scatter, orientation=\"horizontal\", cax=cbaxes)\n cbar.set_ticks([0.0, 0.5, 1.0])\n cbar.set_ticklabels([\"Min\", \"Reward\", \"Max\"])\n cbar.ax.xaxis.set_ticks_position('top')\n cbar.ax.xaxis.set_label_position('top')\n cbar.ax.tick_params(labelsize=\"small\", length=0)\n figure.tight_layout()\n figure.savefig(\"figures/tsnes.png\", dpi=200, bbox_inches=\"tight\", pad_inches=0.0)",
"def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)",
"def plot4(self, plog=False):\n\n probs = pd.read_csv(self.probfile)\n\n plt.rc('font', size=14)\n fig, ax = plt.subplots()\n plt.plot(self.ds.freq, self.snr, 'k-', alpha=0.5, zorder=1)\n\n # plot the SNR range to search across when finding snr_modes\n for idx, line in enumerate(self.ds.mode_id['f0']):\n w = np.exp(self.ds.mode_id['w0'][idx])\n plt.axvline(x=line-w, color='b', linestyle='-', alpha=0.4)\n plt.axvline(x=line+w, color='b', linestyle='-', alpha=0.4)\n\n # overplot the predicted SNR values at the modes\n plt.scatter(probs['f0'], probs['SNR_Kepler'], label='Kepler - 4yrs', alpha=1, zorder=2)\n plt.scatter(probs['f0'], probs['SNR_TESS365'], label='TESS - 1 yr', alpha=1, zorder=3)\n plt.scatter(probs['f0'], probs['SNR_TESS27'], label='TESS - 27 days', alpha=1, zorder=4)\n\n if plog:\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(r'$\\nu$ / $\\rm \\mu Hz$')\n plt.ylabel(r'SNR')\n\n mn = min(star.ds.mode_id['f0']) -\\\n (max(star.ds.mode_id['f0'])-min(star.ds.mode_id['f0']))/7.\n mx = max(star.ds.mode_id['f0']) +\\\n (max(star.ds.mode_id['f0'])-min(star.ds.mode_id['f0']))/7.\n plt.xlim([mn,mx])\n\n plt.legend()\n plt.title('KIC ' + str(self.ds.epic))\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'plot4_SNR' + self.ds.epic + '.pdf')",
"def make_plots(self):\n\n # main fixation times data frame\n average_fixation_df = pd.DataFrame()\n\n # create a data frame with the fixation times for each participant, create a box plot with it,\n # and append it to the main data frame\n for idx, dataframe in enumerate(self.cGOM_dataframes):\n aois = EyeTracking.areas_of_interest(dataframe)\n participant_fixations = EyeTracking.fixations(aois, dataframe)\n\n Plot.make_boxplot(data_frame=participant_fixations,\n figure_save_path=self.PARTICIPANT_FIGURE_PATH.format(idx + 1),\n title='Average fixation duration: participant {}'.format(idx + 1),\n ylabel='Fixation duration [s]',\n xlabel='Area of interest'\n )\n\n average_fixation_df = average_fixation_df.append(participant_fixations, ignore_index=True)\n\n # create a bar plot and a box plot with the fixations of all participants or\n # do nothing if no cGOM data is provided\n try:\n Plot.make_boxplot(data_frame=average_fixation_df,\n figure_save_path=self.BOX_PLOT_FIGURE_PATH,\n title='Average fixation duration',\n ylabel='Fixation duration [s]',\n xlabel='Area of interest'\n )\n Plot.make_barplot(data_frame=average_fixation_df,\n figure_save_path=self.BAR_PLOT_FIGURE_PATH,\n title='Average fixation duration',\n ylabel='Fixation duration [s]',\n xlabel='Area of interest'\n )\n except ValueError:\n pass",
"def allDirectionalityRatios(ratioFunction):\n if not os.path.exists(\"savedHeatmaps\"):\n os.mkdir(\"savedHeatmaps\")\n wildRatio = np.log(ratioFunction(\"Wildtype_0min_BglII_rep1\"))\n for j, dataset in enumerate(datasets):\n ax = plt.subplot(len(datasets), 1, j + 1)\n curRatio = (ratioFunction(dataset))\n plt.title(\"{1}, r = {0:.2f}, p={2:.2e}\".format(pearsonr(curRatio, wildRatio)[0], names[dataset],\n pearsonr(curRatio, wildRatio)[1]), fontsize=10)\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.plot(curRatio)\n plt.ylim((0.25, 0.75))\n plt.xlim((0, len(curRatio)))\n #plt.ylim((0, 1))\n plt.yticks((0.25, 0.5, 0.75))\n geneCoor = [1162773, 3509071, 1180887, 543099, 1953250, 2522439, 3328524, 1503879, 900483, 242693, 3677144, 3931680, 3677704, 3762707, 3480870, 3829656, 1424678, 901855, 1439056, 3678537]\n genePos = [i / 10000. for i in geneCoor]\n #genePos = []\n for lpos in genePos:\n plt.vlines(lpos , -.8, .8, alpha=0.2, linewidth=1, color=\"black\")\n plt.xticks([0, 50, 100, 150, 200, 250, 300, 350, 400], [\"\" for i in xrange(9)], fontsize=98)\n removeAxes(ax=ax)\n plt.subplots_adjust(0.07, 0.05, 0.94, 0.95, 0.2, 0.5)\n\n\n\n plt.show()\n exit()",
"def Diagnostic_plot2(self):\n\n probs = pd.read_csv(self.probfile)\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'$P_{\\rm det}$')\n plt.scatter(probs['f0'], probs['Pdet_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['Pdet_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['Pdet_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_Pdet' + self.ds.epic + '.pdf')\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'SNR')\n plt.scatter(probs['f0'], probs['SNR_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['SNR_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['SNR_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n #plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_SNR' + self.ds.epic + '.pdf')"
]
| [
"0.6569927",
"0.61876655",
"0.6008588",
"0.5985453",
"0.594897",
"0.5938306",
"0.5926033",
"0.590905",
"0.590554",
"0.59010196",
"0.5889001",
"0.58831525",
"0.58794993",
"0.5871761",
"0.58256704",
"0.5796685",
"0.57645845",
"0.57590383",
"0.57559466",
"0.5752957",
"0.57412344",
"0.57332146",
"0.5721992",
"0.57119507",
"0.56724167",
"0.56348395",
"0.5628696",
"0.5621737",
"0.5606727",
"0.55878866"
]
| 0.72643757 | 0 |
Publish an RViz marker on the publisher's topic. | def _publish_point_marker(
self,
point: Point,
id: int,
ns="simulation/sign_evaluation",
):
rospy.logdebug(f"display point {point}")
marker = visualization.get_marker(
frame_id="sim_world",
rgba=[255, 0, 255, 255],
id=id,
type=2,
ns=ns,
scale=0.05,
duration=1 / self.param.rate,
)
marker.pose.position = point.to_geometry_msg()
try:
self.marker_publisher.publish(marker)
except Exception as err:
rospy.logerr(err) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def publish(self, message: str) -> None:",
"def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )",
"def on_publish(client, userdata, mid):\n print(\"Message Published.\")",
"def on_publish(mqttc, obj, mid):\n logger.debug(\"MQTT PUBLISH: mid: \" + str(mid))",
"def publish():\n pass",
"def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)",
"def publish(self, kpi_dict):\n pass",
"async def publish(self, body, routing_key=None):\n pass # pragma: no cover",
"def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass",
"def on_publish(client, userdata, mid):\n print('on_publish')\n print(\" userdata:\" + str(userdata))\n print(\" mid:\" + str(mid))\n print()",
"def on_publish(client: mqtt.Client, userdata: Any, mid: int) -> None:\n logging.info(f\"Successfully published a message: mid={mid}\")",
"def publisher(self, iTag, msgType, addr):\r\n return ROSPublisher(self, iTag, msgType, addr)",
"def publish(self, publisher):\n publisher._send(self.payload.event, self.info, *self.payload.args,\n **self.payload.kwargs)",
"def publish_to_simulation(self, topic, message, **kwargs):\n pass",
"def on_publish(unused_client, unused_userdata, unused_mid):\n\tprint('on_publish')",
"def publish(self, message: model.MQTTMessage):\n self.client.publish(message.topic, payload=message.get_payload())",
"def on_publish(self, mqtt_client, userdata, mid):\n logging.debug(\"DEBUG - publish ack received\")",
"def publish(self):\n return",
"def publish(self, id: uplink.Path):\n pass",
"def publish_mqtt(self, topic, data={}, on_publish=None, on_response=None, inject_rid=True):\n payload = data\n\n # If this is a dict and we're allowed to inject a request ID, do so\n # Injecting a request ID allows the nodes to respond and us to execute callbacks\n if (type(data) is dict) and inject_rid:\n data['rid'] = str(shortuuid.uuid())\n\n # JSON encode dicts, lists and stuff\n if type(data) in [dict, list, tuple]:\n payload = json.dumps(data)\n\n result, mid = self.mqtt.publish(topic, payload, qos=1)\n\n if on_publish:\n self.publish_callbacks[mid] = on_publish\n\n if on_response and data and data.get('rid', None):\n self.response_callbacks[data['rid']] = on_response\n\n self.publishes.append(mid)\n\n while mid in self.publishes:\n self.wait()",
"def test_publish(self):\n target_arn = 'testing'\n supercuboid_key = 'acd123'\n message_id = '123456'\n receipt_handle = 'a1b2c3d4'\n message = serializer.encodeIngestMessage(supercuboid_key, message_id, receipt_handle)\n self.sns.publish(self.topic_arn, message)\n message = self.sns.subscribe(self.topic_arn)",
"def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)",
"def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n 'msg': message\n }))",
"def pubISS():\n \n status = iss.positionStatus()\n place = iss.place\n countdown = iss.stringCountdown()\n if status != None:\n logging.info(\"Publishing ISS data. Status [{}], Place [{}], Countdown [{}]\".format(status, place, countdown))\n clientISS.publish(config.iss_status_topic, status)\n clientISS.publish(config.iss_place_topic, place)\n clientISS.publish(config.iss_countdown_topic, countdown)",
"def publish(self, event):\n self.pubsub_router.send(event)",
"def publish(self):\n msg_imu1, msg_mag1, msg_imu2, msg_mag2, msg_imu, msg_mag= self._create_msg()\n self.pub_imu.publish(msg_imu)\n self.pub_mag.publish(msg_mag)\n #------Uncomment to publish IMUs data separately------",
"def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)",
"def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )",
"def publishEvent(eventName,publisher, msg):",
"def on_publish(unused_client, unused_userdata, unused_mid):\n print('on_publish')\n status_light.on()"
]
| [
"0.6357063",
"0.62376064",
"0.6155916",
"0.61463344",
"0.6132296",
"0.61243963",
"0.6114466",
"0.6062014",
"0.6028903",
"0.58927435",
"0.58779794",
"0.5839856",
"0.5829416",
"0.5820119",
"0.58172786",
"0.58139145",
"0.57845473",
"0.57736254",
"0.57331103",
"0.57204676",
"0.56659764",
"0.56077665",
"0.56045586",
"0.5593101",
"0.55812913",
"0.55735415",
"0.5572931",
"0.5538816",
"0.55288756",
"0.55196464"
]
| 0.6295984 | 1 |
Add the detection to the closest sign. | def add_detection(self, point: Point, desc: str):
closest_sign = Sign.closest(self.signs, point)
if closest_sign.position.distance(point) < self.param.distance_threshold:
closest_sign.detections.append((point, desc))
self.evaluation_publisher.publish(closest_sign.to_msg()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detect_traffic_sign(image, mask, THRESHOLD = 100):\n \n\n contours = findContour(mask)\n sign, coordinate = findLargestSign(image, contours, 0.55, 10) #0.55 10\n k = cv2.waitKey(1)\n if k == ord(' '): \n x = np.random.randint(0,100)\n y = np.random.randint(0,100)\n z = np.random.randint(30,100)\n z = image[x:x+z,y,y+z] # wait for ESC key to exit\n cv2.imwrite('{}.png'.format(np.random.randint(1,50000)),z)\n # cv2.imwrite('Image/image_{}.png'.format(np.random.randint(1,10000)),image)\n # cv2.imwrite('../data/left/{}.png'.format(np.random.randint(1,5000)),sign)\n cv2.imshow('sign',sign)\n cv2.waitKey(1)\n cv2.rectangle(image, coordinate[0],coordinate[1],(0,255,0))\n# plt.figure()\n# plt.imshow(sign)\n# plt.title('detect sign')\n sign_gray_image = cv2.cvtColor(sign,cv2.COLOR_BGR2GRAY)\n _,sign_gray_image = cv2.threshold(sign_gray_image,127,255,cv2.THRESH_BINARY_INV)\n sign_gray_image = cv2.bitwise_not(sign_gray_image)\n \n (subHeight, subWidth) = sign_gray_image.shape[0]/5,sign_gray_image.shape[1]/5\n subHeight = int(subHeight)\n subWidth = int(subWidth)\n\n # cv2.rectangle(sign, (int(0.5*subWidth), 1*subHeight), (int(1.5*subWidth), 2*subHeight), (0,255,0),2) # left block\n # cv2.rectangle(sign, (int(3.5*subWidth), 1*subHeight), (int(4.5*subWidth), 2*subHeight), (255,0,0),2) # right block\n # plt.figure()\n # plt.imshow(sign_gray_image,cmap='gray')\n # plt.title('sign')\n leftBlock = sign_gray_image[int(1.5*subHeight):int(2.5*subHeight), int(0.5*subWidth):int(1.5*subWidth)]\n rightBlock = sign_gray_image[int(1.5*subHeight):int(2.5*subHeight), int(3.5*subWidth):int(4.5*subWidth)]\n \n # print(np.sum(leftBlock),np.sum(rightBlock),'left-----right')\n leftFraction = np.sum(leftBlock)/(leftBlock.shape[0]*leftBlock.shape[1])\n rightFraction = np.sum(rightBlock)/(rightBlock.shape[0]*rightBlock.shape[1])\n segments = (leftFraction, rightFraction)\n # print(coordinate[1],'coordinate')\n box = list(coordinate)\n if (leftFraction > rightFraction and leftFraction > 100):\n return 'left',coordinate\n elif (leftFraction < rightFraction and rightFraction > 100):\n return 'right',coordinate",
"def FindClosestPoint(self, ):\n ...",
"def FindClosestInsertedPoint(self, ):\n ...",
"def yield_sign_detection(img_in):\n img = img_in.copy()\n red_color_map = red_masking(img)\n\n # kernel = np.ones((7, 7), np.uint8)\n # red_color_map = cv2.filter2D(red_color_map, -1, kernel)\n # red_color_map = cv2.erode(red_color_map, np.ones((5, 5)))\n red_color_map = cv2.dilate(red_color_map, np.ones((5, 5)))\n canny_edges = cv2.Canny(red_color_map, threshold1=50, threshold2=250, apertureSize=5)\n\n min_line_length = 20\n max_pixel_gap = 5\n hough_lines = cv2.HoughLinesP(image=canny_edges,\n rho=.5,\n theta=np.pi / 180,\n threshold=25,\n minLineLength=min_line_length,\n maxLineGap=max_pixel_gap\n )\n if hough_lines is None:\n return None, None\n\n hough_lines = hough_lines[0, :]\n lines = remove_duplicates(hough_lines, dist=10)\n\n mid_x = None\n mid_y = None\n\n if len(lines) >= 6:\n # Fuzzy Logic here.\n # find the line that has the longest length and has a slope of 1. This line will\n # give us the vertex point we are interested in.\n line_slopes = np.array([calculate_slope(x[0], x[1], x[2], x[3]) for x in lines])\n line_lengths = np.array([calculate_line_length(x[0], x[1], x[2], x[3]) for x in lines])\n # the lines with a slope of 2 are the ones that will be found for this sign. Get these lines and store them.\n sloped_idx = np.where(line_slopes == 2)[0]\n\n if len(sloped_idx) < 2:\n return None, None\n\n # loop over the valid slops and figure out max lenght\n max_length = 0\n for sid in sloped_idx:\n idx_length = line_lengths[sid]\n if idx_length > max_length:\n max_length = idx_length\n\n v_idx = np.where(line_lengths == max_length)[0][0]\n vertex_line = lines[v_idx]\n\n # once the vertex line is found, all lines that are not within the length from the x1 can be ignore.\n max_x_1 = np.int(vertex_line[0] + max_length)\n min_x_1 = np.int(vertex_line[0] - max_length)\n # from lines delete all lines where x1 < min_x_1 and all lines where x1 > max_x_1\n l_list = lines.tolist()\n for idx, line in enumerate(l_list):\n if line[0] < min_x_1:\n del l_list[idx]\n elif line[0] > max_x_1:\n del l_list[idx]\n f_line = np.array(l_list)\n\n if len(f_line) < 6:\n return None, None # not a valid sign\n\n mid_x = vertex_line[0]\n x_point_2 = vertex_line[2]\n y_point = vertex_line[3]\n t = abs(x_point_2 - mid_x) * 2\n top_x_1, top_x_2 = [\n np.int(x_point_2 - t),\n np.int(x_point_2)\n ]\n mid_y = np.int(y_point + ((top_x_2 - top_x_1) * np.cos(np.pi / 6) -\n ((top_x_2 - top_x_1) / 2) / np.cos(np.pi / 6)))\n\n return mid_x, mid_y",
"def copysign(x, y):\n return 0.0",
"def FindClosestPointWithinRadius(self, p_float, , p_float_4):\n ...",
"def abs(self):\n return self * self.sign()",
"def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)",
"def stop_sign_detected_callback(self, msg):\n\n # distance of the stop sign\n corners = msg.corners\n dx = corners[3] - corners[1]\n dy = corners[2] - corners[0]\n\n r = dx/dy # aspect ratio\n\n rdist = np.array([.15, .20, .25, .30,.35, .40, .45, .50])\n pixelheight = np.array([139, 102, 82, 64, 56, 50, 44, 40])\n if dy > pixelheight[-1] and dy < pixelheight[0]:\n dist = np.interp(dy, pixelheight[::-1], rdist[::-1])\n else:\n return\n\n # Get location of camera with respect to the map\n try:\n (translation,rotation) = self.tf_listener.lookupTransform('/map', '/camera', rospy.Time(0))\n xcam = translation[0]\n ycam = translation[1]\n zcam = translation[2]\n euler = tf.transformations.euler_from_quaternion(rotation)\n thetacam = euler[2]\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n return\n\n # Get angle of robot with respect to the map\n try:\n (translation,rotation) = self.tf_listener.lookupTransform('/map', '/base_footprint', rospy.Time(0))\n euler = tf.transformations.euler_from_quaternion(rotation)\n thetarobot = euler[2]\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n return\n\n # Now we have pose of robot, we want to determine stop sign angle relative\n # to camera frame\n thstopsign = (wrapToPi(msg.thetaright) + wrapToPi(msg.thetaleft))/2.\n zstopsign = dist*np.cos(-thstopsign)\n xstopsign = dist*np.sin(-thstopsign)\n\n x = xcam + xstopsign*np.cos(thetacam) - zstopsign*np.sin(thetacam) \n y = ycam + xstopsign*np.sin(thetacam) + zstopsign*np.cos(thetacam)\n\n # Now that we have x and y coord of stop sign in world frame, append coord\n found = False\n for i in range(len(self.stopSigns[0])):\n xcur = self.stopSigns[0][i]\n ycur = self.stopSigns[1][i]\n thetarobotcur = self.stopSigns[2][i]\n distance = np.sqrt((x - xcur)**2 + (y - ycur)**2)\n n = self.stopSignCounts[i]\n if distance < .2:\n if n < 100:\n # We have found the same stop sign as before\n xnew = (n/(n+1.))*xcur + (1./(n+1))*x\n ynew = (n/(n+1.))*ycur + (1./(n+1))*y\n thetarobotnew = (n/(n+1.))*thetarobotcur + (1./(n+1))*thetarobot\n self.stopSigns[0][i] = xnew\n self.stopSigns[1][i] = ynew\n self.stopSigns[2][i] = thetarobotnew\n self.stopSignCounts[i] += 1\n found = True\n \n if not found:\n # Found a new one, append it\n self.stopSigns[0].append(x)\n self.stopSigns[1].append(y)\n self.stopSigns[2].append(thetarobot)\n self.stopSignCounts.append(1)",
"def traffic_sign_detection(img_in):\n raw_img = np.copy(img_in)\n DetectedObj = {}\n\n ################################### \n ### Detecting the traffic light ###\n ################################### \n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n\n circles = cv2.HoughCircles(cannyEdges,cv2.HOUGH_GRADIENT, 1, 20, param1=50,param2=26,minRadius=0,maxRadius=50)\n circles_selected = select_three(circles)\n\n if circles_selected != None:\n column = circles_selected[1][0]\n row = circles_selected[1][1]\n coordinates = (column, row)\n DetectedObj['Traffic_Sign'] = coordinates\n #cv2.circle(img_in, (circle[0], circle[1]), circle[2], (255, 0, 0), 2)\n\n\n ################################### \n ### Detecting the No_Entry sign ###\n ################################### \n\n for circle in circles[0, :]:\n column = circle[0]\n row = circle[1]\n coordinates = (column, row)\n state_pixels = img_in[int(row), int(column), :]\n if state_pixels[0] > 230 and state_pixels[1] > 230 and state_pixels[2] > 230 :\n DetectedObj['No_Entry'] = coordinates\n\n ################################# \n ### Detecting the Yield sign ###\n #################################\n\n coordinates = yield_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Yield'] = coordinates\n\n ################################# \n ### Detecting the Stop sign ###\n #################################\n\n coordinates = stop_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Stop'] = coordinates\n\n ################################# \n ### Detecting the Construction###\n #################################\n\n coordinates = construction_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Construction'] = coordinates\n\n ################################# \n ### Detecting the Warning_Sign###\n #################################\n\n coordinates = warning_sign_detection(img_in)\n if coordinates != None:\n DetectedObj['Warning_Sign'] = coordinates\n\n return DetectedObj\n raise NotImplementedError",
"def do_not_enter_sign_detection(img_in):\n img_in = img_in.copy()\n img = red_masking(img_in)\n # display_img(img, 'DNE_RED')\n img = process_base_image(img, (7, 7))\n # Assumption made that a DNE sign will always have at least a\n # radius of 5.\n min_radius = 15\n max_radius = np.int(img.shape[1] / 2)\n circles = hough_circles(img, 1, 10, 30, 30, min_radius, max_radius)\n\n if circles is not None:\n circles = np.uint16(np.around(circles))\n circles = circles[0, :]\n # since multiple circles might be found, the correct one\n circle_mid_colors = [pixel_color(img, x[0], x[1]) for x in circles]\n valid_idx = circle_mid_colors.index(np.max(circle_mid_colors))\n the_sign = circles[valid_idx]\n else:\n return None, None\n\n output = (the_sign[0], the_sign[1])\n return output",
"def fix_point_arithmetic(self):\n\n return self._fixpntar",
"def stop_sign_detection(img_in):\n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /90, threshold=20, minLineLength=20, maxLineGap=1)\n\n Line_list = []\n Angle_45 = []\n Angle_M45 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n if line_instance.length < 500 and line_instance.angle != 0 and RedSide(img_in,line_instance):\n Line_list.append(line_instance)\n Angle_45.append(np.abs(line_instance.angle - 45))\n Angle_M45.append(np.abs(line_instance.angle + 45))\n\n if len(Angle_45) < 2:\n return None\n if len(Angle_M45) < 2:\n return None\n \n # index = np.argsort(Angle_45)\n # line1 = Line_list[index[0]]\n # line2 = Line_list[index[1]]\n\n index = np.argsort(Angle_M45)\n line1 = Line_list[index[0]]\n line2 = Line_list[index[1]]\n\n if line1.angle < -50 or line1.angle > -40 or line2.angle < -50 or line2.angle > -40 :\n return None\n\n #Mark the line we use to determine the center\n # cv2.line(img_in,(line1.line[0],line1.line[1]), (line1.line[2], line1.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line2.line[0],line2.line[1]), (line2.line[2], line2.line[3]),(255, 0, 0), 3)\n\n column45 = int((line1.mid[0] + line2.mid[0])/2)\n row45 = int((line1.mid[1] + line2.mid[1])/2)\n\n # columnM45 = int((line3.mid[0] + line4.mid[0])/2)\n # rowM45 = int((line3.mid[1] + line4.mid[1])/2)\n\n # column = (column45 + columnM45)//2 + 1\n # row = (row45 + rowM45)//2 + 1\n coordinates = (column45, row45)\n\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n # cv2.imshow('detected lines',img_in)\n\n return coordinates\n raise NotImplementedError",
"def closer_ang(x,a,dir=0):\r\n if dir == 0:\r\n return a + smaller_ang(x-a)\r\n elif dir == 1:\r\n return a + (x-a)%(2*pi)\r\n elif dir == -1:\r\n return a + (x-a)%(2*pi) - 2*pi",
"def add_signal_align_predictions(self):\n # TODO call signalalign if not called\n sa_events = self.get_signalalign_events()\n # cut out duplicates\n sa_events = np.unique(sa_events)\n events = self.get_resegment_basecall()\n predictions = match_events_with_signalalign(sa_events=sa_events, event_detections=events)\n # rna reference positions are on 5' edge aka right side of kmer\n if self.rna:\n predictions[\"reference_index\"] -= self.kmer_index\n else:\n predictions[\"reference_index\"] += self.kmer_index\n self.aligned_signal.add_label(predictions, name=\"full_signalalign\", label_type='prediction')\n return True",
"def Spatial_Extension(self, dist, thresh_calc=True):\n if self.halo_name == 'Einasto':\n extension = 0.0397018 * dist ** -0.929435 * self.halo_mass ** 0.309235 *\\\n self.c ** -0.765418\n elif self.halo_name == 'NFW':\n extension = 0.0026794 * dist ** -0.999045 * self.halo_mass ** 0.432234\n else:\n if thresh_calc:\n if 10. ** (self.J(dist, 2.) - self.J_pointlike(dist)) < 0.68:\n return 2.\n if 10. ** (self.J(dist, .1) - self.J_pointlike(dist)) > 0.68:\n return 0.1\n\n theta_tab = np.logspace(np.log10(180. / np.pi * np.arctan(self.max_radius / dist)) - 4,\n np.log10(180. / np.pi * np.arctan(self.max_radius / dist)), 20)\n ang68 = np.zeros(theta_tab.size)\n for i, theta in enumerate(theta_tab):\n ang68[i] = 10. ** self.J(dist, theta) / 10. ** self.J_pointlike(dist) - 0.68\n print np.column_stack((theta_tab, ang68))\n theta_tab = theta_tab[(ang68 < 0.30) & (ang68 > -0.66)]\n ang68 = ang68[(ang68 < 0.30) & (ang68 > -0.66)]\n full_tab = np.logspace(np.log10(theta_tab[0]), np.log10(theta_tab[-1]), 100)\n #print np.column_stack((theta_tab, ang68))\n interp = np.log10(np.abs(interpola(np.log10(full_tab), np.log10(theta_tab), ang68)))\n extension = full_tab[np.argmin(interp)]\n #print extension\n return extension",
"def nearest_test_pulse(self):",
"def fixed_signs(popt1, popt2):\n if np.sign(popt1[0]) != np.sign(popt2[0]):\n print(\"Changing signs ...\")\n if popt2[2] < 0:\n popt2[2] += np.pi\n else:\n popt2[2] -= np.pi\n popt2[0] *= -1\n return popt1, popt2",
"def yield_sign_detection(img_in):\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /90, threshold=30, minLineLength=20, maxLineGap=1)\n\n Line_list_60 = []\n Line_list_M60 = []\n Angle_60 = []\n Angle_M60 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n\n if line_instance.angle > 35 and line_instance.angle < 85: \n # print(line_instance.line, line_instance.angle)\n Angle_60.append(line_instance.length)\n Line_list_60.append(line_instance)\n\n if line_instance.angle > -85 and line_instance.angle < -35: \n # print(line_instance.line, line_instance.angle) \n Angle_M60.append(line_instance.length)\n Line_list_M60.append(line_instance)\n \n index = np.argsort(Angle_60)\n line1 = Line_list_60[index[-1]].line\n # cv2.line(img_in,(line1[0],line1[1]), (line1[2], line1[3]),(255, 0, 0), 3)\n\n index = np.argsort(Angle_M60)\n line3 = Line_list_M60[index[-1]].line\n # cv2.line(img_in,(line3[0],line3[1]), (line3[2], line3[3]),(255, 0, 0), 3)\n\n # cv2.show('test', img_in)\n X_60 = max(line1[0], line1[2])\n X_M60 = min(line3[0], line3[2])\n column = int ((X_60 + X_M60)/2)\n\n left_Y = min(line1[1], line1[3])\n mid_Y_60 = max(line1[1], line1[3])\n mid_Y_M60 = max(line3[1], line3[3])\n right_Y = min(line3[1], line3[3])\n row = int ((left_Y + (mid_Y_60+mid_Y_M60)/2 + right_Y)/3)\n coordinates = (column, row)\n\n pixels = img_in[row, column, :]\n if pixels[0] > 220 and pixels[1] > 220 and pixels[2] > 220 :\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n return coordinates\n else:\n return None\n\n\n raise NotImplementedError",
"def rising_sign(cls, fixed_date):\n i = quotient(float(cls.tropical_longitude(fixed_date)), 30)\n return [1670/1800, 1795/1800, 1935/1800, 1935/1800, 1795/1800, 1670/1800][mod(i, 6)]",
"def return_zeropoint():\n return 22.5",
"def get_angle_sign(self):\n # Assumption: 90deg phase line placed after LEFT antenna\n # Therefore: Voltage > _VOLTAGE_CENTER -> Need to turn \"left\" (counter-clockwise)\n voltage = sum(self._fifo_stack) / len(self._fifo_stack)\n print(f\"Voltage: {voltage}\")\n #return voltage, True # For debugging only\n\n if voltage > self._MAX_EXPECTED_VOLTAGE: # Very close to reference voltage -> no beacon detected\n return None, True\n if voltage < self._VOLTAGE_R_THRESHOLD: # Need to turn right (clockwise)\n confidence = (self._VOLTAGE_R_THRESHOLD - voltage) > self._CONFIDENCE_THRESHOLD_TURN\n return -1, confidence\n elif voltage > self._VOLTAGE_L_THRESHOLD: # Need to turn left (c-clockwise)\n confidence = (voltage - self._VOLTAGE_L_THRESHOLD) > self._CONFIDENCE_THRESHOLD_TURN\n return +1, confidence\n else:\n confidence = min(voltage - self._VOLTAGE_R_THRESHOLD, self._VOLTAGE_L_THRESHOLD - voltage) > self._CONFIDENCE_THRESHOLD_FORWARD\n return 0, confidence",
"def extension(distance, radius):\n return distance - radius",
"def goal(target, prediction):\n return closest_point_on_segment(prediction, target)",
"def onImageEdge(self,tolerance=1):\n # this has to be one to deal with blob library weirdness that goes deep down to opencv\n return ( self.distanceToNearestEdge() <= tolerance )",
"def fix_point_arithmetic(self, value):\n\n self._fixpntar = value == 'true'",
"def set_sign(self):\r\n # sign is just an auxiliary coefficient used in some equations:\r\n if self.k == 1: self.sign = -1\r\n if self.k == 0: self.sign = +1",
"def closest_to(self, a, b):\n diff_a = abs(a.ts - self.ts)\n diff_b = abs(b.ts - self.ts)\n if diff_a < diff_b and diff_a < TIME_THRESHOLD:\n return a\n elif diff_b < TIME_THRESHOLD:\n return b\n return None",
"def construction_sign_detection(img_in):\n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n # cv2.imshow(\"test\", cannyEdges)\n\n lines = cv2.HoughLinesP(cannyEdges, rho=1, theta=np.pi /180, threshold=40, minLineLength=30, maxLineGap=2)\n\n Line_list = []\n Angle_45 = []\n Angle_M45 = []\n\n for line in lines:\n line = line.flatten()\n line_instance = Line(line)\n if line_instance.length < 500 and line_instance.angle != 0 and ConsSide(img_in,line_instance):\n Line_list.append(line_instance) \n # cv2.line(img_in,(line[0],line[1]), (line[2], line[3]),(255, 0, 0), 3)\n Angle_45.append(np.abs(line_instance.angle - 45))\n Angle_M45.append(np.abs(line_instance.angle + 45))\n\n if len(Angle_45) == 0:\n return None\n if len(Angle_M45) == 0:\n return None\n\n index = np.argsort(Angle_45)\n line1 = Line_list[index[0]]\n line2 = Line_list[index[1]]\n\n index = np.argsort(Angle_M45)\n line3 = Line_list[index[0]]\n line4 = Line_list[index[1]]\n\n column45 = int((line1.mid[0] + line2.mid[0])/2)\n row45 = int((line1.mid[1] + line2.mid[1])/2)\n\n columnM45 = int((line3.mid[0] + line4.mid[0])/2)\n rowM45 = int((line3.mid[1] + line4.mid[1])/2)\n\n # print(line1.line, line1.angle, line1.length)\n # print(line3.line, line3.angle, line3.length)\n # cv2.line(img_in,(line1.line[0],line1.line[1]), (line1.line[2], line1.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line2.line[0],line2.line[1]), (line2.line[2], line2.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line3.line[0],line3.line[1]), (line3.line[2], line3.line[3]),(255, 0, 0), 3)\n # cv2.line(img_in,(line4.line[0],line4.line[1]), (line4.line[2], line4.line[3]),(255, 0, 0), 3)\n\n column = (column45 + columnM45)//2 + 1\n row = (row45 + rowM45)//2 + 1\n coordinates = (column, row)\n\n # cv2.circle(img_in, coordinates, 2, (255, 0, 0), 2)\n return coordinates\n raise NotImplementedError",
"def match(self, enc, threshold=None, optimize=False):\n\n\t\tif len(self.encodings) == 0:\n\t\t\t# no encodings yet\n\t\t\treturn -1, 1.0\n\n\t\t# compare enc to known-face-encodings to get all euclidean distances.\n\t\tdistances = np.linalg.norm(self.encodings - enc, axis=1)\n\n\t\t# get the minimum distance\t\t\n\t\tface_index = np.argmin(distances)\n\t\tmin_distance = distances[face_index]\n\n\t\t# optimization if min_distance >= threshold\n\t\tif threshold and min_distance >= threshold:\n\t\t\tif not optimize:\n\t\t\t\treturn -1, min_distance\n\n\t\t\tprint('*** distance > threshold ({} > {})'.format(min_distance, threshold))\n\t\t\ttop_two = np.argsort(distances)[:2]\n\t\t\tidx1 = top_two[0]\n\t\t\tname1 = self.get_name(idx1)\n\t\t\tprint('\\ttop 1: {} - {:.5f}'.format(name1, distances[idx1]))\n\t\t\tidx2 = top_two[1]\n\t\t\tname2 = self.get_name(idx2)\n\t\t\tprint('\\ttop 2: {} - {:.5f}'.format(name2, distances[idx2]))\n\t\t\t\n\t\t\td1 = distances[idx1]\n\t\t\td2 = distances[idx2]\n\n\t\t\t# discard if names differ\n\t\t\tif name1 != name2:\n\t\t\t\tif abs(d1 - d2) < 0.06:\n\t\t\t\t\treturn -1, min_distance\n\t\t\telse: # name1 == name2\n\t\t\t\t# discard if same name but distance differ (2 after point)\n\t\t\t\tif int(d1 * 100) != int(d2 * 100):\n\t\t\t\t\treturn -1, min_distance\n\t\t\t\n\t\treturn face_index, min_distance"
]
| [
"0.5513667",
"0.5390601",
"0.5299022",
"0.52472854",
"0.5236474",
"0.5220663",
"0.5186746",
"0.5184309",
"0.5168466",
"0.5166982",
"0.5121159",
"0.50838625",
"0.50461096",
"0.5044699",
"0.50344473",
"0.5026088",
"0.501678",
"0.501594",
"0.50108033",
"0.5007771",
"0.49960783",
"0.49668372",
"0.49531025",
"0.4945526",
"0.49380946",
"0.48959574",
"0.4894266",
"0.4877639",
"0.48722327",
"0.48602694"
]
| 0.60529 | 0 |
(DEPRECATED) Generate an HTML5 appcache. Should be run after `wq optimize`, as some of the manifest entries will be inferred from the build log. Note that browser vendors are deprecating support for Application Cache in favor of Service Workers. The `wq appcache` command will be removed in wq.app 2.0. Use the `wq serviceworker` command instead. | def appcache(config, version):
click.echo("Warning: Application Cache is deprecated by browser vendors.")
time.sleep(10)
if 'appcache' not in config:
raise click.UsageError(
"appcache section not found in %s" % config.filename
)
if 'optimize' not in config:
raise click.UsageError(
"optimize section not found in %s" % config.filename
)
conf = config['appcache']
indir = config['optimize']['appDir']
outdir = config['optimize']['dir']
# Open output files
s_acpath = indir + '/' + conf['name']
b_acpath = outdir + '/' + conf['name']
s_ac = open(s_acpath, 'w')
b_ac = open(b_acpath, 'w')
# Start in source directory - read @imports from main CSS file
s_css = [conf['css']]
s_css.extend(_parse_css_urls(
indir, conf['css'], conf.get('css-ignore', None)
))
# Built CSS file contains image URLs from the @import-ed CSS files above
images = _parse_css_urls(outdir, conf['css'], conf.get('css-ignore', None))
# build.txt contains a list of built javascript files and their sources
s_js, b_js = _parse_js_buildfile(outdir + '/build.txt')
b_css = [conf['css']]
# Collect path names and create appcaches
cache = list(conf['cache'])
source_cache = cache + s_js + s_css + images
built_cache = cache + b_js + b_css + images
network = list(conf['network'])
fallback = list(conf['fallback'])
s_ac.write(APPCACHE_TMPL % {
'version': '%s_dev' % version,
'cache': '\n'.join(source_cache),
'network': '\n'.join(network),
'fallback': '\n'.join(fallback)
})
b_ac.write(APPCACHE_TMPL % {
'version': version,
'cache': '\n'.join(built_cache),
'network': '\n'.join(network),
'fallback': '\n'.join(fallback)
})
s_ac.close()
b_ac.close()
print("%s: %s items" % (s_acpath, len(source_cache + network + fallback)))
print("%s: %s items" % (b_acpath, len(built_cache + network + fallback))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def offline_command(args):\n\n list_local_files()\n\n if os.path.exists(MANIFEST_FILENAME) and not options.force:\n print \"%s already exists (use -f to overwrite).\" % MANIFEST_FILENAME\n\n if not os.path.exists(MANIFEST_FILENAME) or options.force:\n print \"Creating file %s.\" % MANIFEST_FILENAME\n default_manifest = (\n \"CACHE MANIFEST\\n\"\n \"# Cache files for offline access - see http://diveintohtml5.org/offline.html\\n\"\n \"\\n\"\n \"/lib/beta/js/pf-client.min.js\\n\"\n \"/lib/beta/css/client.css\\n\"\n \"/static/images/appbar/green-left.png\\n\"\n \"/static/images/appbar/green-center.png\\n\"\n \"/static/images/appbar/green-right.png\\n\"\n \"/static/images/appbar/down.png\\n\"\n \"/static/images/appbar/logo.png\\n\"\n \"\\n\"\n \"NETWORK:\\n\"\n \"*\\n\\n\"\n )\n manifest = open(MANIFEST_FILENAME, 'w')\n manifest.write(default_manifest + AUTOGEN_LINE)\n manifest.close()\n print default_manifest + AUTOGEN_LINE\n\n update_manifest(True)",
"def get_app_cache_dir(appname, *args):\n import ubelt as ub\n ub.schedule_deprecation(\n modname='ubelt', name='get_app_cache_dir and ensure_app_cache_dir', type='function',\n migration='use ubelt.Path.appdir(type=\"cache\") instead',\n deprecate='1.2.0', error='2.0.0', remove='2.1.0')\n dpath = join(platform_cache_dir(), appname, *args)\n return dpath",
"def _find_cache():\n app = _find_app()\n return app.cache",
"def manifest(branch):\n env.timestamp = str(int(time.time()))\n env.branch = branch\n manifest = 'app/cache.manifest'\n env.rev = local('git log -1 --format=format:%%H %s@{0}' % env.branch,\n capture=True)\n with open(manifest, 'w') as fh:\n fh.write('CACHE MANIFEST\\n\\n')\n fh.write('# {0}\\n'.format(env.timestamp))\n fh.write('# {0}\\n'.format(env.branch))\n fh.write('# {0}\\n\\n'.format(env.rev))\n fh.write('CACHE:\\n')\n for root, dirs, files in os.walk(BUILD_DIR):\n for filename in files:\n path = os.path.join(root, filename)\n if filename[0] != '.':\n if path != manifest:\n rel_path = os.path.relpath(path, BUILD_DIR)\n fh.write('/static/{0}\\n'.format(rel_path))\n fh.write('\\n\\nNETWORK:\\n')\n for url in cache_exempt:\n fh.write('{0}\\n'.format(url))\n local(\"cat %s\" % manifest)",
"def enable() -> dict:\n return {\"method\": \"ApplicationCache.enable\", \"params\": {}}",
"def cache(cachedir=None):\n if cachedir is not None:\n os.environ['VIPY_CACHE'] = remkdir(cachedir)\n GLOBAL['CACHE'] = cachedir\n return os.environ['VIPY_CACHE'] if 'VIPY_CACHE' in os.environ else None",
"def cacheOptionsForBuild(self):",
"def handle_no_cache(context):\n logger.error(\n _(\"Could not locate wily cache, the cache is required to provide insights.\")\n )\n p = input(_(\"Do you want to run setup and index your project now? [y/N]\"))\n if p.lower() != \"y\":\n exit(1)\n else:\n revisions = input(_(\"How many previous git revisions do you want to index? : \"))\n revisions = int(revisions)\n path = input(_(\"Path to your source files; comma-separated for multiple: \"))\n paths = path.split(\",\")\n context.invoke(build, max_revisions=revisions, targets=paths, operators=None)",
"def get_application_cache_for_frame(\n frameId: page.FrameId,\n) -> Generator[dict, dict, ApplicationCache]:\n response = yield {\n \"method\": \"ApplicationCache.getApplicationCacheForFrame\",\n \"params\": {\"frameId\": str(frameId)},\n }\n return ApplicationCache.from_json(response[\"applicationCache\"])",
"def write_cache(feed):\n if ARGV.get(NOCACHE_OPT):\n return\n CACHE['feed'] = feed\n CACHE['last-request'] = str(time.time())\n CACHE['max-age'] = feed.headers['Cache-Control'].split('=')[1]\n save_datfile()",
"def register_caching(app):\n if 'DEBUG' in app.config and app.config['DEBUG']:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response",
"def app_nocache(app):\n @app.after_request\n def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r",
"def cache_file(cache_key):\n\n return MASTOOLS_DIR / f\"{cache_key}_cache.json\"",
"def get_esi_app():\n if 'esi_app' in cache:\n return cache.get('esi_app')\n\n esi_app = EsiApp(cache_time=86400).get_latest_swagger\n\n try:\n cache.set('esi_app', esi_app, timeout=86400)\n except Exception:\n logger.exception(\"Failed to store ESI Application in cache\")\n\n return esi_app",
"def prepare_app():\n application = service.Application('buildbot-worker')\n master = (GATEWAY\n if conf.Washer.FORCE_GATEWAY\n else conf.Buildbot.BUILDMASTER)\n worker = Worker(master,\n conf.Buildbot.BUILDMASTER_PORT,\n conf.Buildbot.WORKERNAME,\n conf.Buildbot.WORKERPASS,\n conf.Buildbot.BASEDIR,\n conf.Buildbot.KEEPALIVE,\n umask=None,\n maxdelay=conf.Buildbot.MAXDELAY,\n numcpus=None,\n allow_shutdown=None,\n maxRetries=None)\n worker.setServiceParent(application)\n\n class InlineApplication(UnixApplicationRunner):\n def createOrGetApplication(self):\n nonlocal application\n return application\n\n options = ServerOptions()\n options[\"nodaemon\"] = not conf.Washer.DAEMON\n options[\"logfile\"] = conf.Washer.LOG_FILE\n\n commands.register()\n\n return InlineApplication(options)",
"def setup_cache_busting(app, bust_extensions=True):\n app.url_defaults(cache_busting_url_defaults_factory(app, bust_extensions))",
"def dynCache(*args, **kwargs)->None:\n pass",
"def CleanCachedResult(g_params): # {{{\n bsname = \"clean_cached_result\"\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n path_tmp = os.path.join(g_params['path_static'], \"tmp\")\n name_server = g_params['name_server']\n if 'MAX_KEEP_DAYS_CACHE' in g_params:\n MAX_KEEP_DAYS_CACHE = g_params['MAX_KEEP_DAYS_CACHE']\n else:\n MAX_KEEP_DAYS_CACHE = 480\n binpath_script = os.path.join(g_params['webserver_root'], \"env\", \"bin\")\n py_scriptfile = os.path.join(binpath_script, f\"{bsname}.py\")\n jsonfile = os.path.join(path_tmp, f\"{bsname}.json\")\n myfunc.WriteFile(json.dumps(g_params, sort_keys=True), jsonfile, \"w\")\n lockname = f\"{bsname}.lock\"\n lock_file = os.path.join(g_params['path_log'], lockname)\n webcom.loginfo(f\"Clean cached results older than {MAX_KEEP_DAYS_CACHE} days\",\n gen_logfile)\n cmd = [\"python\", py_scriptfile, \"-i\", jsonfile,\n \"-max-keep-day\", f\"{MAX_KEEP_DAYS_CACHE}\"]\n cmdline = \" \".join(cmd)\n if ('CLEAN_CACHED_RESULT_IN_QD' in g_params\n and g_params['CLEAN_CACHED_RESULT_IN_QD']):\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n elif not os.path.exists(lock_file):\n bash_scriptfile = f\"{path_tmp}/{bsname}-{name_server}.sh\"\n code_str_list = []\n code_str_list.append(\"#!/bin/bash\")\n code_str_list.append(cmdline)\n code = \"\\n\".join(code_str_list)\n myfunc.WriteFile(code, bash_scriptfile, mode=\"w\", isFlush=True)\n os.chmod(bash_scriptfile, 0o755)\n os.chdir(path_tmp)\n cmd = ['sbatch', bash_scriptfile]\n cmdline = \" \".join(cmd)\n verbose = False\n if 'DEBUG' in g_params and g_params['DEBUG']:\n verbose = True\n webcom.loginfo(f\"Run cmdline: {cmdline}\", gen_logfile)\n webcom.RunCmd(cmd, gen_logfile, gen_errfile, verbose)",
"def update_manifest(explicit=False):\n if not os.path.exists(MANIFEST_FILENAME):\n return\n\n manifest_file = open(MANIFEST_FILENAME, 'r')\n parts = manifest_file.read().partition('\\n' + AUTOGEN_LINE)\n manifest_file.close()\n if parts[1] == '':\n if explicit:\n print \"%s has no AUTOGENERATE section\" % MANIFEST_FILENAME\n return\n\n commands = [line for line in parts[2].split('\\n') if line.startswith('#!')]\n excludes = []\n for command in commands:\n match = re.match(r'#!\\s*EXCLUDE:\\s*(.*)\\s*$', command)\n if options.verbose:\n print \"Excluding paths beginning with '%s'\" % match.group(1)\n if match:\n excludes.extend(re.split(r\",\\s*\", match.group(1)))\n\n cached_files = []\n hash_lines = []\n\n paths = options.local_listing.keys()\n paths.sort()\n size = 0\n for path in paths:\n info = options.local_listing[path]\n if path == MANIFEST_FILENAME or path == META_FILENAME or \\\n info['size'] > MAX_FILE_SIZE or \\\n is_data_path(path) or \\\n prefix_match(excludes, path):\n continue\n cached_files.append(path)\n hash_lines.append(\"%s=%s\" % (path, info['sha1']))\n size += info['size']\n\n manifest_lines = [parts[0], AUTOGEN_LINE, AUTOGEN_EXPLAIN]\n manifest_lines.extend(commands)\n manifest_lines.extend((\n \"# TOTAL FILES: %s (%s bytes)\" % (intcomma(len(cached_files)), intcomma(size)),\n \"# SIGNATURE: %s\" % hashlib.sha1('\\n'.join(hash_lines)).hexdigest(),\n \"CACHE:\",\n ))\n manifest_lines.extend(cached_files)\n\n manifest_file = open(MANIFEST_FILENAME, 'w')\n manifest_file.write('\\n'.join(manifest_lines) + '\\n')\n manifest_file.close()\n\n # Make sure the listing for the manifest file is up to date\n # so it will be uploaded if changed.\n update_local_listing(MANIFEST_FILENAME)",
"def app():\n # temp directory for testing\n path = tempfile.mkdtemp()\n\n # create the app with test config\n test_config = {\n \"TESTING\": True,\n \"DATABASE\": \"db/live.db\",\n \"SECRET_KEY\": \"testing\",\n \"FLASK_INSTANCE_FOLDER\": path,\n \"CACHE\": True,\n \"CACHE_HOST\": \"localhost\",\n \"CACHE_PORT\": \"6379\",\n \"CACHE_STATUS\": \"COLD\"}\n\n app = create_app(test_config)\n\n yield app\n\n # flush cache after testing\n host = test_config[\"CACHE_HOST\"]\n port = test_config[\"CACHE_PORT\"]\n rcxn = redis.Redis(host=host, port=port, db=0)\n\n # clean up temp folder\n shutil.rmtree(path)",
"def cache_handler(event, context):\n events.cache()",
"def recache(self, phys):\r\n self.myOutputCache.initialize(phys.app)\r\n\r\n for output in self.myOutputs:\r\n output.initialize(phys.app)\r\n output.run(1)",
"def hotdeploy_noreq():\n _run_deploy(do_update_requirements=False)\n collectstatic()\n restart()",
"def default_cache_dir() -> str:\n running_on_colab = 'google.colab' in sys.modules\n if running_on_colab:\n base_dir = '/tmp'\n else:\n base_dir = os.path.expanduser('~')\n cache_dir = os.path.join(base_dir, '.cache/fedjax')\n return cache_dir",
"def do_cache(*args, **kws):\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')",
"def use_cached_files(self, cache_key):\r\n pass",
"def run_cache_dir(args):\n # Launch the Dash server.\n logger.info(\"Starting Dash web server on %s:%d\", args.host, args.port)\n if settings.CACHE_TYPE == \"filesystem\" and not settings.CACHE_DIR:\n with tempfile.TemporaryDirectory(prefix=\"scelvis.cache.\") as tmpdir:\n logger.info(\"Using cache directory %s\", tmpdir)\n settings.CACHE_DIR = tmpdir\n run_upload_dir(args)\n else:\n run_upload_dir(args)\n logger.info(\"Web server stopped. Have a nice day!\")",
"def app():\n return aplicattion",
"def generate_requirements(output_path=None):\n from django.conf import settings\n reqs = set()\n \n for app in settings.INSTALLED_APPS:\n if app in mapping.keys():\n reqs |= set(mapping[app])\n if output_path is None:\n print \"--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/\"\n for item in reqs:\n print item\n else:\n try:\n out_file = open(output_path, 'w')\n out_file.write(\"--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/\\n\")\n for item in reqs:\n out_file.write(\"%s\\n\" % item)\n finally:\n out_file.close()",
"def build_finished(app, exception):\n if app.config.offline_skin_js_path is not None:\n copy_static_entry(path.join(app.builder.srcdir, app.config.offline_skin_js_path), path.join(app.builder.outdir, '_static'), app.builder)\n if app.config.offline_wavedrom_js_path is not None:\n copy_static_entry(path.join(app.builder.srcdir, app.config.offline_wavedrom_js_path), path.join(app.builder.outdir, '_static'), app.builder)"
]
| [
"0.6182627",
"0.6134158",
"0.60425276",
"0.5894826",
"0.5606136",
"0.5565214",
"0.552516",
"0.5501652",
"0.54487354",
"0.538457",
"0.5367333",
"0.5328549",
"0.5269285",
"0.5257637",
"0.51493615",
"0.51167154",
"0.5082973",
"0.50464684",
"0.5039921",
"0.4998034",
"0.49817517",
"0.49816522",
"0.49727747",
"0.49680978",
"0.49658042",
"0.49614143",
"0.49437323",
"0.49418086",
"0.49356169",
"0.49073485"
]
| 0.7762304 | 0 |
Generate a dataframe containing the covariate X and the observations Y. The X's are generated uniformly over each of the supplied segments. | def generate_data(func, points, seed=0):
np.random.seed(seed)
data = []
for segment in points:
x = np.linspace(*segment["xlim"], num=segment["n_points"])
distribution = func(x)
# Generate observations
y = distribution.rvs()
df = pd.DataFrame({"x": x, "y": y})
data.append(df)
return pd.concat(data, ignore_index=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_training_data_2D():\n c11 = np.random.uniform(0.05, 1.50, 100)\n c12 = np.random.uniform(-1.50, 1.50, 100)\n c21 = np.random.uniform(-1.50, -0.05, 100)\n c22 = np.random.uniform(-1.50, 1.50, 100)\n c1 = np.array([[i, j] for i, j in zip(c11, c12)])\n c2 = np.array([[i, j] for i, j in zip(c21, c22)])\n\n points = plt.figure()\n plt.plot(c1[:, 0], c1[:, 1], 'o', c2[:, 0], c2[:, 1], '*')\n plt.show()\n plt.close()\n\n return c1, c2",
"def from_dataframe(df):\n X = sm.add_constant(np.array(df['x']))\n y = np.array(df['y']).reshape(-1,1)\n return y, X",
"def prepare_covariates(self, nodes):\n size = len(nodes)\n X = np.zeros((size, 2))\n print(\"Indiv shape: \", X.shape)\n return np.array(X)",
"def cdf(self, points):\n if self._y_cdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_cdf(y)\n\n # select which x quantile curve to use.\n x_curve = (y_out - self.y_min) * self.y_res / (self.y_max - self.y_min)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_cdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )",
"def _(x: Iterable, y: Iterable, ddof: int = 1) -> DataFrame:\n # ddof: numpy v1.5+\n return numpy.cov(x, y, ddof=ddof)[0][1]",
"def construct_df(t,y):\n\n df = np.zeros((3,3))\n\n df[0][0] = 77.27*(1.0 - y(1) -2.*8.375e-6*y(0))\n df[0][1] = 77.27*(1.0 -y(0) )\n df[0][2] = 0.0;\n df[1][0] = -1.0/77.27;\n df[1][1] = (-1.0/77.27)*(1.0+y(0))\n df[1][2] = 1.0/77.27\n df[2][0] = 0.161\n df[2][1] = 0.0\n df[2][2] = -0.161\n\n return df",
"def corr_vars( start=1, stop=10, step=1, mu=0, sigma=3, func=lambda x: x ):\n \n # Generate x\n x = np.arange(start, stop, step) \n \n # Generate random noise\n e = np.random.normal(mu, sigma, x.size)\n \n # Generate y values as y = func(x) + e\n y = np.zeros(x.size)\n \n for ind in range(x.size):\n y[ind] = func(x[ind]) + e[ind]\n \n return (x,y)",
"def get_covariates_df(dataset_name: str) -> pd.DataFrame:\n path = Path(dataset_name) / COVARIATES_FILE\n return get_dataframe(path)",
"def makeFromCoorinates(ncols, nrows, frame_width, frame_height):\n\n dx = frame_width/float(ncols)\n dy = frame_height/float(nrows)\n out = []\n x = int(round(dx/2.0))\n\n for c in xrange(ncols):\n y = int(round(dy/2.0))\n for r in xrange(nrows):\n out.append((int(round(x)),int(round(y))))\n y += dy\n\n x += dx\n return out",
"def generate_complete_pairwise_dataset(X, Y):\n try:\n n_instances, n_objects, n_features = X.shape\n Y = Y.astype(int)\n Y -= np.min(Y)\n orderings = ranking_ordering_conversion(Y)\n x_sorted = [X[i, orderings[i], :] for i in range(n_instances)]\n del orderings\n except ValueError:\n # TODO Add the code to change the rankings to orderings and sort X according to that\n logger = logging.getLogger(\"generate_complete_pairwise_dataset\")\n logger.error(\"Value Error: {}, {} \".format(X[0], Y[0]))\n x_sorted = X\n del Y\n y_double = []\n x_train1 = []\n x_train2 = []\n y_single = []\n for features in x_sorted:\n x1, x2, y1, y2 = generate_pairwise_instances(features)\n x_train1.extend(x1)\n x_train2.extend(x2)\n y_double.extend(y1)\n y_single.extend(y2)\n x_train1 = np.array(x_train1)\n x_train2 = np.array(x_train2)\n x_train = x_train1 - x_train2\n y_double = np.array(y_double)\n y_single = np.array(y_single)\n return x_train, x_train1, x_train2, y_double, y_single",
"def create_continuous_data(n_samples, n_var=10, n_dependent=0, pos_ratio=0, noise_ratio=0, n_cluster=1, mean=None, cov=None, random_state=None):\n\n if random_state is not None: np.random.seed(random_state)\n\n n_samples = n_samples//n_cluster\n X_cluster, y_cluster, mean0_cluster, mean1_cluster, cov0_cluster, cov1_cluster = [], [], [], [], [], []\n\n for cluster_idx in range(n_cluster):\n # Group indicator\n #group = sp.binom.rvs(p=0.25, n=1, size=N\n n_neg = int(n_samples*(1-pos_ratio))\n n_pos = n_samples-n_neg\n y_cluster.append(np.concatenate([np.zeros(n_neg), np.ones(n_pos)]))\n\n idx_dependent = n_var - n_dependent\n\n if mean is None:\n basic_mean = 0 #np.random.uniform(size=no_var)\n mean0 = np.random.normal(loc=basic_mean, scale=1, size=n_var)\n mean1 = np.random.normal(loc=basic_mean, scale=1, size=n_var)\n else:\n mean0 = mean[cluster_idx][0]\n mean1 = mean[cluster_idx][1]\n \n\n # # Noise are variables with same distribution in majority and minority class\n # if noise_ratio != 0:\n # n_noise = int(noise_ratio*n_var)\n # noise_idx = n_var - n_noise\n # X_noise = sp.multivariate_normal.rvs(mean=mean0[noise_idx:], cov=cov0[noise_idx:,noise_idx:],\n # size=n_samples).reshape([n_samples,-1])\n \n cov0, cov1 = None, None\n\n X1 = []\n X0 = []\n # Independent variables\n if n_var-n_dependent > 0:\n X1.append(sp.norm.rvs(loc=mean1[:idx_dependent], scale=1, size=[n_pos, n_var-n_dependent]) )\n X0.append(sp.norm.rvs(loc=mean0[:idx_dependent], scale=1, size=[n_neg, n_var-n_dependent]) )\n\n # Dependent variables\n if n_dependent>0:\n if cov is None:\n cov0 = sp.invwishart.rvs(df=n_var*1, scale=np.eye(n_dependent)*1)\n cov1 = sp.invwishart.rvs(df=n_var*1, scale=np.eye(n_dependent)*1)\n else:\n cov0 = cov[cluster_idx][0]\n cov1 = cov[cluster_idx][1]\n\n X1.append( sp.multivariate_normal.rvs(mean=mean1[idx_dependent:],\n cov= cov1, size=n_pos)\n )\n X0.append( sp.multivariate_normal.rvs(mean=mean0[idx_dependent:],\n cov= cov0, size=n_neg)\n )\n \n X0 = np.hstack([*X0])\n X1 = np.hstack([*X1])\n X_cluster.append(np.vstack([X0, X1]))\n #X = np.hstack([X, X_noise])\n\n mean0_cluster.append(mean0)\n mean1_cluster.append(mean1)\n cov0_cluster.append(cov0)\n cov1_cluster.append(cov1)\n\n if n_cluster == 1:\n X = X_cluster[0]\n y = y_cluster[0]\n else:\n X = np.vstack(X_cluster)\n y = np.hstack(y_cluster)\n\n #return {\"X\":X, \"y\":y,\"mean0\":mean0,\"mean1\":mean1, \"cov0\":cov0, \"cov1\":cov1}\n return X, y, mean0_cluster, mean1_cluster, cov0_cluster, cov1_cluster",
"def gen_num_seq(nums):\n X = np.zeros([nums, 10, 20], dtype=float)\n y = np.zeros([nums, 10, 20], dtype=float)\n for i in range(nums):\n start = np.random.randint(0, 10)\n num_seq = np.arange(start, start + 10)\n X[i] = to_categorical(num_seq, n_col=20)\n y[i] = np.roll(X[i], -1, axis=0)\n y[:, -1, 1] = 1 # Mark endpoint as 1\n return X, y",
"def continous_correlation(independent, dependent):\n\n # Init of resulting DataFrame\n corr_data = pd.DataFrame()\n for indep_var in independent.columns:\n # Drop nans\n combined = pd.concat([independent[indep_var], dependent], axis=1).dropna()\n # Write down sample size\n corr_data.loc[indep_var, \"sample_size\"] = len(combined)\n # Calculate correlations with scipy methods\n corr_data.loc[indep_var, 'pearson_rho'], corr_data.loc[indep_var, 'pearson'] = pearsonr(combined[indep_var],\n combined[\n dependent.name])\n corr, corr_data.loc[indep_var, 'kendall'] = kendalltau(combined[indep_var], combined[dependent.name])\n corr_data.loc[indep_var, 'log_pearson_rho'], corr_data.loc[indep_var, 'log_pearson'] = nan, nan\n corr_data.loc[indep_var, f'log({indep_var})_{dependent.name}_rho'], \\\n corr_data.loc[indep_var, f'log({indep_var})_{dependent.name}_pearson'] = nan, nan\n return corr_data",
"def generate_correlation_map(x, y):\n\tmu_x = x.mean(1)\n\tmu_y = y.mean(1)\n\tn = x.shape[1]\n\tif n != y.shape[1]:\n\t\traise ValueError('x and y must ' +\n\t\t\t\t\t\t 'have the same number of timepoints.')\n\ts_x = x.std(1, ddof=n - 1)\n\ts_y = y.std(1, ddof=n - 1)\n\tcov = np.dot(x,\n\t\t\t\t y.T) - n * np.dot(mu_x[:, np.newaxis],\n\t\t\t\t\t\t\t\t mu_y[np.newaxis, :])\n\treturn cov / np.dot(s_x[:, np.newaxis], s_y[np.newaxis, :])",
"def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments",
"def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments",
"def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments",
"def make_segments(x, y):\n\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n return segments",
"def generate_train_data(comps: List[pd.DataFrame], concen_upper_bound=1000, num_per_combination=1000):\n cps = [to_int_index(c) for c in comps]\n cps = [zero_end_interpolation(c) for c in comps]\n cps = alignment(cps)\n cps = [scale_dataframe(c) for c in cps]\n\n samples = []\n for n_class in range(1, len(cps) + 1):\n comps_roller = ComponentRoller(cps, n_class)\n concen_roller = ConcentrationRoller(1, concen_upper_bound, n_class)\n for i in range(num_per_combination):\n picked_comps, label = comps_roller.roll()\n concen_vector = concen_roller.roll_unique(label)\n the_sample = pd.Series(name=label, data=np.sum(picked_comps * concen_vector, axis=1))\n samples.append(the_sample)\n if i % 100 == 0:\n print('组合数{}: 第{}个样本 --- 标签{},浓度比{}'.format(n_class, i, label, concen_vector))\n df = pd.concat(samples, axis=1)\n return df.values.T, np.array(_to_vectors(df.columns.tolist()))",
"def generate_data(self, s=0):\n np.random.seed(s)\n \n L_b = np.linalg.cholesky(self.cov_b)\n L_t = np.linalg.cholesky(self.cov_t)\n \n self.x_benchmark = np.dot(L_b,(np.random.randn(self.n_points,2) + self.mean_b).T).T\n self.x_trial = np.dot(L_t,(np.random.randn(self.n_points,2) + self.mean_t).T).T",
"def convergence_dataframe(self):\n round_is = range(self.run.n_rounds)\n substeps = self.run.params.subbuild_uptos\n coords = np.array(list(itertools.product(round_is, substeps)))\n steps = self.run.params.spt * coords[:, 0] + coords[:, 1]\n\n conv_vals = np.asarray(\n [[c.converged for c in cs] for cs in self.load_convergences()]\n ).reshape(-1)\n\n df = pd.DataFrame(dict(\n round_i=coords[:, 0], steps=steps, converged=conv_vals\n )).set_index('steps')\n\n return df",
"def get_covariate_pairs(self):\n if self.covariate_field not in self.matrix.obs.columns:\n raise ValueError(\"Covariate value not available in dataset\")\n from itertools import product\n covariate = set(self.matrix.obs[self.covariate_field])\n return product(covariate, covariate)",
"def create_sample_dataframe():\n ax_readings = []\n ay_readings = []\n az_readings = []\n mx_readings = []\n my_readings = []\n mz_readings = []\n gx_readings = []\n gy_readings = []\n gz_readings = []\n activity_list = [LABELS_NAMES[0] for _ in range(SEGMENT_TIME_SIZE)]\n\n\n for _ in range(SEGMENT_TIME_SIZE):\n ax_readings.append(random.uniform(-10,10))\n ay_readings.append(random.uniform(-10,10))\n az_readings.append(random.uniform(-10,10))\n mx_readings.append(random.uniform(-10,10))\n my_readings.append(random.uniform(-10,10))\n mz_readings.append(random.uniform(-10,10))\n gx_readings.append(random.uniform(-10,10))\n gy_readings.append(random.uniform(-10,10))\n gz_readings.append(random.uniform(-10,10))\n\n data_dict = {\n COLUMN_NAMES[0]: activity_list, COLUMN_NAMES[1]: ax_readings,\n COLUMN_NAMES[2]: ay_readings, COLUMN_NAMES[3]: az_readings,\n COLUMN_NAMES[4]: gx_readings, COLUMN_NAMES[5]: gy_readings,\n COLUMN_NAMES[6]: gz_readings, COLUMN_NAMES[7]: mx_readings,\n COLUMN_NAMES[8]: my_readings, COLUMN_NAMES[9]: mz_readings\n }\n\n df = pd.DataFrame(data=data_dict)\n return df",
"def icdf(self, points):\n if self._y_icdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_icdf(y)\n\n # select which x quantile curve to use.\n x_curve = y_out * (self.y_res - 1)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_icdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )",
"def sim_reg_data(xmin, xmax, ymin, ymax, n, sd):\n import pandas as pd\n import numpy.random as nr\n \n w = nr.normal(loc = 0, scale = sd, size = n)\n \n xstep = float(xmax - xmin)/float(n - 1)\n ystep = float(ymax - ymin)/float(n - 1)\n \n x = []\n xcur = xmin\n y = []\n ycur = ymin\n for i in range(n):\n x.append(xcur)\n xcur += xstep\n y.append(ycur + w[i])\n ycur += ystep\n \n out = pd.DataFrame([x, y]).transpose()\n# print (out)\n out.columns = ['x', 'y']\n# print (out)\n return out",
"def runcircos(self):\n pd.read_csv(self.cns, sep=\"\\t\")[\n [\"chromosome\", \"start\", \"end\", \"tcn\"]\n ].rename({\"chromosome\": \"chrm\", \"tcn\": \"cns\"}, axis=1).to_csv(\n self.segs, index=None\n )\n\n passed_svs = [\n sv\n for sv in self.svs.values()\n ]\n circos_sv_file = os.path.join(\n self.out_dir, \"circos_svs.tsv\"\n )\n circos_df = pd.DataFrame(\n [\n (\"chr\" + sv.chr1, sv.pos1, sv.pos1, \"chr\" + sv.chr2, sv.pos2, sv.pos2)\n for sv in passed_svs\n ],\n columns=[\n \"Chromosome\",\n \"chromStart\",\n \"chromEnd\",\n \"Chromosome.1\",\n \"chromStart.1\",\n \"chromEnd.1\",\n ],\n )\n circos_df.to_csv(circos_sv_file, index=None)",
"def generate_2d_cs_plot(data, atom1=\"CA\", atom2=\"CB\", resid_li=\"\", from_frame='all', to_frame='all', soluplots=False):\n\n #Generate an empty dataframe and pop out the mean and the deviation from the CS/frame pickle\n result = pd.DataFrame()\n data.set_index(['resSeq','name'], inplace=True)\n data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)\n data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)\n\n # If frames were selected, drop also all columns were\n if from_frame != 'all':\n frames = ['resname', 'resname_s']+[ str(f) for f in range(int(from_frame), int(to_frame)+1)]\n data = data.filter(frames, axis=1)\n\n # Take all residues if none were submitted\n if not resid_li:\n resid_li = {index[0] for index in data.index.values}\n\n # Sort residue ids numerically\n resid_li = [ str(i) for i in sorted([int(x) for x in resid_li]) ]\n\n #loop over the residues selcted by the user\n for item in resid_li:\n try: \n df1 = data.loc[int(item),atom1] #select atom 1 the row from the dataframe which matches the inputs from the user\n df2 = data.loc[int(item),atom2] #select atom 2 the row from the dataframe which matches the inputs from the user\n resname = data.loc[[int(item),'CA'], 'resname'].unique()[0]\n # Option to make \"Solution NMR predictions\": make a distribution out of average and variance of our cs values, and plot it\n if soluplots:\n np1=np.array(df1[2:])\n np2=np.array(df2[2:])\n dist1 = np.random.normal(np1.mean(), np1.std()/10, len(np1))\n dist2 = np.random.normal(np2.mean(), np2.std()/10, len(np2))\n df_e1 = pd.DataFrame(data=dist1, columns=[atom1]) #Build the plotting dataframe\n df_e2 = pd.DataFrame(data=dist2, columns=[atom2])\n else:\n df_e1=df1.to_frame(name=atom1)\n df_e2=df2.to_frame(name=atom2)\n except Exception as e:\n continue\n temp_df = pd.concat([df_e1,df_e2], axis=1, join=\"inner\") #concatenate all the residues dataframe into a bigger one\n temp_df[\"IDs\"]=str(item)+' '+resname #give them different ids to have differnete colors in the plot\n result = result.append(temp_df) #build the final DF\n\n # Put atoms in avail_res_atoms dictionary (which I dont remember exactly what does but seems important)\n avail_res_atoms = {\n \"%s.%s\"%(item,atom1) : {\"display\":False,\"color\":\"#FF0000\"},\n \"%s.%s\"%(item,atom2) : {\"display\":False,\"color\":\"#FF0000\"},\n }\n\n # If there are no atoms matching this selection\n if result.empty: \n return('', ['error'])\n\n #plot\n fig = px.density_contour(result, x=atom1, y=atom2, color=\"IDs\", labels={\n atom1 : \"Chemical shift for \"+str(atom1)+\" (ppm)\",\n atom2 : \"Chemical shift for \"+str(atom2)+\" (ppm)\",\n \"IDs\": \"Residue ID\"},\n color_discrete_sequence=px.colors.qualitative.Dark24)\n # Reverse axis\n fig['layout']['yaxis']['autorange'] = \"reversed\"\n fig['layout']['xaxis']['autorange'] = \"reversed\"\n\n\n fig.update_layout(legend=dict(\n itemsizing='constant',\n itemclick='toggleothers',\n itemdoubleclick='toggle',\n ))\n\n #Skip hover info when scrolling through the plot\n fig.update_traces(hoverinfo='skip', hovertemplate=None)\n\n # Return plot\n p = pt.offline.plot(fig, include_plotlyjs=False, output_type='div')\n return(p,avail_res_atoms)",
"def X(self) -> np.ndarray:\n cols = [col for col in self._obj.names.covariates if col in self._obj.columns]\n if not cols:\n raise KeyError(\"No known covariates in CausalFrame\")\n return self._obj[cols].to_numpy()",
"def create_multivariat(mean, cov, n,show):\n\tif n==1:\n\t\tx=np.random.default_rng().multivariate_normal(mean, cov)\n\telse:\n\t\tx=np.random.default_rng().multivariate_normal(mean, cov, n)\n\tif show:\n\t \tdf=pd.DataFrame({'x':x[:,0],'y':x[:,1]})\n\t \tsns.jointplot(data=df,x='x',y='y')\n\treturn x",
"def get_cv(X, y):\n# date = pd.to_datetime(X['date'])\n# n_days = (date.max() - date.min()).days\n n_splits = 8\n fold_length = X.shape[0]/n_splits\n arr = np.array(list(range(X.shape[0])))\n np.random.shuffle(arr)\n for i in range(n_splits):\n test = arr[int(i*fold_length):int((i+1)*fold_length)]\n train = np.concatenate((arr[:int(i*fold_length)],\n arr[int((i+1)*fold_length):]))\n yield(arr[train], arr[test])"
]
| [
"0.5358331",
"0.53441787",
"0.51789",
"0.5127328",
"0.5110928",
"0.5071385",
"0.5048293",
"0.50151145",
"0.50039065",
"0.49815464",
"0.49476844",
"0.4927507",
"0.49263746",
"0.4925004",
"0.49133477",
"0.49133477",
"0.49133477",
"0.49133477",
"0.48974207",
"0.48693144",
"0.48640376",
"0.48547694",
"0.48372677",
"0.48208666",
"0.48192757",
"0.48036176",
"0.48012888",
"0.4797471",
"0.47651172",
"0.47587878"
]
| 0.6094589 | 0 |
Like scandir, but recursively. Will skip everything in the skip array, but only at the top level directory. Returns SEntry objects. If in_restricted is true, all returned entries will be marked as restricted even if their permissions are not restricted. | def recursedir(path='.', skip=[], alwaysskip=['.~tmp~'], in_restricted=False):
for dentry in scandir(path):
if dentry.name in skip:
continue
if dentry.name in alwaysskip:
continue
if dentry.name.startswith('.nfs'):
continue
# Skip things which are not at least group readable
# Symlinks are followed here so that clients won't see dangling
# symlinks to content they can't transfer. It's the default, but to
# avoid confusion it's been made explicit.
try:
s = dentry.stat(follow_symlinks=True)
except os.error:
print('Could not stat {0}. Dangling symlink?'.format(dentry.name), file=sys.stderr)
continue
if not (s.st_mode & stat.S_IRGRP):
# print('{} is not group readable; skipping.'.format(dentry.path))
continue
se = SEntry(dentry, in_restricted)
if dentry.is_dir(follow_symlinks=False):
this_restricted = in_restricted
if not se.readable_world:
# print('{} is not world readable; marking as restricted.'.format(se.path), file=sys.stderr)
this_restricted = True
# Don't pass skip here, because we only skip in the top level
for re in recursedir(se.path, alwaysskip=alwaysskip, in_restricted=this_restricted):
yield re
yield se | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def walker(path: str, skip_on_eacces: bool) -> List[str]:\n entries = []\n\n # TODO: Would moving walker to a generator yield a performance increase, or lead to\n # higher disk contention due to the hasher running at the same time?\n try:\n with os.scandir(path) as scan:\n for handle in scan:\n try:\n # Recurse on directories, but not symlinks.\n if handle.is_dir() and not handle.is_symlink():\n entries.extend(walker(handle.path, skip_on_eacces))\n\n # Track files, but not symlinks.\n if handle.is_file() and not handle.is_symlink():\n entries.append(handle.path)\n except PermissionError:\n if not skip_on_eacces:\n raise\n except OSError:\n # This is usually due to too many levels of symlinks. However, other\n # cases are likely with a large enough input.\n continue\n except NotADirectoryError:\n entries.append(path)\n\n return list(set(entries))",
"def get_items(path, only=None):\n path = os.path.expanduser(path)\n ps = [os.path.join(path, n)\n for n in os.listdir(path)\n if not n.startswith('.') and len(n) == 4]\n ps = [p for p in ps if os.path.isdir(p)]\n if only is not None:\n ps = [p for p in ps if nmrex.utils.fname(p) in only]\n return ps",
"def scandir(url: str) -> Iterable[DirEntry]:\n authenticated = credentials.authenticate(url)\n return SCANNER_REGISTRY.get_handler(authenticated.scheme).scandir(authenticated)",
"def scandir(path='.'):\r\n for name in os.listdir(path):\r\n yield GenericDirEntry(path, name)",
"def listdir_nohidden(path):\n\treturn glob.glob(os.path.join(path, '*'))",
"def iter_installed_distributions(\n self,\n local_only: bool = True,\n skip: Container[str] = stdlib_pkgs,\n include_editables: bool = True,\n editables_only: bool = False,\n user_only: bool = False,\n ) -> Iterator[BaseDistribution]:\n it = self.iter_distributions()\n if local_only:\n it = (d for d in it if d.local)\n if not include_editables:\n it = (d for d in it if not d.editable)\n if editables_only:\n it = (d for d in it if d.editable)\n if user_only:\n it = (d for d in it if d.in_usersite)\n return (d for d in it if d.canonical_name not in skip)",
"def getFilesOnly(self,files):\n filesOnly = []\n for f in files:\n if not f['is_dir']:\n filesOnly.append(f)\n return filesOnly",
"def getImmediateSubdirectories(dir):",
"def scandir(self):\n return (self._join(dir_entry) for dir_entry in scandir(self.absolute))",
"def _get_all_entries(entry_list: List[str], keep_top_dir: bool) -> List[Path]:\n all_files = []\n\n entry_list = [Path(entry) for entry in entry_list]\n\n if keep_top_dir:\n return entry_list\n\n for entry in entry_list:\n if entry.is_dir():\n all_files.extend(entry.iterdir())\n else:\n all_files.append(entry)\n return all_files",
"def walk(top=None, excluded=('.git', '.ve', '_static', 'build', 'fixtures')):\n if not top:\n top = os.getcwd()\n\n for root, dirs, files in os.walk(top):\n for directory in excluded:\n if directory in dirs:\n dirs.remove(directory)\n for name in files:\n yield os.path.join(root, name), name",
"def filtered_walk(\n path: str,\n include: Optional[List[str]] = None,\n exclude: Optional[List[str]] = None\n) -> Iterator[str]:\n exclude = exclude or []\n\n if not isdir(path):\n raise ValueError(\"Cannot walk files, only directories: {}\".format(path))\n\n files = os.listdir(path)\n for name in files:\n filename = normpath(join(path, name))\n\n # If excluded, completely skip it. Will not recurse into directories\n if search_globs(filename, exclude):\n continue\n\n # If we have a whitelist and the pattern matches, yield it. If the\n # pattern didn't match and it's a dir, it will still be recursively\n # processed.\n if not include or match_globs(filename, include):\n yield filename\n\n if isdir(filename):\n for p in filtered_walk(filename, include, exclude):\n yield p",
"def get_filtered_dir_list(self) -> typing.List[str]:\n if self._regex is None:\n self.build_regex()\n\n return [\n elem\n for elem in self.super_dir()\n if self._regex.fullmatch(elem)\n ]",
"def filter(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n self._printer('Searching ' + directory)\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n if self.filters.validate(root):\n # Check that non-empty folders flag is on and we're at the max directory level\n if self.filters.non_empty_folders and self.filters.get_level(root) == self.filters.max_level:\n # Check that the path is not an empty folder\n if os.path.isdir(directory + os.sep + root):\n # Get paths in folder without walking directory\n paths = os.listdir(directory + os.sep + root)\n\n # Check that any of the paths are files and not just directories\n if paths and any(os.path.isfile(os.path.join(directory, p)) for p in paths):\n self.add_path(directory, root)\n\n else:\n for filename in files:\n fullname = os.path.join(root, filename)\n if self.filters.validate(fullname):\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)",
"def scanDirectories(directory, includes = [\"*\"], excludes = []):\n\treturn scanAll(directory, includes, excludes)[2]",
"def list_dir_no_hidden(path):\n\n return glob(os.path.join(path, \"*\"))",
"def _scan_directory(self, root_path, name_patterns=None, exclude=None, inc_dirs=None, max_level=None):\n\n name_patterns = name_patterns or []\n exclude = exclude or []\n inc_dirs = inc_dirs or 0\n max_level = max_level or -1\n\n paths=[]\n\n # Generates a tuple of allowed file types\n if '' in name_patterns: name_patterns.remove('')\n if '@Invalid()' in name_patterns: name_patterns.remove('@Invalid()')\n name_patterns = [i.strip('.*') for i in name_patterns]\n name_patterns = tuple(name_patterns)\n\n # Generates list of forbided strings from direcory paths\n if '' in exclude: exclude.remove('')\n\n # Gets the max depth from a system level\n root_path = root_path.rstrip(os.path.sep)\n assert os.path.isdir(root_path)\n num_sep = root_path.count(os.path.sep) + 1\n\n # Walks down directory tree adding to paths[]\n for walk_root, walk_dirs, walk_files in os.walk(root_path):\n if self.should_terminate():\n return paths\n\n # Checks the level is valid\n num_sep_this = walk_root.count(os.path.sep)\n if (num_sep + max_level > num_sep_this) or (max_level == -1):\n\n if not any(ext in walk_root for ext in exclude):\n\n # If indexing directories add the current directory to the index.\n if inc_dirs:\n paths.append(walk_root)\n\n if name_patterns:\n for name in walk_files:\n if name.endswith(name_patterns):\n paths.append(os.path.join(walk_root, name))\n\n return paths",
"def filter_files(self, path):\n excludes = r'|'.join([fnmatch.translate(x) for x in self.project.EXCLUDES]) or r'$.'\n for root, dirs, files in os.walk(path, topdown=True):\n dirs[:] = [d for d in dirs if not re.match(excludes, d)]\n dirs[:] = [os.path.join(root, d) for d in dirs]\n rel_path = os.path.relpath(root, path)\n\n paths = []\n for f in files:\n if rel_path == '.':\n file_path = f\n else:\n file_path = os.path.join(rel_path, f)\n if not re.match(excludes, file_path):\n paths.append(f)\n\n files[:] = paths\n yield root, dirs, files",
"def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n files = [\n (os.lstat(os.path.join(root, fi)).st_size, os.path.join(root, fi), os.lstat(os.path.join(root, fi)).st_ino) for fi\n in files if (os.lstat(os.path.join(root, fi)).st_size > self.size)]\n self.fileList.extend(files)\n if len(self.excludeList) > 0:\n dirnames[:] = [dir for dir in dirnames if dir not in self.excludeList]\n if not self.cross_mount_points:\n dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))]",
"def protectedfiles(self):\n return self._protectedpaths",
"def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames",
"def filtered_walk(rootdir, filter_fn, include_dirs=None, exclude_dirs=None, get_dirs=False):\n flist = []\n dlist = []\n for root, dirs, files in os.walk(rootdir):\n if include_dirs and len(set(root.split(os.sep)).intersection(set(include_dirs))) == 0:\n ## Also try re.search in case we have patterns\n if re.search(\"|\".join(include_dirs), root):\n pass\n else:\n continue\n if exclude_dirs and len(set(root.split(os.sep)).intersection(set(exclude_dirs))) > 0:\n continue\n if exclude_dirs and re.search(\"|\".join(exclude_dirs), root):\n continue\n dlist = dlist + [os.path.join(root, x) for x in dirs]\n flist = flist + [os.path.join(root, x) for x in filter(filter_fn, files)]\n if get_dirs:\n return dlist\n else:\n return flist",
"def scandir(path='.'):\r\n dir_p = opendir(path.encode(file_system_encoding))\r\n if not dir_p:\r\n raise posix_error(path)\r\n try:\r\n result = Dirent_p()\r\n while True:\r\n entry = Dirent()\r\n if readdir_r(dir_p, entry, result):\r\n raise posix_error(path)\r\n if not result:\r\n break\r\n name = entry.d_name.decode(file_system_encoding)\r\n if name not in ('.', '..'):\r\n yield PosixDirEntry(path, name, entry.d_type)\r\n finally:\r\n if closedir(dir_p):\r\n raise posix_error(path)",
"def _recursive_scan(directory=None, file_extension='.dvl'):\n directory = directory or app.config['DEVICE_LOG_DRIVE']\n\n for entry in os.scandir(directory):\n if entry.is_dir(follow_symlinks=False):\n yield from _recursive_scan(entry)\n elif os.path.splitext(entry.name)[1] == file_extension:\n yield entry",
"def _findFiles(filespec, filesToSkip=[], dirsToSkip=[]):\n if filespec[-3:] == '...':\n # This indicates to recursively process the dir.\n startDir = os.path.dirname(filespec) or os.curdir\n if '*' in filespec or '?' in filespec:\n raise _FindError(\"Do not support both glob patterns and '...': \"\\\n \"'%s'\" % filespec)\n if os.path.isfile(startDir):\n raise _FindError(\"Only support '...' on directories: '%s'.\"\\\n % filespec)\n if not os.path.isdir(startDir):\n raise _FindError(\"'%s' directory does not exist.\" % startDir)\n files = []\n os.path.walk(startDir, _addOrSkipFiles,\n (files, filesToSkip, dirsToSkip))\n return files\n else:\n allFiles = glob.glob(filespec)\n files = []\n for file in allFiles:\n dirName, fileName = os.path.split(file)\n if not dirName:\n dirName = os.curdir\n _addOrSkipFiles((files, filesToSkip, dirsToSkip), dirName,\n [fileName])\n return files",
"def getImmediateFiles(aDir):\n return [name for name in os.listdir(aDir)\n if os.path.isfile(os.path.join(aDir,name))]",
"def walk(self):\n if self.parallelize:\n self.filepaths = Sprinter(self.directory, self.filters, self.full_paths, self.pool_size,\n self._printer).sprinter()\n else:\n self.filepaths = Crawler(self.directory, self.filters, self.full_paths, self.topdown,\n self._printer).crawler()\n return self._get_filepaths()",
"def scantree(path):\n # type: (str) -> os.DirEntry\n for entry in scandir(path):\n if entry.is_dir(follow_symlinks=True):\n # due to python2 compat, cannot use yield from here\n for t in scantree(entry.path):\n yield t\n else:\n yield entry",
"def files_in_dir(search_dir, ignored_regex_objects: List) -> List:\n\n file_paths = []\n dir_list = os.listdir(search_dir)\n for filename in dir_list:\n\n search_dir_abspath = os.path.abspath(search_dir)\n full_name = os.path.join(search_dir_abspath, filename)\n if os.path.isdir(full_name):\n # ignore directory\n continue\n\n if os.path.islink(full_name):\n # ignore symlink\n # http://stackoverflow.com/questions/15718006/check-if-directory-is-symlink\n continue\n\n if expression_helper.is_string_matched_in_regular_expression_objects(filename, ignored_regex_objects):\n # ignore this file\n continue\n\n file_paths.append(filename)\n\n return file_paths",
"def scan(\n input, exclusions_file, output, all_access_levels, skip_open_report\n): # pragma: no cover\n if exclusions_file:\n # Get the exclusions configuration\n with open(exclusions_file, \"r\") as yaml_file:\n try:\n exclusions_cfg = yaml.safe_load(yaml_file)\n except yaml.YAMLError as exc:\n logger.critical(exc)\n exclusions = Exclusions(exclusions_cfg)\n else:\n exclusions = DEFAULT_EXCLUSIONS\n\n if os.path.isfile(input):\n scan_account_authorization_file(\n input, exclusions, output, all_access_levels, skip_open_report\n )\n if os.path.isdir(input):\n logger.info(\n \"The path given is a directory. Scanning for account authorization files and generating report.\"\n )\n input_files = get_authorization_files_in_directory(input)\n for file in input_files:\n logger.info(f\"Scanning file: {file}\")\n scan_account_authorization_file(\n file, exclusions, output, all_access_levels, skip_open_report\n )"
]
| [
"0.58776116",
"0.54903525",
"0.51267457",
"0.51127154",
"0.49117744",
"0.49114066",
"0.48921844",
"0.48819953",
"0.48574495",
"0.48382702",
"0.48305023",
"0.48272744",
"0.47720328",
"0.47461796",
"0.46840212",
"0.46832395",
"0.46400335",
"0.46174994",
"0.460325",
"0.45985925",
"0.458855",
"0.45846564",
"0.45685038",
"0.4559204",
"0.451355",
"0.45052308",
"0.4502175",
"0.4498919",
"0.44815752",
"0.44757792"
]
| 0.7487002 | 0 |
Split data into train and test sets. The train set is resampled until its label-1 ratio falls between label_ratio_low and label_ratio_high. | def split_stratify_train(data: pd.DataFrame, label_ratio_low: float, label_ratio_high: float, test_size=0.2):
while True:
X_train, X_test, y_train, y_test = train_test_split(data.drop(columns=['LABEL']), data['LABEL'],
test_size=test_size)
if (y_train.sum() / len(y_train) >= label_ratio_low) and (y_train.sum() / len(y_train) <= label_ratio_high):
break
logger.info(f'Label 1 ratio of train set after split:{y_train.sum() / len(y_train)}')
return X_train, X_test, y_train, y_test | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test",
"def split_to_train_test(split_ratio, input_data):\n\n data = input_data.drop_duplicates()\n data = data.sample(frac = 1)\n data = np.r_[data]\n rows, columns = data.shape\n a = int(rows*split_ratio)\n train_data = data[0: a]\n test_data = data[a: rows+1]\n\n return train_data, test_data",
"def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)",
"def dataset_stratified_split(split: float, dataset: np.ndarray, labels: np.ndarray) -> \\\n (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n train_X, test_X, train_Y, test_Y = train_test_split(dataset,\n labels,\n test_size=split,\n stratify=labels,\n random_state=config.RANDOM_SEED,\n shuffle=True)\n return train_X, test_X, train_Y, test_Y",
"def data_split(dataset, val_ratio=0.1, test_ratio=0.1, seed=1234):\n\n\t# How you grab the labels will depend on what type of Pytorch Dataset object 'dataset' is\n\t# (i.e. ImageFolder/DatasetFolder or not)\n\n\t# For fun, check the method resolution order (MRO) of 'dataset'\n\tprint('Dataset object\\'s inheritance: ', type(dataset).__mro__)\n\n\t# Determine what kind of Dataset object it is, then grab labels\n\t# Warning: currently this will break for anything other than an ImageFolder or CIFAR10 train set\n\tif isinstance(dataset, datasets.CIFAR10):\n\t\tlabels = dataset.train_labels\n\telif isinstance(dataset, datasets.ImageFolder):\n\t\tlabels = [img[1] for img in dataset.imgs]\n\telse:\n\t\terror('Dataset not supported yet')\n\n\t# Calculate class priors, (number in class)/(size of dataset)\n\tidcs = [i for i in range(len(dataset))]\n\tsamples_per_class = np.bincount(np.array(labels))\n\tpriors = samples_per_class/len(labels)\n\n\t# Number of samples in each class for val and test set \n\tval_per_class = np.ceil(samples_per_class*val_ratio).astype(np.int)\n\ttest_per_class = np.ceil(samples_per_class*test_ratio).astype(np.int)\n\n\t# Copy and shuffle the labels and corresponding indices to randomize before splitting\n\tshuffled_labels = list(labels)\n\tshuffled_idcs = list(idcs)\n\trandom.Random(seed).shuffle(shuffled_labels)\n\trandom.Random(seed).shuffle(shuffled_idcs)\n\n\t# Iterate through, grabbing indices for each class to place in validation set\n\t# until the desired number is reached\n\tval_idcs = []\n\tval_counts = np.zeros(val_per_class.shape)\n\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if validation set quota has been reached yet for this class\n\t\tif val_counts[l] < val_per_class[l]:\n\t\t\tval_idcs.append(i)\n\t\t\tval_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (val_counts == val_per_class).all():\n\t\t\tbreak\n\n\t# Repeat for test set\n\ttest_idcs = []\n\ttest_counts = np.zeros(test_per_class.shape)\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if this index is already in val set\n\t\tif i in val_idcs:\n\t\t\tcontinue\n\n\t\t# Check if test set quota has been reached yet for this class\n\t\tif test_counts[l] < test_per_class[l]:\n\t\t\ttest_idcs.append(i)\n\t\t\ttest_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (test_counts == test_per_class).all():\n\t\t\tbreak\n\n\t# Get train indices too (all the remaining samples not in val or test)\n\ttrain_idcs = [j for j in idcs if j not in val_idcs+test_idcs]\n\n\t# Split the data\n\ttrain = Subset(dataset, train_idcs)\n\tval = Subset(dataset, val_idcs)\n\ttest = Subset(dataset, test_idcs)\n\n\treturn train, val, test",
"def split_train_test(X, Y, ratio=0.3):\n # Splitting the dataset into the Training set and Test set\n return train_test_split(X, Y, test_size = ratio, random_state = 0)",
"def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)",
"def split_data(data, labels):\r\n # Split the data into train and test\r\n X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.30, random_state = 42)\r\n return(X_train, y_train, X_test, y_test)",
"def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set",
"def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data",
"def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df",
"def train_test_split(measurements: np.ndarray, split: float = 0.8) -> (np.ndarray, np.ndarray):\n labels_measurements = [m.label for m in measurements]\n labels = np.unique(labels_measurements)\n\n for i, l in enumerate(labels):\n indices_label = np.argwhere(np.array(labels_measurements) == l).flatten()\n\n num_samples = indices_label.size\n if i == 0:\n measurements_train = measurements[indices_label][:int(split*num_samples)]\n measurements_test = measurements[indices_label][int(split*num_samples):]\n else:\n measurements_train = np.append(measurements_train, measurements[indices_label][:int(split*num_samples)])\n measurements_test = np.append(measurements_test, measurements[indices_label][int(split*num_samples):])\n\n np.random.shuffle(measurements_train)\n np.random.shuffle(measurements_test)\n\n return measurements_train, measurements_test",
"def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ",
"def split_train_test(df_train, labels):\n n_train = np.shape(df_train)[0]\n X = {'train': [], 'holdout': []} # features\n Y = {'train': [], 'holdout': []} # labels\n p10 = int(0.1 * n_train)\n X['holdout'] = df_train.iloc[-p10:]\n Y['holdout'] = labels[-p10:]\n X['train'] = df_train.iloc[:(n_train - p10)]\n Y['train'] = labels[:(n_train - p10)]\n return X, Y",
"def split_data(train, parameters):\n labels = train.labels\n train_indices, val_indices = train_test_split(range(len(labels)),\n stratify=labels,\n random_state=parameters['seed'],\n test_size=parameters['validation_size'])\n return train_indices, val_indices",
"def train_val_test_split(data):\n raise NotImplementedError",
"def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y",
"def split_data(x, y, ratio, seed=1):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Input:\n - x (ndarray) : binary prediction for set 1\n - y (ndarray) : binary prediction for set 2\n - ratio (ndarray) : binary prediction for set 3\n - seed (float) : indices of the data points in set 1 \n Output: \n - train_x (ndarray) : binary prediction for set 1\n - train_y (ndarray) : binary prediction for set 2\n - test_x (ndarray) : binary prediction for set 3\n - test_y (ndarray) : indices of the data points in set 1\n \"\"\"\n # set seed and shuffle the indices\n np.random.seed(seed)\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n shuffled_y = y[shuffle_indices]\n shuffled_x = x[shuffle_indices]\n \n #splits the set according to the ratio on the shuffled set\n ratio_idx = int(np.floor(ratio*len(y)))\n train_y = shuffled_y[:ratio_idx]\n train_x = shuffled_x[:ratio_idx]\n test_y = shuffled_y[ratio_idx:]\n test_x = shuffled_x[ratio_idx:]\n return train_x, train_y, test_x, test_y",
"def train_test_split(x, y, test_pct):\n data = zip(x, y)\n train, test = split_data(data, 1 - test_pct)\n x_train, y_train = zip(*train)\n x_test, y_test = zip(*test)\n return x_train, y_train, x_test, y_test",
"def split_dataset(instances, labels, train_split=0.8):\n split = int(train_split * len(instances))\n train_data, train_labels = instances[:split], labels[:split]\n test_data, test_labels = instances[split:], labels[split:]\n\n return train_data, train_labels, test_data, test_labels",
"def split_test_train(data, target=\"class\", split=0.20):\n np.random.seed(42)\n\n X = data[[c for c in list(data.columns) if c != target]]\n # y = data[target].astype(\"int\")\n y = data[target].astype(\"category\")\n\n train, test = Data(X, y), None\n if split is not None or split > 0:\n splits = train_test_split(X, y, test_size=split, stratify=y, random_state=42)\n train, test = Data(splits[0], splits[2]), Data(splits[1], splits[3])\n\n return train, test",
"def split_data(images, labels):\n images, labels = shuffle_data_pair(images, labels)\n\n num_covid_points = sum(map(lambda label: label == 0, labels))\n\n # Calculate split\n num_test = int(num_covid_points * 0.1)\n num_covid_train = num_covid_points - num_test * 2\n num_other_train = int(num_covid_train * 1.1)\n\n # (train, validate, test) points added\n num_points_added = [\n [0, 0, 0], # COVID-19\n [0, 0, 0], # Viral pneumonia\n [0, 0, 0] # Normal\n ]\n\n # Datasets\n images_train = []\n labels_train = []\n images_validate = []\n labels_validate = []\n images_test = []\n labels_test = []\n\n # Add images and labels to datasets\n notifier.send(\" Adding images and labels to dataset...\")\n for i, label in enumerate(labels):\n print(f\" Point: {i} / {len(labels)}\")\n completed_labels = [False, False, False] # Enough of label added\n if all(completed_labels):\n break\n for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal\n if completed_labels[j]:\n continue\n if label == j:\n # Add training data\n can_add_training = False\n if j == 0: # COVID-19\n if num_points_added[j][0] < num_covid_train:\n can_add_training = True\n num_points_added[j][0] += 1\n elif num_points_added[j][0] < num_other_train: # Not COVID-19\n can_add_training = True\n num_points_added[j][0] += 1\n if can_add_training:\n images_train.append(images[i])\n labels_train.append(labels[i])\n break\n\n # Add validation data\n if num_points_added[j][1] < num_test:\n num_points_added[j][1] += 1\n images_validate.append(images[i])\n labels_validate.append(labels[i])\n break\n\n # Add testing data\n if num_points_added[j][2] < num_test:\n num_points_added[j][2] += 1\n images_test.append(images[i])\n labels_test.append(labels[i])\n break\n\n # Point couldn't be added anywhere: label is complete\n completed_labels[j] = True\n break\n\n # Shuffle all data\n notifier.send(\" Shuffling data...\")\n images_train, labels_train = shuffle_data_pair(\n images_train, labels_train\n )\n images_validate, labels_validate = shuffle_data_pair(\n images_validate, labels_validate\n )\n images_test, labels_test = shuffle_data_pair(\n images_test, labels_test\n )\n\n if PLOT_LABELS:\n # Plot data frequencies\n plt.hist(labels, bins=3)\n plt.title(\"Labels\")\n\n plt.hist(labels_train, bins=3)\n plt.title(\"Train Labels\")\n\n plt.hist(labels_validate, bins=3)\n plt.title(\"Validate Labels\")\n\n plt.hist(labels_test, bins=3)\n plt.title(\"Test Labels\")\n\n plt.show()\n\n # Make labels categorical\n notifier.send(\" Making labels categorical: train...\")\n labels_train = tf.keras.utils.to_categorical(labels_train)\n notifier.send(\" Making labels categorical: validate...\")\n labels_validate = tf.keras.utils.to_categorical(labels_validate)\n notifier.send(\" Making labels categorical: test...\")\n labels_test = tf.keras.utils.to_categorical(labels_test)\n\n notifier.send(\" Converting data to NumPy arrays...\")\n return \\\n np.array(images_train), np.array(images_validate), np.array(images_test), \\\n np.array(labels_train), np.array(labels_validate), np.array(labels_test)",
"def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y",
"def train_test_split(x, y, test_pct):\n\tdata = zip(x,y)\n\ttrain, test = split_data(data, 1 - test_pct)\n\tx_train, y_train = zip(*train)\n\tx_test, y_test = zip(*test)\n\treturn x_train, y_train, x_test, y_test",
"def splitData(groupList, trainSize):\r\n from sklearn.model_selection import StratifiedShuffleSplit\r\n\r\n groupList[0]['text'] = cleanRealTexts(list(groupList[0]['text']))\r\n\r\n classLabels = np.array([])\r\n for i, group in enumerate(groupList):\r\n classLabels = np.append(classLabels, np.repeat(i, len(group)))\r\n\r\n classData = pd.concat(groupList).reset_index(drop=True)\r\n\r\n splits = list(StratifiedShuffleSplit(n_splits=i,\r\n test_size=1-trainSize,\r\n train_size=trainSize,\r\n random_state=0).split(X=classData, y=classLabels))[0]\r\n trainIdx, testIdx = splits\r\n\r\n trainData = classData.iloc[trainIdx]\r\n testData = classData.iloc[testIdx]\r\n trainLabels = classLabels[trainIdx]\r\n testLabels = classLabels[testIdx]\r\n\r\n return [[trainData, trainLabels], [testData, testLabels]]",
"def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()",
"def split_data(dataset_x, dataset_y, split_ratio):\n num_examples = len(dataset_x)\n training_x = dataset_x[:int(num_examples*split_ratio)]\n training_y = dataset_y[:int(num_examples*split_ratio)]\n\n validation_x = dataset_x[int(num_examples*split_ratio): num_examples]\n validation_y = dataset_y[int(num_examples*split_ratio): num_examples]\n\n training_y = np.asarray(training_y, dtype='float32')\n validation_y = np.asarray(validation_y, dtype='float32')\n return training_x, training_y, validation_x, validation_y",
"def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)",
"def create_validation_split(train_data, fraction_per_class=0.1, shuffle=True):\n\n subset_train_data = []\n val_data = []\n val_label_counts = {}\n\n class_labels = [i['class']['label'] for i in train_data]\n images_per_class = Counter(class_labels)\n val_images_per_class = {label: 0 for label in images_per_class.keys()}\n\n # Sanity check to make sure each class has more than 1 label\n for label, image_count in images_per_class.items():\n if image_count <= 1:\n print(\"Warning: label %d has only %d images\" % (label, image_count))\n\n if shuffle:\n random.shuffle(train_data)\n\n for image_data in train_data:\n label = image_data['class']['label']\n\n if label not in val_label_counts:\n val_label_counts[label] = 0\n\n if val_images_per_class[label] < images_per_class[label] * fraction_per_class:\n val_data.append(image_data)\n val_images_per_class[label] += 1\n else:\n subset_train_data.append(image_data)\n\n return subset_train_data, val_data",
"def train_test_split(self, train_test_ratio=0.75, splitter=None, idx_train=None, idx_test=None):\n if splitter is not None:\n idx_test, idx_train = splitter()\n elif idx_train is not None:\n idx_train = np.sort(idx_train)\n elif idx_test is not None:\n idx_test = np.sort(idx_test)\n else: # sequential split\n n_train= int(self.X.shape[0]*train_test_ratio)\n idx_test = range(self.X.shape[0] - n_train)\n idx_train = range(self.X.shape[0] - n_train, self.X.shape[0])\n\n self.trainX, self.trainY = self.X[idx_train, :], self.Y[idx_train]\n self.testX, self.testY = self.X[idx_test, :], self.Y[idx_test]\n self.idx_train, self.idx_test = idx_train, idx_test"
]
| [
"0.8258039",
"0.7710548",
"0.7688627",
"0.75904655",
"0.75765723",
"0.7543811",
"0.75388366",
"0.7477704",
"0.7435805",
"0.7415709",
"0.7415688",
"0.74026334",
"0.7371344",
"0.7349963",
"0.73185074",
"0.7288917",
"0.72865677",
"0.7271239",
"0.7267007",
"0.7255961",
"0.7241415",
"0.7226007",
"0.72137123",
"0.72074556",
"0.7190769",
"0.7170511",
"0.71612716",
"0.7157961",
"0.7147399",
"0.71419543"
]
| 0.8792884 | 0 |
Train an XGBoost model using scikit-learn's RandomizedSearchCV and output a report. The train set is split into train and validation sets; the validation set is used for early stopping. | def xgb_scikit_random_train(train_X, train_Y, test_X, test_Y):
x_train, x_val, y_train, y_val = train_test_split(train_X, train_Y, test_size=0.1)
logger.info(f"Train set size: {len(x_train)}, validation set(for early stopping) size: {len(x_val)}")
objective = 'binary:logistic'
eval_metric = 'logloss'
early_stopping_rounds = 7
n_iter = 100 # number of iterations for RandomizedSearchCV
param_dist = {
'n_estimators': stats.randint(100, 300), # default 100, try 100-300
'max_depth': stats.randint(5, 10), # default 6, try 5-10
'gamma': stats.uniform(0, 10), # default 0, try 0-10
'subsample': stats.uniform(0.8, 0.2), # default 1, try 0.8-1
'colsample_bytree': stats.uniform(0.7, 0.3), # default 1, try 0.7-1
'learning_rate': stats.loguniform(1e-3, 10), # default 0.3, try 0.001-10
}
clf = xgb.XGBClassifier(objective=objective, eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds)
xgb_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter,
return_train_score=True, n_jobs=-1, pre_dispatch=64)
xgb_search.fit(x_train, y_train, eval_set=[(x_val, y_val)], verbose=False)
model = xgb_search.best_estimator_
test_score, train_score, val_score = report_model(model, test_X, test_Y, x_train, y_train, x_val, y_val)
return model, f"spread_{test_score}_XGB_{datetime.datetime.now():%Y%m%d_%H%M}" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_xgb(params, X_train, y_train, cv, scorer='neg_mean_squared_error', seed=42):\n\n n_estimators = int(params[\"n_estimators\"])\n max_depth= int(params[\"max_depth\"])\n\n try:\n model = xgb.XGBRegressor(n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=params[\"learning_rate\"],\n subsample=params[\"subsample\"], \n seed=seed)\n\n \n #result = model.fit(X_train,\n # y_train.values.ravel(),\n # eval_set=[(X_train, y_train.values.ravel())],\n # early_stopping_rounds=50,\n # verbose=False)\n\n fit_params = {\n 'eval_set': [(X_train, y_train.values.ravel())],\n 'early_stopping_rounds': 50,\n 'verbose': False\n }\n\n return_estimator = False\n cv_score = cross_validate(\n model,\n X_train, y_train.values.ravel(),\n cv=cv,\n scoring=scorer,\n return_estimator=return_estimator,\n fit_params=fit_params\n )\n\n scores = np.abs(np.array(cv_score['test_score']))\n avg_score = np.mean(scores)\n return {\n \"loss\": avg_score,\n \"scores\": scores,\n \"status\": STATUS_OK,\n #\"models\": cv_score['estimator']\n }\n\n except ValueError as ex:\n return {\n \"error\": ex,\n \"status\": STATUS_FAIL\n }",
"def xgb_train(X_train, y_train, write=False):\n model_xgb = xgb.XGBClassifier(max_depth=7,\n min_child_weight=1,\n learning_rate=0.01,\n n_estimators=5000,\n gamma=0.8,\n subsample=0.95,\n colsample_bytree=0.6,\n reg_alpha=0.0025,\n objective='binary:logistic',\n nthread=4,\n scale_pos_weight=1,\n seed=123)\n model_xgb.fit(X_train, y_train)\n if write:\n pickle.dump(model_xgb, open(obj_save_path+'model_xgb.p', 'wb'))\n #model_xgb = pickle.load(open(obj_save_path+'model_xgb.p', 'rb'))\n plot_importance(model_xgb)\n plt.show()\n return model_xgb",
"def train_xgb(X,y):\n\t\n\txgb_handle = xgb.XGBClassifier()\n\n\tone_to_left = st.beta(10, 1) \n\tfrom_zero_positive = st.expon(0, 50)\n\t\n\t#Define distributions to sample from for hyper parameter optimization\n\tparam_dist = { \n\t \"n_estimators\": st.randint(3, 40),\n\t \"max_depth\": st.randint(3, 40),\n\t \"learning_rate\": st.uniform(0.05, 0.4),\n\t \"colsample_bytree\": one_to_left,\n\t \"subsample\": one_to_left,\n\t \"gamma\": st.uniform(0, 10),\n\t \"reg_alpha\": from_zero_positive,\n\t \"min_child_weight\": from_zero_positive,\n\t}\n\n\tn_iter_search = 20\n\trandom_search = RandomizedSearchCV(xgb_handle, param_distributions=param_dist,\n\t n_iter=n_iter_search,verbose=10,scoring=\"roc_auc\",\n\t n_jobs=1,cv=5)\n\n\trandom_search_res_xgb = random_search.fit(X, y)\n\t\n\t#Get the best model that was retrained on all data\n\txgb_model = random_search_res_xgb.best_estimator_\n\n\treturn(xgb_model,random_search_res_xgb)",
"def train_and_score_bagging(network):\n\n train_predictions = pd.read_pickle('data/train_predictions.pkl.gz', compression='gzip')\n test_predictions = pd.read_pickle('data/test_predictions.pkl.gz', compression='gzip')\n\n train_actuals = pd.read_pickle('data/train_actuals.pkl.gz', compression='gzip')\n test_actuals = pd.read_pickle('data/test_actuals.pkl.gz', compression='gzip')\n\n\n train_x = np.array(train_predictions.values)\n train_y = train_actuals[0].values\n train_log_y = safe_log(train_y)\n test_x = np.array(test_predictions.values)\n test_y = test_actuals[0].values\n test_log_y = safe_log(test_y)\n\n model = compile_model(network)\n\n print('\\rNetwork')\n\n for property in network:\n print(property, ':', network[property])\n logging.info('%s: %s' % (property, network[property]))\n\n test = xgb.DMatrix(test_x)\n train = xgb.DMatrix(train_x, label=train_log_y)\n\n\n\n eval_set = [(test_x, test_log_y)]\n model.fit(train_x, train_log_y, early_stopping_rounds=20, eval_metric='mae', eval_set=eval_set,\n verbose=False)\n\n # eval_set = [(test, test_log_y)]\n # xgb.train(network, train, num_boost_round=5000, evals=eval_set, early_stopping_rounds=5)\n\n predictions = model.predict(test_x)\n # predictions = xgb.predict(test_x)\n inverse_predictions = safe_exp(predictions)\n score = mean_absolute_error(test_y, inverse_predictions)\n mape = safe_mape(test_y, inverse_predictions)\n\n print('\\rResults')\n\n best_round = xgb.best_iteration\n\n if np.isnan(score):\n score = 9999\n\n print('best round:', best_round)\n print('loss:', score)\n print('mape:', mape)\n print('-' * 20)\n\n logging.info('best round: %d' % best_round)\n logging.info('loss: %.4f' % score)\n logging.info('mape: %.4f' % mape)\n logging.info('-' * 20)\n\n eval_results({'xgb_predictions': {\n 'actual_y': test_y,\n 'y_predict': inverse_predictions\n }\n })\n\n range_results({\n 'xgb_predictions': inverse_predictions,\n }, test_y)",
"def train_and_score_xgb(network):\n\n df_all_train_x = pd.read_pickle('data/df_all_train_x.pkl.gz', compression='gzip')\n df_all_train_y = pd.read_pickle('data/df_all_train_y.pkl.gz', compression='gzip')\n df_all_train_actuals = pd.read_pickle('data/df_all_train_actuals.pkl.gz', compression='gzip')\n df_all_test_x = pd.read_pickle('data/df_all_test_x.pkl.gz', compression='gzip')\n df_all_test_y = pd.read_pickle('data/df_all_test_y.pkl.gz', compression='gzip')\n df_all_test_actuals = pd.read_pickle('data/df_all_test_actuals.pkl.gz', compression='gzip')\n\n train_y = df_all_train_y[0].values\n train_actuals = df_all_train_actuals[0].values\n train_log_y = safe_log(train_y)\n train_x = df_all_train_x.values\n test_actuals = df_all_test_actuals.values\n test_y = df_all_test_y[0].values\n test_log_y = safe_log(test_y)\n test_x = df_all_test_x.values\n\n # Use keras model to generate x vals\n mae_intermediate_model = load_model('models/mae_intermediate_model.h5')\n\n mae_vals_train = mae_intermediate_model.predict(train_x)\n mae_vals_test = mae_intermediate_model.predict(test_x)\n\n # train = xgb.DMatrix(mae_vals_train, label=train_log_y)\n # test = xgb.DMatrix(mae_vals_test)\n\n model = compile_model(network)\n\n print('\\rNetwork')\n\n for property in network:\n print(property, ':', network[property])\n logging.info('%s: %s' % (property, network[property]))\n\n\n eval_set = [(mae_vals_test, test_log_y)]\n model.fit(mae_vals_train, train_log_y, early_stopping_rounds=5, eval_metric='mae', eval_set=eval_set)\n # , verbose=False)\n\n # eval_set = [(test, test_log_y)]\n # xgb.train(network, train, num_boost_round=5000, evals=eval_set, early_stopping_rounds=5)\n\n\n predictions = model.predict(mae_vals_test)\n # predictions = xgb.predict(test)\n score = mean_absolute_error(test_log_y, predictions)\n\n print('\\rResults')\n\n best_round = model.best_iteration\n # best_round = xgb.best_iteration\n\n if np.isnan(score):\n score = 9999\n\n print('best round:', best_round)\n print('loss:', score)\n print('-' * 20)\n\n logging.info('best round: %d' % best_round)\n logging.info('loss: %.4f' % score)\n logging.info('-' * 20)\n\n return score",
"def xgboost_cv(self, nsplits: int = 5) -> (float, float, float):\r\n x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=0.2)\r\n params = {\r\n \"max_depth\": [2, 3, 5, 8],\r\n \"eta\": [0.01, 0.05, 0.1, 0.15, 0.2],\r\n \"objective\": ['binary:logistic'],\r\n \"sumsample\": [0.5, 0.7, 1],\r\n \"colsample_bytree\": [0.5, 0.7, 1],\r\n \"n_estimators\": [50, 100, 200, 500],\r\n }\r\n \"\"\"\r\n fit_params = {\r\n \"early_stopping_rounds\": 20,\r\n \"eval_metric\": \"error\",\r\n \"eval_set\": [(x_test, y_test)]\r\n }\r\n \"\"\"\r\n model = xgb.XGBClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(x_train, y_train) # , **fit_params)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = xgb.XGBClassifier(**best_params).fit(x_train, y_train)\r\n \"\"\"\r\n x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2)\r\n model = xgb.XGBClassifier(**best_params).fit(x_t, y_t, eval_metric=\"error\", eval_set=[(x_v, y_v)],\r\n early_stopping_rounds=20)\r\n \"\"\"\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params",
"def xgb(x_train, y_train, x_test):\n\n model = XGBClassifier()\n # y_train = np.reshape(y_train, (len(y_train), 1))\n # data = np.concatenate((x_train, y_train), axis=1)\n # for train, test in kfold.split(data):\n # # print(\"reached here\")\n # x_tr = data[train, :-1]\n # y_tr = data[train, -1]\n # x_va = data[test, :-1]\n # y_va = data[test, -1]\n\n # model.fit(x_tr, y_tr)\n # y_pred = model.predict(x_va)\n # predictions = [round(value) for value in y_pred]\n # f1 = f1_score(y_va, predictions)\n # print(f1)\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n y_predict = [round(value) for value in y_predict]\n return y_predict",
"def score_XGB(train_X, train_y, val_X, val_y):\n XG_model = XGBClassifier()\n XG_model.fit(train_X, train_y)\n XG_preds = XG_model.predict(val_X)\n XG_scores = accuracy_score(val_y, XG_preds)\n \n # Print the scores for each model\n print('XG - Accuracy: ' + str(XG_scores))",
"def xgboost_model(features, df):\n X= features\n y = df['Severity'].values\n\n xg_model = XGBClassifier(subsample= .7, reg_lambda = 5, n_estimators=900, min_child_weight=1, max_depth=20,\n learning_rate=.01, gamma = .5, colsample_bytree = .6, colsample_bylevel=.7)\n xg_model.fit(X, y)\n y_pred = xg_model.predict(X)\n \n return classification_report(y, y_pred, target_names=['Non-Severe', 'Severe'])",
"def train_xgb(self, explore_bound=False):\n # Create and train classifier\n xgb_classifier = DSTL_XGB()\n xgb_classifier.train(self.features['train'], self.labels['train'], X_cv=self.features['cv'], y_cv=self.labels['cv'])\n # Predict using default boundaries\n pred = xgb_classifier.predict(self.features['test'])\n self._display_test_image(pred)\n # Try out a couple of test boundaries\n if explore_bound:\n for boundary in [0.3, 0.35, 0.4, 0.45, 0.5]:\n pred = xgb_classifier.predict(self.features['test'], boundary=boundary)\n self._display_test_image(pred)\n # Evaluate Jaccard Similarity Score\n jacc = metrics.jaccard_similarity_score(self.labels['test'], pred)\n return jacc",
"def fit(self, train_features, train_actuals):\n for name in self.models.keys():\n print('-'*shutil.get_terminal_size().columns)\n print(\"evaluating {}\".format(name).center(columns))\n print('-'*shutil.get_terminal_size().columns)\n estimator = self.models[name]\n est_params = self.params[name]\n gscv = GridSearchCV(estimator, est_params, cv=5, scoring=self.scoring_metric)\n gscv.fit(train_features, train_actuals)\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\n self.single_classifier_best[name] = gscv",
"def main():\n (header, data_obj) = select(TRAIN_DATA + TEST_DATA, lambda x: True, build, PACKET_HEADER)\n\n '''\n 2) Build training data, and testing data\n '''\n data = np.array([obj.features() for obj in data_obj])\n labels = np.array([obj.label() for obj in data_obj])\n\n train, test, labels_train, labels_test = model_selection.train_test_split(data, labels, train_size=0.6, test_size=0.4)\n\n dataset_train = xgb.DMatrix(train, label=labels_train)\n dataset_test = xgb.DMatrix(test, label=labels_test)\n\n '''\n 3) Train XGBoost\n '''\n evallist = [(dataset_train, 'train')]\n param = {'bst:max_depth': 3, 'bst:eta': 0.1, 'silent': 1, 'objective': 'binary:logistic'}\n param['nthread'] = 5\n param['eval_metric'] = 'rmse'\n param['base_score'] = 0.5\n num_round = 50\n bst = xgb.train(param, dataset_train, num_round, evallist)\n\n '''\n 4) Prediction\n '''\n startTime = time.time()\n predicts = bst.predict(dataset_test)\n labels_predict = []\n\n for p in predicts:\n if p >= 0.5:\n labels_predict.append(1)\n else:\n labels_predict.append(0)\n\n executionTime = (time.time() - startTime)\n print \"********************************************************\"\n print \"The total detection time is \" + str(executionTime) + 's'\n print \"The overall accuracy is: \" + str(metrics.accuracy_score(labels_test, labels_predict))\n\n cm = metrics.confusion_matrix(labels_test, labels_predict)\n cnf_matrix = cm / cm.astype(np.float).sum(axis=1)\n np.set_printoptions(precision=6, suppress=True)\n\n cnf_matrix = metrics.confusion_matrix(labels_test, labels_predict)\n plot_confusion_matrix(cnf_matrix, classes=CLASS_NAME, file_name='stage_a_confusion_matrix_unnormal.pdf')\n\n # Plot normalized confusion matrix\n plot_confusion_matrix(cnf_matrix, classes=CLASS_NAME, normalize=True, file_name='stage_a_confusion_matrix_normal.pdf')\n plt.show()\n print \"********************************************************\"",
"def fit(train_data, train_target):\r\n for name in models.keys():\r\n est = models[name]\r\n est_params = params2[name]\r\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5)\r\n gscv.fit(train_data, train_target)\r\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\r\n print(\"Where we selected the parameters: {}\" .format(gscv.cv_results_['params'][gscv.best_index_]))\r\n print(\"with mean cross-validated score: {}\" .format(gscv.best_score_))",
"def simple_XGBoost_model(X_XGB, Y_XGB, X_XGB_test, modeltype, log_y=False, GPU_flag=False,\r\n scaler = '', enc_method='label',verbose=0):\r\n columns = X_XGB.columns\r\n if isinstance(scaler, str):\r\n if not scaler == '':\r\n scaler = scaler.lower()\r\n if scaler == 'standard':\r\n scaler = StandardScaler()\r\n elif scaler == 'minmax':\r\n scaler = MinMaxScaler()\r\n else:\r\n scaler = StandardScaler()\r\n ######### G P U P R O C E S S I N G B E G I N S ############\r\n ###### This is where we set the CPU and GPU parameters for XGBoost\r\n if GPU_flag:\r\n GPU_exists = check_if_GPU_exists()\r\n else:\r\n GPU_exists = False\r\n ##### Set the Scoring Parameters here based on each model and preferences of user ###\r\n cpu_params = {}\r\n param = {}\r\n cpu_params['tree_method'] = 'hist'\r\n cpu_params['gpu_id'] = 0\r\n cpu_params['updater'] = 'grow_colmaker'\r\n cpu_params['predictor'] = 'cpu_predictor'\r\n if GPU_exists:\r\n param['tree_method'] = 'gpu_hist'\r\n param['gpu_id'] = 0\r\n param['updater'] = 'grow_gpu_hist' #'prune'\r\n param['predictor'] = 'gpu_predictor'\r\n print(' Running XGBoost using GPU parameters')\r\n else:\r\n param = copy.deepcopy(cpu_params)\r\n print(' Running XGBoost using CPU parameters')\r\n #################################################################################\r\n if modeltype == 'Regression':\r\n if log_y:\r\n Y_XGB.loc[Y_XGB==0] = 1e-15 ### just set something that is zero to a very small number\r\n xgb = XGBRegressor(\r\n booster = 'gbtree',\r\n colsample_bytree=0.5,\r\n alpha=0.015,\r\n gamma=4,\r\n learning_rate=0.1,\r\n max_depth=15,\r\n min_child_weight=2,\r\n n_estimators=1000,\r\n reg_lambda=0.5,\r\n \t #reg_alpha=8,\r\n subsample=0.7,\r\n random_state=99,\r\n objective='reg:squarederror',\r\n \t eval_metric='rmse',\r\n verbosity = 0,\r\n n_jobs=-1,\r\n silent = True)\r\n else:\r\n if Y_XGB.nunique() <= 2:\r\n objective='binary:logistic'\r\n eval_metric = 'logloss'\r\n else:\r\n objective='multi:softmax'\r\n eval_metric = 'mlogloss'\r\n xgb = XGBClassifier(\r\n booster = 'gbtree',\r\n colsample_bytree=0.5,\r\n alpha=0.015,\r\n gamma=4,\r\n learning_rate=0.1,\r\n max_depth=15,\r\n min_child_weight=2,\r\n n_estimators=1000,\r\n reg_lambda=0.5,\r\n objective=objective,\r\n subsample=0.7,\r\n random_state=99,\r\n n_jobs=-1,\r\n verbosity = 0,\r\n silent = True)\r\n\r\n #testing for GPU\r\n model = xgb.set_params(**param)\r\n if X_XGB.shape[0] >= 1000000:\r\n hyper_frac = 0.1\r\n elif X_XGB.shape[0] >= 100000:\r\n hyper_frac = 0.2\r\n elif X_XGB.shape[0] >= 10000:\r\n hyper_frac = 0.3\r\n else:\r\n hyper_frac = 0.4\r\n #### now select a random sample from X_XGB ##\r\n if modeltype == 'Regression':\r\n X_XGB_sample = X_XGB[:int(hyper_frac*X_XGB.shape[0])]\r\n Y_XGB_sample = Y_XGB[:int(hyper_frac*X_XGB.shape[0])]\r\n else:\r\n X_XGB_sample = X_XGB.sample(frac=hyper_frac, random_state=99)\r\n Y_XGB_sample = Y_XGB.sample(frac=hyper_frac, random_state=99)\r\n ######### Now set the number of rows we need to tune hyper params ###\r\n nums = int(X_XGB_sample.shape[0]*0.9)\r\n X_train = X_XGB_sample[:nums]\r\n X_valid = X_XGB_sample[nums:]\r\n Y_train = Y_XGB_sample[:nums]\r\n Y_valid = Y_XGB_sample[nums:]\r\n scoreFunction = { \"precision\": \"precision_weighted\",\"recall\": \"recall_weighted\"}\r\n params = {\r\n 'learning_rate': sp.stats.uniform(scale=1),\r\n 'gamma': sp.stats.randint(0, 32),\r\n 'n_estimators': sp.stats.randint(100,500),\r\n \"max_depth\": sp.stats.randint(3, 15),\r\n },\r\n model = RandomizedSearchCV(xgb.set_params(**param),\r\n 
param_distributions = params,\r\n n_iter = 10,\r\n return_train_score = True,\r\n random_state = 99,\r\n n_jobs=-1,\r\n #cv = 3,\r\n verbose = False)\r\n\r\n X_train, X_valid = data_transform(X_train, Y_train, X_valid,\r\n scaler=scaler, enc_method=enc_method)\r\n\r\n gbm_model = xgb_model_fit(model, X_train, Y_train, X_valid, Y_valid, modeltype,\r\n log_y, params, cpu_params)\r\n model = gbm_model.best_estimator_\r\n #############################################################################\r\n n_splits = 10\r\n ls=[]\r\n if modeltype == 'Regression':\r\n fold = KFold(n_splits=n_splits)\r\n else:\r\n fold = StratifiedKFold(shuffle=True, n_splits=n_splits, random_state=99)\r\n scores=[]\r\n if not isinstance(X_XGB_test, str):\r\n pred_xgbs = np.zeros(len(X_XGB_test))\r\n pred_probas = np.zeros(len(X_XGB_test))\r\n else:\r\n pred_xgbs = []\r\n pred_probas = []\r\n #### First convert test data into numeric using train data ###\r\n if not isinstance(X_XGB_test, str):\r\n _, X_XGB_test_enc = data_transform(X_XGB, Y_XGB, X_XGB_test,\r\n scaler=scaler, enc_method=enc_method)\r\n #### now run all the folds each one by one ##################################\r\n start_time = time.time()\r\n for folds, (train_index, test_index) in tqdm(enumerate(fold.split(X_XGB,Y_XGB))):\r\n x_train, x_test = X_XGB.iloc[train_index], X_XGB.iloc[test_index]\r\n if modeltype == 'Regression':\r\n if log_y:\r\n y_train, y_test = np.log(Y_XGB.iloc[train_index]), Y_XGB.iloc[test_index]\r\n else:\r\n y_train, y_test = Y_XGB.iloc[train_index], Y_XGB.iloc[test_index]\r\n else:\r\n y_train, y_test = Y_XGB.iloc[train_index], Y_XGB.iloc[test_index]\r\n\r\n ## scale the x_train and x_test values - use all columns -\r\n x_train, x_test = data_transform(x_train, y_train, x_test,\r\n scaler=scaler, enc_method=enc_method)\r\n\r\n model = gbm_model.best_estimator_\r\n model = xgb_model_fit(model, x_train, y_train, x_test, y_test, modeltype,\r\n log_y, params, cpu_params)\r\n\r\n #### now make predictions on validation data ##\r\n if modeltype == 'Regression':\r\n if log_y:\r\n preds = np.exp(model.predict(x_test))\r\n else:\r\n preds = model.predict(x_test)\r\n else:\r\n preds = model.predict(x_test)\r\n\r\n feature_importances = pd.DataFrame(model.feature_importances_,\r\n index = X_XGB.columns,\r\n columns=['importance'])\r\n sum_all=feature_importances.values\r\n ls.append(sum_all)\r\n ###### Time to consolidate the predictions on test data #########\r\n if modeltype == 'Regression':\r\n if not isinstance(X_XGB_test, str):\r\n if log_y:\r\n pred_xgb=np.exp(model.predict(X_XGB_test_enc[columns]))\r\n else:\r\n pred_xgb=model.predict(X_XGB_test_enc[columns])\r\n pred_xgbs = np.vstack([pred_xgbs, pred_xgb])\r\n pred_xgbs = pred_xgbs.mean(axis=0)\r\n if log_y:\r\n score = np.sqrt(mean_squared_log_error(y_test, preds))\r\n else:\r\n score = np.sqrt(mean_squared_error(y_test, preds))\r\n print('RMSE score in fold %d = %s' %(folds+1, score))\r\n else:\r\n if not isinstance(X_XGB_test, str):\r\n pred_xgb=model.predict(X_XGB_test_enc[columns])\r\n pred_proba = model.predict_proba(X_XGB_test_enc[columns])\r\n if folds == 0:\r\n pred_xgbs = copy.deepcopy(pred_xgb)\r\n pred_probas = copy.deepcopy(pred_proba)\r\n else:\r\n pred_xgbs = np.vstack([pred_xgbs, pred_xgb])\r\n pred_xgbs = stats.mode(pred_xgbs, axis=0)[0][0]\r\n pred_probas = np.mean( np.array([ pred_probas, pred_proba ]), axis=0 )\r\n score = balanced_accuracy_score(y_test, preds)\r\n print('Balanced Accuracy score in fold %d = %0.1f%%' %(folds+1, score*100))\r\n 
scores.append(score)\r\n print(' Time taken for training XGB (in minutes) = %0.1f' %(\r\n (time.time()-start_time)/60))\r\n if verbose:\r\n plot_importances_XGB(train_set=X_XGB, labels=Y_XGB, ls=ls, y_preds=pred_xgbs,\r\n modeltype=modeltype)\r\n print(\"Average scores are: \", np.sum(scores)/len(scores))\r\n print('\\nReturning the following:')\r\n print(' Model = %s' %model)\r\n if modeltype == 'Regression':\r\n print(' final predictions', pred_xgbs[:10])\r\n return (pred_xgbs, model)\r\n else:\r\n print(' final predictions', pred_xgbs[:10])\r\n print(' predicted probabilities', pred_probas[:1])\r\n return (pred_xgbs, pred_probas, model)",
"def train_and_eval_scutfbp(train_set_vector, test_set_vector, trainset_label, testset_label, testset_filenames):\n print(\"The shape of training set is {0}\".format(np.array(train_set_vector).shape))\n print(\"The shape of test set is {0}\".format(np.array(test_set_vector).shape))\n reg = linear_model.BayesianRidge()\n reg.fit(train_set_vector, trainset_label)\n\n predicted_label = reg.predict(test_set_vector)\n mae_lr = round(mean_absolute_error(testset_label, predicted_label), 4)\n rmse_lr = round(math.sqrt(mean_squared_error(testset_label, predicted_label)), 4)\n pc = round(np.corrcoef(testset_label, predicted_label)[0, 1], 4)\n print('===============The Mean Absolute Error of Model is {0}===================='.format(mae_lr))\n print('===============The Root Mean Square Error of Model is {0}===================='.format(rmse_lr))\n print('===============The Pearson Correlation of Model is {0}===================='.format(pc))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, './model/BayesRidge_SCUTFBP.pkl')\n print('The regression model has been persisted...')\n\n mkdirs_if_not_exist('./result')\n\n out_result(testset_filenames, predicted_label, testset_label, None, path='./result/Pred_GT_SCUTFBP.csv')\n\n df = pd.DataFrame([mae_lr, rmse_lr, pc])\n df.to_csv('./result/BayesRidge_SCUTFBP.csv', index=False)\n print('The result csv file has been generated...')",
"def fit_xgb(instances, labels, params):\n # model = svm.SVR(**params)\n model = xgb.XGBRegressor(objective=\"reg:squarederror\", **params)\n model.fit(np.array(instances), np.array(labels))\n return model",
"def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))",
"def fit(self, df_train, target, verbose=False):\n # X / y\n y_train = df_train[target]\n X_train = df_train.drop(target, axis=1)\n\n # Sort HPs grid dict by param name (a->z)\n grid_names = sorted(self.grid_param)\n # random sampling : 'n_param_comb' HPS combinations\n # list(it.product(*(self.grid_param[Name] for Name in grid_names))) create all the possible combinations\n if self.comb_seed is not None:\n random.seed(self.comb_seed)\n\n sample_combinations = random.sample(list(it.product(*(self.grid_param[Name] for Name in grid_names))),\n k=self.n_param_comb)\n\n if verbose :\n print('\\033[34m' + 'Random search:', self.n_param_comb, 'HP combs', '\\033[0m')\n print('\\033[34m' + 'Model : ', self.classifier, '\\033[0m')\n\n # for each HP combination :\n for model_idx in range(len(sample_combinations)):\n t_ini_model = datetime.now()\n\n # Model params in dict\n HP_dict = dict(zip(grid_names, sample_combinations[model_idx]))\n\n # instantiate model\n if self.classifier == 'RF': # Classifier Random Forest\n clf = RandomForestClassifier(**HP_dict)\n # elif self.classifier == 'XGBOOST':\n else:\n clf = xgboost.XGBClassifier(**HP_dict)\n\n # disabling bagging\n if not self.bagging:\n\n # model training\n clf_fit = clf.fit(X_train, y_train)\n # features importance\n features_dict = dict(zip(X_train.columns, clf.feature_importances_))\n # outputs\n y_proba = clf_fit.predict_proba(X_train)[:, 1]\n y_pred = clf_fit.predict(X_train)\n\n # enabling bagging\n else:\n # init bagging object with default params\n bag = Bagging(clf, **self.bagging_param)\n # model training\n bag.fit(df_train, target)\n clf_fit = bag.list_model\n # features importance\n features_dict = bag.bag_feature_importance(X_train)\n # classification probas\n y_proba, y_pred = bag.predict(df_train.drop(target, axis=1))\n\n self.d_bagging[model_idx] = bag\n\n # Model evaluation\n eval_dict = classifier_evaluate(y_train, y_pred, y_proba, verbose=0)\n\n # store\n train_model = {'HP': HP_dict,\n 'model': clf_fit,\n 'features_importance': features_dict,\n 'train_output': {'y_proba': y_proba, 'y_pred': y_pred},\n 'train_metrics': eval_dict}\n\n # store model results for each combination\n self.d_train_model[model_idx] = train_model\n\n # Fitted !\n self.is_fitted = True\n\n if verbose:\n t_fin_model = datetime.now()\n print(str(model_idx + 1) + '/' + str(len(sample_combinations)) +\n ' >> {} Sec.'.format((t_fin_model - t_ini_model).total_seconds()))\n\n return self",
"def crossValidation(data, output_variable_name):\r\n X, xt, y, yt = train_test_split(\r\n data.drop(output_variable_name, axis=1), data[output_variable_name], test_size=0.01, random_state=SEED)\r\n\r\n model = pickle.load(open(\"models/lasso.sav\", 'rb'))\r\n lassoCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/ridge.sav\", 'rb'))\r\n ridgeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/decisionTree.sav\", 'rb'))\r\n decTreeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n param = {\r\n 'max_depth': 15,\r\n 'eta': 0.1,\r\n 'objective': 'reg:squarederror',\r\n 'nthread': 16,\r\n \"subsample\": 0.5,\r\n \"colsample_bytree\": 0.5,\r\n 'eval_metric': 'rmse'\r\n }\r\n num_round = XGB_EPOCH_NR\r\n\r\n dtrain = xgb.DMatrix(X, label=y)\r\n xgbCV = xgb.cv(\r\n param,\r\n dtrain,\r\n num_boost_round=num_round,\r\n seed=SEED,\r\n nfold=5,\r\n metrics={'rmse'}\r\n )[\"test-rmse-mean\"][-1:]\r\n\r\n param = {\r\n \"iterations\": 400,\r\n \"learning_rate\": 0.02,\r\n \"depth\": 12,\r\n \"eval_metric\": 'RMSE',\r\n \"random_seed\": 23,\r\n \"bagging_temperature\": 0.2,\r\n \"od_type\": 'Iter',\r\n \"metric_period\": 75,\r\n \"od_wait\": 100\r\n }\r\n\r\n catBoostCV = cv(data, param, fold_count=5, plot=True)\r\n\r\n return lassoCV, ridgeCV, decTreeCV, xgbCV, catBoostCV",
"def make_prediction_classification(logger, run_id, df_train_X, df_train_Y, df_test_X, kf, features=None,\n params=None, n_estimators=10000,\n early_stopping_rounds=100, model_type='lgb',\n is_test=False, seed=42, model=None,\n plot_feature_importance=False, cat_features=None):\n yoof = np.zeros(len(df_train_X))\n yhat = np.zeros(len(df_test_X))\n cv_scores = []\n result_dict = {}\n feature_importance = pd.DataFrame()\n best_iterations = []\n\n # kf = KFold(n_splits=n_splits, random_state=SEED, shuffle=False)\n # kf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)\n\n fold = 0\n for in_index, oof_index in kf.split(df_train_X[features], df_train_Y):\n # Start a counter describing number of folds\n fold += 1\n # Number of splits defined as a part of KFold/StratifiedKFold\n n_splits = kf.get_n_splits()\n logger.info(f'fold {fold} of {n_splits}')\n X_in, X_oof = df_train_X.iloc[in_index].values, df_train_X.iloc[oof_index].values\n y_in, y_oof = df_train_Y.iloc[in_index].values, df_train_Y.iloc[oof_index].values\n\n if model_type == 'lgb':\n lgb_train = lgb.Dataset(X_in, y_in)\n lgb_eval = lgb.Dataset(X_oof, y_oof, reference=lgb_train)\n\n model = lgb.train(\n params,\n lgb_train,\n valid_sets=[lgb_train, lgb_eval],\n verbose_eval=50,\n early_stopping_rounds=early_stopping_rounds,\n feature_name=features,\n categorical_feature=cat_features\n )\n\n del lgb_train, lgb_eval, in_index, X_in, y_in\n gc.collect()\n\n yoof[oof_index] = model.predict(X_oof, num_iteration=model.best_iteration)\n if is_test is False:\n yhat += model.predict(df_test_X.values, num_iteration=model.best_iteration)\n\n logger.info(f'Best number of iterations for fold {fold} is: {model.best_iteration}')\n best_iteration = model.best_iteration\n\n elif model_type == 'xgb':\n xgb_train = xgb.DMatrix(data=X_in, label=y_in, feature_names=features)\n xgb_eval = xgb.DMatrix(data=X_oof, label=y_oof, feature_names=features)\n\n watchlist = [(xgb_train, 'train'), (xgb_eval, 'valid_data')]\n model = xgb.train(dtrain=xgb_train,\n num_boost_round=n_estimators,\n evals=watchlist,\n early_stopping_rounds=early_stopping_rounds,\n params=params,\n verbose_eval=50)\n\n del xgb_train, xgb_eval, in_index, X_in, y_in\n gc.collect()\n\n yoof[oof_index] = model.predict(xgb.DMatrix(X_oof, feature_names=features), ntree_limit=model.best_ntree_limit)\n if is_test is False:\n yhat += model.predict(xgb.DMatrix(\n df_test_X.values, feature_names=features),\n ntree_limit=model.best_ntree_limit)\n\n logger.info(f'Best number of iterations for fold {fold} is: {model.best_ntree_limit}')\n best_iteration = model.best_ntree_limit\n\n elif model_type == 'cat':\n # feature_names accepts only list\n cat_train = Pool(data=X_in, label=y_in, feature_names=features.tolist(), cat_features=cat_features)\n cat_eval = Pool(data=X_oof, label=y_oof, feature_names=features.tolist(), cat_features=cat_features)\n cat_test = Pool(data=df_test_X, feature_names=features.tolist(), cat_features=cat_features)\n\n model = CatBoost(params=params)\n model.fit(cat_train, eval_set=cat_eval, use_best_model=True)\n\n del in_index, X_in, y_in, cat_train\n gc.collect()\n\n yoof[oof_index] = model.predict(cat_eval)\n if is_test is False:\n # yhat += model.predict(df_test_X.values)\n yhat += model.predict(cat_test)\n\n del cat_eval, cat_test\n best_iteration = model.best_iteration_\n logger.info(f'Best number of iterations for fold {fold} is: {best_iteration}')\n\n elif model_type == 'sklearn':\n model = model\n model.fit(X_in, y_in)\n\n yoof[oof_index] = 
model.predict_proba(X_oof)[:, 1]\n if is_test is False:\n yhat += model.predict_proba(df_test_X.values)[:, 1]\n\n # Calculate feature importance per fold\n # TODO : Bolier plate code\n if model_type == 'lgb':\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = features\n fold_importance[\"importance\"] = model.feature_importance()\n fold_importance[\"fold\"] = fold\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n feature_importance.sort_values(by=['importance'], inplace=True)\n elif model_type == 'xgb':\n # Calculate feature importance per fold\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = model.get_score().keys()\n fold_importance[\"importance\"] = model.get_score().values()\n fold_importance[\"fold\"] = fold\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n feature_importance.sort_values(by=['importance'], inplace=True)\n elif model_type == 'cat':\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = model.feature_names_\n fold_importance[\"importance\"] = model.get_feature_importance()\n fold_importance[\"fold\"] = fold\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n feature_importance.sort_values(by=['importance'], inplace=True)\n\n cv_oof_score = roc_auc_score(y_oof, yoof[oof_index])\n logger.info(f'CV OOF Score for fold {fold} is {cv_oof_score}')\n cv_scores.append(cv_oof_score)\n best_iterations.append(best_iteration)\n\n del oof_index, X_oof, y_oof\n gc.collect()\n\n util.update_tracking(run_id, \"metric_fold_{}\".format(fold), cv_oof_score, is_integer=False)\n\n yhat /= n_splits\n\n oof_score = round(roc_auc_score(df_train_Y, yoof), 5)\n avg_cv_scores = round(sum(cv_scores)/len(cv_scores), 5)\n std_cv_scores = round(np.array(cv_scores).std(), 5)\n\n logger.info(f'Combined OOF score : {oof_score}')\n logger.info(f'Average of {fold} folds OOF score {avg_cv_scores}')\n logger.info(f'std of {fold} folds OOF score {std_cv_scores}')\n\n result_dict['yoof'] = yoof\n result_dict['prediction'] = yhat\n result_dict['oof_score'] = oof_score\n result_dict['cv_scores'] = cv_scores\n result_dict['avg_cv_scores'] = avg_cv_scores\n result_dict['std_cv_scores'] = std_cv_scores\n\n util.update_tracking(run_id, \"oof_score\", oof_score, is_integer=False)\n util.update_tracking(run_id, \"cv_avg_score\", avg_cv_scores, is_integer=False)\n util.update_tracking(run_id, \"cv_std_score\", std_cv_scores, is_integer=False)\n # Best Iteration\n util.update_tracking(run_id, 'avg_best_iteration', np.mean(best_iterations), is_integer=False)\n util.update_tracking(run_id, 'std_best_iteration', np.std(best_iterations), is_integer=False)\n\n del yoof, yhat\n gc.collect()\n\n # Plot feature importance\n if (model_type == 'lgb') | (model_type == 'xgb') | (model_type == 'cat'):\n # Not sure why it was necessary. Hence commenting\n # feature_importance[\"importance\"] /= n_splits\n cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\n by=\"importance\", ascending=False)[:50].index\n\n best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\n\n result_dict['feature_importance'] = feature_importance\n result_dict['best_features'] = best_features\n\n logger.info('Training/Prediction completed!')\n return result_dict",
"def cv_fit_xgb_model(model,\n X_train, y_train,\n X_valid, y_valid,\n cv_nfold=5,\n early_stopping_rounds=50,\n missing=np.nan,\n eval_metric='auc',\n scoring=None,\n verbose=True):\n # Train cv\n xgb_param = model.get_xgb_params()\n dtrain = xgb.DMatrix(X_train.values, label=y_train.values, missing=missing)\n cv_result = xgb.cv(\n xgb_param,\n dtrain,\n num_boost_round=model.get_params()['n_estimators'],\n nfold=cv_nfold,\n metrics=[eval_metric],\n early_stopping_rounds=early_stopping_rounds,\n show_progress=False)\n best_n_estimators = cv_result.shape[0]\n model.set_params(n_estimators=best_n_estimators)\n\n # Train model\n model.fit(X_train, y_train, eval_metric=eval_metric)\n\n scorer = get_scorer(scoring)\n # Predict and score training data\n train_score = scorer(model, X_train, y_train)\n # Predict and score validation data\n valid_score = scorer(model, X_valid, y_valid)\n\n # Print model report:\n if verbose:\n print(\"\\nModel Report\")\n print(\"best n_estimators: {}\".format(best_n_estimators))\n print(\"Score (Train): %f\" % train_score)\n print(\"Score (Validation) : %f\" % valid_score)\n\n return best_n_estimators, train_score, valid_score",
"def train(self, X_train, y_train, X_cv=None, y_cv=None, plot_df=False, **kwargs):\n # Normalize features\n self.scaler = StandardScaler().fit(X_train)\n X_norm = self.scaler.transform(X_train)\n # Train and Cross-validate\n log_reg = LogisticRegression(**kwargs)\n if X_cv is not None and y_cv is not None:\n # Train classifier\n self.classifier = log_reg\n self.classifier.fit(X_norm, y_train)\n # Find best decision boundary using CV set\n self.find_decision_boundary(X_cv, y_cv, plot=plot_df)\n else:\n # Cross-validate using training set\n weights = ['balanced']\n # Make list of class weight fractions\n for weight0 in np.logspace(-1.2, -0.8, 10):\n weights.append({0: weight0, 1: 1 - weight0})\n parameters = {'class_weight': weights, 'C': [0.1, 1, 10]}\n self.classifier = GridSearchCV(log_reg, parameters, scoring='f1')\n self.classifier.fit(X_train, y_train)",
"def bagxgb_train(X_train, y_train, size=10, write=False):\n list_models = []\n #d_train = lgb.Dataset(X_train, label=y_train)\n with tqdm(total=size) as pbar:\n for nb in range(size):\n model = xgb.XGBClassifier(max_depth=7, min_child_weight=1, learning_rate=0.01, n_estimators=5000, gamma=0.8, subsample=0.95, colsample_bytree=0.6, reg_alpha=0.0025, objective='binary:logistic', nthread=4, scale_pos_weight=1,\n seed=nb+1)\n model.fit(X_train, y_train)\n list_models.append(model)\n pbar.update()\n return list_models",
"def main():\n\tdata = load_dataset()\n\tdata = normalize_data(data, cols_to_norm)\n\ttrain, test = generate_train_testset(data)\n\n\tX_train = train.drop(['Time', 'EVENT'], axis=1).dropna(axis=0)\n\ty_train = train.dropna(axis=0)['EVENT']\n\n\tX_test = test.drop(['Time', 'EVENT'], axis=1).dropna(axis=0)\n\ty_test = test.dropna(axis=0)['EVENT']\n\n\tmodel = XGBClassifier(n_estimators=1000, random_state=42)\n\tmodel.fit(X_train, y_train)\n\n\tprint(model)\n\n\ty_pred = model.predict(X_test)\n\tpredictions = [round(value) for value in y_pred]\n\n\taccuracy = accuracy_score(y_test, predictions)\n\tprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\n\tf1 = f1_score(y_test, y_pred)\n\tprint(\"F1: %.6f%%\" % (f1))",
"def optimize_xgb(X_train, y_train, max_evals=10, cv=None, scorer='neg_mean_squared_error', seed=42):\n assert cv is not None\n\n space = {\n \"n_estimators\": hp.quniform(\"n_estimators\", 100, 1000, 10),\n \"max_depth\": hp.quniform(\"max_depth\", 1, 8, 1),\n \"learning_rate\": hp.loguniform(\"learning_rate\", -5, 1),\n \"subsample\": hp.uniform(\"subsample\", 0.8, 1),\n \"gamma\": hp.quniform(\"gamma\", 0, 100, 1)\n }\n\n objective_fn = partial(train_xgb,\n X_train=X_train, y_train=y_train, \n scorer=scorer, \n cv=cv,\n seed=seed)\n\n trials = Trials()\n best = fmin(fn=objective_fn,\n space=space,\n algo=tpe.suggest,\n max_evals=max_evals,\n trials=trials)\n\n # evaluate the best model on the test set\n return best, trials",
"def xgb_experiment(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n dtrain = xgb.DMatrix(X_train, label=y_train)\n dtest = xgb.DMatrix(X_test, label=y_test)\n\n param = {'optimizer': 'dart', 'max_depth': 5, 'eta': 0.001,\n 'silent': 1, 'objective': 'multi:softmax', 'num_class': 10}\n watchlist = [(dtest, 'eval'), (dtrain, 'train')]\n num_round = 1000\n bst = xgb.train(param, dtrain, num_round, watchlist, verbose_eval=False)\n preds = bst.predict(dtest)\n labels = dtest.get_label()\n logging.info('error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds))))\n return bst",
"def xgb_train_validate_on_holdout(\n logger, training, validation, predictors, target,\n params, test_X=None, n_estimators=10000, early_stopping_rounds=100,\n verbose_eval=100):\n logger.info(\"Training using XGBoost and validating on holdout\")\n train_X, train_Y, validation_X, validation_Y = __get_x_y_from_training_validation(\n logger, training, validation, predictors, target)\n\n logger.info((f\"Shape of train_X, train_Y, validation_X, validation_Y: \"\n f\"{train_X.shape}, {train_Y.shape}, {validation_X.shape}, {validation_Y.shape}\"))\n\n dtrain = xgb.DMatrix(data=train_X, label=train_Y, feature_names=predictors)\n dvalid = xgb.DMatrix(data=validation_X, label=validation_Y, feature_names=predictors)\n\n watchlist = [(dtrain, 'train'), (dvalid, 'valid_data')]\n bst = xgb.train(\n dtrain=dtrain, num_boost_round=n_estimators,\n evals=watchlist, early_stopping_rounds=early_stopping_rounds,\n params=params, verbose_eval=verbose_eval)\n\n valid_prediction = bst.predict(\n xgb.DMatrix(validation_X, feature_names=predictors),\n ntree_limit=bst.best_ntree_limit)\n\n # Get best iteration\n best_iteration = bst.best_ntree_limit\n\n valid_score = np.sqrt(\n metrics.mean_squared_error(validation_Y, valid_prediction))\n logger.info(f'Validation Score {valid_score}')\n logger.info(f'Best Iteration {best_iteration}')\n\n del watchlist, dtrain, dvalid, train_X, train_Y, validation_X, validation_Y\n gc.collect()\n\n if test_X is not None:\n logger.info(\"Retraining on the entire data including validation\")\n training = pd.concat([training, validation])\n train_X, train_Y = __get_x_y_from_data(logger, training, predictors, target)\n logger.info((f\"Shape of train_X, train_Y: \"\n f\"{train_X.shape}, {train_Y.shape}\"))\n\n dtrain = xgb.DMatrix(data=train_X, label=train_Y, feature_names=predictors)\n dtest = xgb.DMatrix(data=test_X, feature_names=predictors)\n\n bst = xgb.train(\n dtrain=dtrain, num_boost_round=best_iteration, params=params)\n\n logger.info(f\"Predicting on test data: {test_X.shape}\")\n prediction = bst.predict(dtest, ntree_limit=best_iteration)\n return bst, best_iteration, valid_score, prediction\n else:\n return bst, valid_score",
"def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target",
"def model_fit(train_features, train_actuals):\n for name in models.keys():\n est = models[name]\n est_params = params[name]\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5,\n scoring='neg_mean_absolute_error', return_train_score=True)\n gscv.fit(train_actuals, train_features)\n cvres = gscv.cv_results_\n print(cvres)\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\n for mean_score,par in zip(cvres[\"mean_test_score\"],cvres[\"params\"]):\n print(-mean_score, par)",
"def fit(self,\n X_train,\n y_train, \n X_test, \n y_test):\n \n #instantiate path_model_dirs dictionary so we can know where the models are saved\n self.path_model_dirs = {}\n\n for key in self.models_dict.keys():\n \n if self.verbose >=1: print('\\n----',key,'----')\n\n #define model directory\n path_model_dir = _os.path.join(self.path_GridSearchCV_dir, key)\n self.path_model_dirs[key] = path_model_dir\n if self.verbose >=1: print('path_model_dir:',path_model_dir)\n \n model_type = type(self.models_dict[key]['model'])\n if 'sklearn' in str(model_type) or 'xgboost' in str(model_type):\n path_file = _os.path.join(path_model_dir,'model_dict.dill')\n elif 'Net' in key:\n path_file = _os.path.join(path_model_dir,'best_params_.dill')\n\n if self.retrain or _os.path.isfile(path_file)==False:\n self.models_dict[key] = self._single_model_GridSearchCV(self.models_dict[key], \n X_train, y_train, \n X_test, y_test,\n path_model_dir)\n\n else: #reload previously trained model\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n #check kwargs for epochs\n epochs = 100\n for item in self.kwargs.items():\n if 'epochs' in item[0]: epochs = item[1]\n self.models_dict[key] = self.load_NeuralNet(path_model_dir, \n X_train, y_train, \n epochs)\n\n y_pred = self.models_dict[key]['best_model'].predict(X_test)\n\n if 'Net' not in key:\n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)\n else:\n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(X_test, y_test, verbose =0)\n \n if self.verbose >=1:\n print('\\tbest_cv_score:',self.models_dict[key]['best_cv_score'])\n print('\\tbest_pred_score:',self.models_dict[key]['best_pred_score'])\n\n for metric_key in self.metrics.keys():\n if self.metrics[metric_key] !=None:\n try:\n self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)\n print('\\t',metric_key,':',self.models_dict[key][metric_key])\n except Exception as e:\n print('Exception occured for',metric_key,':',str(e))\n\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n model_dict_subset = self.models_dict[key].copy()\n for key in self.models_dict[key].keys():\n if key not in ['y_test','y_pred','best_pred_score'] +list(self.metrics.keys()):\n model_dict_subset.pop(key)"
]
| [
"0.7190715",
"0.7094397",
"0.70745474",
"0.6775865",
"0.6753152",
"0.66384083",
"0.65544754",
"0.65499026",
"0.6505415",
"0.649495",
"0.6494385",
"0.6487969",
"0.6479537",
"0.645025",
"0.6406553",
"0.6400581",
"0.63784343",
"0.63220215",
"0.63135535",
"0.62865824",
"0.62847376",
"0.6279065",
"0.6274939",
"0.6246206",
"0.6243399",
"0.6238331",
"0.6237411",
"0.62364084",
"0.6227217",
"0.620272"
]
| 0.7714557 | 0 |
Leverage IGDB's API to search for game information. | async def gamelookup(self, ctx, *, game_name = None):
if not game_name: return await ctx.send("Usage: `{}gamelookup [game_name]`".format(ctx.prefix))
if not self.access_token or time.time() >= self.expire_time:
if not await self._update_token():
return await ctx.send("I couldn't update my access token :( Make sure the `igdbclientid` and `igdbsecret` are correct in my settings_dict.json!")
# Let's build our search query
search_url = "https://api.igdb.com/v4/games"
data = 'search "{}"; fields name,url,summary,first_release_date,platforms.*,cover.*; limit 10;'.format(game_name.replace('"',"").replace("\\",""))
headers = {"Client-ID":self.clientid,"Authorization":"Bearer {}".format(self.access_token)}
try:
search_data = await DL.async_post_json(search_url,data=data,headers=headers)
except:
return await Message.Embed(
title="Something went wrong searching for that game :(",
color=ctx.author
).send(ctx)
if not search_data:
# Nothing was returned - bail.
return await Message.Embed(
title="Nothing was returned for that search!",
color=ctx.author
).send(ctx)
if len(search_data)==1 and all((x in search_data[0] for x in ("title","status","cause"))):
# Got an error - print it and bail
return await Message.Embed(
title="Something went wrong searching :(",
description="{}: {}".format(search_data[0]["title"],search_data[0]["cause"]),
color=ctx.author
).send(ctx)
# Organize the search data by the closest match
game = FuzzySearch.search(game_name,search_data,"name",1)[0]["Item"]
# Print the results!
await Message.Embed(
title=game["name"],
thumbnail="http:{}".format(game["cover"]["url"].replace("/t_thumb/","/t_cover_big/")),
url=game["url"],
color=ctx.author,
description=game["summary"],
fields=[
{"name":"Release Date", "value": "<t:{}:D>".format(game["first_release_date"])},
{"name":"Platforms", "value":"\n".join(sorted([x["name"] for x in game["platforms"]]))}
]
).send(ctx) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_games_from_database (self):\n r = requests.get (self.url_endpoint)\n if (r.status_code != 200):\n print (\"Failed to get games:\\n\", r.text)\n return r\n \n games = json.loads (r.text)['games']\n return_list = []\n for game in games:\n return_list.append (game['game_state'])\n return return_list",
"def retrieveGames():\n result = cs411_game.getGames()\n return prepJSON(result)",
"def search_results():\n skip = int(flask.request.args.get(\"skip\", \"0\"))\n limit = int(flask.request.args.get(\"limit\", \"20\"))\n\n obj = {}\n\n # query : will be event kit in case of triage information\n uidstr = flask.request.args.get(\"query\", None)\n\n if uidstr == None:\n obj[\"error\"] = \"Missing search ID\"\n\n uidstr = json.loads(uidstr)\n\n obj[\"query\"] = {}\n obj[\"query\"][\"uid\"] = uidstr\n obj[\"clips\"] = []\n states = backend.get_search_sessions()\n obj[\"sessions\"] = []\n for astate in states:\n obj[\"sessions\"].append(str(astate))\n try:\n uid = uuid.UUID(uidstr)\n state = backend.get_iqr_search_state(uid)\n # use the uid of the state and get the information from the database\n col = str(state.uuid)\n obj[\"collection\"] = col\n searchdb[col].ensure_index([(\"model_id\", pymongo.ASCENDING),(\"probability\", pymongo.DESCENDING) ])\n # Force probabilities\n obj[\"positives\"] = list(state.positives)\n obj[\"negatives\"] = list(state.negatives)\n log = \"\"\n for id in state.positives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 1.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 1.0001\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n\n for id in state.negatives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 0.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 0.0\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n obj[\"log\"] = log\n\n allres = searchdb[col].find({\"model_id\" : \"FUSION\"}).sort([(\"probability\", pymongo.DESCENDING)]).skip(skip).limit(limit)\n rank = skip + 1\n for one in allres:\n aclip = {}\n aclip[\"score\"] = one[\"probability\"]\n aclip[\"id\"] = \"HVC\" + str(one[\"clip_id\"]).zfill(6)\n clipobj = db[\"clips\"].find_one({\"id\" : \"HVC\" + str(one[\"clip_id\"]).zfill(6)},{\"duration\" : 1})\n aclip[\"duration\"] = clipobj[\"duration\"]\n aclip[\"rank\"] = rank\n rank = rank + 1\n obj[\"clips\"].append(aclip)\n obj[\"count\"] = len(obj[\"clips\"])\n\n except Exception as e:\n obj[\"error\"] = str(type(e)) + \": \" + str(e)\n return jsonify(obj)\n\n obj[\"next\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid, \"skip\" : skip+limit } )\n return jsonify(obj)",
"def get_steam_info(bot, trigger):\n\n if not trigger.group(2):\n return bot.reply(\"I need a game to look up!\")\n\n user_input = parseargs(trigger.group(2).lower())\n\n query = user_input.get(\"--query\") or user_input[\"extra_text\"]\n region = user_input.get(\"--region\") or \"US\"\n\n search_html = _fetch_search_page(query=query, region=region)\n if not search_html:\n return bot.reply(\"Something went wrong finding that game!\")\n\n fetch_price = False if region == \"US\" else True\n game_data = _parse_html(search_html, fetch_price)\n if not game_data:\n return bot.reply(\"I couldn't find that game.\")\n\n details = _fetch_game_details(game_data['id'], game_data.get('pkg'))\n\n if not details[game_data['id']]['success']:\n LOGGER.error(\"error fetching details\")\n if game_data.get('pkg'):\n # TODO: implement\n game_details = details[game_data['id']]['data']\n else:\n game_details = details[game_data['id']]['data']\n\n reviews = _fetch_game_reviews(game_data['id'], game_data.get('pkg'))\n\n reply = _parse_game(game_data, game_details, reviews)\n\n bot.say(reply, max_messages=2)",
"def playerSearch(self, start, count, level, formation, position, nationality, league, team, minBid, maxBid, minBIN, maxBIN):\n searchstring = \"\"\n cardList = list()\n\n if level != \"\" and level != \"any\":\n searchstring += \"&lev=\" + level\n if formation != \"\" and formation != \"any\":\n searchstring += \"&form=\" + formation\n if position != \"\" and position != \"any\":\n if position == \"defense\" or position == \"midfield\" or position == \"attacker\":\n searchstring += \"&zone=\" + position\n else:\n searchstring += \"&pos=\" + position\n if nationality > 0:\n searchstring += \"&nat=\" + str(nationality)\n if league > 0:\n searchstring += \"&leag=\" + str(league)\n if team > 0:\n searchstring += \"&team=\" + str(team)\n if minBIN > 0:\n searchstring += \"&minb=\" + str(minBIN)\n if maxBIN > 0:\n searchstring += \"&maxb=\" + str(maxBIN)\n if minBid > 0:\n searchstring += \"&micr=\" + str(minBid)\n if maxBid > 0:\n searchstring += \"¯=\" + str(maxBid)\n\n requestor = UrlRequestor(\"https://utas.fut.ea.com/ut/game/fifa13/auctionhouse?type=player&start=\" + str(start) + \"&num=\" + str(count) + searchstring, {'Content-Type': 'application/json', 'Cookie': self.EASW_KEY + \"; \" + self.EASF_SESS + \"; \" + self.FUTPHISHING + \"; \", 'X-UT-SID': self.XUT_SID, 'x-http-method-override': 'GET'}, \"\")\n requestor.open()\n lol = requestor.getReturnData().get('auctionInfo')\n\n for card in lol:\n cardList.append(Card(card, self))\n return cardList",
"def game_info(nsuid: str) -> Game:\n slug = algolia.find_by_nsuid(nsuid)\n url = DETAIL_URL.format(slug=slug)\n\n return _scrap(url)",
"def search(request):\r\n\tinput_text = request.GET.get('search-text', '')\r\n\tgames = Game.objects.filter(name__icontains=input_text)\r\n\treturn render(request, 'home.html', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})",
"def search():\n return {\n \"status\": \"UP\",\n }, 200",
"def search_api():\n query = request.args.get(\"url\", \"\", type=str)\n return_html = str_to_bool(request.args.get(\"result\", \"false\", type=str))\n show_stats = str_to_bool(request.args.get(\"stats\", \"false\", type=str))\n info = str_to_bool(request.args.get(\"info\", \"true\", type=str))\n check_all = str_to_bool(request.args.get(\"checkall\", \"false\", type=str))\n favicon = str_to_bool(request.args.get(\"favicon\", \"false\", type=str))\n return_opml = str_to_bool(request.args.get(\"opml\", \"false\", type=str))\n force_crawl = str_to_bool(request.args.get(\"force\", \"false\", type=str))\n check_feedly = str_to_bool(request.args.get(\"feedly\", \"true\", type=str))\n skip_crawl = str_to_bool(request.args.get(\"skip_crawl\", \"false\", type=str))\n\n g.return_html = return_html\n\n url: URL = validate_query(query)\n\n start_time = time.perf_counter()\n\n search_runner = SearchRunner(\n db_client=db_client,\n check_feedly=check_feedly,\n force_crawl=force_crawl,\n check_all=check_all,\n skip_crawl=skip_crawl,\n )\n feed_list: List[CustomFeedInfo] = search_runner.run_search(url)\n stats = search_runner.crawl_stats\n\n search_time = int((time.perf_counter() - start_time) * 1000)\n stats[\"search_time\"] = search_time\n app.logger.info(\"Ran search of %s in %dms\", url, search_time)\n\n if not feed_list and no_response_from_crawl(stats):\n raise NotFoundError(f\"No Response from URL: {url}\")\n\n result: Dict = {}\n if feed_list:\n try:\n kwargs = {}\n if not info:\n kwargs[\"only\"] = [\"url\"]\n if not favicon:\n kwargs[\"exclude\"] = [\"favicon_data_uri\"]\n\n feed_schema = ExternalFeedInfoSchema(many=True, **kwargs)\n\n feed_list = sorted(feed_list, key=lambda x: x.score, reverse=True)\n dump_start = time.perf_counter()\n result = feed_schema.dump(feed_list)\n dump_duration = int((time.perf_counter() - dump_start) * 1000)\n app.logger.debug(\n \"Schema dump: feeds=%d duration=%dms\", len(result), dump_duration\n )\n stats[\"dump_time\"] = dump_duration\n except ValidationError as err:\n app.logger.warning(\"Dump errors: %s\", err.messages)\n abort(500)\n\n if show_stats:\n result = {\"feeds\": result, \"search_time_ms\": search_time, \"crawl_stats\": stats}\n\n if return_html:\n return render_template(\n \"results.html\",\n feeds=feed_list,\n json=get_pretty_print(result),\n url=url,\n stats=get_pretty_print(stats),\n )\n elif return_opml:\n opml_result = output_opml(feed_list).decode(\"utf-8\")\n return Response(opml_result, mimetype=\"text/xml\")\n\n return jsonify(result)",
"def search():\n query = input('Please enter your search query\\n')\n # For now, we will just print the whole database\n #db_actions.display()\n db_actions.search(query)",
"def find_game(game_name):\n \n conn = psycopg2.connect(\n dbname=DB_NAME, user=USER, host=HOST, password=PASSWORD)\n cur = conn.cursor()\n sql = '''SELECT title, current_price, plus_price, old_price, \n image_link, discount_end_date, psprices_url FROM games WHERE LOWER(title)=LOWER(%s);'''\n cur.execute(sql, (game_name,))\n game_data = cur.fetchone()\n cur.close()\n conn.close()\n return game_data",
"def game_index(req, page):\n\n data = Game.query.get_list('game/index', page,\n req.per_page)\n\n return render_response('game_index.html', **data)",
"def main(cls, args):\n theRepository = CloudGameRepository(\"games.ggp.org/base\")\n beginTime = System.currentTimeMillis()\n theGames = HashMap()\n for gameKey in theRepository.getGameKeys():\n theGames.put(gameKey, theRepository.getGame(gameKey))\n print \"Games: \" + len(theGames)\n endTime = System.currentTimeMillis()\n print \"Time: \" + (endTime - beginTime) + \"ms.\"",
"def games_usage(parsed_args):\n if parsed_args.verb == \"GET\":\n filter_dict = {\n \"game_type\": parsed_args.game_type,\n \"genre\": parsed_args.genre,\n \"keywords\": parsed_args.keywords,\n \"mechanic\": parsed_args.mechanic\n }\n df = get_games(parsed_args.id, filter_dict)\n if parsed_args.function == \"FILTERS\":\n df = get_game_filters(df)\n else:\n df = post_games(df, parsed_args.sort_by, parsed_args.weighting)\n return df",
"def test_get_game(self):\n pass",
"def games():\n games = mongo.db.games.find({})\n \n return render_template('browse_games.html', games=games)",
"def search_for_adaptation():\n\n book_id = 0\n # variables for status results; 0 for no error, 1 for no book found, 2 for no movie found,\n # 3 for no tv show found, 4 for no tv show and movie found\n status_msg = \"\"\n status_num = 0\n\n # if the Random Book button is chosen, then select a random book from the list\n # try to match the book with a movie or tv show until one is found\n if request.args.get('random') == \"1\":\n search_term = data_functions.get_random_book()\n else:\n # if search input is used, then get the search term\n search_term = request.form['search'] # get search term from input box\n\n # Goodreads API functions\n gr_result = API_functions.request_book(search_term) # use function in API_functions.py\n\n # if no book is found, generate status code\n if gr_result[\"total\"] == 0:\n status_msg = \"No matching book found for {0}. Try another.\".format(search_term)\n status_num = 1\n\n # TheMovieDB functions\n movie_result = {} # empty dictionary\n tv_result = {} # empty dictionary\n if status_num == 0: # only continue if there is a book found\n # search for movie\n # use function in API_functions.py\n movie_result = API_functions.request_movie(gr_result[\"name_split\"], gr_result[\"author_name_clean\"], 0)\n\n if movie_result[\"total_results\"] != 0: # if a movie is found, save some of its data\n movie_id = movie_result[\"id\"] # save movie ID\n\n else: # if no movie is found, generate status message\n status_msg = \"No movie found. Try another.\"\n status_num = 2\n\n # search for TV show\n # use function in API_functions.py\n tv_result = API_functions.request_tv_show(gr_result[\"name_split\"], gr_result[\"author_name_clean\"], 0)\n\n if tv_result[\"total_results\"] != 0: # if a tv show is found, save some of its data\n tv_id = tv_result[\"id\"] # save tv ID\n\n else: # if no tv show is found, generate status message\n status_msg = \"No TV Show found. Try another.\"\n status_num = 3\n\n if movie_result[\"total_results\"] == 0 and tv_result[\"total_results\"] == 0:\n # if no movie and tv show found, generate status message.\n # in the case they are found, but not based on the book, generate the same message\n status_msg = \"No adaptation found for {0}. Try another.\".format(search_term)\n status_num = 4\n\n if previous_searches.count(\n gr_result[\"name_split\"]) == 0 and status_num != 4: # only add if book name is not in deque\n if len(previous_searches) == 5: # keep the deque at only five most recent searches\n previous_searches.pop() # remove one if there is already five\n previous_searches.appendleft(gr_result[\"name_split\"]) # add recent search to beginning of deque\n # render the page again with updated information, pass all data to render_template method\n return render_template(\"index.html\", book_id=book_id, book_data=gr_result, movie_data=movie_result,\n tv_data=tv_result, app_name=app_name, search=search_term, status_msg=status_msg,\n status_num=status_num, previous_searches=previous_searches)",
"def search(self, query):",
"def lookup(title):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n response = requests.get(\n f\"http://www.omdbapi.com/?s={title}&apikey=ced7be9a\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # parse response\n try:\n movie = response.json()\n search = movie[\"Search\"]\n search_list = []\n for i in range(len(search)):\n search_prop = {\"title\": search[i][\"Title\"],\n \"year\": search[i][\"Year\"], \n \"poster\": search[i][\"Poster\"],\n \"id\": search[i][\"imdbID\"]}\n search_list.append(search_prop)\n\n return search_list\n\n except (KeyError, TypeError, ValueError):\n return None",
"def query(self):",
"def game_detail(req, game_id=None):\n\n data = Game.query.get(game_id)\n if data is None:\n raise NotFound()\n\n return render_response('game_detail.html', game=data)",
"def do_search(arg):\n result = {'count': 0, 'time': 0, 'records': []}\n try:\n uri, q, k, m = arg\n dqp = Pyro.core.getProxyForURI(uri)\n scoresLen,results,indocids,exdocids = dqp.search(q, k, m)\n result=(scoresLen,results,indocids,exdocids)\n except Exception as e:\n print \"Exception:\", e\n return result",
"def extract_games(self) -> Dict[int, Dict[str, Any]]:\n optadocument = self._get_doc()\n attr = assertget(optadocument, '@attributes')\n matchdata = assertget(optadocument, 'MatchData')\n matches = {}\n for match in matchdata:\n matchattr = assertget(match, '@attributes')\n matchinfo = assertget(match, 'MatchInfo')\n matchinfoattr = assertget(matchinfo, '@attributes')\n game_id = int(assertget(matchattr, 'uID')[1:])\n matches[game_id] = dict(\n # Fields required by the base schema\n game_id=game_id,\n competition_id=int(assertget(attr, 'competition_id')),\n season_id=int(assertget(attr, 'season_id')),\n game_day=int(assertget(matchinfoattr, 'MatchDay')),\n game_date=datetime.strptime(assertget(matchinfo, 'Date'), '%Y-%m-%d %H:%M:%S'),\n # home_team_id=see below,\n # away_team_id=see below,\n # Optional fields\n # home_score=see below,\n # away_score=see below,\n # duration=?\n # referee=?\n # venue=?,\n # attendance=?\n # home_manager=?\n # away_manager=?\n )\n teamdata = assertget(match, 'TeamData')\n for team in teamdata:\n teamattr = assertget(team, '@attributes')\n side = assertget(teamattr, 'Side')\n teamid = assertget(teamattr, 'TeamRef')\n score = assertget(teamattr, 'Score')\n if side == 'Home':\n matches[game_id]['home_team_id'] = int(teamid[1:])\n matches[game_id]['home_score'] = int(score)\n else:\n matches[game_id]['away_team_id'] = int(teamid[1:])\n matches[game_id]['away_score'] = int(score)\n return matches",
"def get(self, request):\n\n queries = request.GET.dict()\n user = UserValidator.validate_user(request.user.id)\n\n if user is None:\n return JsonResponse({\n \"message\": \"Invalid credentials.\",\n }, status=400)\n\n if user is None:\n return JsonResponse({\n \"message\": \"Invalid credentials.\",\n }, status=400)\n\n try:\n game = Game.value_of(queries[\"game\"].lower())\n\n except (KeyError, ValueError, Exception):\n game = None\n\n try:\n sort = queries[\"sort\"].lower()\n\n if sort not in [\"wins\", \"total\",]:\n raise ValueError(\"invalid key value\")\n\n except (KeyError, ValueError, Exception):\n sort = \"wins\"\n\n entries = GameModel.objects.values(\"player\").filter(is_deleted=False)\n\n if game is not None:\n entries = entries.filter(game_played=game)\n game = game.value\n else:\n game = \"All\"\n\n entries = entries.annotate(\n wins=(Count(\"player\", filter=Q(did_win=True))),\n total=(Count(\"player\"))\n )\n\n if sort == \"wins\":\n entries = entries.order_by(\"-wins\")\n elif sort == \"total\":\n entries = entries.order_by(\"-total\")\n\n board = ScoreboardView.get_board_from_db_rows(entries)\n\n token = Token.get_tokens_for_user(user)\n\n return JsonResponse({\n \"game\": game,\n \"board\": board,\n \"access\": token[\"access\"],\n \"refresh\": token[\"refresh\"],\n })",
"def quickSearch():\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n recList = calDB.search(pars.key.matches(\"wf\"))\n print len(recList)\n for idx in range(len(recList)):\n key = recList[idx]['key']\n vals = recList[idx]['vals']\n print key\n for ch in vals:\n\n print ch, vals[ch]\n return",
"def find_all():\r\n data = store.read().items()\r\n return [Game(id=id, **value) for id,value in data]",
"def search(self):\n query = self.get_request_arg(\"query\")\n if query:\n album = self.ctrl.library.search(query)\n return self.resp_from_data(album)\n return self.resp_from_data(\n {\"message\": \"No query parameters specified\"}, 400)",
"def lobbies(self, game: str = None) -> Response:\n\n endpoint = '/api/lobbies'\n if not (game is None):\n query = f'?game={game}'\n else:\n query = None\n\n return self.fetch(endpoint, query)",
"def get_games():\n page: int = int(flask.request.args.get(\"page\", 1))\n size: int = int(flask.request.args.get(\"size\", 10))\n\n request = GetPageRequest(page, size)\n response = minesweeper_service.get_game_page(request)\n return flask.jsonify(response)",
"def test_search(self):\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)"
]
| [
"0.60661846",
"0.6001404",
"0.59874094",
"0.59113",
"0.59061587",
"0.5824826",
"0.577423",
"0.5772616",
"0.57339895",
"0.57207435",
"0.5694939",
"0.5686116",
"0.56730014",
"0.56721807",
"0.5669932",
"0.56376046",
"0.5632637",
"0.56092167",
"0.56059164",
"0.55752486",
"0.5535081",
"0.5533077",
"0.5530708",
"0.5519535",
"0.5519302",
"0.54986274",
"0.5490213",
"0.5478234",
"0.54779613",
"0.54762864"
]
| 0.7406503 | 0 |
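Aside (not part of any dataset record): a minimal sketch of the two transformations used in the embed-building document whose tail appears above — swapping the cover-URL size token t_thumb for t_cover_big and rendering the release date as a Discord <t:...:D> timestamp. The helper names, sample cover path, and timestamp below are hypothetical placeholders, not part of the original code.

def cover_big_url(cover_url):
    # the cover URL is protocol-relative and embeds a size token; swap it and add a scheme
    return "http:" + cover_url.replace("/t_thumb/", "/t_cover_big/")

def discord_release_date(unix_ts):
    # Discord clients render <t:UNIX:D> as a localized date
    return "<t:{}:D>".format(unix_ts)

print(cover_big_url("//images.example.com/covers/t_thumb/co1234.jpg"))
print(discord_release_date(1573171200))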
Function: takes a login as a string and returns the result of compliance validation as an array of strings; an empty array means the login passes | def login_validation(login):
# Argument must be a string
if not isinstance(login, str):
raise TypeError("Argument must be a string")
result = []
# Check for length
if len(login) < 1 or len(login) > 20:
result.append("Login length must be between 1 and 20 symbols")
# Check if has only appropriate letters
if not re.match(r"^[A-Za-z0-9.-]*$", login):
result.append("Only letters, digits, minus and dot are permitted")
# Check first character
if not re.match(r"^[A-Za-z]", login):
result.append("Login must start from letter")
# Check last character
if not re.match(r".*[A-Za-z0-9]$", login):
result.append("Login must end by letter or digit")
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate(self) -> typing.List[str]:\n return jsii.invoke(self, \"validate\", [])",
"def valid_logins():\n login_data = json.load(open('data/login_data.json', 'r', encoding=\"utf8\"))\n return (random.choice(login_data['name']),\n random.choice(login_data['email']),\n random.choice(login_data['password']))",
"def password_validation(pass1,pass2):\n errors = []\n if(pass1 != pass2):\n errors.append(\"Lösenorden matchade inte.\")\n if(len(pass1) < 3):\n errors.append(\"Lösenordet måste vara längre än 3 bokstöver.\")\n \n return errors",
"def SecondPart():\n return passwordChecker(data)",
"def evaluate_password_health(users, print_password=False):\n hasUpperCase = \"[A-Z]\"\n hasLowerCase = \"[a-z]\"\n hasNumbers = \"\\d\"\n hasNonalphas = \"\\W\"\n results = []\n for username, password in users.items():\n # print(\"testing: %s:%s\" % (username, password))\n if print_password:\n printable_pass = password\n else:\n printable_pass = \"\"\n\n rules_dict = {\"username\":username,\"password\":printable_pass,\"Length\":1,\"Capital\":1,\"Lower\":1,\"Digits\":1,\"Symbols\":1}\n\n if len(password) < 8:\n print(\"Policy breach, too short : %s %s\" % (username, printable_pass))\n rules_dict[\"Length\"] = \"0\"\n\n elif len(password) > 8:\n # print(\"larger than 8\")\n # raw_input('asdfasdf')\n breakRules = []\n score = 0;\n bestCase = 4\n # pprint(re.search(hasUpperCase, password))\n\n if not re.search(hasUpperCase, password):\n breakRules.append(\"no upper case\")\n rules_dict[\"Capital\"] = 0\n # print(\"upper\")\n if not re.search(hasLowerCase, password):\n breakRules.append(\"no lower case\")\n rules_dict[\"Lower\"] = 0\n # print(\"lower\")\n\n if not re.search(hasNumbers, password):\n breakRules.append(\"no numbers\")\n rules_dict[\"Digits\"] = 0\n\n # print(\"numbers\")\n\n if not re.search(hasNonalphas, password):\n breakRules.append(\"non symbols\")\n rules_dict[\"Symbols\"] = 0\n\n # print(\"nonalphas\")\n\n score = bestCase - len(breakRules)\n\n # print(\"%s score %s \"%(password,score)) \n # raw_input('asdfasdf')\n if score <3:\n print(\"================\\nPolicy breach: %s:%s %s \" % (username, printable_pass, score ))\n\n for el in breakRules:\n print(\"Broken Rule: %s\"%el)\n\n print(\"================\")\n results.append(rules_dict)\n return results",
"def create_pwd_login_with_all_validation():\r\n msg, status = \"\", False\r\n try:\r\n if g.platform =='android':\r\n \"Empty values for all fields\"\r\n status1 = create_pwd_login_internal('', '')\r\n print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_blank\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print \"one\"\r\n \"short password value\"\r\n status2 = create_pwd_login_internal('1', '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_short\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print \"two\"\r\n \"not matching passwords\"\r\n status3 = create_pwd_login_internal(g.password, '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_do_not_match\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print \"three\"\r\n #\"no hint\"\r\n #status4 = create_pwd_login_internal(g.password, g. password, '')\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint\r\n # verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n #\"hint same as password\"\r\n #status5 = create_pwd_login_internal(g.password, g.password)\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint_same_as_passsword\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n \"Values from global_vars g\"\r\n status6 = create_pwd_login_internal(g.password, g.password)\r\n\r\n status = status1 and status2 and status3 and status6\r\n \r\n else: \r\n \"Empty values for all fields\"\r\n status1 = create_pwd_login_internal('', '')\r\n \r\n print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\r\n \"validate\"\r\n expected_dialogue_title = g.ios_popup_title_error\r\n expected_dialogue_message = g.ios_popup_message_password_blank\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print 'one is completed' \r\n \"short password value\"\r\n status2 = create_pwd_login_internal('1', '')\r\n \"validate\"\r\n expected_dialogue_title = g.ios_popup_title_error\r\n expected_dialogue_message = g.ios_popup_message_password_short\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print'two is completed'\r\n \"not matching passwords\"\r\n #status3 = create_pwd_login_internal(g.password, '')\r\n \"validate\"\r\n # expected_dialogue_title = g.ios_popup_title_error\r\n #expected_dialogue_message = g.ios_popup_message_password_do_not_match\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, 
name_of_control_to_click='popup_default_button')\r\n #print 'three is completed'\r\n\r\n #\"no hint\"\r\n #status4 = create_pwd_login_internal(g.password, g. password, '')\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint\r\n # verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n #\"hint same as password\"\r\n #status5 = create_pwd_login_internal(g.password, g.password)\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint_same_as_passsword\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n \"Values from global_vars g\"\r\n status6 = create_pwd_login_internal(g.password, g.password)\r\n #status = status1 and status2 \r\n\r\n status = status1 and status2 and status6\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg",
"def FirstPart(): \n return passwordChecker_incorrect(data)",
"def are_input_correct(self):\n\n\t\tname_user = self.check_this_input(self.entrada_usr, 100)\n\t\tpass_user = self.check_this_input(self.entrada_pwd, 72)\n\n\t\treturn name_user,pass_user",
"def Validation(self):\n self.user_get = str(self.enter_username.get())\n b = str(self.enter_password.get())\n data_load = open(\"register.txt\", \"rb\")\n test = pickle.load(data_load)\n authentication = {}\n for i in test:\n username = i[\"username\"]\n password = i[\"password\"]\n authentication.update({username: password})\n usrs = []\n for i in authentication:\n usrs.append(i.upper())\n if self.user_get.upper() in usrs:\n if authentication[self.user_get.upper()] == b:\n self.first_login_intrfc()\n else:\n messagebox.showinfo(\"Error\", \"Enter the correct password\")\n self.enter_password.delete(0, END)\n self.enter_password.focus()\n else:\n messagebox.showerror(\"Error\", \"Please provide the right username\")\n self.enter_username.delete(0, END)\n self.enter_password.delete(0, END)\n self.enter_username.focus()",
"def validate_login(name,password):\n\t\n\t#Read the attendance excelsheet check if username and password matched\n\tdf_atten=pd.read_csv(\"datasrc/People.csv\")\n\t# 10006 ultbjxu\n\t\n\tif (df_atten.Username.astype(str).str.contains(name).any() and df_atten.Password.astype(str).str.contains(password).any()):\t\t\n\t\treturn True\n\telse: \n\t\treturn False",
"def validate():",
"def details_not_matching():\n print(\"login details don't match.\")",
"def test_validate_login_info(self):\n assert(PatientService().validate_login_info(self.valid_health_card_nb, self.password) > 0)\n assert(-1 == PatientService().validate_login_info(self.valid_health_card_nb, self.password + \"INVALID\"))",
"def first_login_aux(password: str, password_repeat: str) -> [bool]:\n return [\n pw_is_viable(password),\n compare_digest(password_repeat, password)\n ]",
"def validate_input():\n pass_length = None\n account_name = None\n method = None\n in_value = None\n parser = ArgumentParser()\n parser.add_argument(\"-l\", \"--length\",\n type=int,\n help=\"length of password\")\n\n parser.add_argument(\"-a\", \"--account\",\n help=\"account or site name\")\n\n parser.add_argument(\"-m\", \"--method\",\n help=\"method to take [S|C|R|U|D]\")\n\n parser.add_argument(\"-v\", \"--value\",\n help=\"Do not generate, store this value\")\n\n args = parser.parse_args()\n if args.length:\n pass_length = int(args.length)\n\n if args.account:\n account_name = str(args.account)\n\n if args.method:\n method = str.upper(args.method)\n\n if args.value:\n in_value = str(args.value)\n\n return (pass_length, account_name, method, in_value)",
"async def test_validate_login(hass: HomeAssistant, provider, capsys) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"test-user\", password=\"test-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth valid\\n\"\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"test-user\", password=\"invalid-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth invalid\\n\"\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"invalid-user\", password=\"test-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth invalid\\n\"",
"def validate(self, data):\n user_type = 3\n return validate_login_user(self, data, user_type)",
"def parseArgs ( args ) :\n assert len ( args ) == 5\n loginInfo = []\n for s in args :\n loginInfo.append ( s )\n loginInfo.pop ( 0 )\n assert len ( loginInfo ) == 4\n return loginInfo",
"def validate(self, arg):\n new_values = []\n for i in self.cast(arg):\n# new_values.append(self.checkValues(i))\n new_values.append(self.template.validate(i))\n return new_values",
"def credentialsvalidation(self, username='', loginname='', password='', accounts=[], \\\n check_password=False, options=None):\n username_max_chars = 39 #60\n loginname_max_chars = 39 #60\n password_max_chars = 39 #PASSWORD MAX CHARS\n password_min_chars = 8 #PASSWORD MIN CHARS\n\n password_min_chars = next(iter(self._rdmc.app.select(\\\n 'AccountService.'))).dict['Oem'][self.typepath.defs.oemhp]['MinPasswordLength']\n\n if username != '' and loginname != '':\n for acct in accounts:\n if acct['UserName'] == username or acct['Oem']\\\n [self.typepath.defs.oemhp]['LoginName'] == loginname:\n raise ResourceExists('Username or login name is already in use.')\n\n if len(username) > username_max_chars:\n raise InvalidCommandLineError('Username exceeds maximum length'\\\n '. Use at most %s characters.' % username_max_chars)\n\n if len(loginname) > loginname_max_chars:\n raise InvalidCommandLineError('Login name exceeds maximum '\\\n 'length. Use at most %s characters.' % loginname_max_chars)\n\n if check_password:\n if password == '' or password == '/r':\n raise InvalidCommandLineError('An invalid password was entered.')\n else:\n if len(password) > password_max_chars:\n raise InvalidCommandLineError('Password length is invalid.'\\\n ' Use at most %s characters.' % password_max_chars)\n if len(password) < password_min_chars:\n raise InvalidCommandLineError('Password length is invalid.'\\\n ' Use at least %s characters.' % password_min_chars)",
"def check_auth():",
"def valid_password(password: Text):\n results = Utility.password_policy.test(password)\n if results:\n response = []\n for result in results:\n if isinstance(result, Length):\n response.append(\"Password length must be \" + str(result.length))\n elif isinstance(result, Special):\n response.append(\"Missing \" + str(result.count) + \" special letter\")\n elif isinstance(result, Uppercase):\n response.append(\"Missing \" + str(result.count) + \" uppercase letter\")\n elif isinstance(result, Numbers):\n response.append(\"Missing \" + str(result.count) + \"number\")\n\n if response:\n raise AppException(\"\\n\".join(response))",
"def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False",
"def log_in(customer):\n card_check = input(\"Enter your card number: \")\n password_check = input(\"Enter your PIN:\")\n \n if card_check == customer[0] and password_check == customer[1]:\n print(\"You have successfully logged in!\")\n else:\n print(\"Wrong card number or PIN!\")",
"def iloaccountsvalidation(self, options):\n inputline = list()\n\n try:\n _ = self._rdmc.app.current_client\n except:\n if options.user or options.password or options.url:\n if options.url:\n inputline.extend([options.url])\n if options.user:\n if options.encode:\n options.user = Encryption.decode_credentials(options.user)\n inputline.extend([\"-u\", options.user])\n if options.password:\n if options.encode:\n options.password = Encryption.decode_credentials(options.password)\n inputline.extend([\"-p\", options.password])\n if options.https_cert:\n inputline.extend([\"--https\", options.https_cert])\n else:\n if self._rdmc.app.config.get_url():\n inputline.extend([self._rdmc.app.config.get_url()])\n if self._rdmc.app.config.get_username():\n inputline.extend([\"-u\", self._rdmc.app.config.get_username()])\n if self._rdmc.app.config.get_password():\n inputline.extend([\"-p\", self._rdmc.app.config.get_password()])\n if self._rdmc.app.config.get_ssl_cert():\n inputline.extend([\"--https\", self._rdmc.app.config.get_ssl_cert()])\n\n if not inputline:\n sys.stdout.write('Local login initiated...\\n')\n self.lobobj.loginfunction(inputline)",
"def username_validation(username):\n errors = []\n #Check if Username exists\n if(username_present(username)):\n errors.append(\"Användarnamnet finns redan.\")\n #Username needs to be longer then 3 chars\n if(len(username) <= 3):\n errors.append(\"Användarnamnet mäste vara 3 tecken eller längre.\")\n\n return errors",
"def validate_login(password_hash, password):\n print(password_hash, password)\n return check_password_hash(password_hash, password)",
"def check(self, args):\n self.parent.footer.set_text(\"Checking data...\")\n self.parent.refreshScreen()\n # Get field information\n responses = dict()\n\n for index, fieldname in enumerate(self.fields):\n if fieldname != \"blank\":\n responses[fieldname] = self.edits[index].get_edit_text()\n\n password = responses[\"FUEL_ACCESS/password\"]\n confirm_password = responses.pop(\"CONFIRM_PASSWORD\")\n\n if self.parent.save_only:\n return responses\n\n # Validate each field\n errors = []\n warnings = []\n\n # Passwords must match\n if password != confirm_password and \\\n password != self.defaults['FUEL_ACCESS/password']['value']:\n errors.append(\"Passwords do not match.\")\n\n # Password must not be empty\n if len(password) == 0:\n errors.append(\"Password must not be empty.\")\n\n # Password needs to be in ASCII character set\n try:\n if password.decode('ascii'):\n pass\n except UnicodeDecodeError:\n errors.append(\"Password contains non-ASCII characters.\")\n\n # Passwords should be at least 8 symbols\n if len(password) < 8:\n warnings.append(\"8 symbols\")\n\n # Passwords should contain at least one digit\n if re.search(r\"\\d\", password) is None:\n warnings.append(\"one digit\")\n\n if re.search(r\"[A-Z]\", password) is None:\n warnings.append(\"one uppercase letter\")\n\n if re.search(r\"[a-z]\", password) is None:\n warnings.append(\"one lowercase letter\")\n\n if re.search(r\"[!#$%&'()*+,-@./[\\\\\\]^_`{|}~\" + r'\"]', password) \\\n is None:\n warnings.append(\"one special character\")\n\n if len(errors) > 0:\n log.error(\"Errors: %s %s\" % (len(errors), errors))\n modulehelper.ModuleHelper.display_failed_check_dialog(self, errors)\n return False\n\n if len(warnings) > 0:\n self.parent.footer.set_text(\"Warning: Password should have \"\n \"at least %s.\" % (warnings[0]))\n else:\n self.parent.footer.set_text(\"No errors found.\")\n\n return responses",
"def login_change_pwd_change_with_all_validation():\r\n msg, status = \"\", False\r\n try:\r\n \"Empty values for all fields\"\r\n status1 = login_change_pwd_change_internal('', '', '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_incorrect\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n \r\n \"Correct current password, & Empty values for all other fields\"\r\n status2 = login_change_pwd_change_internal(g.password, '', '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_blank\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n \r\n \"Correct current password, & short new password value\"\r\n status3 = login_change_pwd_change_internal(g.password, '1', '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_short\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n \r\n \"Correct current password, & not matching new passwords\"\r\n status4 = login_change_pwd_change_internal(g.password, g.new_password, '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_do_not_match\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n \r\n #\"Correct current password, & matching correct new password & no hint\"\r\n #status5 = login_change_pwd_change_internal(g.password, g.new_password, g.new_password)\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n \r\n #\"Correct current password, & hint same as new password\"\r\n #status6 = login_change_pwd_change_internal(g.password, g.new_password, g.new_password)\r\n # \"validate\"\r\n # expected_dialogue_title = g.popup_title_error\r\n # expected_dialogue_message = g.popup_message_password_hint_same_as_passsword\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n \"Values from global_vars g\"\r\n status7 = login_change_pwd_change_internal(g.password, g.new_password, g.new_password)\r\n if status7 is True:\r\n #print 'old_password: ' + g.old_password\r\n print 'password: ' + g.password\r\n #print 'password_hint: ' + g.password_hint\r\n print 'new_password: ' + g.new_password\r\n #print 'new_password_hint: ' + g.new_password_hint\r\n\r\n #g.old_password = g.password\r\n g.password = g.new_password\r\n #g.password_hint = g.new_password_hint\r\n\r\n print '*****global vars updated*****'\r\n #print 'old_password: ' + g.old_password\r\n print 'password: ' + g.password\r\n # print 'password_hint: ' + g.password_hint\r\n print 'new_password: ' + g.new_password\r\n #print 'new_password_hint: ' + g.new_password_hint\r\n else:\r\n g.old_password = ''\r\n\r\n status = status1 and status2 and status3 and status4 and status7\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return 
status, msg",
"def perform_credential_validation(user_name_password):\n\n is_user_valid = False # Logged in user valid flag\n validation_message = \"\" # Error! user name and password entry.\n try:\n # Use default white space delimiter.\n user_name_str, password_str = user_name_password.split()\n\n # perform user name and password validation.\n is_user_valid = is_logged_in_user_valid(user_name_str, password_str)\n if is_user_valid: # User valid - Success\n validation_message = LoginConstants.EMR_LOGIN_SUCCESS_MESSAGE\n else: # User in-valid - Failure.\n validation_message = \\\n LoginConstants.EMR_LOGIN_INVALID_CREDENTIALS_MESSAGE\n except ValueError: # Error thrown in case there is no white space.\n validation_message = LoginConstants.CREDENTIAL_USER_ENTRY_ERROR_MESSAGE\n\n return is_user_valid, validation_message # Tuple - validation and message"
]
| [
"0.61099607",
"0.59491867",
"0.59310037",
"0.57885253",
"0.5766248",
"0.5730974",
"0.561032",
"0.5571905",
"0.5527034",
"0.55210996",
"0.54881525",
"0.5455377",
"0.5399907",
"0.536307",
"0.53402466",
"0.53250456",
"0.5280993",
"0.5265969",
"0.52527785",
"0.52511543",
"0.5234907",
"0.5228131",
"0.5219071",
"0.5218849",
"0.52084416",
"0.5193351",
"0.5178091",
"0.51631397",
"0.5155767",
"0.51485026"
]
| 0.66098934 | 0 |
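Aside (not part of any dataset record): a minimal usage sketch for the login_validation document above. It assumes that function is defined in the same module; import re is included because the validator relies on re.match. The sample logins are illustrative only.

import re  # required by the regex checks inside login_validation

# sample calls against the login_validation function from the row above
for candidate in ["alice-01", "1badstart", "name_with_underscore", "endswithdash-"]:
    problems = login_validation(candidate)
    print(candidate, "->", "pass" if not problems else "; ".join(problems))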
Read and process the data stored in the input Excel file, then insert each entry into the DB | def importXlsxIntoDb(input):
#import global variable
global UPLOAD_ID
global PATIENT_NUM
global DATABASE
connection = db.create_connection(DATABASE)
xlsx = pd.read_excel(input)
#looping on each row
print(" - Importing data in DB", end = '')
for index, row in xlsx.iterrows():
if (pd.isna(row['DATE_MORT']) == False):
DEATH_DATE = row['DATE_MORT']
DEATH_CODE = 1
else :
DEATH_DATE = None #insert null in db
DEATH_CODE = 0
if (pd.isna(row['NOM_JEUNE_FILLE']) == False):
MAIDEN_NAME = row['NOM_JEUNE_FILLE']
else:
MAIDEN_NAME = None
db.insert_patient(connection, (PATIENT_NUM, row['NOM'], row['PRENOM'], row['DATE_NAISSANCE'], row['SEXE'], MAIDEN_NAME, row['ADRESSE'], row['TEL'], row['CP'], row['VILLE'], DEATH_DATE, row['PAYS'], DEATH_CODE, UPLOAD_ID))
db.insert_patient_ipphist(connection, (PATIENT_NUM, row['HOSPITAL_PATIENT_ID'], "export_patient.xlsx", 0, UPLOAD_ID))
PATIENT_NUM = PATIENT_NUM + 1
UPLOAD_ID = UPLOAD_ID + 1
if (index % 100 == 0):
print(".", end = '')
#commit the changes to db
connection.commit()
#close the connection
connection.close()
print("\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Excel_Load_Data( self, ExcelFilename ):\n pass",
"def import_excel(self):\n self.ensure_one()\n if self.file_import:\n filecontent = base64.b64decode(self.file_import)\n try:\n # Todo: import excel\n input = cStringIO.StringIO()\n input.write(filecontent)\n wb = open_workbook(file_contents=input.getvalue())\n problem_emails = {\"inserted_names\": [],\n \"inserted_emails\": [],\n \"invalid_emails\": [],\n \"duplicate_names\": [],\n \"duplicate_emails\": []}\n for sheet in wb.sheets():\n try:\n self.insert_db(sheet, wb, problem_emails)\n except Exception as e:\n raise (str(e))\n\n except:\n # todo: import csv\n wb = filecontent.split('\\r\\n')\n for line in range(1, len(wb) - 1):\n line_data = wb[line].split(',')\n self.crete_line(line_data[0], line_data[1])\n\n if problem_emails['invalid_emails']:\n raise except_orm(_('Invalid Email Format Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['invalid_emails']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_names']:\n raise except_orm(_('Duplicate Name Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_names']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_emails']:\n raise except_orm(_('Duplicate Email Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_emails']))) + '\\n\\n Please check and try again.'))\n\n return {\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'shipmaster.invitation',\n 'res_id': self.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }",
"def load_data(self):\n df= self.read_file()\n for row,col in df.iterrows():\n Employeeid = int(col['Empolyeeid'])\n Employee_Name = col['Employee_Name']\n Age = col['Age']\n Salary = col['Salary']\n self.table.put_item(\n Item={\n \"Employeeid\":Employeeid,\n \"Employee_Name\": Employee_Name,\n \"Age\": Age,\n \"Salary\": Salary\n }\n )\n return True",
"def importItem(file_path):\n\n #Ouverture du fichier\n rb = open_workbook(file_path)\n r_sheet = rb.sheet_by_index(0)\n\n for row_index in range (1, r_sheet.nrows):\n #Hydratation or get Supplier Model\n item_supplier= r_sheet.cell(row_index, 4).value\n item_supplier, created = Supplier.objects.get_or_create(name=item_supplier)\n\n #Hydratation or get Category Model\n current_category = r_sheet.cell(row_index, 0).value\n item_category, created = Category.objects.get_or_create(name=current_category)\n\n #Hydratation Item\n item_name = r_sheet.cell(row_index, 1).value\n item_ref = current_supplier= r_sheet.cell(row_index, 3).value\n item_quantity = r_sheet.cell(row_index, 2).value\n item, created = Item.objects.get_or_create(ref=item_ref, name=item_name, category=item_category, supplier=item_supplier, quantity=item_quantity)",
"def import_data(self):\n\n self.worksheet = (\n xlrd.open_workbook(filename=self.source).sheet_by_index(0)\n )\n # Import conversion data from worksheet and store as scipy arrays\n self.T_exp = np.array(\n self.worksheet.col_values(0, start_rowx=4, end_rowx=None)\n ) + 273.15\n self.HCout_raw = np.array(\n self.worksheet.col_values(4, start_rowx=4, end_rowx=None)\n )\n self.HCin_raw = np.array(\n self.worksheet.col_values(8, start_rowx=4, end_rowx=None)\n )\n self.eta_exp = (\n (self.HCin_raw - self.HCout_raw) / self.HCin_raw\n )\n self.T_model = np.linspace(\n self.T_exp[0] - 50, self.T_exp[-1] + 50, 25\n )\n self.T_array = self.T_model",
"def import_excel(self, filepath_excel,database_type):\n if database_type == \"render\":\n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO render_information (\n object_type,\n name,\n radius,\n polar_angle_min,\n polar_anglel_max,\n polar_angle_segments,\n polar_angle_random_rad,\n azimuth_angle_min,\n azimuth_angle_max,\n azimuth_angle_segments,\n azimuth_angle_random_rad,\n tracking_obj,\n segmentation\n )\n VALUES (\n :object_type,\n :name,\n :radius,\n :polar_angle_min,\n :polar_anglel_max,\n :polar_angle_segments,\n :polar_angle_random_rad,\n :azimuth_angle_min,\n :azimuth_angle_max,\n :azimuth_angle_segments,\n :azimuth_angle_random_rad,\n :tracking_obj,\n :segmentation\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"render data addet from excel file\")\n except :\n print(\"adding render data from excel file failed\")\n\n elif database_type == \"object\":\n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO object_information (\n obj_filepath,\n obj_name,\n obj_type,\n obj_scale_factor,\n obj_type,\n obj_location_x,\n obj_location_y,\n obj_location_z,\n obj_rotation_x,\n obj_rotation_y,\n obj_rotation_z,\n obj_amount_percent,\n obj_material_path,\n obj_point_in_time,\n maximum_random_rotation_degree_z,\n maximum_random_translation,\n random_amount\n )\n VALUES (\n :obj_filepath,\n :obj_name,\n :obj_type,\n :obj_scale_factor,\n :obj_type,\n :obj_location_x,\n :obj_location_y,\n :obj_location_z,\n :obj_rotation_x,\n :obj_rotation_y,\n :obj_rotation_z,\n :obj_amount_percent,\n :obj_material_path,\n :obj_point_in_time,\n :maximum_random_rotation_degree_z,\n :maximum_random_translation,\n :random_amount\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n print(csv_reader_object)\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"object data added from excel file\")\n except :\n print(\"adding object data from excel file failed\")\n\n else:\n print(\"no Database found, maybe check spelling in method call??\")\n return",
"def import_data_model(directory):\n analyses = pd.read_excel(directory + 'analyses.xlsx')\n analytes = pd.read_excel(directory + 'analytes.xlsx')\n for index, analysis in analyses.iterrows():\n analyte_data = []\n analyte_names = analysis.analyte_keys.split(', ')\n for analyte_key in analyte_names:\n analyte_item = analytes.loc[analytes.key == analyte_key]\n analyte_data.append(analyte_item.to_dict(orient='records'))\n analyses.at[index, 'analytes'] = analyte_data \n analyses_data = analyses.to_dict(orient='records')\n for index, values in analyses_data.iterrows():\n doc_id = str(values.key)\n doc_data = values.to_dict()\n ref = ''\n update_document(ref, doc_data)\n # doc_data = data.to_dict(orient='index')\n # data_ref = create_reference(db, ref)\n # data_ref.document(doc_id).set(doc_data, merge=True)\n # data_ref.set(doc_data, merge=True)\n\n return NotImplementedError",
"def import_heat_data(self):\n worksheet = (\n xlrd.open_workbook(filename=self.filename_heat).sheet_by_index(0)\n ) \n self.exh.corrected_reading = np.array(worksheet.col_values(0,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.datum = worksheet.cell_value(2,4) # manometer datum (in) \n self.exh.pressure_drop = ( (self.exh.corrected_reading -\n self.exh.datum) * 2. * self.H2O_kPa ) \n # pressure drop across heat exchanger (kPa)\n self.cummins.torque = np.array(worksheet.col_values(1,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))\n self.exh.T_inlet_array = np.array(worksheet.col_values(2,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.T_outlet_array = np.array(worksheet.col_values(3,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_inlet_array = np.array(worksheet.col_values(5,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_outlet_array = np.array(worksheet.col_values(4,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))",
"def get_entities(self, xlsx_file):\n \n self.logger.info(\"Loading workbook: {}\".format(xlsx_file))\n\n # report on total rows.\n total_rows = sum(1 for row in self._get_rows(xlsx_file))\n self.logger.info(\"Found {} rows.\".format(total_rows))\n \n # get row data and modified checksum.\n entity_rows = self._get_rows(xlsx_file)\n hash_prefix = self._get_hash_prefix(xlsx_file)\n\n # get header.\n row = next(entity_rows)\n header = [cell.value for cell in row]\n header= tuple(header)\n\n # if header is invalid, return empty generator.\n if not self._validate_header(header):\n msg = \"Invalid header row: {}.\".format(header)\n raise self.SchemaError(msg)\n\n # create generator for each row.\n def entities():\n \n # start numbering at 2 because the header has already been read.\n row_number = 2\n \n # yield a dict for each non-header row.\n header_range = range(0,len(header))\n for row in entity_rows:\n\n self.logger.info(\"Processing row {}.\".format(row_number))\n \n # get row values.\n row = [cell.value for cell in row]\n row = [cell.strip() if isinstance(cell, str) else cell for cell in row]\n row = [(header[i], row[i]) for i in header_range]\n row = dict(row)\n\n # run row validator.\n row_valid = self._validate_row(row)\n if not row_valid:\n self.logger.warning(\"Skipping row {}; row is invalid.\".format(\n row_number))\n row_number += 1\n continue\n \n # alter data as needed and create dict for row.\n row[\"identifier\"] = hash_prefix + row[\"identifier\"]\n manifestations = self.get_manifestations(row[\"pattern\"], \n row[\"case_sensitive\"], row_number)\n row[\"manifestations\"] = [\"\".join(m) for m in manifestations]\n \n # yield row as dict.\n row_number += 1\n yield(row)\n\n return entities()",
"def __init__(self, input_file):\n self.file_name = input_file\n # Import the excel file:\n self.xlfile = ExcelFile(self.file_name) # to retrieve & work w/ input",
"def process_data(self, excel_file, output_title):\n df = read_excel(excel_file)\n labels = df.columns.values.tolist()\n title = f\"{labels[1]} vs {labels[0]}\"\n data = []\n for label in labels:\n data.append(df[label].values.tolist())\n\n for callback in self.callbacks:\n callback(title, data, labels, output_title)",
"def processFile(fileName):\n\n cursor = db.cursor()\n cursor.execute(\"BEGIN\")\n institutionCounter = 0\n\n def submitInstitute(bankCode, bankName, bic):\n try:\n cursor.execute(\"INSERT INTO institutions (bankCode, bic, name) VALUES(?,?,?)\", (bankCode, bic, bankName))\n except sqlite3.Error as e:\n print(\"Sorry , Error: {0} while inserting {1} ({2})\".format(e.args[0], bankCode, bic))\n\n book = xlrd.open_workbook(fileName, 'r')\n sheet = book.sheet_by_index(0)\n\n for row_index in range(2, sheet.nrows):\n submitInstitute(sheet.cell(row_index,0).value, sheet.cell(row_index,2).value, sheet.cell(row_index,1).value)\n institutionCounter += 1\n\n return institutionCounter",
"def read_xls_csv(self):\n filename = str(self.filename)\n location_stock_id = self.location\n vals = []\n inventory_create = self.env['stock.inventory']\n\n if (filename.endswith('xls') or filename.endswith('xlsx')):\n wb = xlrd.open_workbook(\n file_contents=base64.decodestring(self.xls_file))\n sheet = wb.sheet_by_index(0)\n\n for i in range(1, sheet.nrows):\n row = sheet.row_values(i)\n firstrow = sheet.row_values(0)\n firstrow = [str(item).lower() for item in firstrow]\n pid = row[firstrow.index('id')]\n quantity = row[firstrow.index('quantity')]\n product_obj = self.env['product.product'].search(\n [('id', '=', pid)])\n vals.append({\n 'product_code': product_obj.default_code,\n 'product_qty': quantity,\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n\n else:\n xls_file = base64.b64decode(self.xls_file)\n file_input = cStringIO.StringIO(xls_file)\n file_input.seek(0)\n rows = []\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n for row in reader:\n rows.append(row)\n for row in rows[1:]:\n rows[0] = [str(item).lower() for item in rows[0]]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[rows[0].index('id')])])\n vals.append({\n 'product_code': row[rows[0].index('id')],\n 'product_qty': row[rows[0].index('quantity')],\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n return {\n 'name': 'Stock import',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'res_id': self.id,\n 'view_mode': 'tree,form',\n 'res_model': 'stock.inventory',\n 'target': 'current',\n }",
"def excelSheetData(request, *args, **kwargs):\n if request.method == \"POST\":\n try:\n file = request.FILES.get('file')\n # Reading file using pandas\n data = pd.read_excel(file, dtype={'Column1': str, 'Column10': str})\n\n # data = pd.read_excel(file, dtype={'ASOF': str, 'USERNAME': srf})\n except Exception as e:\n logger = logging.getLogger('root')\n logger.error('Unable to upload file', exc_info=True, extra={\n 'exception': e,\n })\n return JsonResponse({'status': 500, 'exception': e})\n\n # uploadExcelSheet.delay(data)\n uploadExcelSheet(data)\n\n return JsonResponse({'status': 200})",
"def load_funding_resource(input_file, sheet_name=None):\n df = read_excel(input_file, sheet_name=sheet_name)\n\n for source_name in df[df.columns[1]]:\n src_ = session.query(FundingSource).filter(\n FundingSource.source == source_name).first()\n if src_ is None:\n funding_source = FundingSource(source=source_name)\n session.add(funding_source)\n session.commit()\n\n '''\n staff = Staff(\n #staff_fname = row['first name'],\n #staff_lname = row['last name'],\n staff_email = row['all main researcher email']\n )\n department = Department(\n department_name=row['all department']\n )\n '''\n #session.add(staff)\n #session.add(department)",
"def updateData(self, data, filename):\r\n self.data = data\r\n #self.filename = filename + \".xlsx\"\r\n self.index = \"index.txt\"\r\n self.colum = ('A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R'\r\n ,'S','T','U','V')\r\n self.cf = XLC.checkfile(self, self.index )\r\n #print(self.cf)\r\n if self.cf == True:\r\n self.resultFile = open(self.index, 'r')\r\n self.row = int(self.resultFile.read())\r\n #print(self.row)\r\n self.resultFile.close()\r\n if self.cf == False:\r\n print(\"[WARNING] index.txt file is missing. Neglect this message if this program is running 1st time on your system\")\r\n self.row = 2\r\n \r\n # Looding the document\r\n self.filename = filename + \".xlsx\"\r\n wb = xl.load_workbook(self.filename)\r\n sheet = wb.sheetnames\r\n sheet = wb.active\r\n\r\n if sheet['A2'].value == None:\r\n self.row = 2\r\n\r\n self.data = self.data.split(\",\")\r\n for i in range(0, len(self.data)):\r\n sheet[self.colum[i]+str(self.row)] = self.data[i]\r\n #print(self.colum[i]+str(self.row))\r\n\r\n self.row = self.row + 1\r\n self.resultFile = open('index.txt', 'w')\r\n self.resultFile.write(str(self.row))\r\n self.resultFile.close()\r\n wb.save(self.filename)\r\n return",
"def import_flow_data(self):\n worksheet = (\n xlrd.open_workbook(filename=self.filename_flow).sheet_by_index(0) )\n self.corrected_reading = np.array(worksheet.col_values(1,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))\n # corrected manometer reading (in) for downstream side only\n self.time = np.array(worksheet.col_values(3,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))\n # time (s) for trash can to fill with exhaust\n self.T = ( np.array(worksheet.col_values(5,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) + 273.15\n ) # temperature (K) of gas in trash can flow meter\n\n self.flow_trash = self.trash_volume / self.time\n # exhaust flow (m^3/s) into trash can\n\n self.pressure_drop = self.corrected_reading * self.H2O_kPa\n # pressure drop (kPa) through HX",
"def validate_file(self):\n filename = str(self.filename)\n self.is_error = False\n self.message = \"\"\n if not (filename.endswith('xls') or filename.endswith('xlsx') or filename.endswith('csv')):\n self.message += \"Please Import only '.xls' or '.xlsx' or '.csv' File.\"\n elif (filename.endswith('xls') or filename.endswith('xlsx')):\n column_list = ['id', 'quantity']\n\n wb = xlrd.open_workbook(\n file_contents=base64.decodestring(self.xls_file))\n sheet = wb.sheet_by_index(0)\n row = sheet.row_values(0)\n invalid_cols = []\n import pdb;pdb.set_trace()\n for key in row:\n key = key.encode('ascii', 'ignore')\n if key.lower() not in column_list:\n invalid_cols.append(key)\n if invalid_cols:\n self.message = \"Invalid Column Name %s\", ', '.join(\n invalid_cols)\n if not self.message:\n for i in range(1, sheet.nrows):\n row = sheet.row_values(i)\n firstrow = sheet.row_values(0)\n firstrow = [str(item).lower() for item in firstrow]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[firstrow.index('id')])])\n if not row[firstrow.index('quantity')]:\n self.message += \"Enter Quantity In Your Excel File\"\n if not product_obj:\n self.message += \"Enter Valid Product Id In Your Excel File\"\n else:\n column_list = ['id', 'quantity']\n xls_file = base64.b64decode(self.xls_file)\n file_input = cStringIO.StringIO(xls_file)\n file_input.seek(0)\n rows = []\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n for row in reader:\n rows.append(row)\n firstrow = [str(item).lower() for item in rows[0]]\n match = [column for column in firstrow if column not in column_list]\n if match:\n self.message += \"Enter Valid Column Name\"\n if not self.message:\n for row in rows[1:]:\n rows[0] = [str(item).lower() for item in rows[0]]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[rows[0].index('id')])])\n if not row[rows[0].index('quantity')]:\n self.message += \"Enter Quantity In Your Excel File\"\n if not product_obj:\n self.message += \"Enter Valid Product Id In Your Excel File\"\n\n if self.message:\n self.is_error = True\n if not self.is_error:\n self.is_validate = True\n return {\n 'res_id': self.id,\n 'view_id': self.env.ref('import_stock_inventory_drc.import_stock_inventory_view_wizard_form').ids,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'stock.inventory.wizard',\n 'type': 'ir.actions.act_window',\n 'target': 'new'\n }",
"def job_ingestion(self):\n\n # Map a file type to a DAO\n if self.file_type == SmsFileTypes.SAMPLE_LIST:\n self.file_dao = SmsSampleDao()\n elif self.file_type == SmsFileTypes.N0:\n self.file_dao = SmsN0Dao()\n else:\n self.file_dao = None\n\n if self.file_dao:\n # look up if any rows exist already for the file\n records = self.file_dao.get_from_filepath(self.file_path)\n\n if records:\n logging.warning(f'File already ingested: {self.file_path}')\n return\n\n data_to_ingest = self.read_data_from_cloud_manifest(self.file_path)\n\n self.validate_columns(data_to_ingest[\"fieldnames\"], self.file_dao)\n\n self.write_data_to_manifest_table(data_to_ingest[\"rows\"])",
"def load_from_excel(self, excel_fp: str):\n # TODO:\n pass",
"def _fill_workbook_data(self, workbook, record, data_dict):\n if not record or not data_dict:\n return\n try:\n # variable to store data range of each worksheet\n worksheet_range = {}\n for sheet_name in data_dict:\n ws = data_dict[sheet_name]\n st = False\n if isinstance(sheet_name, str):\n st = get_sheet_by_name(workbook, sheet_name)\n elif isinstance(sheet_name, int):\n st = workbook.worksheets[sheet_name - 1]\n if not st:\n raise ValidationError(\n _('Sheet %s not found!') % sheet_name)\n # ================ HEAD ================\n self._fill_head(ws, st, record)\n # ============= Line Items =============\n # Check for groupby directive\n groupbys = {key: ws[key] for key in\n filter(lambda l: l[0:9] == '_GROUPBY_', ws.keys())}\n all_rc, max_row, tail_fields = self._fill_lines(ws, st, record,\n groupbys)\n # ================ TAIL ================\n self._fill_tail(ws, st, record, tail_fields)\n\n # prepare worksheet data range, to be used in BI funtions\n if all_rc:\n begin_rc = min(all_rc)\n col, row = split_row_col(\n max(sorted(all_rc, reverse=True), key=len))\n end_rc = '%s%s' % (col, max_row)\n worksheet_range[sheet_name] = '%s:%s' % (begin_rc, end_rc)\n\n # ================ BI Function ================\n self._fill_bi(workbook, data_dict, worksheet_range)\n\n except KeyError, e:\n raise except_orm(_('Key Error!'), e)\n except IllegalCharacterError, e:\n raise except_orm(\n _('IllegalCharacterError!\\n'\n 'Some exporting data may contain special character'), e)\n except Exception, e:\n raise except_orm(_('Error filling data into excel sheets!'), e)",
"def nodes_data_excel_parser(excel_path,**kwargs):\n excel_parser_engine = kwargs.get(\"engine\",\"xlrd\")\n\n # Check if excel file exists\n if not excel_path or not os.path.isfile(excel_path):\n raise FileNotFoundError(\n \"Excel data file {} not found.\".format(excel_path)\n )\n\n xls = pd.ExcelFile(excel_path,engine=excel_parser_engine)\n\n try:\n # TODO for sheet in xls.sheet_names:\n # nodes_data[sheet] = xls.parse(sheet)\n nodes_data = {\n \"buses\": xls.parse(\"buses\").replace({np.nan:None}),\n \"commodity_sources\": xls.parse(\"commodity_sources\").replace({np.nan:None}),\n \"transformers\": xls.parse(\"transformers\").replace({np.nan:None}),\n \"transformers_chp\": xls.parse(\"transformers_chp\").replace({np.nan:None}),\n \"renewables\": xls.parse(\"renewables\").replace({np.nan:None}),\n \"demand\": xls.parse(\"demand\").replace({np.nan:None}),\n \"storages\": xls.parse(\"storages\").replace({np.nan:None}),\n \"powerlines\": xls.parse(\"powerlines\").replace({np.nan:None}),\n \"timeseries\": xls.parse(\"time_series\").replace({np.nan:None}),\n \"financial\":xls.parse(\"financial\").replace({np.nan:None})\n }\n except KeyError:\n err_msg = \"Excel file must contains: [buses, commodity_sources, transformers, renewables, demand, storages, powerlines, financial and timeseries].\\n\\\n The following sheets are found: {}\".format(xls.sheet_names)\n raise Exception(err_msg)\n\n # set datetime index\n nodes_data[\"timeseries\"].set_index(\"timestamp\", inplace=True)\n nodes_data[\"timeseries\"].index = pd.to_datetime(\n nodes_data[\"timeseries\"].index\n )\n\n logger.info(\"Data from Excel file {} imported in as nodes data.\".format(excel_path))\n\n return nodes_data",
"def load_inputs():\n\n print \"Daily inputs\"\n\n Daily_Input.query.delete()\n\n\n for row in open(\"seed_data/u.input.txt\"):\n row = row.rstrip()\n input_id, date, user_id, sleep, exercise, screen_time, well_being_rating = row.split(\"|\")\n\n date = datetime.strptime(date, \"%m-%d-%y\")\n \n daily_input = Daily_Input(input_id=input_id, date=date, user_id=user_id, sleep=sleep, exercise=exercise, screen_time=screen_time, well_being_rating=well_being_rating)\n db.session.add(daily_input)\n\n db.session.commit()",
"def load_expenditures():\n\n Expenditure.query.delete()\n\n with open(expenditure_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n expenditure_data = row.split(\",\")\n print(expenditure_data)\n\n id = expenditure_data[0]\n category_id = expenditure_data[1]\n price = expenditure_data[2]\n date_of_expenditure = expenditure_data[3]\n expenditure_userid = expenditure_data[4]\n where_bought = expenditure_data[5]\n description = expenditure_data[6]\n\n expenditure = Expenditure(\n id = id,\n category_id = category_id,\n price = price,\n date_of_expenditure = get_datetime(date_of_expenditure),\n expenditure_userid = expenditure_userid,\n where_bought = where_bought,\n description = description\n )\n\n db.session.add(expenditure)\n\n db.session.commit()",
"def execute_event(self):\n try:\n with open(self._import_path_input.get(), \"r\", encoding='UTF-8') as raw_data_file, \\\n open(self._export_path_input.get(), \"w\", encoding='UTF-8', newline='') as processed_data_file:\n\n id_dict = dict()\n for row in raw_data_file:\n processing_row = row.strip('\\r\\n')\n row_list = processing_row.split(\",\")\n if 'unknown' in row_list[1:]:\n pass\n elif row_list[0] not in id_dict.keys():\n id_dict[row_list[0]] = row_list[1:]\n else:\n count_dict_null = 0\n for i in id_dict[row_list[0]]:\n if i == '':\n count_dict_null += 1\n count_new_entry_null = 0\n for i in row_list[1:]:\n if i == '':\n count_new_entry_null += 1\n if count_dict_null > count_new_entry_null:\n id_dict[row_list[0]] = row_list[1:]\n\n for key, value in id_dict.items():\n new_row = ''.join(key) + ',' + ','.join(value)\n processed_data_file.write(new_row + \"\\n\")\n tk.messagebox.showinfo('Good News', 'Job Done!')\n except Exception as e:\n tk.messagebox.showerror('error', e)",
"def _import_source_data(self, source_file: str) -> None:\n with open(source_file, 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n self.cell_map.append(\n Cell(\n datamap_id=None,\n cell_key=row['cell_key'],\n cell_value=None, # have no need of a value in dm\n cell_reference=row['cell_reference'],\n template_sheet=row['template_sheet'],\n bg_colour=row['bg_colour'],\n fg_colour=row['fg_colour'],\n number_format=row['number_format'],\n verification_list=None))",
"def upload_sheet(self, request):\n file = self.request.data['file']\n\n # validating requested payload.\n if not file:\n return Response(\"Got no file! Please hit me again with file.\")\n # Only .csv/xls format file are allowed\n if file.name.rsplit('.')[1] == 'csv':\n sheet_as_df = pd.read_csv(file)\n elif file.name.rsplit('.')[1] == 'xls':\n sheet_as_df = pd.read_excel(file)\n else:\n return Response(\"Only .csv/.xls format type allowed for now.\")\n\n # sheet uploading code\n # =============Logic Start================\n header = ['last_name', 'first_name', 'state', 'phone_number']\n df = sheet_as_df\n if not set(header).issubset(df.columns):\n return False, f'Please check uploading sheet matching headers as: {header}'\n # filling empty(NaN) of data-frame entry with 0.0\n df = df.fillna(0)\n from itertools import islice\n batch_size = 100\n while True:\n content_instance = [Content(\n first_name=record['first_name'],\n last_name=record['last_name'],\n state=record['state'],\n phone_number=record['phone_number']\n ) for record in islice(df.to_dict('records'), batch_size)]\n if not content_instance:\n logger.info('Unable to update PhoneBook model with entries.')\n break\n PhoneBook.objects.bulk_create(content_instance, batch_size)\n # =============Logic End==================\n\n return Response('Successfully updated order entry!')",
"def read_excel_file(self):\n self.df = pd.read_excel(str(self.file_path))\n self.data_mat=np.array(self.df).astype(float)",
"def _process_data_file(self, manifest_row):\n # get the file object for the data\n csv_reader = DataReader(meta=self.meta,\n manifest_row=manifest_row,\n load_from=\"file\")\n\n # get file path for storing clean PSV files\n temp_filepath = self._get_temp_filepath(manifest_row=manifest_row)\n\n # validate and clean\n self._load_single_file(table_name=manifest_row['destination_table'],\n manifest_row=manifest_row,\n csv_reader=csv_reader,\n temp_filepath=temp_filepath)",
"def load_user_inputs(file_path_device):\r\n # Transform the .xls database into panda type\r\n excel = pd.ExcelFile(file_path_device)\r\n\r\n # Collect data from a particular .xls tab\r\n site = excel.parse('site', header=0, index_col=0)\r\n metocean = excel.parse('metocean', header=0, index_col=0)\r\n device = excel.parse('device', header=0, index_col=0)\r\n sub_device = excel.parse('sub_device', header=0, index_col=0)\r\n landfall = excel.parse('landfall', header=0, index_col=0)\r\n\r\n # Splits the different dataset through different dict keys()\r\n user_inputs = {'site': site,\r\n 'metocean': metocean,\r\n 'device': device,\r\n 'sub_device': sub_device,\r\n 'landfall': landfall\r\n }\r\n\r\n return user_inputs"
]
| [
"0.6571204",
"0.6529585",
"0.647885",
"0.63984954",
"0.638115",
"0.6332327",
"0.6286695",
"0.61833394",
"0.6131002",
"0.6119043",
"0.6111723",
"0.61092854",
"0.6051651",
"0.59979343",
"0.58864397",
"0.58640933",
"0.58146226",
"0.58035517",
"0.5800213",
"0.5788585",
"0.5772386",
"0.57629573",
"0.57467085",
"0.5745092",
"0.56899804",
"0.56854165",
"0.5677548",
"0.56657785",
"0.56545144",
"0.56472236"
]
| 0.6835037 | 0 |
Read and process all pdf files situated in "./fichiers source/", then inject them into the document table | def pdfProcessing():
global DATABASE
conn = db.create_connection(DATABASE)
DOCUMENT_ORIGIN_CODE = "DOSSIER_PATIENT"
pathFolder = "fichiers source/"
extension = ".pdf"
pdfFileArrayPath = glob.glob(pathFolder + "*" + extension)
print(" - Processing pdf", end="")
for file in pdfFileArrayPath:
text = readFile.readPdfFile(file)
query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)
db.insert_document(conn, query)
print(".", end = '')
#commit the changes to db
conn.commit()
#close the connection
conn.close()
print("\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n mip = parametros()\n mir = Reporte(CURRENT_PATH, mip.debug, mip.overwrite)\n pdfs = mir.obtener()\n if pdfs:\n print(\"Obteniendo nuevos pdf:\")\n for pdf in pdfs:\n print(f\"* {pdf}\")\n\n for file in glob.glob(f\"{CURRENT_PATH}/resources/pdf/*.pdf\"):\n data = mir.parser(file)\n mir.escribir(data)",
"def do_preprocess(pdf_files):\n\n for pdf_file in pdf_files:\n\n base, ext = os.path.splitext(pdf_file)\n \n create_intermediate_files()\n \n # 1) split a pdf file, a page a pdf\n num_pages = pdfutil.split(os.path.join(cwd, pdf_file), DIR_PAGE)\n\n for i in xrange(1, num_pages + 1):\n\n file = '%04d.pdf' % i\n page_pdf = os.path.join(DIR_PAGE, file)\n \n pdfutil.convert_srgb(page_pdf, DIR_SRGB)\n srgb_pdf = os.path.join(DIR_SRGB, file)\n \n pdfutil.convert_vti(srgb_pdf, DIR_VTI)\n vti_pdf = os.path.join(DIR_VTI, file)\n\n pdfutil.convert_tiff(vti_pdf, DIR_TIFF)\n pdfutil.convert_text(vti_pdf, DIR_TEXT)\n\n # merge background pdf files\n pdfutil.merge_to_single_pdf(DIR_TIFF, DIR_BACK, 'back')\n background_pdf = os.path.join(DIR_BACK, 'back.pdf')\n\n # merge foreground pdf files\n output_text_pdf = '%s_text' % base\n pdfutil.merge_to_single_pdf(DIR_TEXT, DIR_TEXT, output_text_pdf)\n foreground_pdf = os.path.join(DIR_TEXT, output_text_pdf + '.pdf')\n pdfutil.export_by_preview(foreground_pdf)\n\n # merge background and foreground\n merged_pdf = os.path.join(cwd, '%s_merge.pdf' % base)\n pdfutil.merge_text_and_back(foreground_pdf, background_pdf, merged_pdf)\n\n final_pdf = '%s_final' % base\n pdfutil.optimize(merged_pdf, final_pdf)\n final_pdf = os.path.join(cwd, final_pdf + '.pdf')\n\n # aggregate what we want\n for f in (foreground_pdf, final_pdf):\n shutil.move(f, DIR_FINAL)\n \n # clean up unused\n os.unlink(merged_pdf) \n cleanup_intermediate_files()",
"def do_single_file_preprocess(pdf_file):",
"def transform(self):\n count=1\n assert len(self.list_folder)>=1 ,\"FILES NOT FOUND\"\n for i,folder in enumerate(self.list_folder):\n path=folder\n for j,pdf in enumerate(os.listdir(path)):\n if pdf!= '.DS_Store':\n self.df.loc[count] = [pdf,folder.split('/')[-2], i+1,None,None]\n \n \"\"\" 0- Read Pdf file \"\"\"\n raw = parser.from_file(os.path.join(path,pdf))\n s = raw['content']\n \n \"\"\" 1- Handle linebreaks to optimize TextBlob.sentences results\"\"\"\n s=self.treat_new_line(s)\n \n \"\"\" 2- Divide text by sentences using TextBlob\"\"\"\n blob=TextBlob(s)\n paragraphs = np.array([str(s) for s in blob.sentences],dtype=str)\n self.parser = []\n self.parser_raw=[]\n p=self.text_processor_pdf(paragraphs)\n \n \"\"\"\n 3- Get rid of bad text data:\n Discard sentences with too long word (16 is the 99% quantile in english)\n Discard sentences with too much upper words (CREDENTIALS, Link, TITLE ..)\n \"\"\"\n index_=[i for i,c in enumerate(self.parser) if (True in [len(w)>=16 for w in c.split()] )]\n index_raw=[i for i,c in enumerate(self.parser_raw) if np.sum([w==w.upper() for w in c.split()])>=4]\n index=list(set(index_ + index_raw))\n self.df.loc[count,'paragraphs']=np.delete(np.array(self.parser),index)\n self.df.loc[count,'raw paragraphs']=np.delete(np.array(self.parser_raw),index)\n count+=1\n \n print(\"files from {} succesfully converted \".format(folder))\n \n return self.df",
"def load_pdf(self, env=\"default\", debug=()):\n os.makedirs(\"txt\", exist_ok=True)\n if env is \"default\": # default python path\n call([executable,\n os.path.join(f\"{exec_prefix}\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n if env is \"venv\": # virtual environment\n call([os.path.join(\"venv\", \"Scripts\", \"python.exe\"),\n os.path.join(\"venv\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n with open(os.path.join(\"txt\", f\"{self.txt_filename}\"), \"r\", encoding=\"utf-8\") as file:\n self.paragraphs = [paragraph.rstrip('\\n') for paragraph in file]\n os.remove(os.path.join(\"txt\", f\"{self.txt_filename}\"))\n if debug:\n for counter, paragraph in enumerate(self.paragraphs):\n try:\n if int(debug[0]) < counter < int(debug[1]):\n print(counter, paragraph)\n except TypeError:\n print(\"Debug must be a (x,y) touple.\")",
"def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")",
"def parse_pdfs():\n # get all of the pdf files in the dir\n pahopdffiles = [f for f in listdir(paho_raw_reports_dir) if isfile(join(paho_raw_reports_dir, f))]\n # set up a list to hold the data for all pdf files\n all_pdf_data = []\n # read in each pdf file\n for pahopdffile in pahopdffiles:\n try:\n logging.info(\"Now attempting to read in: \"+pahopdffile)\n fullfilepath = os.path.join(paho_raw_reports_dir, pahopdffile)\n tables = camelot.read_pdf(fullfilepath)\n # get the pandas dataframe from each pdf\n pdfdataframe = tables[0].df\n # ensure that this is a valid PAHO COVID19 report\n report_keywords = ['Cumulative','COVID-19','Americas'] \n if not all(x in pdfdataframe[0].iloc[0] for x in report_keywords):\n logging.error(pahopdffile+\" was not recognised as a normal PAHO pdf file. Skipping.\")\n continue\n # set up the list to hold the data for this file\n reportdata = []\n # create a variable to store the date of this report\n date = None\n # create a variable to store the last subregion seen\n subregion = None\n # PAHO has different formats for their tables, so we need to check the number of columns in the pdf\n numcolumns = len(pdfdataframe.columns)\n # get the row index for the last country\n lastcountryrowindex = pdfdataframe[1][pdfdataframe[1] == 'Total'].index[0]-1\n for rowindex,rowdata in pdfdataframe.iterrows():\n # set up variables to hold the data for the dict\n country_or_territory_name = None\n confirmed_cases = None\n probable_cases = None\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n if numcolumns == 6:\n # this is the old format that they started with\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].replace('Cumulative suspected and confirmed COVID-19 cases reported by \\ncountries and territories in the Americas, as of ','')\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n if not date:\n raise RuntimeError(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[5]\n # store null data for all other fields that were not present in the old reports\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n elif numcolumns == 9:\n # PAHO added in probable cases\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[2]:\n split_numbers = rowdata[2].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n transmission_type = rowdata[8]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[5]\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[6].replace(\",\",\"\"))\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[8]\n elif numcolumns == 10:\n # PAHO added in country ISO codes and special characters\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(3,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[2] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[2] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[2]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[3]:\n split_numbers = rowdata[3].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n transmission_type = rowdata[9]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[5].replace(\",\",\"\"))\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[6]\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[7].replace(\",\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n if rowdata[9] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[9]\n else:\n logging.error(\"Unrecognised number of columns in the pdf file. Skipping for now.\"+\n \"Check if the report format changed from PAHO.\")\n # if we were at least able to scrape the country or territory name, create a dict and add it to the list\n if country_or_territory_name is not None:\n # set up the dict to store each row of data\n reportdict = collections.OrderedDict()\n # add the values to the dict in the order that we want for the report\n reportdict['date'] = date\n reportdict['country_or_territory_name'] = country_or_territory_name\n reportdict['confirmed_cases'] = confirmed_cases\n reportdict['probable_cases'] = probable_cases\n reportdict['confirmed_deaths'] = confirmed_deaths\n reportdict['probable_deaths'] = probable_deaths\n reportdict['recovered'] = recovered\n reportdict['percentage_increase_confirmed'] = percentage_increase_confirmed\n reportdict['transmission_type'] = transmission_type\n # now add this dict to our list for this report/pdf\n reportdata.append(reportdict)\n # once we are done adding all data for this pdf, add this pdf report to the list of all reports\n # if the reportdata list is not empty\n if reportdata:\n all_pdf_data.append(reportdata)\n logging.info(\"Successfully parsed \"+pahopdffile)\n except Exception as exc:\n logging.exception(\"Problem found while parsing \"+pahopdffile)\n raise\n logging.info(\"Completed parsing all pdfs in folder.\")\n return all_pdf_data",
"def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def concat_pdf_pages(files):\n for input_file in files:\n for page in PdfFileReader(input_file).pages:\n yield page",
"def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)",
"def import_tables(file, pages):\n tables = camelot.read_pdf(\n file, pages=pages,\n flavor='stream',\n )\n return tables",
"def load_enron():\n connection=apsw.Connection(\"enron.db\")\n cursor=connection.cursor()\n\n \n cursor.execute(\"CREATE VIRTUAL TABLE corpus USING fts4(rowid, title, body);\")\n\n\n enrondir = \"/Users/jonas/Downloads/enron_mail_20110402/maildir\"\n\n pi = 0\n \n for dirpath, dirnames, filenames in os.walk(enrondir):\n for filename in filenames:\n try:\n subject, body = enron.parse_file(os.path.join(dirpath, filename))\n \n cursor.execute(\"insert into corpus values(?,?,?)\", (pi, subject, body))\n except:\n pass\n \n if pi % 100 == 0:\n print pi\n pi += 1",
"def open_pdf(directory):\n for sub_folder in os.listdir(directory):\n sub_directory = os.path.join(directory,sub_folder)\n for pdf_file in os.listdir(sub_directory):\n full_path = os.path.join(sub_directory,pdf_file)\n try:\n pdf_content = pdf_to_txt(full_path)\n if isinstance(pdf_content, str) and len(pdf_content) > 1000:\n yield full_path, pdf_content\n else:\n print('No text found, skipping \"{}\"..'.format(pdf_file))\n continue\n except Exception as e:\n print(e)\n print('Failed to parse \"%s\"' % pdf_file)",
"def convert_pdf_into_csv(source_file, target_subdir):\n DEBUG = False\n if DEBUG: header = '>>>DEBUG:\\t'\n #if DEBUG: print header\n\n regexp_table = re.compile(r\"TABLE\\s+(\\d+\\w?)\\s\")\n fileIN = open( source_file, \"r\")\n lineIN_list = fileIN.readlines()\n\n # To create csv file for tables, we need to fix the lines within the table because\n # 1. the table cross page boundary that created many blank line(s)\n # 2. the text has embedded tab which should be removed\n # 3. others: see each \"fix_...\" for details\n lineIN_list = fix_lineIN_list_space_only(lineIN_list)\n lineIN_list = fix_lineIN_list_table_10(lineIN_list)\n lineIN_list = fix_lineIN_list_table_13(lineIN_list)\n lineIN_list = fix_lineIN_list_table_17(lineIN_list)\n lineIN_list = fix_lineIN_list_table_19(lineIN_list)\n lineIN_list = fix_lineIN_list_table_22(lineIN_list)\n lineIN_list = fix_lineIN_list_table_23(lineIN_list)\n lineIN_list = fix_lineIN_list_table_24(lineIN_list)\n lineIN_list = fix_lineIN_list_table_26(lineIN_list)\n lineIN_list = fix_lineIN_list_table_31(lineIN_list)\n lineIN_list = fix_lineIN_list_table_33(lineIN_list)\n lineIN_list = fix_lineIN_list_table_34(lineIN_list)\n lineIN_list = fix_lineIN_list_table_35(lineIN_list)\n lineIN_list = fix_lineIN_list_table_39(lineIN_list)\n\n csv_filename_list = []\n head, tail = os.path.split(source_file)\n file_name, file_extension = os.path.splitext(tail)\n\n # Every table\n #target_file_all_table = target_subdir.replace('.csv', '_all_table.csv')\n target_file_all_table = os.path.join(target_subdir, file_name + '_all_table.csv')\n print '>>>Creating', '({target_file_all_table})'.format(**locals())\n\n fileOUT_all_table = open( target_file_all_table, \"w+\")\n\n # Every byte table\n #target_file_byte_table = target_subdir.replace('.csv', '_byte_table.csv')\n target_file_byte_table = os.path.join(target_subdir, file_name + '_byte_table.csv')\n print '>>>Creating', '({target_file_byte_table})'.format(**locals())\n fileOUT_byte_table = open( target_file_byte_table, \"w+\")\n\n # memory map table\n #target_file_map_table = target_subdir.replace('.csv', '_map_table.csv')\n target_file_map_table = os.path.join(target_subdir, file_name + '_map_table.csv')\n print '>>>Creating', '({target_file_map_table})'.format(**locals())\n fileOUT_map_table = open( target_file_map_table, \"w+\")\n\n for line_idx in range( len(lineIN_list) ):\n #if DEBUG and 'TABLE' in lineIN_list[line_idx]:\n # print header, lineIN_list[line_idx].strip()\n if regexp_table.search(lineIN_list[line_idx]):\n table_nu = regexp_table.search(lineIN_list[line_idx]).group(1)\n #if DEBUG: print '>>>>>>>>>>>>>>table_nu =', table_nu\n #if DEBUG: print header, 'FOUND', table_nu, '\\t',lineIN_list[line_idx].strip()\n csv_filename_list.append(table_nu)\n\n #if table_nu == '19':\n # lineOUT_list = extract_table_byte_table(line_idx, lineIN_list)\n if table_nu == '1': lineOUT_list = extract_table_1(line_idx, lineIN_list)\n elif table_nu =='2': lineOUT_list = extract_table_2(line_idx, lineIN_list)\n elif table_nu =='3': lineOUT_list = extract_table_3(line_idx, lineIN_list)\n elif table_nu =='4': lineOUT_list = extract_table_4(line_idx, lineIN_list)\n elif any( table_nu == z for z in ('5', '6', '8', '9', '10', '11', '12', '13', '17', '18', '19', '22', '23', '24', '26', '27', '28', '29', '30', '32A', '31', '33', '34', '35', '39')): \n lineOUT_list = extract_table_byte_table(line_idx, lineIN_list)\n #elif table_nu =='7': lineOUT_list = extract_table_7(line_idx, lineIN_list)\n elif any( table_nu == z for z in ('14', '15', 
'16', '21', '25', '32', '32', '32', '32')): \n lineOUT_list = extract_table_byte_table(line_idx, lineIN_list)\n else:\n lineOUT_list = []\n\n DEBUG = False\n if DEBUG and len(lineOUT_list) != 0:\n print header, table_nu, 'lineOUT_list', lineOUT_list\n from pprint import pprint as pp\n pp(lineOUT_list)\n\n\n table_name = re.sub('^.*?TABLE ', 'TABLE ', lineIN_list[line_idx]) # Fix this line when it does not start with 'TABLE nn'\n\n if True:\n #target_file_single_table = target_subdir.replace('.csv', '_table_%s.csv'%(table_nu))\n target_file_single_table = os.path.join(target_subdir, file_name + '_table.csv')\n print '>>>Creating', '({target_file_single_table})'.format(**locals())\n\n fileOUT_single_table = open( target_file_single_table, \"w+\")\n fileOUT_single_table.write ('\\n'.join(lineOUT_list))\n fileOUT_single_table.close()\n\n #if len(lineOUT_list) != 0:\n if True:\n if table_nu != '1': fileOUT_all_table.write('\\n\\n')\n #try:\n # if not flag_all_table:\n # fileOUT_all_table.write('\\n\\n') # add lines between 2 tables\n #except NameError:\n # flag_all_table = True\n fileOUT_all_table.write('%s'%(table_name))\n fileOUT_all_table.write ('\\n'.join(lineOUT_list))\n\n if len(lineOUT_list)>0 and (re.search('^Byte,', lineOUT_list[0]) or re.search('^Address,Byte,', lineOUT_list[0])):\n #fileOUT_byte_table.write('\\n\\n')\n try:\n if flag_byte_table:\n fileOUT_byte_table.write('\\n\\n') # add lines between 2 tables\n #fileOUT_byte_table.write('\\n') # add lines between 2 tables\n except NameError:\n flag_byte_table = True\n fileOUT_byte_table.write('%s'%(table_name))\n\n # Fix table 3: remove first column about A0h\n if re.search('SINGLE BYTE', table_name):\n lineOUT_list = [re.sub('^Address.', '', this_line) for this_line in lineOUT_list]\n lineOUT_list = [re.sub('^A0h.', '', this_line) for this_line in lineOUT_list]\n\n fileOUT_byte_table.write ('\\n'.join(lineOUT_list))\n\n if len(lineOUT_list)>0 and re.search('MAP', table_name):\n try:\n if flag_map_table:\n fileOUT_map_table.write('\\n\\n') # add lines between 2 tables\n except NameError:\n flag_map_table = True\n fileOUT_map_table.write('%s'%(table_name))\n\n ## Fix table 3: remove first column about A0h\n #if re.search('SINGLE BYTE', table_name):\n # lineOUT_list = [re.sub('^Address.', '', this_line) for this_line in lineOUT_list]\n # lineOUT_list = [re.sub('^A0h.', '', this_line) for this_line in lineOUT_list]\n\n fileOUT_map_table.write ('\\n'.join(lineOUT_list))\n\n\n\n\n fileOUT_all_table.close()\n fileOUT_byte_table.close()\n fileOUT_map_table.close()\n\n return csv_filename_list",
"def extract_table(path):\n re_ex = RE_EX\n pages = []\n page_num = 1\n with open(path, 'rb') as in_file:\n parser = PDFParser(in_file)\n doc = PDFDocument(parser)\n for page in PDFPage.create_pages(doc):\n rsrcmgr = PDFResourceManager()\n output_string = StringIO()\n device = TextConverter(rsrcmgr, output_string, laparams=LAParams())\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n interpreter.process_page(page)\n finder = re.search(re_ex, output_string.getvalue(), re.IGNORECASE)\n print('Searching table', '\\tCurrent page:', page_num)\n if finder:\n print('Table finded.')\n pages.append(page_num)\n break\n\n page_num += 1\n\n table = extract_text(path, pages)\n table = isolate(table)\n table = add_separations(table)\n\n return table",
"def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()",
"def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')",
"def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj",
"def parse_tables_from_pdf(self, pdf_filename: str) -> Dict[str, List[Table]]:\n pdf_tokens, pdf_images = self.pdf_extractor.load_tokens_and_image(\n pdf_filename, resize_image=True\n )\n\n return self.parse_tables_from_pdf_data(pdf_tokens, pdf_images)",
"def process(self, terms):\n for entry in self.files:\n try:\n logger.info('file - {0}'.format(entry.path))\n\n # notional output file path\n path_sentences = self.path.joinpath('{0}.csv'.format(entry.path.stem))\n path_summary = self.path.joinpath('{0}-summary.csv'.format(entry.path.stem))\n logger.info('will save to - {0}'.format(path_sentences.resolve()))\n\n reports = self.inspect_doc(entry, terms)\n\n # receiving a list of dicts\n # therefore pandas can package into a useful outcome\n if len(reports) > 0:\n frame_sentences = pd.DataFrame(reports)\n\n frame_sentences = frame_sentences[['page', 'term', 'sentence']]\n logger.info('saving sentence file to - {0}'.format(path_sentences.resolve()))\n frame_sentences.to_csv(str(path_sentences.resolve()))\n \n frame_summary = frame_sentences.pivot_table(\n index='page',\n columns='term',\n aggfunc='size',\n fill_value=0\n )\n logger.info('saving summary file to - {0}'.format(path_sentences.resolve()))\n frame_summary.to_csv(str(path_summary.resolve()))\n\n\n except Exception as e:\n logger.error(e)",
"def analyze(directory, pdf_file, doc_type):\n\n total_redaction_count = 0\n total_redacted_text_area = 0\n total_estimated_text_area = 0\n total_estimated_num_words_redacted = 0\n\n # Split the pdb (which is a pdf file) into individual jpgs.\n redaction_module.pdf_to_jpg(directory, pdf_file)\n\n os.chdir(directory)\n for jpg_file in os.listdir(directory):\n # Iterating through each page of the PDB\n if jpg_file.endswith(\".jpg\"):\n\n [redaction_count, redacted_text_area, estimated_text_area, estimated_num_words_redacted, potential, text_potential, type1, type2, type3] = redaction_module.image_processing(jpg_file, doc_type)\n\n total_redaction_count += redaction_count\n total_redacted_text_area += redacted_text_area\n total_estimated_text_area += estimated_text_area\n total_estimated_num_words_redacted += estimated_num_words_redacted\n\n # Crucial clean-up of jpg files (Note: If files are not removed, code will NOT work properly).\n os.remove(jpg_file)\n\n # Now that we've gone through each page, we need to calculate the stats for the document.\n if total_estimated_text_area != 0:\n total_percent_text_redacted = float(total_redacted_text_area / total_estimated_text_area)\n else:\n total_percent_text_redacted = 0\n\n data = []\n # open csv file and write the stats in a single row representing the document.\n with open('output.csv', mode='a+') as output:\n output_writer = csv.writer(output, delimiter=',')\n row = [pdf_file, total_redaction_count, total_percent_text_redacted, total_estimated_num_words_redacted]\n data.append(row)\n print(tabulate(data, headers=[\" \", \" \", \" \", \" \", \" \"]))\n output_writer.writerow(row)\n output.close()",
"def readFile(self):\n with pdfplumber.open(self.path) as pdf:\n first_page = pdf.pages[0]\n text = first_page.extract_text()\n text = text.split('\\n')\n return processText(text)",
"def perform_parse(self):\n # get folder of pdf files\n folder = QFileDialog.getExistingDirectory(\n parent=self.parent(),\n caption='Get folder with PDF documents to parse'\n )\n if folder:\n # get list of fields and patterns\n field_list = self._get_fields()\n # performing parse\n results = make_parse(folder, field_list)\n self.open_result(results)",
"def pdf_body(input_for,desc_dir):\n res = []\n wt = []\n for item in os.listdir(desc_dir):\n filename=os.path.join(desc_dir,item)\n with open(filename) as f:\n line=f.readlines()\n weight=line[1].strip('\\n')\n name=line[0].strip('\\n')\n print(name,weight)\n res.append('name: ' +name)\n wt.append('weight: ' +weight)\n print(res)\n print(wt)\n new_obj = \"\" \n \n for i in range(len(res)):\n if res[i] and input_for == 'pdf':\n new_obj += res[i] + '<br />' + wt[i] + '<br />' + '<br />'\n return new_obj",
"def convert_file(self):\n try:\n\n doc_data_txt = []\n pdf_data_txt = []\n\n n = self.args_.file_count(self.docs)\n\n if self.docs:\n doc_data_txt = (\n Parallel\n (n_jobs=n, backend=\"multiprocessing\", verbose=10)\n (delayed\n (self.args_.docx_handler)(path, self.submitted)\n for path in self.docs))\n\n n = self.args_.file_count(self.pdfs)\n\n if self.pdfs:\n pdf_data_txt = (\n Parallel\n (n_jobs=n, backend=\"multiprocessing\", verbose=10)\n (delayed\n (self.args_.pdfminer_handler)(path, self.submitted)\n for path in self.pdfs))\n\n return doc_data_txt, pdf_data_txt\n\n except RuntimeError as error:\n logger.getLogger().error(error)\n exit(1)",
"def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()",
"def merger_page_pdf(self, input_pdf, output_pdf):\n output = PdfFileWriter()\n # Appending two pdf-pages from two different files\n _input_pdf = PdfFileReader(open(input_pdf, \"rb\"))\n for i in range(30):\n page = _input_pdf.getPage(0)\n artbox = page.artBox\n x = artbox[0]\n y = artbox[1]\n y = artbox[2]\n y = artbox[3]\n output.addPage(page)\n # output.addPage(_input_pdf.getPage(0))\n # output.addPage(_input_pdf.getPage(0))\n\n # Writing all the collected pages to a file\n output.write(open(output_pdf, \"wb\"))\n\n\n # Creating a routine that appends files to the output file\n\n\n # Creating an object where pdf pages are appended to",
"def mo_parse_pdf(self, filepath):\n\n text = textract.process(filepath, encoding='utf-8')\n text = text.decode('utf-8')\n\n if 'PRESSURE CALIBRATION DATA' in text:\n self.mo_parse_p(filepath)\n\n elif 'TEMPERATURE CALIBRATION DATA' or 'CONDUCTIVITY CALIBRATION DATA' in text:\n self.mo_parse_ts(text)\n\n else:\n pass",
"def get_additional_data_from_files(df, file_description): # file description one of [\"video\", \"eaf\", \"seg\", \"gentle\"]\n if file_description == \"gentle\":\n file_folder = FILE_BASE + \"/gentle/\"\n is_gentle_file = True\n else:\n file_folder = FILE_BASE + \"/original/\"\n is_gentle_file = False\n\n file_df = None\n\n if file_description not in list(FILE_DESCRIPTIONS_TO_EXT.keys()):\n print(\"Unknown file description! Don't know what to do with %s files...\" % file_description)\n return None\n\n else:\n print(\"Load and extract information from %s files...\" % file_description)\n #pbar = tqdm.tqdm(total = len(np.unique(df[\"source_file\"])),desc='Files', position=0,leave=True,file=sys.stdout)\n #file_log = tqdm.tqdm(total=0, position=1, bar_format='{desc}',leave=True,file=sys.stdout)\n print(\"Total files to laod and preprocess: \", len(np.unique(df[\"source_file\"])))\n \n for i,file in enumerate(np.unique(df[\"source_file\"])):\n if i%100 == 0:\n print(\"File: \",i)\n \n filepath = file_folder + get_file_path(file,is_gentle_file=is_gentle_file) + FILE_DESCRIPTIONS_TO_EXT[file_description]\n\n if file_description == \"video\":\n file_i_df = mp4_file_processing.get_word_video_snippet_size(df, filepath)\n elif file_description == \"eaf\":\n speech_annotation_eaf_data, gesture_eaf_data = eaf_file_processing.read_eaf(filepath)\n file_i_df = eaf_file_processing.map_gestures_to_annotation(speech_annotation_eaf_data, gesture_eaf_data, remove_pauses=False)\n file_i_df = eaf_file_processing.binary_encode_gestures(file_i_df, gesture_column=\"gesture\")\n\n elif file_description == \"seg\":\n file_i_df = seg_file_processing.get_seg_file_pos_info(filepath)\n\n elif file_description == \"gentle\":\n file_i_df = gentle_file_processing.get_gentle_file_transcripts(filepath)\n \n else:\n print(\"Unknown file format!!!\")\n return \n\n if file_df is None:\n file_df = file_i_df\n else:\n file_df = pd.concat([file_df, file_i_df], ignore_index=True)\n\n #file_log.set_description_str(f'Processed file: {file}')\n #pbar.update(1)\n #sleep(0.02)\n #file_log.close()\n #pbar.close()\n return file_df",
"def readDocuments(docs, prefix):\n\n fmap = open(\"mapping.txt\", \"w\")\n\n\n i = -1\n for folder in pressrelease_folders_txt:\n i += 1\n fullpath = path.join(prefix, folder)\n totFilesInFolder = len(fnmatch.filter(os.listdir(fullpath),\n '*.txt'))\n countFiles = 0\n for f in listdir(path.join(prefix, folder)):\n fmap.write(\"{0}\\t {1:5d}\\n\".format(f, countFiles))\n countFiles += 1\n fullname = fullpath + f\n # text = open(fullname).readlines()\n ff = open(fullname)\n docs.append(ff.read())\n\n print(\"{0:5d}/{1:5d} :: Reading file {2:10s} \".format(countFiles,\n totFilesInFolder, f))\n\n # if countFiles > 4:\n # return\n\n\n fmap.close()"
]
| [
"0.65797544",
"0.65296715",
"0.6453306",
"0.6322549",
"0.6238333",
"0.6149662",
"0.60941696",
"0.60810435",
"0.6048382",
"0.5938034",
"0.59144366",
"0.5892034",
"0.58687395",
"0.58637136",
"0.58473825",
"0.57955265",
"0.57600033",
"0.57588214",
"0.56795657",
"0.56698626",
"0.5643766",
"0.5642962",
"0.56275946",
"0.5625936",
"0.5614355",
"0.5606185",
"0.55952287",
"0.5579959",
"0.55698156",
"0.55693185"
]
| 0.76072 | 0 |
Read and process all docx files situated in "./fichiers source/", then inject them into the document table | def docxProcessing():
DOCUMENT_ORIGIN_CODE = "RADIOLOGIE_SOFTWARE"
global DATABASE
conn = db.create_connection(DATABASE)
pathFolder = "fichiers source/"
extension = ".docx"
docxFileArrayPath = glob.glob(pathFolder + "*" + extension)
print(" - Processing docx", end="")
for file in docxFileArrayPath:
text = readFile.readDocxFile(file)
query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)
db.insert_document(conn, query)
print(".", end = '')
#commit the changes to db
conn.commit()
#close the connection
conn.close()
print("\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj",
"def process_doc_files(*files, add_new_line=True):\n for file in files:\n # Treat folders\n if os.path.isdir(file):\n files = [os.path.join(file, f) for f in os.listdir(file)]\n files = [f for f in files if os.path.isdir(f) or f.endswith(\".mdx\") or f.endswith(\".py\")]\n process_doc_files(*files, add_new_line=add_new_line)\n else:\n try:\n process_doc_file(file, add_new_line=add_new_line)\n except Exception:\n print(f\"There is a problem in {file}.\")\n raise",
"def updateDocFiles(self):\n for filename, filetype in self._get_doc_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n \n if filename.endswith('conf.py'):\n lines, write_out = self._update_doc_conf_file(lines, filename)\n else:\n raise TypeError, \"Unknown doc file type: %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)",
"def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)",
"def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')",
"def doc_loader_3_english(doc_path=doc_path_english ,num_samples=1280):\r\n print(\"Loading Doc.....\")\r\n row_cnt = 0\r\n with open(doc_path,'r', encoding='UTF-8') as f:\r\n #content = f.read() # Loads the whole FIle ## CAUTION :- May result in memory overload , solution dataset obj/ generator\r\n for row in f:\r\n row_cnt += 1\r\n #print(row_cnt)\r\n if num_samples != None:\r\n if row_cnt <= num_samples:\r\n temp_row = [int(i) for i in row.split()]\r\n if len(temp_row) < max_len:\r\n temp_row = temp_row + [0] * (max_len - len(temp_row))\r\n yield (temp_row)\r\n else:\r\n temp_row = temp_row[:max_len+1]\r\n yield (temp_row)\r\n else:\r\n break\r\n else:\r\n temp_row = [int(i) for i in row.split()]\r\n if len(temp_row) < max_len:\r\n temp_row = temp_row + ([0] * (max_len - len(temp_row)))\r\n yield (temp_row)\r\n else:\r\n temp_row = temp_row[:max_len + 1]\r\n yield (temp_row)",
"def read_docx(docx_file, **kwargs):\n ns = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}\n with zipfile.ZipFile(docx_file).open('word/document.xml') as f:\n root = etree.parse(f)\n for el in root.xpath('//w:tbl', namespaces=ns):\n el.tag = 'table'\n for el in root.xpath('//w:tr', namespaces=ns):\n el.tag = 'tr'\n for el in root.xpath('//w:tc', namespaces=ns):\n el.tag = 'td'\n return pd.read_html(etree.tostring(root), **kwargs)",
"def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)",
"def load_enron():\n connection=apsw.Connection(\"enron.db\")\n cursor=connection.cursor()\n\n \n cursor.execute(\"CREATE VIRTUAL TABLE corpus USING fts4(rowid, title, body);\")\n\n\n enrondir = \"/Users/jonas/Downloads/enron_mail_20110402/maildir\"\n\n pi = 0\n \n for dirpath, dirnames, filenames in os.walk(enrondir):\n for filename in filenames:\n try:\n subject, body = enron.parse_file(os.path.join(dirpath, filename))\n \n cursor.execute(\"insert into corpus values(?,?,?)\", (pi, subject, body))\n except:\n pass\n \n if pi % 100 == 0:\n print pi\n pi += 1",
"def createStructuredTranscript_Non_Core_Doc():\n\n #create a temporary folder that will hold the data transformed from doc to docx\n os.system('mkdir ' + INPUT_FOLDER+'temp')\n\n core_doc_asset = []\n missing_count = 0\n missing_files=[]\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.doc\"):\n\n # RG numbers for the core asset\n if (\"RG-50.030\" not in file and\n \"RG-50.106\" not in file and\n \"RG-50.549\" not in file):\n \n\n \n # convert file to docx, storing it in an untracked folder called temp\n file_docx = file + 'x'\n command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]\n call(command, shell=True)\n\n # append to the array\n core_doc_asset.append(file_docx)\n \n\n \n\n # get the units for each file, store them and update tracker\n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n \n not_processed=0\n processed_doc=0\n \n # get the units for each file, store them and update tracker \n for mongo_rg in core_doc_asset:\n # get text units for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n \n \n \n units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_doc\")\n\n not_processed=not_processed+1\n\n if False in processed:\n\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n not_processed=not_processed+1\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n else:\n # insert units on the output collection\n h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n \n\n #delete the temporary folder\n os.system('rm -r ' + INPUT_FOLDER+'temp')\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')\n file.write('\\n'.join(missing_files))\n\n \n # success\n pprint.pprint(\"Non-core doc files were successfully processed, but there are \" + str(missing_count) + \" missing\")",
"def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)",
"def docx():\n env.file_ext = \".docx\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} --bibliography={bib_file} --csl={csl_file} --toc\".format(**env))",
"def readDocuments(docs, prefix):\n\n fmap = open(\"mapping.txt\", \"w\")\n\n\n i = -1\n for folder in pressrelease_folders_txt:\n i += 1\n fullpath = path.join(prefix, folder)\n totFilesInFolder = len(fnmatch.filter(os.listdir(fullpath),\n '*.txt'))\n countFiles = 0\n for f in listdir(path.join(prefix, folder)):\n fmap.write(\"{0}\\t {1:5d}\\n\".format(f, countFiles))\n countFiles += 1\n fullname = fullpath + f\n # text = open(fullname).readlines()\n ff = open(fullname)\n docs.append(ff.read())\n\n print(\"{0:5d}/{1:5d} :: Reading file {2:10s} \".format(countFiles,\n totFilesInFolder, f))\n\n # if countFiles > 4:\n # return\n\n\n fmap.close()",
"def documents(sources, source_type, include):\n with commit():\n if source_type == 'migrator-kit':\n import_documents_from_record_file(sources, include)\n else:\n import_documents_from_dump(\n sources=sources,\n source_type=source_type,\n eager=True,\n include=include\n )",
"def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()",
"def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")",
"def openie_prepare_files(document_file, no_entity_filter=False, consider_sections=False):\n temp_dir = tempfile.mkdtemp()\n temp_in_dir = os.path.join(temp_dir, \"input\")\n filelist_fn = os.path.join(temp_dir, \"filelist.txt\")\n out_fn = os.path.join(temp_dir, \"output.txt\")\n os.mkdir(temp_in_dir)\n input_files = []\n\n amount_skipped_files = 0\n doc_count = count_documents(document_file)\n logging.info('counting files to process....')\n if no_entity_filter:\n for document_content in read_pubtator_documents(document_file):\n doc = TaggedDocument(from_str=document_content)\n if not doc or not doc.title or not doc.abstract:\n amount_skipped_files += 1\n else:\n doc_count += 1\n # TODO: Not beautiful but join sections via a '.' to ensure sentence splitting in CoreNLP\n content = '. '.join([te for te, _ in doc.iterate_over_text_elements(sections=consider_sections)])\n input_file = os.path.join(temp_in_dir, \"{}.txt\".format(doc.id))\n input_files.append(input_file)\n with open(input_file, \"w\") as f:\n f.write(content)\n else:\n logging.info('Init spacy nlp...')\n spacy_nlp = English() # just the language with no model\n spacy_nlp.add_pipe(\"sentencizer\")\n\n doc2sentences, doc2tags = filter_document_sentences_without_tags(doc_count, document_file, spacy_nlp,\n consider_sections=consider_sections)\n doc_count = len(doc2tags)\n for doc_id, sentences in doc2sentences.items():\n if sentences:\n input_file = os.path.join(temp_in_dir, \"{}.txt\".format(doc_id))\n input_files.append(input_file)\n with open(input_file, 'wt') as f:\n f.write(' '.join(sentences))\n\n logging.info('{} files need to be processed. {} files skipped.'.format(doc_count, amount_skipped_files))\n with open(filelist_fn, \"w\") as f:\n f.write(\"\\n\".join(input_files))\n return filelist_fn, out_fn, doc_count",
"def parse_docs(filename):\n \n # open word doc\n word = win32.gencache.EnsureDispatch('Word.Application')\n doc = word.Documents.Open(os.getcwd() + '/' + filename + \".doc\")\n doc.Activate()\n \n # read word doc as list of lists\n data = [doc.Tables(i).Range.Text for i in range(1,5)]\n data = ''.join(data)\n data = data.replace('\\r\\x07\\r\\x07', ', ')\n data = data.replace('\\r\\x07', ', ')\n data = data.split(\", \")\n \n # separate columns into lists\n varname = data[0::4]\n description = data[1::4]\n valuelineref = data[2::4]\n type = data[3::4]\n\n # create pandas dataframe and clean up\n df = pd.DataFrame(list(zip(varname, description, valuelineref, type)))\n doc.Close(True) # is this a function?\n headers = df.iloc[0]\n df = df[1:]\n df.columns = headers\n df['Variable Name'] = df['Variable Name'].str.replace('\\r','')\n \n # store as csv\n df.to_csv(filename + '.csv', index = False)\n return df",
"def makedoc(folder=os.getcwd(), doc_file=os.getcwd() + r'/doc.ipynb', old=None):\n files = [f.replace(folder, '') for f in list_files(folder, ['ipynb']) if 'checkpoint' not in f]\n text = ''\n\n for file in files:\n if old is None or old not in file:\n with open(folder + file, 'r', encoding='utf-8') as n:\n j = json.loads(n.read())\n\n text += '#### ' + file.replace('.ipynb', '') + '\\n'\n try:\n if j['cells'][0]['cell_type'] == 'markdown':\n text += ''.join(j['cells'][0]['source'])\n text += '\\n'\n else:\n text += '\\n'\n except IndexError:\n print(j, file)\n\n with open(doc_file, 'r', encoding='utf-8') as d:\n jd = json.loads(d.read())\n jd['cells'][1]['source'] = text\n\n with open(doc_file, 'w', encoding='utf-8') as d:\n d.write(json.dumps(jd))",
"def doc_loader_3_french(doc_path=doc_path_french ,num_samples=1280):\r\n print(\"Loading Doc.....\")\r\n row_cnt = 0\r\n with open(doc_path,'r', encoding='UTF-8') as f:\r\n #content = f.read() # Loads the whole FIle ## CAUTION :- May result in memory overload , solution dataset obj/ generator\r\n for row in f:\r\n row_cnt += 1\r\n #print(row_cnt)\r\n if num_samples != None:\r\n if row_cnt <= num_samples:\r\n row = row.strip()\r\n temp_row = [int(i) for i in row.split()]\r\n if len(temp_row) < max_len:\r\n temp_row = temp_row + ([0] * (max_len - len(temp_row)))\r\n #temp_row = one_hot(temp_row, french_vocab_size)\r\n\r\n yield (temp_row)\r\n else:\r\n temp_row = temp_row[:max_len + 1]\r\n #temp_row = one_hot(temp_row, french_vocab_size)\r\n\r\n yield (temp_row)\r\n\r\n else:\r\n break\r\n else:\r\n row = row.strip()\r\n temp_row = [int(i) for i in row.split()]\r\n if len(temp_row) < max_len:\r\n temp_row = temp_row + ([0] * (max_len - len(temp_row)))\r\n #temp_row = one_hot(temp_row, french_vocab_size)\r\n\r\n yield (temp_row)\r\n else:\r\n temp_row = temp_row[:max_len + 1]\r\n #temp_row = one_hot(temp_row, french_vocab_size)\r\n\r\n yield (temp_row)",
"def get_doc(filename :str) -> List[List[str]]:\n\tdata = []\n\ttry:\n\t\twith open(filename, 'r', encoding='utf-8') as f:\n\t\t\tcontent = f.read()\n\t\t\t# print(content)\n\t\t\tpattern = re.compile(r\"<doc.*?>(.*?)</doc>\",re.S)\n\t\t\ttexts = re.findall(pattern, content)\n\t\t\t# print(data)\n\n\t\t\tfor text in texts:\n\t\t\t\t# print(text)\n\t\t\t\ttemp = process_doc(text)\n\t\t\t\tdata.extend(temp)\n\t\t\t\t# print(len(temp))\n\n\t\t\treturn data\n\n\texcept IOError as e:\n\t\tprint(\"the file {} cannot open\".format(filename))\n\t\tprint(e)\n\t\traise IOError",
"def iter_documents(top_directory):\n for root, dirs, files in os.walk(top_directory):\n for file in filter(lambda file: file.endswith('.txt'), files):\n document = open(os.path.join(root, file)).read() # read the entire document, as one big string\n yield utils.tokenize(document, lower=True) # or whatever tokenization suits you",
"def build_DB(self, doc_files):\n\t\tcompteur=0\n\t\tdoc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\twhile os.path.exists(doc_name):\n\t\t doc=Doc(doc_name)\n\t\t self.DB.add_doc(doc)\n\t\t compteur+=1\n\t\t doc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\t#print self.DB.id2nbword\n\t\tself.dump_DB()",
"def scan_docs():\n\n\n def scan_file(fn):\n f = open(fn)\n\n for l in f:\n m = re.search(r\"\\.\\. (\\w+):: ([.\\w+]+)\", l)\n\n if not m:\n continue\n\n name_kind[m.group(2)] = m.group(1)\n\n for i in os.listdir(\"source\"):\n if i.endswith(\".rst\"):\n scan_file(os.path.join(\"source\", i))\n\n for i in os.listdir(\"source/inc\"):\n scan_file(os.path.join(\"source\", \"inc\", i))",
"def process_docs_2(directory, vocab):\n lines = []\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n line = doc_to_line(path, vocab)\n lines.append(line)\n return lines",
"def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)",
"def process_wiki_file(args: Tuple[str, str, int]) -> str:\n filepath, language, min_sent_word_count = args\n with bz2.open(filepath, \"rt\", encoding=\"utf8\") as bz2_file:\n\n # Extract text between <doc> xml tags\n soup = BeautifulSoup(bz2_file.read(), \"lxml\")\n docs = soup.find_all(\"doc\")\n wiki_dump_content = \"\"\n for i, doc in enumerate(docs):\n processed_text = process_wiki_doc_text(\n doc.text, language, min_sent_word_count\n )\n if len(processed_text) == 0:\n continue\n\n # Append to result\n if i > 0 and len(wiki_dump_content) > 0:\n wiki_dump_content += \"\\n\"\n wiki_dump_content += processed_text\n\n return wiki_dump_content",
"def prepare_dictionary_from_docs(self):\n if os.path.exists(self.DICT_PATH):\n return True\n self.logger.info(\"START PREPARING DICT\")\n for fn in os.listdir(self.wiki_path):\n self.logger.info(\"dict update {0}\".format(fn))\n content = self.get_processed_content(fn)\n self.dictionary.add_documents([content])\n self.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=100000)\n self.dictionary.compactify()\n self.dictionary.save(self.DICT_PATH)\n return True",
"def documents():\n for domain in os.listdir(DOCUMENT_FOLDER):\n for docname in os.listdir(os.path.join(DOCUMENT_FOLDER, domain)):\n filename = os.path.join(DOCUMENT_FOLDER, domain, docname)\n if filename.endswith(\".html\"):\n fullDocname = os.path.join(domain, docname)\n yield (fullDocname, filename)"
]
| [
"0.64515847",
"0.630049",
"0.627368",
"0.62275624",
"0.6038094",
"0.59903705",
"0.5970335",
"0.5945489",
"0.59367883",
"0.59338593",
"0.58784306",
"0.5854394",
"0.5817054",
"0.57945406",
"0.57783514",
"0.5768201",
"0.57398105",
"0.57188696",
"0.5715797",
"0.56921357",
"0.5682478",
"0.56394696",
"0.5637532",
"0.5617668",
"0.5597793",
"0.55848014",
"0.557743",
"0.5566544",
"0.55626875",
"0.5560103"
]
| 0.73857903 | 0 |
Test filtering hit and miss datasets. | def test_filter_exists(self):
datasets = [{"exists": True, "name": "DATASET1"}, {"exists": False, "name": "DATASET2"}]
hits = filter_exists("HIT", datasets)
misses = filter_exists("MISS", datasets)
all = filter_exists("ALL", datasets)
nothing = filter_exists("NONE", datasets)
self.assertEqual(hits, [{"exists": True, "name": "DATASET1"}])
self.assertEqual(misses, [{"exists": False, "name": "DATASET2"}])
self.assertEqual(all, datasets)
self.assertEqual(nothing, []) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_fetch_filtered_dataset_call_misses(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection() # db_response is []\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result_miss = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, True)\n self.assertEqual(result_miss, [])",
"def test_no_filter(self):\r\n\r\n d1 = {\"% IDENTITY\": \"97.6\"}\r\n d2 = {\"% IDENTITY\": \"0.0\"}\r\n d3 = {\"% IDENTITY\": \"100.0\"}\r\n\r\n self.assertTrue(no_filter(d1))\r\n self.assertTrue(no_filter(d2))\r\n self.assertTrue(no_filter(d3))",
"def test_finds_no_traits_if_dataset_search_doesnt_match(self):\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='a dataset about demographic measurements')\n trait = factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=dataset)\n response = self.client.get(self.get_url(), {'description': 'lorem', 'dataset_description': 'something'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)\n self.assertEqual(len(context['results_table'].rows), 0)",
"def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100",
"def test_call_prefilters_when_requested(self):\r\n # no pre-filtering results in one cluster per sequence as they all\r\n # differ at their 3' ends\r\n app = CdHitOtuPicker(params={})\r\n app = CdHitOtuPicker(params={'Similarity': 0.99})\r\n self.assertEqual(app(self.tmp_seq_filepath2), dna_seqs_2_result)\r\n\r\n # no pre-filtering results in one cluster per sequence as they are all\r\n # the same at their 5' ends\r\n app = CdHitOtuPicker(params={})\r\n app = CdHitOtuPicker(params={'Similarity': 0.99},)\r\n self.assertEqual(\r\n app(self.tmp_seq_filepath2, prefix_prefilter_length=5),\r\n dna_seqs_2_result_prefilter)",
"def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))",
"def test_feature_is_filtered(self):\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. \"\n \"All values for these features must be 0.\"\n ],\n )",
"def reject_filter(self, x_hits, y_hits, *args):\n if self.verbose:\n print(\"max empty %d train %d predict\" % (self.max_train_empty, self.max_predict_empty))\n zeros_x = tf.reduce_sum(tf.to_int32(tf.equal(x_hits, 0.0)))\n keep = zeros_x <= self.max_train_empty\n return keep",
"def filterMissing(vcfDict, newVCFdf, filters, log_file, filterType): \n #logic check\n print(\"Pre-filter: {}\".format(newVCFdf.shape))\n \n axis_variable=1\n if filterType=='markers':\n axis_variable=0\n fail_counter=0\n log_file.write(\"Failed {}\\n\".format(filterType))\n for i, frequencyDict in vcfDict.items():\n missingFreq=frequencyDict.get('.')\n if type(missingFreq)==float and missingFreq > filters:\n newVCFdf.drop([i],axis=axis_variable, inplace=True)\n fail_counter+=1\n if filterType=='individuals':\n individualMissingStats=\"{}\\t{}\\n\".format(i, frequencyDict['.'])\n log_file.write(individualMissingStats)\n else:\n log_file.write(\"No missing {} data found for {}\\n\".format(filterType, i))\n log_file.write(\"\\nFailed {} Percent: {:.2f}\\n\".format(filterType, fail_counter/len(vcfDict)*100)) \n print(\"\\nFailed {} Percent: {:.2f}\\n\".format(filterType, fail_counter/len(vcfDict)*100))\n individualDict, markerDict=processDataFrame(newVCFdf, FilterStep=1)\n\n #logic check\n print(\"Post-filter: {}\".format(newVCFdf.shape))\n\n log_file.flush()\n return individualDict, markerDict",
"def test_filter_function_any(self):\n self.es.register_filter(lambda x: True, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))",
"def test_no():\n errors = generate_errors(10, 5)\n assert NoFiltering().filter(errors) == errors",
"def test_filter(self):\n file_name = \"test_filter.hdf5\"\n dataset_file_filter = h5py.File(os.path.join(tmp_path, file_name), \"w\")\n for view_index, (view_name, view, is_sparse) in enumerate(\n zip(self.view_names, self.views, self.are_sparse)):\n view_dataset = dataset_file_filter.create_dataset(\n \"View\" + str(view_index),\n view.shape,\n data=view)\n view_dataset.attrs[\"name\"] = view_name\n view_dataset.attrs[\"sparse\"] = is_sparse\n labels_dataset = dataset_file_filter.create_dataset(\"Labels\",\n shape=self.labels.shape,\n data=self.labels)\n labels_dataset.attrs[\"names\"] = [label_name.encode()\n for label_name in self.labels_names]\n meta_data_grp = dataset_file_filter.create_group(\"Metadata\")\n meta_data_grp.attrs[\"nbView\"] = len(self.views)\n meta_data_grp.attrs[\"nbClass\"] = len(np.unique(self.labels))\n meta_data_grp.attrs[\"datasetLength\"] = len(self.labels)\n dataset_object = dataset.HDF5Dataset(hdf5_file=dataset_file_filter)\n dataset_object.filter(np.array([0, 1, 0]), [\"0\", \"1\"], [1, 2, 3],\n [\"ViewN0\"], tmp_path)\n self.assertEqual(dataset_object.nb_view, 1)\n np.testing.assert_array_equal(dataset_object.get_labels(), [0, 1, 0])\n dataset_object.dataset.close()\n os.remove(os.path.join(tmp_path, \"test_filter_temp_filter.hdf5\"))\n os.remove(os.path.join(tmp_path, \"test_filter.hdf5\"))",
"def test_filterSamples_strict(self):\n with self.assertRaises(ValueError):\n self.overview_map.filterSamples(['PC.356', 'abc123'])\n\n with self.assertRaises(ValueError):\n self.empty_map.filterSamples(['foo'])",
"def test_filter_false(self):\n self.es.register_filter(foo=False)\n self.assertFalse(self.es.streamfilter(self.data))",
"def filterMissings(self, threshold, data):\n\n #replace NAs by 0 for counting\n data.fillna(0).astype(bool).sum(axis=1)\n\n filtered_columns = data.columns\n\n\n #find out threshold, i.e. minimum number of non-zero in real numbers\n rowNumber = data.shape[0]\n min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))\n\n zero_counts = data.astype(bool).sum(axis=0)\n\n for columnID, nonZeros in zero_counts.items():\n if nonZeros <= min_nonZeros:\n filtered_columns = filtered_columns.drop(columnID)\n\n\n return data[filtered_columns]",
"def test_filterSamples_strict(self):\r\n with self.assertRaises(ValueError):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'])\r\n\r\n with self.assertRaises(ValueError):\r\n self.empty_map.filterSamples(['foo'])",
"def test_filter_wea_zero_entry():\n pass",
"def test_no_updated_datasets(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for dataset in self.datasets_v3:\n self.assertNotIn(dataset, table.data)",
"def test_no_removed_datasets(self):\n removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)",
"def test_filter_function_none(self):\n self.es.register_filter(lambda x: False, ftype='none')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: True, ftype='none')\n self.assertFalse(self.es.streamfilter(self.data))",
"def test_filter_1(self):\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Kinder', 'Ken', 'Alan', 'Tracy']\n self.assertEqual(actual, expected)",
"def filterDataset(dat, dataset):\n #\n dat = dat[dat['organism'].isin(dataset)]\n no_mmei_index = dat['mmei']=='no'\n nonstop_index = dat['mutstop']=='no'\n zerofit_index = dat['fitness'].abs()>1e-4\n mutwt_index = dat['mutwt']=='no'\n dat = dat[no_mmei_index & nonstop_index & zerofit_index & mutwt_index]\n #print \"Filtered data\"\n return dat",
"def match_data(self, datasets):\n raise NotImplementedError",
"def test_filterSamples_no_strict(self):\n self.overview_map.filterSamples(['PC.356', 'abc123'], strict=False)\n self.assertEqual(self.overview_map.SampleIds, ['PC.356'])\n\n self.empty_map.filterSamples(['foo'], strict=False)\n self.assertEqual(self.empty_map.SampleIds, [])",
"def test_filterSamples_no_strict(self):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'], strict=False)\r\n self.assertEqual(self.overview_map.SampleIds, ['PC.356'])\r\n\r\n self.empty_map.filterSamples(['foo'], strict=False)\r\n self.assertEqual(self.empty_map.SampleIds, [])",
"def test_basic(self):\n data = get()\n metrics = [verif.metric.Within(),\n verif.metric.A(), # Hit\n verif.metric.B(), # FA\n verif.metric.C(), # Miss\n verif.metric.D(), # Correct rejection\n verif.metric.Hit(),\n verif.metric.Threat(),\n verif.metric.Conditional(),\n verif.metric.XConditional(func=np.median),\n ]\n intervals = [verif.interval.Interval(-np.inf, 0, True, True), # [-inf, 0]\n verif.interval.Interval(-np.inf, 1, True, True),\n verif.interval.Interval(-np.inf, 2, True, True),\n ]\n obs = [0, 1.5, 2]\n fcst = [3.1, 1.1, -2.1]\n N = len(obs)*1.0\n\n # Each line is one metric (one number for each threshold)\n expected = [[0/N, 100/N, 100/N], # Within\n [0/N, 0/N, 2/N], # Hit\n [1/N, 1/N, 0/N], # FA\n [1/N, 1/N, 1/N], # Miss\n [1/N, 1/N, 0/N], # Correct rejection\n [0, 0, 2.0/3], # Hit rate\n [0, 0, 2.0/3], # Threat score\n [3.1, 3.1, 0.7], # Average fcst given obs in interval\n [0, 0, 1.5], # Average obs given obs in interval\n ]\n\n for m in range(len(metrics)):\n metric = metrics[m]\n for i in range(len(intervals)):\n value = metric.compute_from_obs_fcst(np.array(obs), np.array(fcst), intervals[i])\n ex = expected[m][i] * 1.0\n if np.isnan(value):\n self.assertTrue(np.isnan(ex))\n else:\n self.assertAlmostEqual(ex, value)",
"def test_sample_ids_from_category_state_combined_filters(self):\r\n # Filter out all samples (fails both filters).\r\n obs = sample_ids_from_category_state_coverage(self.tutorial_mapping_f,\r\n 'Treatment', 'DOB', min_num_states=2,\r\n required_states=['Control', 'Fast'])\r\n self.assertEqual(obs, self.exp_empty)\r\n\r\n # Filter out all samples (fails one filter).\r\n obs = sample_ids_from_category_state_coverage(self.tutorial_mapping_f,\r\n 'Treatment', 'DOB', min_num_states=2, required_states=['Control'])\r\n self.assertEqual(obs, self.exp_empty)\r\n\r\n obs = sample_ids_from_category_state_coverage(self.tutorial_mapping_f,\r\n 'Treatment', 'DOB', min_num_states=1,\r\n required_states=['Control', 'Fast'])\r\n self.assertEqual(obs, self.exp_empty)\r\n\r\n # Don't filter out any samples (passes both filters).\r\n obs = sample_ids_from_category_state_coverage(self.tutorial_mapping_f,\r\n 'Treatment', 'DOB', min_num_states=0, required_states=[])\r\n self.assertEqual(obs, self.exp_all)\r\n\r\n obs = sample_ids_from_category_state_coverage(self.tutorial_mapping_f,\r\n 'Treatment', 'DOB', min_num_states=1, required_states=[])\r\n self.assertEqual(obs, self.exp_all)\r\n\r\n # Filter out some samples.\r\n exp = (set(['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']), 4,\r\n set(['Control']))\r\n obs = sample_ids_from_category_state_coverage(self.tutorial_mapping_f,\r\n 'Treatment', 'DOB', min_num_states=1, required_states=['Control'])\r\n self.assertEqual(obs, exp)\r\n\r\n exp = (set(['PC.607', 'PC.634', 'PC.635', 'PC.636']), 2, set(['Fast']))\r\n obs = sample_ids_from_category_state_coverage(self.tutorial_mapping_f,\r\n 'Treatment', 'DOB', min_num_states=1, required_states=['Fast'])\r\n self.assertEqual(obs, exp)\r\n\r\n exp = (set(['d', 'e']), 1, set(['Palm', 'Stool']))\r\n obs = sample_ids_from_category_state_coverage(\r\n self.map_str1.split('\\n'),\r\n 'BodySite', 'Study', required_states=['Stool', 'Palm'],\r\n min_num_states=1)\r\n self.assertEqual(obs, exp)\r\n\r\n # Keep subject that has more than specified covered states and has more\r\n # than one sample at a single coverage state (i.e. timepoint).\r\n exp = (set(['c', 'f', 'a', 'g']), 1, set(['1', '2', '3']))\r\n obs = sample_ids_from_category_state_coverage(self.map_str2,\r\n 'Time', 'Individual', min_num_states=3, required_states=['3', '2'])\r\n self.assertEqual(obs, exp)\r\n\r\n # Test filtering out the subject (from the above test) that has four\r\n # timepoints, but only three are unique.\r\n obs = sample_ids_from_category_state_coverage(self.map_str2,\r\n 'Time', 'Individual', min_num_states=4, required_states=['3', '2'])\r\n self.assertEqual(obs, self.exp_empty)",
"def filter_dataset(source_path, dataset_path, progress_bar, info_label, progress, root):\n # dictionary to store two source path\n source_path_name = {}\n for d in SUB_DIRS:\n source_path_name[f\"{d}\"] = os.path.join(source_path, d)\n\n if not os.path.exists(source_path + \"/\" + SUB_DIRS[0]) and not os.path.exists(source_path + \"/\" + SUB_DIRS[1]):\n messagebox.showerror(\"Message\", \"Please check whether source directory, \\n \\\n must contain 'attentive' and 'not_attentive' dataset\")\n else:\n attentive = set()\n not_attentive = set()\n\n total_img = len(os.listdir(source_path + \"/\" + SUB_DIRS[0])) + len(os.listdir(source_path + \"/\" + SUB_DIRS[1]))\n i = 0\n\n # for attentive images in format particular format and availability of face\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[0]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[0] + \"/\" + image):\n attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n progress.update()\n\n info_label['text'] = 'Not Attentive set filtering is on progress'\n\n # for not attentive images\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[1]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[1] + \"/\" + image):\n not_attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n progress.update()\n\n info_label['text'] = 'Filtering is completed'\n progress.destroy()\n\n attentive, not_attentive = list(attentive), list(not_attentive)\n\n if len(attentive) > 200 and len(not_attentive) > 200:\n next_page_interface(source_path_name, dataset_path, attentive, not_attentive, root)\n else:\n messagebox.showerror(\"Message\", \"Valid Image Count Is Less Than 100\")",
"def test_filter(self):\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'follower': 'Alan'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Kinder', 'Ken', 'Tracy']\n self.assertEqual(actual, expected)",
"def test_other_study_not_in_queryset(self):\n # Delete all but five source traits, so that there are 5 from each study.\n study2 = factories.StudyFactory.create()\n datasets2 = factories.SourceDatasetFactory.create_batch(\n 5, source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only datasets from the correct study are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that the other study's datasets do not show up.\n self.assertEqual(len(returned_pks), len(self.source_datasets))\n for dataset in datasets2:\n self.assertNotIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertIn(dataset.i_id, returned_pks)"
]
| [
"0.6526705",
"0.63991475",
"0.6067365",
"0.60468185",
"0.5980538",
"0.59720397",
"0.59475845",
"0.589787",
"0.5843083",
"0.58304274",
"0.5800998",
"0.57543606",
"0.573433",
"0.57284266",
"0.5726334",
"0.5716319",
"0.5710886",
"0.57061344",
"0.57022065",
"0.56984323",
"0.56940424",
"0.5684679",
"0.56432056",
"0.5626429",
"0.56208336",
"0.5615297",
"0.5597843",
"0.55684495",
"0.5547064",
"0.5534289"
]
| 0.730566 | 0 |
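
The row above only shows the expected behaviour of `filter_exists`; the implementation itself is not part of this record. Below is a minimal sketch that satisfies the four assertions in `test_filter_exists`. Only the function name, the flag values ("HIT", "MISS", "ALL", "NONE") and the expected outputs come from the test; the parameter name `include` and every implementation detail are assumptions.

```python
# Hypothetical reconstruction of the helper exercised by test_filter_exists above.
# Only the behaviour visible in the assertions is reproduced; the real function may differ.
def filter_exists(include: str, datasets: list) -> list:
    """Filter datasets by their 'exists' flag according to the include value."""
    if include == "HIT":
        return [d for d in datasets if d["exists"]]
    if include == "MISS":
        return [d for d in datasets if not d["exists"]]
    if include == "ALL":
        return datasets
    return []  # "NONE" (or any unrecognised value) returns nothing
```
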
Test transform DB record. | def test_transform_record(self):
response = {"frequency": 0.009112876, "info": {"accessType": "PUBLIC"},
"referenceBases": "CT", "alternateBases": "AT",
"start": 10, "end": 12,
"variantCount": 3, "variantType": "MNP"}
record = Record("PUBLIC", 0.009112875989879, referenceBases="CT", alternateBases="AT", start=10, end=12, variantCount=3, variantType="MNP")
result = transform_record(record)
self.assertEqual(result, response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Transform(self, record):\n pass",
"def test_get_record(self):\n pass",
"def test_patch_record(self):\n pass",
"def test_create_record(self):\n pass",
"def test_update_record(self):\n pass",
"def test_transform_data(self):\n # assemble\n input_data = (\n self.spark\n .read\n .parquet(self.test_data_path + 'employees'))\n\n expected_data = (\n self.spark\n .read\n .parquet(self.test_data_path + 'employees_report'))\n\n expected_cols = len(expected_data.columns)\n expected_rows = expected_data.count()\n expected_avg_steps = (\n expected_data\n .agg(mean('steps_to_desk').alias('avg_steps_to_desk'))\n .collect()[0]\n ['avg_steps_to_desk'])\n\n # act\n data_transformed = transform_data(input_data, 21)\n\n cols = len(expected_data.columns)\n rows = expected_data.count()\n avg_steps = (\n expected_data\n .agg(mean('steps_to_desk').alias('avg_steps_to_desk'))\n .collect()[0]\n ['avg_steps_to_desk'])\n\n # assert\n self.assertEqual(expected_cols, cols)\n self.assertEqual(expected_rows, rows)\n self.assertEqual(expected_avg_steps, avg_steps)\n self.assertTrue([col in expected_data.columns\n for col in data_transformed.columns])",
"def test_apply_transform_single_album_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['alartist'], 'New Artist')",
"def test_apply_transform_single_track_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'New Artist')",
"def test_transform_metadata(self):\n response = {\"createDateTime\": \"2018-10-20T20:33:40Z\", \"updateDateTime\": \"2018-10-20T20:33:40Z\",\n \"info\": {\"accessType\": \"PUBLIC\"}}\n record = Record(\"PUBLIC\", createDateTime=datetime.strptime(\"2018-10-20 20:33:40+00\", '%Y-%m-%d %H:%M:%S+00'),\n updateDateTime=datetime.strptime(\"2018-10-20 20:33:40+00\", '%Y-%m-%d %H:%M:%S+00'))\n result = transform_metadata(record)\n self.assertEqual(result, response)",
"def test_apply_transform_single_album_no_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)",
"def test_interface(transform, example_tsds: TSDataset):\n start_columnns = example_tsds.columns\n example_tsds.fit_transform(transforms=[transform])\n assert np.all(start_columnns == example_tsds.columns)",
"def test_apply_transform_single_track_no_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)",
"def test_from_database_row(self):\n orig_track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer',\n tracknum=1, seconds=10, album_id=42, last_transform=5)\n pk = orig_track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n\n track = Track.from_database_row(self.get_track_by_id(pk))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.ensemble, 'Ensemble')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.seconds, 10)\n self.assertEqual(track.album_id, 42)\n self.assertEqual(track.last_transform, 5)\n self.assertEqual(track.pk, pk)",
"def test_transform(self):\n new_route = self.route.transform(\"transformed\")\n assert new_route != self.route\n assert new_route.route[\"transform\"] == \"transformed\"",
"def test_get_records(self):\n pass",
"def test_transform(self):\n X = self.generate_X()\n task = mmRDTR()\n task.fit(X)\n res = task.transform(X)\n # check if Instance\n self.assertIsInstance(res,Container)\n # check if names\n self.assertEqual(np.all(res.colnames()==[str(i) for i in xrange(len(res.colnames()))]),True)\n # check if values as within the range expected\n self.assertEqual(np.all(res().min()>=-1),True)\n self.assertEqual(np.all(res().max()<=1),True)\n for i in range(len(res.colnames())):\n self.assertEqual(round(res()[:,i].mean(),8),0)\n # check with new data\n Y = self.generate_X()\n res = task.transform(Y)\n self.assertEqual(np.all(res.colnames()==[str(i) for i in xrange(len(res.colnames()))]),True)\n self.assertEqual(np.all(res().min()>=-1),True)\n self.assertEqual(np.all(res().max()<=1),True)",
"def test_convert():",
"def test_pipeline_transform_with_sample(testbed: SparkETLTests):\n # Given - getting the input dataframes\n inc_df: DataFrame = testbed.dataframes['page_views']\n prev_df: DataFrame = testbed.dataframes['soyel_db.user_pageviews']\n # getting the expected dataframe\n expected_data: DataFrame = testbed.dataframes['expected_output_user_pageviews']\n # When - actual data\n transformed_data: DataFrame = pipeline.transform(inc_df=inc_df,\n prev_df=prev_df,\n config=testbed.config,\n logger=testbed.logger)\n # Then - comparing the actual and expected data\n testbed.comapare_dataframes(df1=transformed_data, df2=expected_data)",
"def test_from_database_row(self):\n orig_album = Album(artist='Artist', album='Album', album_type='ep',\n totaltracks=1, totalseconds=120, last_transform=5)\n pk = orig_album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.from_database_row(self.get_album_by_id(pk))\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'ep')\n self.assertEqual(album.totalseconds, 120)\n self.assertEqual(album.totaltracks, 1)\n self.assertEqual(album.last_transform, 5)\n self.assertEqual(album.pk, pk)",
"def test_transform(self):\n result = transform((1, 2) ,2, 2)\n self.assertEqual(result, (4 * PIXEL, 3 * PIXEL))",
"def test_load_full_transform(self):\n self.add_transform(cond_artist=True, cond_album=True, cond_title=True,\n cond_ensemble=True, cond_composer=True, cond_conductor=True,\n change_artist=True, change_album=True, change_title=True,\n change_ensemble=True, change_composer=True, change_conductor=True,\n pattern_artist='Artist', pattern_album='Album', pattern_title='Title',\n pattern_ensemble='Ensemble', pattern_composer='Composer', pattern_conductor='Conductor',\n to_artist='Artist 2', to_album='Album 2', to_title='Title 2',\n to_ensemble='Ensemble 2', to_composer='Composer 2', to_conductor='Conductor 2')\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 1)\n transform = self.app.transforms.transforms[1]\n self.assertEqual(transform.cond_artist, True)\n self.assertEqual(transform.cond_album, True)\n self.assertEqual(transform.cond_title, True)\n self.assertEqual(transform.cond_ensemble, True)\n self.assertEqual(transform.cond_composer, True)\n self.assertEqual(transform.cond_conductor, True)\n self.assertEqual(transform.change_artist, True)\n self.assertEqual(transform.change_album, True)\n self.assertEqual(transform.change_title, True)\n self.assertEqual(transform.change_ensemble, True)\n self.assertEqual(transform.change_composer, True)\n self.assertEqual(transform.change_conductor, True)\n self.assertEqual(transform.pattern_artist, 'Artist')\n self.assertEqual(transform.pattern_album, 'Album')\n self.assertEqual(transform.pattern_title, 'Title')\n self.assertEqual(transform.pattern_ensemble, 'Ensemble')\n self.assertEqual(transform.pattern_composer, 'Composer')\n self.assertEqual(transform.pattern_conductor, 'Conductor')\n self.assertEqual(transform.to_artist, 'Artist 2')\n self.assertEqual(transform.to_album, 'Album 2')\n self.assertEqual(transform.to_title, 'Title 2')\n self.assertEqual(transform.to_ensemble, 'Ensemble 2')\n self.assertEqual(transform.to_composer, 'Composer 2')\n self.assertEqual(transform.to_conductor, 'Conductor 2')",
"def test_transform(self):\n t = Identity()\n assert t.transform(\"yo\") == \"yo\"",
"def test_upload_to_df(upload_dataframe: pd.DataFrame) -> None:\n validated = UploadCollection.from_dataframe(upload_dataframe)\n assert upload_dataframe.equals(validated.to_dataframe()[upload_dataframe.columns])",
"def test_operate(self) -> None:\n test_dict = {\"key\": \"value\"}\n test_list = [\"item0\", \"item1\"]\n\n # fill table\n\n with self.session:\n self.session.add_all(\n [\n ExampleTable(row_name=\"dict_record\", json_record=test_dict),\n ExampleTable(row_name=\"list_record\", json_record=test_list),\n ]\n )\n self.session.commit()\n\n # Validate backward check\n\n dict_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == \"dict_record\").first()\n\n list_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == \"list_record\").first()\n\n self.assertEqual(\n dict_record.json_record,\n test_dict,\n f\"Dict was changed: {test_dict!r} -> {dict_record.json_record!r}\",\n )\n\n self.assertEqual(\n list_record.json_record,\n test_list,\n f\"List changed {test_list!r} -> {list_record.json_record!r}\",\n )\n\n # Low level\n\n # noinspection PyArgumentList\n with sqlite3.connect(database=f\"file:{self.db_path}?mode=ro\", uri=True) as conn:\n c = conn.cursor()\n c.execute(f\"SELECT row_name, json_record FROM {table_name}\")\n\n result = dict(c.fetchall())\n\n self.assertEqual(result[\"dict_record\"], json.dumps(test_dict))\n\n self.assertEqual(result[\"list_record\"], json.dumps(test_list))",
"def test_delete_record(self):\n pass",
"def test(self):\n\t\tdb=simpledb.DB(debug=0)\n\t\tfor t in self.KnownValues:\n\t\t\tresult=db.onecmd(t[0])\n\t\t\tself.assertEqual(t[1], result)",
"def test_data_creation_from_base_row(self, mock_read_csv):\n f = StringIO(self.data_header + self.data_row)\n reader = csv.DictReader(f)\n mock_read_csv.return_value = reader\n load_values()\n self.assertEqual(CountyMortgageData.objects.count(), 1)\n county = CountyMortgageData.objects.first()\n fields = reader.fieldnames\n fields.pop(fields.index('fips')) # test string separately\n fields.pop(fields.index('open')) # 'open' is stored as 'total'\n fields.pop(fields.index('date')) # date must be parsed before testing\n self.assertEqual(county.fips, self.data_row_dict.get('fips'))\n open_value = int(self.data_row_dict.get('open'))\n self.assertEqual(county.total, open_value)\n target_date = parser.parse(self.data_row_dict['date']).date()\n self.assertEqual(county.date, target_date)\n for field in fields: # remaining fields can be tested in a loop\n self.assertEqual(\n getattr(county, field), int(self.data_row_dict.get(field)))\n # test computed values\n self.assertEqual(\n county.epoch,\n int(target_date.strftime('%s')) * 1000)\n self.assertEqual(\n county.percent_90,\n int(self.data_row_dict.get('ninety')) * 1.0 / open_value)\n self.assertEqual(\n county.percent_30_60,\n (int(self.data_row_dict.get('thirty')) +\n int(self.data_row_dict.get('sixty'))) * 1.0 / open_value)",
"def test_log_track_with_transform(self):\n tf_id = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.assertNotEqual(tf_id, 0)\n self.app.load_data()\n\n track = self.app.log_track(self.track_obj('silence.mp3'))\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(track.pk)\n self.assertNotEqual(track_row, None)\n self.assertEqual(track_row['lasttransform'], tf_id)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Track')\n self.assertEqual(track_row['source'], 'xmms')",
"def test_X_returned(self, df, expected):\n\n x = BaseTransformer(columns=\"a\", copy=True)\n\n df_transformed = x.transform(X=df)\n\n h.assert_equal_dispatch(\n expected=expected,\n actual=df_transformed,\n msg=\"Check X returned from transform\",\n )",
"def test_example(self, example_dataset, expected_result):\n\n transformer = PreprocessFeatures()\n result = transformer.fit_transform(example_dataset)\n\n assert (result == expected_result).all()"
]
| [
"0.7146753",
"0.65239096",
"0.6425714",
"0.6382288",
"0.63361186",
"0.63342535",
"0.6245365",
"0.6213984",
"0.6181023",
"0.61414087",
"0.6087115",
"0.6081557",
"0.59832424",
"0.59551847",
"0.5924976",
"0.5916013",
"0.5908581",
"0.5900851",
"0.59001505",
"0.5843761",
"0.582942",
"0.5797012",
"0.5782429",
"0.5717514",
"0.5676576",
"0.5648947",
"0.5629113",
"0.5628311",
"0.5623033",
"0.5607493"
]
| 0.7696168 | 0 |
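
As with the previous row, only the expected output of `transform_record` is visible in the test above, so the following is a hedged sketch consistent with that single assertion. The response keys come from the test; the record attribute names `frequency` and `accessType`, and the rounding to nine decimal places, are inferred rather than taken from shown source.

```python
# Hypothetical reconstruction of transform_record, based solely on the expected
# response dict in test_transform_record above; the real function may differ.
def transform_record(record) -> dict:
    """Map a DB record onto the response fields checked by the test."""
    return {
        "referenceBases": record.referenceBases,
        "alternateBases": record.alternateBases,
        "variantType": record.variantType,
        "start": record.start,
        "end": record.end,
        "variantCount": record.variantCount,
        "frequency": round(record.frequency, 9),  # 0.009112875989879 -> 0.009112876
        "info": {"accessType": record.accessType},
    }
```
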
Test transform misses record. | def test_transform_misses(self):
response = {"referenceBases": '', "alternateBases": '', "variantType": "",
"frequency": 0, "callCount": 0, "sampleCount": 0, "variantCount": 0,
"start": 0, "end": 0, "info": {"accessType": "PUBLIC"}}
record = Record("PUBLIC")
result = transform_misses(record)
self.assertEqual(result, response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_transforms_match(self, transform: Mapping) -> None:\n xform_id = transform.get(TraceKeys.ID, \"\")\n if xform_id == id(self):\n return\n # TraceKeys.NONE to skip the id check\n if xform_id == TraceKeys.NONE:\n return\n xform_name = transform.get(TraceKeys.CLASS_NAME, \"\")\n warning_msg = transform.get(TraceKeys.EXTRA_INFO, {}).get(\"warn\")\n if warning_msg:\n warnings.warn(warning_msg)\n # basic check if multiprocessing uses 'spawn' (objects get recreated so don't have same ID)\n if torch.multiprocessing.get_start_method() in (\"spawn\", None) and xform_name == self.__class__.__name__:\n return\n raise RuntimeError(\n f\"Error {self.__class__.__name__} getting the most recently \"\n f\"applied invertible transform {xform_name} {xform_id} != {id(self)}.\"\n )",
"def test_apply_transform_single_track_no_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)",
"def test_apply_transform_single_album_no_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)",
"def test_no_rows_error(self):\n\n x = BaseTransformer(columns=\"a\")\n\n df = pandas.DataFrame(columns=[\"a\"])\n\n with pytest.raises(ValueError, match=re.escape(\"X has no rows; (0, 1)\")):\n\n x.transform(df)",
"def test_no_transform_track_with_already_applied_transform(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)",
"def test_no_transform_album_with_already_applied_transform(self):\n album = Album(artist='Artist', album='Album', last_transform=1)\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.transformed, False)\n\n tflist.apply_album(album)\n\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.transformed, False)",
"def remove_record_failure():\n\t\tpass",
"def test_xform_rotation_fail(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.xform_file, ani=1)\n\n values = cmds.keyframe('pCube1.rx', q=1, vc=1)\n self.assertNotAlmostEqual(0.0, values[-1])",
"def test_DataTransformationFailureMode_no_duplicates():\n\n dataset = smlb.TabularData(data=np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))\n fails = []\n failmode = smlb.DataTransformationFailureMode((\"index\", fails), dataset.num_samples)\n failmode.handle_failure(1)\n failmode.handle_failure(5)\n failmode.handle_failure(6)\n failmode.handle_failure(5)\n dataset = failmode.finalize(dataset)\n\n assert dataset.num_samples == 10\n assert fails == [1, 5, 6]",
"def test_transform_record(self):\n response = {\"frequency\": 0.009112876, \"info\": {\"accessType\": \"PUBLIC\"},\n \"referenceBases\": \"CT\", \"alternateBases\": \"AT\",\n \"start\": 10, \"end\": 12,\n \"variantCount\": 3, \"variantType\": \"MNP\"}\n record = Record(\"PUBLIC\", 0.009112875989879, referenceBases=\"CT\", alternateBases=\"AT\", start=10, end=12, variantCount=3, variantType=\"MNP\")\n result = transform_record(record)\n self.assertEqual(result, response)",
"def test_add_duplicate_transform_id(self):\n tflist = TransformList()\n tflist.add_transform(Transform(1))\n with self.assertRaises(Exception):\n tflist.add_transform(Transform(1))\n self.assertEqual(len(tflist), 1)",
"def test_load_empty_transform(self):\n self.add_transform()\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 1)\n transform = self.app.transforms.transforms[1]\n self.assertEqual(transform.cond_artist, False)\n self.assertEqual(transform.cond_album, False)\n self.assertEqual(transform.cond_title, False)\n self.assertEqual(transform.cond_ensemble, False)\n self.assertEqual(transform.cond_conductor, False)\n self.assertEqual(transform.cond_composer, False)\n self.assertEqual(transform.change_artist, False)\n self.assertEqual(transform.change_album, False)\n self.assertEqual(transform.change_title, False)\n self.assertEqual(transform.change_ensemble, False)\n self.assertEqual(transform.change_conductor, False)\n self.assertEqual(transform.change_composer, False)\n self.assertEqual(transform.pattern_artist, '')\n self.assertEqual(transform.pattern_album, '')\n self.assertEqual(transform.pattern_title, '')\n self.assertEqual(transform.pattern_ensemble, '')\n self.assertEqual(transform.pattern_conductor, '')\n self.assertEqual(transform.pattern_composer, '')\n self.assertEqual(transform.to_artist, '')\n self.assertEqual(transform.to_album, '')\n self.assertEqual(transform.to_title, '')\n self.assertEqual(transform.to_ensemble, '')\n self.assertEqual(transform.to_conductor, '')\n self.assertEqual(transform.to_composer, '')",
"def test_no_transform_track_with_song_with_transform_id_greater(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)",
"def test_transform_album_no_changes(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1, cond_artist=True, change_artist=True,\n pattern_artist='Foo', to_artist='Bar')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)",
"def test_apply_transform_single_track_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'New Artist')",
"def test_get_all_need_transform_one_track_another_already_applied(self):\n track = Track(artist='Artist', album='Album', title='Title', last_transform=1)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n track = Track(artist='Artist', album='Album', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 2)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 1)\n self.assertEqual(tracks[0].pk, pk)",
"def test_get_all_need_transform_no_albums_matched(self):\n orig_album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120, last_transform=1)\n pk = orig_album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n\n self.assertEqual(Album.get_all_need_transform(self.app.curs, 1), [])",
"def verify_data(transformer, reader):\n\n if tf.__version__ < '2.0.0':\n tf_iter = transformer.tfrecord_iterator_oldversion()\n else:\n tf_iter = transformer.tfrecord_iterator()\n mr_iter = reader.get_next()\n\n count = 0\n for tf_item, mr_item in zip(tf_iter, mr_iter):\n count = count + 1\n assert len(tf_item) == len(mr_item)\n for key, value in tf_item.items():\n logger.info(\"key: {}, tfrecord: value: {}, mindrecord: value: {}\".format(key, value,\n mr_item[cast_name(key)]))\n if isinstance(value, np.ndarray):\n assert (value == mr_item[cast_name(key)]).all()\n else:\n assert value == mr_item[cast_name(key)]\n assert count == 10",
"def test_transform_track_no_changes(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer',\n tracknum=1, seconds=60)\n transform = Transform(1, cond_artist=True, change_artist=True,\n pattern_artist='Foo', to_artist='Bar')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.ensemble, 'Ensemble')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.transformed, False)",
"def test_log_track_with_transform_and_nonmatching_album(self):\n\n album_id = self.add_album(artist='Artist', album='Album')\n self.assertNotEqual(album_id, 0)\n\n tf_id = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.assertNotEqual(tf_id, 0)\n self.app.load_data()\n\n track = self.app.log_track(self.track_obj('silence.mp3'))\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(track.pk)\n self.assertNotEqual(track_row, None)\n self.assertEqual(track_row['lasttransform'], tf_id)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Track')\n self.assertEqual(track_row['source'], 'xmms')\n self.assertEqual(track_row['album_id'], 0)",
"def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))",
"def can_retransform(self):\r\n return self._can_retransform",
"def test_bad_match(self):\n log.info('===== START TEST BAD MATCH =====')\n\n # Telemetered\n file_path = os.path.join(RESOURCE_PATH, '11129553_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle)\n\n particles = self.parser.get_records(57)\n\n log.debug(\"*** test_bad_match Num particles %s\", len(particles))\n\n # 2 bad samples\n self.assertEqual(len(self.exception_callback_value), 2)\n stream_handle.close()\n\n log.info('===== END TEST BAD MATCH =====')",
"def test_transform_track_empty_transform(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer',\n tracknum=1, seconds=60)\n transform = Transform(1,\n change_artist=True, pattern_artist='Artist', to_artist='Artist 2',\n change_album=True, pattern_album='Album', to_album='Album 2',\n change_title=True, pattern_title='Title', to_title='Title 2',\n change_ensemble=True, pattern_ensemble='Ensemble', to_ensemble='Ensemble 2',\n change_composer=True, pattern_composer='Composer', to_composer='Composer 2',\n change_conductor=True, pattern_conductor='Conductor', to_conductor='Conductor 2',\n )\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.ensemble, 'Ensemble')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.transformed, False)",
"def test_get_all_need_transform_no_tracks_matched(self):\n track = Track(artist='Artist', album='Album', title='Title', last_transform=1)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 0)",
"def test_X_no_rows_error(self):\n\n x = BaseTransformer(columns=\"a\")\n\n df = pandas.DataFrame(columns=[\"a\"])\n\n with pytest.raises(ValueError, match=re.escape(\"X has no rows; (0, 1)\")):\n\n x.fit(X=df)",
"def test_apply_transform_single_album_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['alartist'], 'New Artist')",
"def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False",
"def test_missing_column(self, example_dataset):\n\n example_dataset = example_dataset.drop([\"Sex\"], axis=1)\n transformer = PreprocessFeatures()\n\n with pytest.raises(ValueError):\n transformer.fit_transform(example_dataset)",
"def test_transform_album_empty_transform(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n change_artist=True, pattern_artist='Artist', to_artist='Artist 2',\n change_album=True, pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)"
]
| [
"0.6604709",
"0.6437916",
"0.6375894",
"0.6232821",
"0.5919303",
"0.590823",
"0.58788997",
"0.58498996",
"0.58181363",
"0.57857037",
"0.57830256",
"0.57796776",
"0.57594526",
"0.5697106",
"0.56866306",
"0.5685778",
"0.56558555",
"0.5641226",
"0.56247234",
"0.5612856",
"0.56034184",
"0.55923456",
"0.55900437",
"0.5589407",
"0.5576226",
"0.5531915",
"0.55069834",
"0.5472031",
"0.546107",
"0.54587424"
]
| 0.6836742 | 0 |
Test that a handover is added. | def test_add_handover(self):
# Test that the handover actually is added
handovers = [{"handover1": "info"}, {"handover2": "url"}]
record = {"datasetId": "test", "referenceName": "22", "referenceBases": "A",
"alternateBases": "C", "start": 10, "end": 11, "variantType": "SNP"}
with mock.patch('beacon_api.extensions.handover.make_handover', return_value=handovers):
result = add_handover(record)
record['datasetHandover'] = handovers
self.assertEqual(result, record) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_02_add_move(self):\n # Create/validate PO\n order = self.create_and_validate_po()\n\n # Add new move in picking\n picking = order.picking_ids[0]\n self.add_move(picking)\n\n # Try to validate picking\n self.assertEqual(picking.state, 'draft')\n with self.assertRaisesRegexp(exceptions.Warning, 'NEW move'):\n picking.do_transfer()\n self.assertEqual(picking.state, 'draft')",
"def test_add_one_more_test(self):\n self.assertTrue(True)",
"def test_add(self):\n self.assertEqual(3, add(1, 2))\n self.assertNotEqual(3, add(2, 2))",
"def test_add(self):\n print('test_add')\n \n self.assertEqual(120, add(100, 20))\n self.assertNotEqual(3, add(10, 10))",
"def test_add(logger):\n tamper = actions.tamper.TamperAction(None, field=\"seq\", tamper_type=\"add\", tamper_value=10)\n assert tamper.field == \"seq\", \"Tamper action changed fields.\"\n assert tamper.tamper_type == \"add\", \"Tamper action changed types.\"\n assert str(tamper) == \"tamper{TCP:seq:add:10}\", \"Tamper returned incorrect string representation: %s\" % str(tamper)\n\n packet = layers.packet.Packet(IP(src=\"127.0.0.1\", dst=\"127.0.0.1\")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags=\"S\"))\n original = copy.deepcopy(packet)\n tamper.tamper(packet, logger)\n\n new_value = packet[TCP].seq\n assert new_value == 110, \"Tamper did not add\"\n\n # Must run this check repeatedly - if a scapy fuzz-ed value is not properly\n # ._fix()-ed, it will return different values each time it's requested\n for _ in range(0, 5):\n assert packet[TCP].seq == new_value, \"Corrupted value is not stable\"\n\n # Confirm tamper didn't corrupt anything else in the TCP header\n assert confirm_unchanged(packet, original, TCP, [\"seq\"])\n\n # Confirm tamper didn't corrupt anything else in the IP header\n assert confirm_unchanged(packet, original, IP, [])",
"def test_add_item_at_using_put(self):\n pass",
"def test_add_network(self):\n pass",
"def test_theft_and_stealing(self):",
"def test_add_yet_one_more_test(self):\n self.assertTrue(True)",
"def test_let(self):",
"def test_add(self):\n self.assertEqual(3, foo.add(1, 2))\n self.assertNotEqual(3, foo.add(2, 2))",
"def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def test_add(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com'}, follow=True)\n self.assertShortURLCreated(response)",
"def test_our_add(self):\n\n # arrange\n x = 2\n y = 3\n expected_result = 5\n\n # act; assert\n self.assertEqual(self.our_module.add(x, y), expected_result)",
"def test_add_stock_item(self):\n pass",
"def test_add(self):\n self.assertEqual(add(1, 1), 2, \"Wrong answer\")\n self.assertEqual(add(10, 1), 11, \"Wrong answer\")\n self.assertEqual(add(15, 15), 30, \"Wrong answer\")",
"def test(self):\n pass",
"def test_add1(self):\n self.assertEqual(15, add(10 , 5), \"should be 15\")",
"def test_post_foods(self):\n pass",
"def test_add():\n\n assert add(1, 1) == 2\n assert add(1, 2) == add(2, 1) == 3",
"def _test(self):\n pass",
"def _test(self):\n pass",
"def _test(self):\n pass",
"def test_households_in_admin_unit(self):",
"def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n time.sleep(2)\n\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))"
]
| [
"0.6259538",
"0.62197006",
"0.6193764",
"0.6161933",
"0.6152835",
"0.61155427",
"0.61128086",
"0.61108625",
"0.61099696",
"0.6078901",
"0.6065535",
"0.6053951",
"0.60404694",
"0.60404694",
"0.60404694",
"0.60404694",
"0.60404694",
"0.60337585",
"0.60318476",
"0.6021554",
"0.60110885",
"0.59778905",
"0.5958876",
"0.5953918",
"0.5944079",
"0.5926874",
"0.5926874",
"0.5926874",
"0.5924964",
"0.5917318"
]
| 0.7491612 | 0 |
Test the db call for getting public datasets access. | async def test_datasets_access_call_public(self):
pool = asynctest.CoroutineMock()
pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])
result = await fetch_datasets_access(pool, None)
# for now it can return a tuple of empty datasets
# in order to get a response we will have to mock it
# in Connection() class
self.assertEqual(result, (['mock:public:id'], [], [])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dataset_for_personal_accounts(self):\n pass",
"async def test_datasets_access_call_multiple(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'},\n {'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], ['mock:controlled:id']))",
"def test_find_datasets_filters_by_access_and_use_only_returns_the_dataset_once(\n access_type,\n):\n user = factories.UserFactory.create(is_superuser=False)\n user2 = factories.UserFactory.create(is_superuser=False)\n client = Client(**get_http_sso_data(user))\n\n access_granted_master = factories.DataSetFactory.create(\n published=True,\n type=DataSetType.MASTER,\n name=\"Master - access redundantly granted\",\n user_access_type=access_type,\n )\n factories.DataSetUserPermissionFactory.create(user=user, dataset=access_granted_master)\n factories.DataSetUserPermissionFactory.create(user=user2, dataset=access_granted_master)\n\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"access\": \"yes\", \"use\": str(DataSetType.MASTER)},\n )\n\n assert response.status_code == 200\n assert list(response.context[\"datasets\"]) == [expected_search_result(access_granted_master)]",
"def test_finding_datasets_doesnt_query_database_excessively(\n access_type, client, django_assert_num_queries\n):\n expected_num_queries = 13\n source_tags = [factories.SourceTagFactory() for _ in range(10)]\n topic_tags = [factories.TopicTagFactory() for _ in range(10)]\n\n masters = [\n factories.DataSetFactory(\n type=DataSetType.MASTER,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for master in masters:\n master.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n datacuts = [\n factories.DataSetFactory(\n type=DataSetType.DATACUT,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for datacut in datacuts:\n datacut.tags.set(random.sample(source_tags, 1) + random.sample(topic_tags, 1))\n\n references = [factories.ReferenceDatasetFactory.create(published=True) for _ in range(10)]\n for reference in references:\n reference.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n visualisations = [\n factories.VisualisationCatalogueItemFactory.create(published=True)\n for _ in range(random.randint(10, 50))\n ]\n\n for visualisation in visualisations:\n factories.DataSetApplicationTemplatePermissionFactory(\n application_template=visualisation.visualisation_template,\n dataset=random.choice(masters),\n )\n\n # Log into site (triggers the queries related to setting up the user).\n client.get(reverse(\"root\"))\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), follow=True)\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"q\": \"potato\"})\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"source\": [str(tag.id) for tag in random.sample(source_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"topic\": [str(tag.id) for tag in random.sample(topic_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"data_type\": str(DataSetType.MASTER)},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"access\": \"yes\"})\n assert response.status_code == 200",
"async def test_datasets_access_call_controlled(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, ([], [], ['mock:controlled:id']))",
"async def test_datasets_access_call_registered(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'REGISTERED', 'datasetid': 'mock:registered:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, ([], ['mock:registered:id'], []))",
"async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])",
"async def test_23() -> None:\n LOG.debug(\"Test query for targeting a non-existing PUBLIC datasets, using ALL. (expect no data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1111genome\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 0, sys.exit(\"Should be able to retrieve only public.\")",
"async def test_24() -> None:\n LOG.debug(\"Test query for targeting one existing and one non-existing PUBLIC datasets, using ALL. (expect only PUBLIC)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1111genome\", \"urn:hg:1000genome\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is True, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 1, sys.exit(\"Should be able to retrieve only public.\")",
"def test_get_records(self):\n pass",
"def test_data():\n db = current_app.db\n Site = db.tables.Site\n Endpoint = db.tables.Endpoint\n if Site.query.count():\n return # DB not empty\n entries = [\n Site(site_id=1,\n site_name='Site1',\n site_desc='First Test Site',\n site_owner=1,\n user_ca_cert='USERCERT1',\n service_ca_cert='',\n auth_type=0,\n auth_uri='localhost:49998',\n public=False,\n def_path='/~'),\n Site(site_id=2,\n site_name='Site2',\n site_desc='Second Test Site',\n site_owner=123,\n user_ca_cert='USERCERT2',\n service_ca_cert='SERVICECERT2',\n auth_type=0,\n auth_uri='localhost:49998',\n public=True,\n def_path='/project'),\n Endpoint(ep_id=1,\n site_id=1,\n ep_uri='localhost:49999'),\n Endpoint(ep_id=2,\n site_id=1,\n ep_uri='localhost2:49999'),\n Endpoint(ep_id=3,\n site_id=2,\n ep_uri='localhost:50000'),\n Endpoint(ep_id=4,\n site_id=2,\n ep_uri='localhost2:50000'),\n Site(site_id=3,\n site_name='CloudSite1',\n site_desc='Testing site in cloud (1)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=5,\n site_id=3,\n ep_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=4,\n site_name='CloudSite2',\n site_desc='Testing site in cloud (2)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=6,\n site_id=4,\n ep_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=5,\n site_name='UKI-LT2-IC-HEP',\n site_desc='Imperial College GridPP Site',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=1,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/pnfs/hep.ph.ic.ac.uk/data'),\n Endpoint(ep_id=7,\n site_id=5,\n ep_uri='gfe02.grid.hep.ph.ic.ac.uk:2811'),\n Site(site_id=6,\n site_name='NERSC DTN',\n site_desc='NERSC DTN Service',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=0,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=8,\n site_id=6,\n ep_uri='dtn01.nersc.gov:2811'),\n ]\n for entry in entries:\n db.session.add(entry)\n db.session.commit()",
"def test_ds(self, obj):\n pass",
"def test_data_researcher_access(self):\n self.client.login(username=self.data_researcher.username, password='test')\n\n self.verify_response(params={\n 'all_blocks': True,\n 'course_id': str(self.course_key)\n })",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"def test_dashboards_v2_request_access(self):\n pass",
"def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)",
"def get(log, session, args):\n url = \"{}datasets/{}\".format(\n http.get_api_url(args.url, args.project),\n args.id)\n log.debug('GET: {}'.format(url))\n response_json = http.get(session, url)\n log.print_json(response_json, \"dataset\", \"get\")",
"async def test_21() -> None:\n LOG.debug(\"Test Non-existing/MISS variant targeting PUBLIC and CONTROLLED datasets with token perms (expect all shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 8,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\"],\n \"includeDatasetResponses\": \"MISS\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 2, sys.exit(\"Should be able to retrieve only public.\")",
"def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)",
"def test_data_provider_simple(self):\n data_provider = dps.DataProviderSimple()\n result = data_provider.get_data()\n self.assertTrue(isinstance(result, list))\n self.assertTrue(result[0] == 0)",
"async def test_fetch_dataset_metadata_call(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection()\n result = await fetch_dataset_metadata(pool, None, None)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, [])",
"def test_fetch_dataset(self):\n\n mock_pandas = MagicMock()\n mock_pandas.read_csv.return_value = sentinel.dataset\n\n result = Network.fetch_dataset(sentinel.url, pandas_impl=mock_pandas)\n\n self.assertEqual(result, sentinel.dataset)\n mock_pandas.read_csv.assert_called_once_with(sentinel.url, dtype=str)",
"def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')",
"def access():",
"def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))",
"def test_all_sequential_open_distribution_has_access(self, has_access):\r\n has_access.return_value = True\r\n response = views.all_sequential_open_distrib(self.request, 'test/test/test')\r\n\r\n self.assertEqual(simplejson.dumps(self.simple_data), response.content)",
"def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': \"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')",
"def test_get_private(self):\n owner = create_user('owner')\n create_snippet('foo', private=True, owner=owner)\n expected = [0, 0, 1, 1]\n\n def check(i):\n response = self.get()\n self.assertEqual(len(response.data), expected[i])\n\n self.check_for_users(check, owner)",
"def test_get_permissions(self):\n pass",
"def test_get_host_access(self):\n pass"
]
| [
"0.69238675",
"0.69035935",
"0.67359",
"0.6674255",
"0.66601235",
"0.6349921",
"0.6299647",
"0.62559557",
"0.6216987",
"0.61688465",
"0.60637873",
"0.59757394",
"0.59607965",
"0.5922106",
"0.5850786",
"0.58391786",
"0.5838317",
"0.58256304",
"0.5823583",
"0.58134",
"0.5811772",
"0.5791216",
"0.57887703",
"0.5764024",
"0.5757532",
"0.57501704",
"0.5723114",
"0.57155126",
"0.57083917",
"0.56874245"
]
| 0.79184955 | 0 |
Test the db call for getting registered datasets access. | async def test_datasets_access_call_registered(self):
pool = asynctest.CoroutineMock()
pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'REGISTERED', 'datasetid': 'mock:registered:id'}])
result = await fetch_datasets_access(pool, None)
# for now it can return a tuple of empty datasets
# in order to get a response we will have to mock it
# in Connection() class
self.assertEqual(result, ([], ['mock:registered:id'], [])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_finding_datasets_doesnt_query_database_excessively(\n access_type, client, django_assert_num_queries\n):\n expected_num_queries = 13\n source_tags = [factories.SourceTagFactory() for _ in range(10)]\n topic_tags = [factories.TopicTagFactory() for _ in range(10)]\n\n masters = [\n factories.DataSetFactory(\n type=DataSetType.MASTER,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for master in masters:\n master.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n datacuts = [\n factories.DataSetFactory(\n type=DataSetType.DATACUT,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for datacut in datacuts:\n datacut.tags.set(random.sample(source_tags, 1) + random.sample(topic_tags, 1))\n\n references = [factories.ReferenceDatasetFactory.create(published=True) for _ in range(10)]\n for reference in references:\n reference.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n visualisations = [\n factories.VisualisationCatalogueItemFactory.create(published=True)\n for _ in range(random.randint(10, 50))\n ]\n\n for visualisation in visualisations:\n factories.DataSetApplicationTemplatePermissionFactory(\n application_template=visualisation.visualisation_template,\n dataset=random.choice(masters),\n )\n\n # Log into site (triggers the queries related to setting up the user).\n client.get(reverse(\"root\"))\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), follow=True)\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"q\": \"potato\"})\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"source\": [str(tag.id) for tag in random.sample(source_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"topic\": [str(tag.id) for tag in random.sample(topic_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"data_type\": str(DataSetType.MASTER)},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"access\": \"yes\"})\n assert response.status_code == 200",
"async def test_datasets_access_call_multiple(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'},\n {'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], ['mock:controlled:id']))",
"def test_dataset_details():\n with new_test_dataset(2) as test_ds:\n args = build_register_args(test_ds.copy_to_s3())\n ds_name = args['name']\n URLs.run(url_info=URLs.register_url(), json_body=args)\n\n ds_parts = URLs.run(url_info=URLs.dataset_parts_url(ds_name)).json\n assert ds_parts['filenames'] == test_ds.expected_parts.filenames\n expected_columns = json.loads(datafile_schema().to_json())['columns']\n\n ds_short_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=False)).json\n assert ds_short_schema['columns'] == expected_columns\n\n ds_full_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=True)).json\n assert ds_full_schema['columns'][DEFAULT_TIMESTAMP_COLUMN]['colattrs']['numericMin'] == BASE_TIME\n\n URLs.run(url_info=URLs.unregister_url(ds_name))",
"async def test_datasets_access_call_public(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], []))",
"def test_get_records(self):\n pass",
"async def test_datasets_access_call_controlled(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, ([], [], ['mock:controlled:id']))",
"async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])",
"def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)",
"async def test_fetch_dataset_metadata_call(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection()\n result = await fetch_dataset_metadata(pool, None, None)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, [])",
"def test_get_db_records(self):\n string = StringIndexer.objects.create(organization_id=123, string=\"oop\")\n collection = KeyCollection({123: {\"oop\"}})\n key = \"123:oop\"\n\n assert indexer_cache.get(key, self.cache_namespace) is None\n assert indexer_cache.get(string.id, self.cache_namespace) is None\n\n self.indexer.indexer._get_db_records(self.use_case_id, collection)\n\n assert indexer_cache.get(string.id, self.cache_namespace) is None\n assert indexer_cache.get(key, self.cache_namespace) is None",
"def test_dataset_for_personal_accounts(self):\n pass",
"def test_data():\n db = current_app.db\n Site = db.tables.Site\n Endpoint = db.tables.Endpoint\n if Site.query.count():\n return # DB not empty\n entries = [\n Site(site_id=1,\n site_name='Site1',\n site_desc='First Test Site',\n site_owner=1,\n user_ca_cert='USERCERT1',\n service_ca_cert='',\n auth_type=0,\n auth_uri='localhost:49998',\n public=False,\n def_path='/~'),\n Site(site_id=2,\n site_name='Site2',\n site_desc='Second Test Site',\n site_owner=123,\n user_ca_cert='USERCERT2',\n service_ca_cert='SERVICECERT2',\n auth_type=0,\n auth_uri='localhost:49998',\n public=True,\n def_path='/project'),\n Endpoint(ep_id=1,\n site_id=1,\n ep_uri='localhost:49999'),\n Endpoint(ep_id=2,\n site_id=1,\n ep_uri='localhost2:49999'),\n Endpoint(ep_id=3,\n site_id=2,\n ep_uri='localhost:50000'),\n Endpoint(ep_id=4,\n site_id=2,\n ep_uri='localhost2:50000'),\n Site(site_id=3,\n site_name='CloudSite1',\n site_desc='Testing site in cloud (1)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=5,\n site_id=3,\n ep_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=4,\n site_name='CloudSite2',\n site_desc='Testing site in cloud (2)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=6,\n site_id=4,\n ep_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=5,\n site_name='UKI-LT2-IC-HEP',\n site_desc='Imperial College GridPP Site',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=1,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/pnfs/hep.ph.ic.ac.uk/data'),\n Endpoint(ep_id=7,\n site_id=5,\n ep_uri='gfe02.grid.hep.ph.ic.ac.uk:2811'),\n Site(site_id=6,\n site_name='NERSC DTN',\n site_desc='NERSC DTN Service',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=0,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=8,\n site_id=6,\n ep_uri='dtn01.nersc.gov:2811'),\n ]\n for entry in entries:\n db.session.add(entry)\n db.session.commit()",
"def test_ds(self, obj):\n pass",
"def test_dataset_found_when_querying_number_in_name(self):\n models.SourceTrait.objects.all().delete()\n # Use a different study to ensure that one of the pre-created datasets doesn't match.\n dataset_name = 'unlikely_24601_dataset'\n # Use an accession that won't match for one dataset but not the other\n dataset_name_match = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n i_accession=123456,\n source_study_version=self.source_study_version\n )\n dataset_accession_match = factories.SourceDatasetFactory.create(\n dataset_name='other_name',\n i_accession=24601,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': 246})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted(returned_pks), sorted([dataset_name_match.i_id, dataset_accession_match.i_id]))",
"def get_dataset():\n\n return db.store.all()",
"def test_dataset_found_when_querying_number_in_name(self):\n models.SourceTrait.objects.all().delete()\n # Use a different study to ensure that one of the pre-created datasets doesn't match.\n dataset_name = 'unlikely_24601_dataset'\n # Use an accession that won't match for one dataset but not the other\n dataset_name_match = factories.SourceDatasetFactory.create(dataset_name=dataset_name, i_accession=123456)\n dataset_accession_match = factories.SourceDatasetFactory.create(dataset_name='other_name', i_accession=24601)\n url = self.get_url()\n response = self.client.get(url, {'q': 246})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted(returned_pks), sorted([dataset_name_match.i_id, dataset_accession_match.i_id]))",
"def test_find_datasets_filters_by_access_and_use_only_returns_the_dataset_once(\n access_type,\n):\n user = factories.UserFactory.create(is_superuser=False)\n user2 = factories.UserFactory.create(is_superuser=False)\n client = Client(**get_http_sso_data(user))\n\n access_granted_master = factories.DataSetFactory.create(\n published=True,\n type=DataSetType.MASTER,\n name=\"Master - access redundantly granted\",\n user_access_type=access_type,\n )\n factories.DataSetUserPermissionFactory.create(user=user, dataset=access_granted_master)\n factories.DataSetUserPermissionFactory.create(user=user2, dataset=access_granted_master)\n\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"access\": \"yes\", \"use\": str(DataSetType.MASTER)},\n )\n\n assert response.status_code == 200\n assert list(response.context[\"datasets\"]) == [expected_search_result(access_granted_master)]",
"def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])",
"def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])",
"def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('source_dataset_table', context)\n for ds in self.datasets:\n self.assertIn(ds, context['source_dataset_table'].data)\n self.assertIsInstance(context['source_dataset_table'], tables.SourceDatasetTableFull)",
"def test_db_read(env_setup, env_table, db_insert_test_data, db_read_test_data, response_test_data):\n DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": \"birthday_data\"}) \\\n .processor(db_insert_test_data.get(\"valid\")) # inserting data\n test_string = DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": env_table}) \\\n .processor(db_read_test_data.get(\"valid\")) # testing\n assert test_string == response_test_data.get(\"valid_read\")",
"def testDatabase(self):\n con = self.getMetadataDatabaseConnection()\n if con:\n return True",
"def test_connect_db_to_query(db):\n assert 1",
"def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])",
"def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])",
"def test_dummydb_basic(self):\n db = DummyDB()",
"def test_fetch_dataset(self):\n\n mock_pandas = MagicMock()\n mock_pandas.read_csv.return_value = sentinel.dataset\n\n result = Network.fetch_dataset(sentinel.url, pandas_impl=mock_pandas)\n\n self.assertEqual(result, sentinel.dataset)\n mock_pandas.read_csv.assert_called_once_with(sentinel.url, dtype=str)",
"def test_context_data_with_valid_search_and_dataset_name(self):\n study = factories.StudyFactory.create()\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum', dataset_name='dolor',\n source_study_version__study=study)\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem other', dataset_name='tempor')\n response = self.client.get(self.get_url(), {'description': 'lorem', 'name': 'dolor'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)\n self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])",
"async def test_25() -> None:\n LOG.debug(\"Test query for targeting three datasets, using ALL. (expect data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 10,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\", \"urn:hg:1000genome:registered\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 3, sys.exit(\"Should be able to retrieve data for all datasets.\")",
"async def test_fetch_all_w_data(database, valid_data):\n await database.setup_database(reset=True)\n for id,user_id,embeddings,batch_id in valid_data:\n await database.insert_user(user_id=user_id)\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n assert isinstance(await database.fetch_all(user_id=user_id),list)\n await database.close_pool()"
]
| [
"0.692886",
"0.66641706",
"0.6565195",
"0.65563977",
"0.65087014",
"0.6503476",
"0.6399934",
"0.6390301",
"0.63635707",
"0.63535875",
"0.63359255",
"0.6288772",
"0.6233373",
"0.6157593",
"0.6155992",
"0.61443067",
"0.6102189",
"0.6071379",
"0.6071379",
"0.60491437",
"0.6041043",
"0.6016093",
"0.58957505",
"0.5889631",
"0.5889631",
"0.58818924",
"0.58504957",
"0.5839465",
"0.58242756",
"0.5813213"
]
| 0.7375869 | 0 |
Test the db call for getting controlled datasets access. | async def test_datasets_access_call_controlled(self):
pool = asynctest.CoroutineMock()
pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'}])
result = await fetch_datasets_access(pool, None)
# for now it can return a tuple of empty datasets
# in order to get a response we will have to mock it
# in Connection() class
self.assertEqual(result, ([], [], ['mock:controlled:id'])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_datasets_access_call_multiple(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'},\n {'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], ['mock:controlled:id']))",
"def test_finding_datasets_doesnt_query_database_excessively(\n access_type, client, django_assert_num_queries\n):\n expected_num_queries = 13\n source_tags = [factories.SourceTagFactory() for _ in range(10)]\n topic_tags = [factories.TopicTagFactory() for _ in range(10)]\n\n masters = [\n factories.DataSetFactory(\n type=DataSetType.MASTER,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for master in masters:\n master.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n datacuts = [\n factories.DataSetFactory(\n type=DataSetType.DATACUT,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for datacut in datacuts:\n datacut.tags.set(random.sample(source_tags, 1) + random.sample(topic_tags, 1))\n\n references = [factories.ReferenceDatasetFactory.create(published=True) for _ in range(10)]\n for reference in references:\n reference.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n visualisations = [\n factories.VisualisationCatalogueItemFactory.create(published=True)\n for _ in range(random.randint(10, 50))\n ]\n\n for visualisation in visualisations:\n factories.DataSetApplicationTemplatePermissionFactory(\n application_template=visualisation.visualisation_template,\n dataset=random.choice(masters),\n )\n\n # Log into site (triggers the queries related to setting up the user).\n client.get(reverse(\"root\"))\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), follow=True)\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"q\": \"potato\"})\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"source\": [str(tag.id) for tag in random.sample(source_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"topic\": [str(tag.id) for tag in random.sample(topic_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"data_type\": str(DataSetType.MASTER)},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"access\": \"yes\"})\n assert response.status_code == 200",
"async def test_datasets_access_call_public(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], []))",
"async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])",
"def test_find_datasets_filters_by_access_and_use_only_returns_the_dataset_once(\n access_type,\n):\n user = factories.UserFactory.create(is_superuser=False)\n user2 = factories.UserFactory.create(is_superuser=False)\n client = Client(**get_http_sso_data(user))\n\n access_granted_master = factories.DataSetFactory.create(\n published=True,\n type=DataSetType.MASTER,\n name=\"Master - access redundantly granted\",\n user_access_type=access_type,\n )\n factories.DataSetUserPermissionFactory.create(user=user, dataset=access_granted_master)\n factories.DataSetUserPermissionFactory.create(user=user2, dataset=access_granted_master)\n\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"access\": \"yes\", \"use\": str(DataSetType.MASTER)},\n )\n\n assert response.status_code == 200\n assert list(response.context[\"datasets\"]) == [expected_search_result(access_granted_master)]",
"def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)",
"def test_dataset_for_personal_accounts(self):\n pass",
"def test_get_records(self):\n pass",
"def test_ds(self, obj):\n pass",
"async def test_datasets_access_call_registered(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'REGISTERED', 'datasetid': 'mock:registered:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, ([], ['mock:registered:id'], []))",
"def test_get_db_records(self):\n string = StringIndexer.objects.create(organization_id=123, string=\"oop\")\n collection = KeyCollection({123: {\"oop\"}})\n key = \"123:oop\"\n\n assert indexer_cache.get(key, self.cache_namespace) is None\n assert indexer_cache.get(string.id, self.cache_namespace) is None\n\n self.indexer.indexer._get_db_records(self.use_case_id, collection)\n\n assert indexer_cache.get(string.id, self.cache_namespace) is None\n assert indexer_cache.get(key, self.cache_namespace) is None",
"def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': \"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')",
"def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('source_dataset_table', context)\n for ds in self.datasets:\n self.assertIn(ds, context['source_dataset_table'].data)\n self.assertIsInstance(context['source_dataset_table'], tables.SourceDatasetTableFull)",
"async def test_fetch_dataset_metadata_call(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection()\n result = await fetch_dataset_metadata(pool, None, None)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, [])",
"def get(log, session, args):\n url = \"{}datasets/{}\".format(\n http.get_api_url(args.url, args.project),\n args.id)\n log.debug('GET: {}'.format(url))\n response_json = http.get(session, url)\n log.print_json(response_json, \"dataset\", \"get\")",
"def test_db_read(env_setup, env_table, db_insert_test_data, db_read_test_data, response_test_data):\n DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": \"birthday_data\"}) \\\n .processor(db_insert_test_data.get(\"valid\")) # inserting data\n test_string = DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": env_table}) \\\n .processor(db_read_test_data.get(\"valid\")) # testing\n assert test_string == response_test_data.get(\"valid_read\")",
"def test_retrieve_dyn():\n # use the same id as previous test.\n the_id = 'from-test-dyndb'\n\n # get the response using the\n response = dyn_crud.retrieve_record(the_id)\n\n # run test.\n assert True if (response['company']['S'] == 'test company' and\n response['location']['S'] == 'Shambhala') else False",
"async def test_25() -> None:\n LOG.debug(\"Test query for targeting three datasets, using ALL. (expect data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 10,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\", \"urn:hg:1000genome:registered\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 3, sys.exit(\"Should be able to retrieve data for all datasets.\")",
"async def test_24() -> None:\n LOG.debug(\"Test query for targeting one existing and one non-existing PUBLIC datasets, using ALL. (expect only PUBLIC)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1111genome\", \"urn:hg:1000genome\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is True, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 1, sys.exit(\"Should be able to retrieve only public.\")",
"def test_dataset_found_when_querying_number_in_name(self):\n models.SourceTrait.objects.all().delete()\n # Use a different study to ensure that one of the pre-created datasets doesn't match.\n dataset_name = 'unlikely_24601_dataset'\n # Use an accession that won't match for one dataset but not the other\n dataset_name_match = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n i_accession=123456,\n source_study_version=self.source_study_version\n )\n dataset_accession_match = factories.SourceDatasetFactory.create(\n dataset_name='other_name',\n i_accession=24601,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': 246})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted(returned_pks), sorted([dataset_name_match.i_id, dataset_accession_match.i_id]))",
"def test_dataset_found_when_querying_number_in_name(self):\n models.SourceTrait.objects.all().delete()\n # Use a different study to ensure that one of the pre-created datasets doesn't match.\n dataset_name = 'unlikely_24601_dataset'\n # Use an accession that won't match for one dataset but not the other\n dataset_name_match = factories.SourceDatasetFactory.create(dataset_name=dataset_name, i_accession=123456)\n dataset_accession_match = factories.SourceDatasetFactory.create(dataset_name='other_name', i_accession=24601)\n url = self.get_url()\n response = self.client.get(url, {'q': 246})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted(returned_pks), sorted([dataset_name_match.i_id, dataset_accession_match.i_id]))",
"def test_fetch_dataset(self):\n\n mock_pandas = MagicMock()\n mock_pandas.read_csv.return_value = sentinel.dataset\n\n result = Network.fetch_dataset(sentinel.url, pandas_impl=mock_pandas)\n\n self.assertEqual(result, sentinel.dataset)\n mock_pandas.read_csv.assert_called_once_with(sentinel.url, dtype=str)",
"def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit",
"def test_data():\n db = current_app.db\n Site = db.tables.Site\n Endpoint = db.tables.Endpoint\n if Site.query.count():\n return # DB not empty\n entries = [\n Site(site_id=1,\n site_name='Site1',\n site_desc='First Test Site',\n site_owner=1,\n user_ca_cert='USERCERT1',\n service_ca_cert='',\n auth_type=0,\n auth_uri='localhost:49998',\n public=False,\n def_path='/~'),\n Site(site_id=2,\n site_name='Site2',\n site_desc='Second Test Site',\n site_owner=123,\n user_ca_cert='USERCERT2',\n service_ca_cert='SERVICECERT2',\n auth_type=0,\n auth_uri='localhost:49998',\n public=True,\n def_path='/project'),\n Endpoint(ep_id=1,\n site_id=1,\n ep_uri='localhost:49999'),\n Endpoint(ep_id=2,\n site_id=1,\n ep_uri='localhost2:49999'),\n Endpoint(ep_id=3,\n site_id=2,\n ep_uri='localhost:50000'),\n Endpoint(ep_id=4,\n site_id=2,\n ep_uri='localhost2:50000'),\n Site(site_id=3,\n site_name='CloudSite1',\n site_desc='Testing site in cloud (1)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=5,\n site_id=3,\n ep_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=4,\n site_name='CloudSite2',\n site_desc='Testing site in cloud (2)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=6,\n site_id=4,\n ep_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=5,\n site_name='UKI-LT2-IC-HEP',\n site_desc='Imperial College GridPP Site',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=1,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/pnfs/hep.ph.ic.ac.uk/data'),\n Endpoint(ep_id=7,\n site_id=5,\n ep_uri='gfe02.grid.hep.ph.ic.ac.uk:2811'),\n Site(site_id=6,\n site_name='NERSC DTN',\n site_desc='NERSC DTN Service',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=0,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=8,\n site_id=6,\n ep_uri='dtn01.nersc.gov:2811'),\n ]\n for entry in entries:\n db.session.add(entry)\n db.session.commit()",
"def test_context_data_with_valid_search_and_dataset_name(self):\n study = factories.StudyFactory.create()\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum', dataset_name='dolor',\n source_study_version__study=study)\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem other', dataset_name='tempor')\n response = self.client.get(self.get_url(), {'description': 'lorem', 'name': 'dolor'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)\n self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])",
"async def test_23() -> None:\n LOG.debug(\"Test query for targeting a non-existing PUBLIC datasets, using ALL. (expect no data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1111genome\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 0, sys.exit(\"Should be able to retrieve only public.\")",
"async def test_fetch_filtered_dataset_call_misses(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection() # db_response is []\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result_miss = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, True)\n self.assertEqual(result_miss, [])",
"def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])",
"def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])",
"def test_dataset_details():\n with new_test_dataset(2) as test_ds:\n args = build_register_args(test_ds.copy_to_s3())\n ds_name = args['name']\n URLs.run(url_info=URLs.register_url(), json_body=args)\n\n ds_parts = URLs.run(url_info=URLs.dataset_parts_url(ds_name)).json\n assert ds_parts['filenames'] == test_ds.expected_parts.filenames\n expected_columns = json.loads(datafile_schema().to_json())['columns']\n\n ds_short_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=False)).json\n assert ds_short_schema['columns'] == expected_columns\n\n ds_full_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=True)).json\n assert ds_full_schema['columns'][DEFAULT_TIMESTAMP_COLUMN]['colattrs']['numericMin'] == BASE_TIME\n\n URLs.run(url_info=URLs.unregister_url(ds_name))"
]
| [
"0.71085185",
"0.69189525",
"0.68948346",
"0.66749257",
"0.66024005",
"0.6447773",
"0.6408226",
"0.64078",
"0.63756603",
"0.63122475",
"0.61679447",
"0.6157619",
"0.60740554",
"0.6063692",
"0.6012641",
"0.59584606",
"0.5945216",
"0.59411925",
"0.5923041",
"0.59035486",
"0.58886516",
"0.5874341",
"0.58551157",
"0.58545744",
"0.58401227",
"0.58278495",
"0.58225703",
"0.5818928",
"0.5818928",
"0.5809366"
]
| 0.7419891 | 0 |
Test db call of getting controlled and public datasets access. | async def test_datasets_access_call_multiple(self):
pool = asynctest.CoroutineMock()
pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'},
{'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])
result = await fetch_datasets_access(pool, None)
# for now it can return a tuple of empty datasets
# in order to get a response we will have to mock it
# in Connection() class
self.assertEqual(result, (['mock:public:id'], [], ['mock:controlled:id'])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_datasets_access_call_public(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], []))",
"async def test_datasets_access_call_controlled(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, ([], [], ['mock:controlled:id']))",
"def test_dataset_for_personal_accounts(self):\n pass",
"def test_find_datasets_filters_by_access_and_use_only_returns_the_dataset_once(\n access_type,\n):\n user = factories.UserFactory.create(is_superuser=False)\n user2 = factories.UserFactory.create(is_superuser=False)\n client = Client(**get_http_sso_data(user))\n\n access_granted_master = factories.DataSetFactory.create(\n published=True,\n type=DataSetType.MASTER,\n name=\"Master - access redundantly granted\",\n user_access_type=access_type,\n )\n factories.DataSetUserPermissionFactory.create(user=user, dataset=access_granted_master)\n factories.DataSetUserPermissionFactory.create(user=user2, dataset=access_granted_master)\n\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"access\": \"yes\", \"use\": str(DataSetType.MASTER)},\n )\n\n assert response.status_code == 200\n assert list(response.context[\"datasets\"]) == [expected_search_result(access_granted_master)]",
"def test_finding_datasets_doesnt_query_database_excessively(\n access_type, client, django_assert_num_queries\n):\n expected_num_queries = 13\n source_tags = [factories.SourceTagFactory() for _ in range(10)]\n topic_tags = [factories.TopicTagFactory() for _ in range(10)]\n\n masters = [\n factories.DataSetFactory(\n type=DataSetType.MASTER,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for master in masters:\n master.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n datacuts = [\n factories.DataSetFactory(\n type=DataSetType.DATACUT,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for datacut in datacuts:\n datacut.tags.set(random.sample(source_tags, 1) + random.sample(topic_tags, 1))\n\n references = [factories.ReferenceDatasetFactory.create(published=True) for _ in range(10)]\n for reference in references:\n reference.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n visualisations = [\n factories.VisualisationCatalogueItemFactory.create(published=True)\n for _ in range(random.randint(10, 50))\n ]\n\n for visualisation in visualisations:\n factories.DataSetApplicationTemplatePermissionFactory(\n application_template=visualisation.visualisation_template,\n dataset=random.choice(masters),\n )\n\n # Log into site (triggers the queries related to setting up the user).\n client.get(reverse(\"root\"))\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), follow=True)\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"q\": \"potato\"})\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"source\": [str(tag.id) for tag in random.sample(source_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"topic\": [str(tag.id) for tag in random.sample(topic_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"data_type\": str(DataSetType.MASTER)},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"access\": \"yes\"})\n assert response.status_code == 200",
"async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])",
"def test_get_records(self):\n pass",
"def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)",
"async def test_datasets_access_call_registered(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'REGISTERED', 'datasetid': 'mock:registered:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, ([], ['mock:registered:id'], []))",
"def test_ds(self, obj):\n pass",
"async def test_24() -> None:\n LOG.debug(\"Test query for targeting one existing and one non-existing PUBLIC datasets, using ALL. (expect only PUBLIC)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1111genome\", \"urn:hg:1000genome\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is True, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 1, sys.exit(\"Should be able to retrieve only public.\")",
"def test_data():\n db = current_app.db\n Site = db.tables.Site\n Endpoint = db.tables.Endpoint\n if Site.query.count():\n return # DB not empty\n entries = [\n Site(site_id=1,\n site_name='Site1',\n site_desc='First Test Site',\n site_owner=1,\n user_ca_cert='USERCERT1',\n service_ca_cert='',\n auth_type=0,\n auth_uri='localhost:49998',\n public=False,\n def_path='/~'),\n Site(site_id=2,\n site_name='Site2',\n site_desc='Second Test Site',\n site_owner=123,\n user_ca_cert='USERCERT2',\n service_ca_cert='SERVICECERT2',\n auth_type=0,\n auth_uri='localhost:49998',\n public=True,\n def_path='/project'),\n Endpoint(ep_id=1,\n site_id=1,\n ep_uri='localhost:49999'),\n Endpoint(ep_id=2,\n site_id=1,\n ep_uri='localhost2:49999'),\n Endpoint(ep_id=3,\n site_id=2,\n ep_uri='localhost:50000'),\n Endpoint(ep_id=4,\n site_id=2,\n ep_uri='localhost2:50000'),\n Site(site_id=3,\n site_name='CloudSite1',\n site_desc='Testing site in cloud (1)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=5,\n site_id=3,\n ep_uri='pdmtest1.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=4,\n site_name='CloudSite2',\n site_desc='Testing site in cloud (2)',\n site_owner=1,\n user_ca_cert=TEST_HOST_CA,\n service_ca_cert=UK_ESCIENCE_CA,\n auth_type=0,\n auth_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49998',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=6,\n site_id=4,\n ep_uri='pdmtest2.grid.hep.ph.ic.ac.uk:49999'),\n Site(site_id=5,\n site_name='UKI-LT2-IC-HEP',\n site_desc='Imperial College GridPP Site',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=1,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/pnfs/hep.ph.ic.ac.uk/data'),\n Endpoint(ep_id=7,\n site_id=5,\n ep_uri='gfe02.grid.hep.ph.ic.ac.uk:2811'),\n Site(site_id=6,\n site_name='NERSC DTN',\n site_desc='NERSC DTN Service',\n site_owner=0,\n user_ca_cert=None,\n service_ca_cert=None,\n auth_type=0,\n auth_uri='myproxy.grid.hep.ph.ic.ac.uk:7512',\n public=True,\n def_path='/~'),\n Endpoint(ep_id=8,\n site_id=6,\n ep_uri='dtn01.nersc.gov:2811'),\n ]\n for entry in entries:\n db.session.add(entry)\n db.session.commit()",
"async def test_23() -> None:\n LOG.debug(\"Test query for targeting a non-existing PUBLIC datasets, using ALL. (expect no data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1111genome\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 0, sys.exit(\"Should be able to retrieve only public.\")",
"async def test_21() -> None:\n LOG.debug(\"Test Non-existing/MISS variant targeting PUBLIC and CONTROLLED datasets with token perms (expect all shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 8,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\"],\n \"includeDatasetResponses\": \"MISS\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 2, sys.exit(\"Should be able to retrieve only public.\")",
"def test_retrieve_dyn():\n # use the same id as previous test.\n the_id = 'from-test-dyndb'\n\n # get the response using the\n response = dyn_crud.retrieve_record(the_id)\n\n # run test.\n assert True if (response['company']['S'] == 'test company' and\n response['location']['S'] == 'Shambhala') else False",
"def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': \"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')",
"def test_get_db_records(self):\n string = StringIndexer.objects.create(organization_id=123, string=\"oop\")\n collection = KeyCollection({123: {\"oop\"}})\n key = \"123:oop\"\n\n assert indexer_cache.get(key, self.cache_namespace) is None\n assert indexer_cache.get(string.id, self.cache_namespace) is None\n\n self.indexer.indexer._get_db_records(self.use_case_id, collection)\n\n assert indexer_cache.get(string.id, self.cache_namespace) is None\n assert indexer_cache.get(key, self.cache_namespace) is None",
"def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit",
"def test_data_researcher_access(self):\n self.client.login(username=self.data_researcher.username, password='test')\n\n self.verify_response(params={\n 'all_blocks': True,\n 'course_id': str(self.course_key)\n })",
"def testDatabase(self):\n con = self.getMetadataDatabaseConnection()\n if con:\n return True",
"def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('source_dataset_table', context)\n for ds in self.datasets:\n self.assertIn(ds, context['source_dataset_table'].data)\n self.assertIsInstance(context['source_dataset_table'], tables.SourceDatasetTableFull)",
"def test_get_record(self):\n pass",
"def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))",
"def access():",
"def test_db_read(env_setup, env_table, db_insert_test_data, db_read_test_data, response_test_data):\n DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": \"birthday_data\"}) \\\n .processor(db_insert_test_data.get(\"valid\")) # inserting data\n test_string = DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": env_table}) \\\n .processor(db_read_test_data.get(\"valid\")) # testing\n assert test_string == response_test_data.get(\"valid_read\")",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"async def test_22() -> None:\n LOG.debug(\"Test non-existing variant targeting CONTROLLED datasets with token perms, using MISS (expect only controlled shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 8,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome:controlled\"],\n \"includeDatasetResponses\": \"MISS\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 1, sys.exit(\"Should be able to retrieve only public.\")",
"def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))",
"def test_get_permissions(self):\n pass",
"async def test_12() -> None:\n LOG.debug(\"Test post query (public data (success) and controlled data without token (failure))\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\"],\n \"includeDatasetResponses\": \"HIT\",\n }\n async with aiohttp.ClientSession() as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is True, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 1, sys.exit(\"Should be able to retrieve only public.\")"
]
| [
"0.747451",
"0.71193075",
"0.683675",
"0.67620665",
"0.67434555",
"0.644622",
"0.6355829",
"0.6252154",
"0.624759",
"0.62461096",
"0.6196234",
"0.6161387",
"0.61113137",
"0.59870577",
"0.59677655",
"0.5960365",
"0.59477",
"0.59436566",
"0.59349966",
"0.59032625",
"0.58963615",
"0.5886209",
"0.5865245",
"0.5853331",
"0.58528125",
"0.5846343",
"0.5807258",
"0.5792336",
"0.5778899",
"0.57625633"
]
| 0.71368766 | 1 |
Test db call of getting datasets metadata. | async def test_fetch_dataset_metadata_call(self):
pool = asynctest.CoroutineMock()
pool.acquire().__aenter__.return_value = Connection()
result = await fetch_dataset_metadata(pool, None, None)
# for now it can return empty dataset
# in order to get a response we will have to mock it
# in Connection() class
self.assertEqual(result, []) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dataset_details():\n with new_test_dataset(2) as test_ds:\n args = build_register_args(test_ds.copy_to_s3())\n ds_name = args['name']\n URLs.run(url_info=URLs.register_url(), json_body=args)\n\n ds_parts = URLs.run(url_info=URLs.dataset_parts_url(ds_name)).json\n assert ds_parts['filenames'] == test_ds.expected_parts.filenames\n expected_columns = json.loads(datafile_schema().to_json())['columns']\n\n ds_short_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=False)).json\n assert ds_short_schema['columns'] == expected_columns\n\n ds_full_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=True)).json\n assert ds_full_schema['columns'][DEFAULT_TIMESTAMP_COLUMN]['colattrs']['numericMin'] == BASE_TIME\n\n URLs.run(url_info=URLs.unregister_url(ds_name))",
"def get_dataset_metadata(fields=[]):\n return get_dict_from_db(key='metadata', fields=fields)",
"def test_fetch_metadata_for_dataset(self):\n\n with patch.object(pd, \"read_csv\") as func:\n func.return_value = pd.DataFrame(\n {\"Archive Link\": [\"test2\", \"test1\", \"test3\"],\n \"Update Date\": [\"2020/1/2\", \"2020/1/1\", \"2020/1/3\"]}\n )\n result = Network.fetch_metadata_for_dataset(\"test\")\n pd.testing.assert_frame_equal(\n result,\n pd.DataFrame(\n {\"Archive Link\": [\"test1\", \"test2\", \"test3\"],\n \"Update Date\": pd.date_range(\"2020/1/1\", \"2020/1/3\")}\n ).set_index(\"Update Date\")\n )\n func.assert_called_once_with(\n \"https://healthdata.gov/api/views/test/rows.csv\",\n dtype=str\n )",
"async def get_datasets_metadata(location_id: LocationID, user_id: UserID):",
"def test_get_records(self):\n pass",
"def test_get_metadata_df(self):\n\n # first need to populate LabMetadata tables\n from data_processors.lims.lambdas import labmetadata\n labmetadata.scheduled_update_handler({'event': \"test_get_metadata_df\"}, None)\n\n logger.info(f\"Lab metadata count: {LabMetadata.objects.count()}\")\n\n # SEQ-II validation dataset\n mock_bcl_workflow: Workflow = WorkflowFactory()\n mock_sqr: SequenceRun = mock_bcl_workflow.sequence_run\n mock_sqr.run_id = \"r.Uvlx2DEIME-KH0BRyF9XBg\"\n mock_sqr.instrument_run_id = \"200612_A01052_0017_BH5LYWDSXY\"\n mock_sqr.gds_volume_name = \"bssh.acddbfda498038ed99fa94fe79523959\"\n mock_sqr.gds_folder_path = f\"/Runs/{mock_sqr.instrument_run_id}_{mock_sqr.run_id}\"\n mock_sqr.sample_sheet_name = \"SampleSheet.csv\"\n mock_sqr.name = mock_sqr.instrument_run_id\n mock_sqr.save()\n\n mock_library_run = LibraryRun(\n instrument_run_id=mock_sqr.instrument_run_id,\n run_id=mock_sqr.run_id,\n library_id=\"L2000199\",\n lane=1,\n override_cycles=\"Y151;I8N2;U10;Y151\",\n )\n mock_library_run.save()\n\n samplesheet_path = f\"{mock_sqr.gds_folder_path}/{mock_sqr.sample_sheet_name}\"\n\n metadata_df = bcl_convert.get_metadata_df(\n gds_volume=mock_sqr.gds_volume_name,\n samplesheet_path=samplesheet_path\n )\n\n logger.info(\"-\" * 32)\n logger.info(f\"\\n{metadata_df}\")\n\n self.assertTrue(not metadata_df.empty)\n self.assertTrue(\"PTC_SsCRE200323LL_L2000172_topup\" in metadata_df[\"sample\"].tolist())\n\n if \"\" in metadata_df[\"override_cycles\"].unique().tolist():\n logger.info(\"-\" * 32)\n logger.info(\"THERE SEEM TO BE BLANK OVERRIDE_CYCLES METADATA FOR SOME SAMPLES...\")\n self.assertFalse(\"\" in metadata_df[\"override_cycles\"].tolist())\n # This probably mean need to fix data, look for corresponding Lab Metadata entry...\n\n library_id_list = metadata_df[\"library_id\"].tolist()\n library_run_list = libraryrun_srv.link_library_runs_with_x_seq_workflow(library_id_list, mock_bcl_workflow)\n self.assertIsNotNone(library_run_list)\n self.assertEqual(1, len(library_run_list))\n self.assertEqual(mock_library_run.library_id, library_run_list[0].library_id)\n\n library_run_in_workflows = mock_bcl_workflow.libraryrun_set.all()\n self.assertEqual(1, library_run_in_workflows.count())",
"def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)",
"def get_dataset():\n\n return db.store.all()",
"def test_dataset_info():\n info = utils.get_dataset_info(asset1)\n assert info[\"geometry\"]\n assert info[\"properties\"][\"path\"]\n assert info[\"properties\"][\"bounds\"]\n assert info[\"properties\"][\"datatype\"]\n assert info[\"properties\"][\"minzoom\"] == 7\n assert info[\"properties\"][\"maxzoom\"] == 9",
"def test_finding_datasets_doesnt_query_database_excessively(\n access_type, client, django_assert_num_queries\n):\n expected_num_queries = 13\n source_tags = [factories.SourceTagFactory() for _ in range(10)]\n topic_tags = [factories.TopicTagFactory() for _ in range(10)]\n\n masters = [\n factories.DataSetFactory(\n type=DataSetType.MASTER,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for master in masters:\n master.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n datacuts = [\n factories.DataSetFactory(\n type=DataSetType.DATACUT,\n published=True,\n user_access_type=access_type,\n )\n for _ in range(random.randint(10, 50))\n ]\n for datacut in datacuts:\n datacut.tags.set(random.sample(source_tags, 1) + random.sample(topic_tags, 1))\n\n references = [factories.ReferenceDatasetFactory.create(published=True) for _ in range(10)]\n for reference in references:\n reference.tags.set(\n random.sample(source_tags, random.randint(1, 3))\n + random.sample(topic_tags, random.randint(1, 3))\n )\n\n visualisations = [\n factories.VisualisationCatalogueItemFactory.create(published=True)\n for _ in range(random.randint(10, 50))\n ]\n\n for visualisation in visualisations:\n factories.DataSetApplicationTemplatePermissionFactory(\n application_template=visualisation.visualisation_template,\n dataset=random.choice(masters),\n )\n\n # Log into site (triggers the queries related to setting up the user).\n client.get(reverse(\"root\"))\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), follow=True)\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"q\": \"potato\"})\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"source\": [str(tag.id) for tag in random.sample(source_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries + 1, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"topic\": [str(tag.id) for tag in random.sample(topic_tags, random.randint(1, 5))]},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(\n reverse(\"datasets:find_datasets\"),\n {\"data_type\": str(DataSetType.MASTER)},\n )\n assert response.status_code == 200\n\n with django_assert_num_queries(expected_num_queries, exact=False):\n response = client.get(reverse(\"datasets:find_datasets\"), {\"access\": \"yes\"})\n assert response.status_code == 200",
"def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': \"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')",
"def testDatabase(self):\n con = self.getMetadataDatabaseConnection()\n if con:\n return True",
"def get(log, session, args):\n url = \"{}datasets/{}\".format(\n http.get_api_url(args.url, args.project),\n args.id)\n log.debug('GET: {}'.format(url))\n response_json = http.get(session, url)\n log.print_json(response_json, \"dataset\", \"get\")",
"def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('source_dataset_table', context)\n for ds in self.datasets:\n self.assertIn(ds, context['source_dataset_table'].data)\n self.assertIsInstance(context['source_dataset_table'], tables.SourceDatasetTableFull)",
"def test_spotdb_reader(spotdb_data):\n\n db = spotdb_data\n\n reader = SpotDBReader(db)\n gfs = reader.read()\n\n assert len(gfs) == 4\n\n metrics = {\"Total time (inc)\", \"Avg time/rank (inc)\"}\n\n assert len(gfs[0].dataframe) > 2\n assert gfs[0].default_metric == \"Total time (inc)\"\n assert metrics < set(gfs[0].dataframe.columns)\n assert metrics < set(gfs[3].dataframe.columns)\n\n assert \"launchdate\" in gfs[0].metadata.keys()",
"def test_load_dataset():\n\n # Given\n dataset_file_name = core.config.app_config.TESTING_DATA_FILE\n\n # When\n subject = utils.load_dataset(filename=dataset_file_name)\n\n # Then\n assert isinstance(subject, pd.DataFrame)\n assert subject.shape == (5940, 41)",
"def test_cli_datasets_default():\n runner = CliRunner()\n result = runner.invoke(cli.main, [\"datasets\"])\n assert result.exit_code == 0\n assert \"Dataset ID\" in result.output\n assert \"ggallus_gene_ensembl\" in result.output\n assert \"ENSEMBL_MART_ENSEMBL\" in result.output",
"def test_fetch_dataset(self):\n\n mock_pandas = MagicMock()\n mock_pandas.read_csv.return_value = sentinel.dataset\n\n result = Network.fetch_dataset(sentinel.url, pandas_impl=mock_pandas)\n\n self.assertEqual(result, sentinel.dataset)\n mock_pandas.read_csv.assert_called_once_with(sentinel.url, dtype=str)",
"def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8",
"async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])",
"def test_spot_dataset_reader():\n\n regionprofile = {\n \"a/b/c\": {\"m\": 20, \"m#inclusive\": 20},\n \"a/b\": {\"m#inclusive\": 40},\n \"a\": {\"m#inclusive\": 42},\n }\n metadata = {\"launchdate\": 123456789}\n attr_info = {\n \"m\": {\"type\": \"double\"},\n \"m#inclusive\": {\"type\": \"int\", \"alias\": \"M Alias\"},\n }\n\n reader = SpotDatasetReader(regionprofile, metadata, attr_info)\n gf = reader.read(default_metric=\"M Alias (inc)\")\n\n assert len(gf.dataframe) == 3\n assert set(gf.dataframe.columns) == {\"name\", \"m\", \"M Alias (inc)\"}\n\n assert gf.metadata[\"launchdate\"] == metadata[\"launchdate\"]\n assert gf.default_metric == \"M Alias (inc)\"",
"def dataset(options):\n pass",
"def test_data_infos__default_db_directories(self):\n test_dataset_root = osp.join(self.data_dir, 'VOCdevkit', 'VOC2007')\n custom_ds = self.dataset_class(\n data_root=test_dataset_root,\n ann_file=osp.join(test_dataset_root, 'ImageSets', 'Main',\n 'trainval.txt'),\n pipeline=[],\n classes=('person', 'dog'),\n test_mode=True)\n\n self.assertListEqual([{\n 'id': '000001',\n 'filename': osp.join('JPEGImages', '000001.jpg'),\n 'width': 353,\n 'height': 500\n }], custom_ds.data_infos)",
"def test_get_metadata_value(self):\n pass",
"def test_08_dataset_counts_all(self):\n print (self.test_08_dataset_counts_all.__doc__)\n\n stats_maker = StatsMakerDatasets()\n\n r = stats_maker.get_dataset_counts_by_create_date()\n\n # check number of months\n self.assertEqual(len(r.result_data['records']), 16)\n\n # check 1st month\n first_month = {'count': 36,\n 'month_name': 'April',\n 'month_name_short': 'Apr',\n 'month_num': 4,\n 'running_total': 36,\n 'year_num': 2015,\n 'yyyy_mm': '2015-04'}\n self.assertEqual(dict(r.result_data['records'][0]), first_month)\n\n # check last month\n last_month = {'count': 98,\n 'month_name': 'July',\n 'month_name_short': 'Jul',\n 'month_num': 7,\n 'running_total': 570,\n 'year_num': 2016,\n 'yyyy_mm': '2016-07'}\n self.assertEqual(dict(r.result_data['records'][-1]), last_month)",
"def test_dataset_request(get_interface_params):\n from sail_on_client.protocol.localinterface import LocalInterface\n\n config_directory, config_name = get_interface_params\n local_interface = LocalInterface(config_name, config_directory)\n session_id = _initialize_session(local_interface, \"OND\")\n # Test correct dataset request\n filename = local_interface.dataset_request(\"OND.1.1.1234\", 0, session_id)\n expected = os.path.join(\n local_interface.result_directory, f\"{session_id}.OND.1.1.1234.0.csv\"\n )\n assert expected == filename\n expected_image_ids = _read_image_ids(expected)\n assert expected_image_ids == [\"n01484850_18013.JPEG\", \"n01484850_24624.JPEG\"]",
"def test_get_data_columns():\n logger.info(\"No unit tests exist for pudl.analysis.spatial.get_data_columns()\")",
"def get_datasets_summary(rs):\n\n\tif rs == \"rs1\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\n\t\t# This is a hack to get unique values in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"rs2\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset LIKE 'CEMBA_RS2_%%'\").fetchall()\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"all\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\t# This is a hack to get unique values in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\telse:\n\t\treturn\n\n\ttotal_methylation_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_methylation_cell_each_dataset ]\n\ttotal_methylation_cell_each_dataset = { k: v for d in total_methylation_cell_each_dataset for k, v in d.items() }\n\ttotal_snATAC_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_snATAC_cell_each_dataset ]\n\ttotal_snATAC_cell_each_dataset = { k: v for d in total_snATAC_cell_each_dataset for k, v in d.items() }\n\n\tdataset_cell_counts = []\n\tfor dataset in dataset_list:\n\t\ttry:\n\t\t\tnum_snATAC_cells = total_snATAC_cell_each_dataset[dataset['dataset']]\n\t\texcept KeyError as e:\n\t\t\tnum_snATAC_cells = 0\n\n\t\tif \"RS2\" not in dataset['dataset']:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[1]\n\t\t\tresearch_segment = \"RS1\"\n\t\telse:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[2]\n\t\t\tbrain_region_code = brain_region_code[-2:]\n\t\t\tresearch_segment = \"RS2\"\n\n\t\tregions_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['brain_region']).fetchone()\n\t\tif regions_sql is not None:\n\t\t\tABA_regions_descriptive = regions_sql['ABA_description'].replace('+', ', ')\n\t\telse:\n\t\t\tABA_regions_descriptive = \"\"\n\n\t\tif 
rs == \"rs1\":\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\telse:\n\t\t\ttarget_region_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['target_region']).fetchone()\n\t\t\tif target_region_sql is not None:\n\t\t\t\ttarget_region_descriptive = target_region_sql['ABA_description'].replace('+', ', ')\n\t\t\telse:\n\t\t\t\ttarget_region_descriptive = \"\"\n\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\n\treturn json.dumps(dataset_cell_counts)",
"def testQuery(self):\n # Clear anything first\n for i in range(10):\n row_name = \"aff4:/row:%s\" % i\n data_store.DB.Set(row_name, \"metadata:%s\" % i, str(i), timestamp=5,\n token=self.token)\n data_store.DB.Set(row_name, \"aff4:type\", \"test\", token=self.token)\n\n # Retrieve all subjects with metadata:5 set:\n rows = [row for row in data_store.DB.Query(\n [\"metadata:5\"], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:\", token=self.token)]\n\n self.assertEqual(len(rows), 1)\n self.assertEqual(rows[0][\"subject\"][0][0], \"aff4:/row:5\")\n self.assertEqual(rows[0][\"metadata:5\"][0][0], \"5\")\n self.assertEqual(rows[0][\"metadata:5\"][0][1], 5)",
"def test_retrieve_data_dict_1(self):\n list_of_data_dicts = \\\n basic.retrieve_data_dict(self.test_import_table_1)\n self.assertEqual(len(list_of_data_dicts), 2)"
]
| [
"0.7350097",
"0.67755806",
"0.66323686",
"0.66123265",
"0.64851105",
"0.6451084",
"0.63992196",
"0.6393353",
"0.63796693",
"0.63779145",
"0.63269633",
"0.6278049",
"0.6246102",
"0.62334937",
"0.617247",
"0.61681867",
"0.61557543",
"0.61213416",
"0.61100024",
"0.61076134",
"0.61000377",
"0.6073482",
"0.6065701",
"0.60590756",
"0.603143",
"0.60310876",
"0.60078",
"0.59804827",
"0.59730625",
"0.5959209"
]
| 0.7253901 | 1 |
Test PostgreSQL wildcard handling. | def test_handle_wildcard(self):
sequence1 = 'ATCG'
sequence2 = 'ATNG'
sequence3 = 'NNCN'
self.assertEqual(handle_wildcard(sequence1), ['ATCG'])
self.assertEqual(handle_wildcard(sequence2), ["%AT_G%"])
self.assertEqual(handle_wildcard(sequence3), ["%__C_%"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_match_any_wildcard_in_literal(self):\n qs = '\"Foo t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Foo t\\*\"', \"Wildcard should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Foo t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def test_match_any_wildcard_is_present(self):\n qs = \"Foo t*\"\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertEqual(qs, qs_escaped, \"The querystring should be unchanged\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=qs)),\n \"Wildcard Q object should be generated\",\n )",
"def test_multiple_match_any_wildcard_in_literal(self):\n qs = '\"Fo*o t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Fo\\*o t\\*\"', \"Both wildcards should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Fo\\*o t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def test_wildcards_both_inside_and_outside_literal(self):\n qs = '\"Fo? t*\" said the *'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\? t\\*\" said the *',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\? t\\*\" said the *')),\n \"Wildcard Q object should be generated\",\n )",
"def test_wildcard_at_opening_of_string(self):\n with self.assertRaises(index.QueryError):\n wildcard_escape(\"*nope\")\n\n with self.assertRaises(index.QueryError):\n Q_(\"match\", \"title\", \"*nope\")",
"def test_wildcards_inside_outside_multiple_literals(self):\n qs = '\"Fo?\" s* \"yes*\" o?'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped,\n r'\"Fo\\?\" s* \"yes\\*\" o?',\n \"Wildcards in literal should be escaped\",\n )\n self.assertTrue(wildcard, \"Wildcard should be detected\")\n\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"wildcard\", title=r'\"Fo\\?\" s* \"yes\\*\" o?')),\n \"Wildcard Q object should be generated\",\n )",
"def test_mixed_wildcards_in_literal(self):\n qs = '\"Fo? t*\"'\n qs_escaped, wildcard = wildcard_escape(qs)\n\n self.assertEqual(\n qs_escaped, r'\"Fo\\? t\\*\"', \"Both wildcards should be escaped\"\n )\n self.assertFalse(wildcard, \"Wildcard should not be detected\")\n self.assertIsInstance(\n Q_(\"match\", \"title\", qs),\n type(index.Q(\"match\", title=r'\"Fo\\? t\\*\"')),\n \"Wildcard Q object should not be generated\",\n )",
"def test_searchWildcard(self):\n self.assertFalse(\n self.server.search_UID([b'2:3'], self.seq, self.msg, (1, 1234)))\n # 2:* should get translated to 2:<max UID> and then to 1:2\n self.assertTrue(\n self.server.search_UID([b'2:*'], self.seq, self.msg, (1, 1234)))\n self.assertTrue(\n self.server.search_UID([b'*'], self.seq, self.msg, (1, 1234)))",
"def test_psycopg_select_LIKE(instrument, postgres_connection, elasticapm_client):\n cursor = postgres_connection.cursor()\n query = \"SELECT * FROM test WHERE name LIKE 't%'\"\n\n try:\n elasticapm_client.begin_transaction(\"web.django\")\n cursor.execute(query)\n cursor.fetchall()\n elasticapm_client.end_transaction(None, \"test-transaction\")\n finally:\n # make sure we've cleared out the spans for the other tests.\n transactions = elasticapm_client.events[TRANSACTION]\n spans = elasticapm_client.spans_for_transaction(transactions[0])\n span = spans[0]\n assert span[\"name\"] == \"SELECT FROM test\"\n assert span[\"type\"] == \"db\"\n assert span[\"subtype\"] == \"postgresql\"\n assert span[\"action\"] == \"query\"\n assert \"db\" in span[\"context\"]\n assert span[\"context\"][\"db\"][\"instance\"] == \"elasticapm_test\"\n assert span[\"context\"][\"db\"][\"type\"] == \"sql\"\n assert span[\"context\"][\"db\"][\"statement\"] == query\n assert span[\"context\"][\"service\"][\"target\"][\"type\"] == \"postgresql\"\n assert span[\"context\"][\"service\"][\"target\"][\"name\"] == \"elasticapm_test\"",
"def wildcard_match(item, base, wildcard):\n if wildcard.startswith(\"**/\"):\n wildcard = wildcard[3:]\n for base_element in base.split(\"/\"):\n if fnmatch.fnmatch(base_element, wildcard):\n return True\n return False\n else:\n return fnmatch.fnmatch(item, wildcard)",
"def _is_wildcard_match(s, wildcard):\n\n wildcard = wildcard.strip()\n glob_pat = re.compile(r'\\*(:(?P<type>\\w+))?$')\n m = glob_pat.match(wildcard)\n if m:\n if m.group('type'):\n type_to_meth = globals()['__builtins__']\n type_to_meth = {k:v for k,v in type_to_meth.items()\n if k in ['str','int','float','bool']}\n try:\n return isinstance(s, type_to_meth[m.group('type')])\n except KeyError:\n raise InvalidWildcardError(\"{} is an invalid type in {}\".format(\n m.group('type'), wildcard))\n return True\n raise InvalidWildcardError(wildcard)",
"def test_searchWildcardHigh(self):\n self.assertTrue(\n self.server.search_UID([b'1235:*'], self.seq, self.msg, (1234, 1)))",
"def test_asterisk(self):\n with self.assertRaises(ValidationError):\n db_name_validator('logstash*')",
"def test_wildcard_all(self):\n with self.assertRaisesRegex(\n ValueError, \"WILDCARD_ALL passed with other key information\"):\n _path.RootOper.Foo(_defs.WILDCARD_ALL, 4)",
"def _wildcardformat(regxval):\n if regxval == None:\n return None\n else:\n try:\n return regxval.replace(\"*\",\"%\").replace(\"?\",\"_\")\n except AttributeError:\n return regxval",
"def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start = \"\"\n end = \"\"\n if not regexp.startswith(\"*\"):\n start = blocks[0]\n if not regexp.endswith(\"*\"):\n end = blocks[-1]\n if start != \"\":\n if string.startswith(start):\n blocks = blocks[1:]\n else:\n return False\n if end != \"\":\n if string.endswith(end):\n blocks = blocks[:-1]\n else:\n return False\n blocks = [block for block in blocks if block != \"\"]\n if blocks == []:\n return match\n for block in blocks:\n i = string.find(block)\n if i == -1:\n return False\n string = string[i + len(block):]\n return match",
"def convertSQL_LIKE2REGEXP(sql_like_pattern):\n # Replace '_' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)_', '.', sql_like_pattern)\n # Replace '%' by equivalent regexp, except when precede by '\\'\n # (escape character)\n regexp = re.sub(r'(?<!\\\\)%', '.*', regexp)\n # Set regexp to ignore cases; SQL patterns are case-insensitive by default.\n regexp = \"(?i)^(\" + regexp + \")$\"\n return regexp",
"def wildcard(s, star_min=1):\n\n def _feed_parts(input_parts):\n for part in input_parts:\n if part == \"*\":\n if star_min == 0:\n yield \".*\"\n elif star_min == 1:\n yield \".+\"\n else:\n yield f\".{{{star_min},}}\"\n elif part == \"?\":\n yield \".\"\n else:\n yield re.escape(part)\n\n return \"\".join(_feed_parts(re.split(r'([\\?\\*])', s)))",
"def is_wildcard(obj):\n return isinstance(obj, Symbol) and obj == Symbol('*')",
"def glob_match(value, pat, doublestar=False, ignorecase=False, path_normalize=False):\n if ignorecase:\n value = value.lower()\n pat = pat.lower()\n if path_normalize:\n value = value.replace('\\\\', '/')\n pat = pat.replace('\\\\', '/')\n return _translate(pat, doublestar=doublestar).match(value) is not None",
"def EscapeWildcards(string: Text) -> Text:\n precondition.AssertType(string, Text)\n return string.replace(\"%\", r\"\\%\").replace(\"_\", r\"\\_\")",
"def test_regex_case_sensitive_nomatch(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'ABC'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertIsNone(cursor.fetchone())\n finally:\n self.dbh.rollback()\n cursor.close()",
"def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )",
"def test_regex_query_shortcuts(self):\n person = self.Person(name=\"Guido van Rossum\")\n person.save()\n\n # Test contains\n obj = self.Person.objects(name__contains=\"van\").first()\n assert obj == person\n obj = self.Person.objects(name__contains=\"Van\").first()\n assert obj is None\n\n # Test icontains\n obj = self.Person.objects(name__icontains=\"Van\").first()\n assert obj == person\n\n # Test startswith\n obj = self.Person.objects(name__startswith=\"Guido\").first()\n assert obj == person\n obj = self.Person.objects(name__startswith=\"guido\").first()\n assert obj is None\n\n # Test istartswith\n obj = self.Person.objects(name__istartswith=\"guido\").first()\n assert obj == person\n\n # Test endswith\n obj = self.Person.objects(name__endswith=\"Rossum\").first()\n assert obj == person\n obj = self.Person.objects(name__endswith=\"rossuM\").first()\n assert obj is None\n\n # Test iendswith\n obj = self.Person.objects(name__iendswith=\"rossuM\").first()\n assert obj == person\n\n # Test exact\n obj = self.Person.objects(name__exact=\"Guido van Rossum\").first()\n assert obj == person\n obj = self.Person.objects(name__exact=\"Guido van rossum\").first()\n assert obj is None\n obj = self.Person.objects(name__exact=\"Guido van Rossu\").first()\n assert obj is None\n\n # Test iexact\n obj = self.Person.objects(name__iexact=\"gUIDO VAN rOSSUM\").first()\n assert obj == person\n obj = self.Person.objects(name__iexact=\"gUIDO VAN rOSSU\").first()\n assert obj is None\n\n # Test wholeword\n obj = self.Person.objects(name__wholeword=\"Guido\").first()\n assert obj == person\n obj = self.Person.objects(name__wholeword=\"rossum\").first()\n assert obj is None\n obj = self.Person.objects(name__wholeword=\"Rossu\").first()\n assert obj is None\n\n # Test iwholeword\n obj = self.Person.objects(name__iwholeword=\"rOSSUM\").first()\n assert obj == person\n obj = self.Person.objects(name__iwholeword=\"rOSSU\").first()\n assert obj is None\n\n # Test regex\n obj = self.Person.objects(name__regex=\"^[Guido].*[Rossum]$\").first()\n assert obj == person\n obj = self.Person.objects(name__regex=\"^[guido].*[rossum]$\").first()\n assert obj is None\n obj = self.Person.objects(name__regex=\"^[uido].*[Rossum]$\").first()\n assert obj is None\n\n # Test iregex\n obj = self.Person.objects(name__iregex=\"^[guido].*[rossum]$\").first()\n assert obj == person\n obj = self.Person.objects(name__iregex=\"^[Uido].*[Rossum]$\").first()\n assert obj is None\n\n # Test unsafe expressions\n person = self.Person(name=\"Guido van Rossum [.'Geek']\")\n person.save()\n\n obj = self.Person.objects(name__icontains=\"[.'Geek\").first()\n assert obj == person",
"def test_regex_bad_case_sensitivity(self):\n with self.assertRaises(despydb.UnknownCaseSensitiveError):\n self.dbh.get_regex_clause(\"'ABC'\", 'a.*', 'F')",
"def test_glob_pattern(self):\n glob_pattern = GlobPattern()\n det_name = 'R22_S11'\n self.assertEqual(glob_pattern('fe55', det_name),\n 'fe55_fe55_*/*_{}.fits'.format(det_name))\n self.assertEqual(glob_pattern('cte_low', det_name),\n 'sflat_*_flat_*L*/*_{}.fits'.format(det_name))",
"def wildcard(pattern):\n wildcards = pattern.count('?')\n alphabet = ['0', '1']\n\n def xcombinations(items, length):\n if length == 0:\n yield []\n else:\n for i in xrange(len(items)):\n for sc in xcombinations(items, length - 1):\n yield [items[i]] + sc\n\n for combination in xcombinations(alphabet, wildcards):\n buff = ''\n for c in pattern:\n if c == '?':\n buff += combination.pop()\n else:\n buff += c\n yield buff",
"def column_wildcard(self) -> Optional[pulumi.Input['DataCellsFilterColumnWildcardArgs']]:\n return pulumi.get(self, \"column_wildcard\")",
"def _glob_to_sql(self, string):\n\n # What's with the chr(1) and chr(2) nonsense? It's a trick to\n # hide \\* and \\? from the * and ? substitutions. This trick\n # depends on the substitutiones being done in order. chr(1)\n # and chr(2) were picked because I know those characters\n # almost certainly won't be in the input string\n table = ((r'\\\\', chr(1)), (r'\\*', chr(2)), (r'\\?', chr(3)),\n (r'%', r'\\%'), (r'?', '_'), (r'*', '%'),\n (chr(1), r'\\\\'), (chr(2), r'\\*'), (chr(3), r'\\?'))\n\n for (a, b) in table:\n string = string.replace(a,b)\n\n string = string[1:] if string.startswith(\"^\") else \"%\" + string\n string = string[:-1] if string.endswith(\"$\") else string + \"%\"\n\n return string",
"def test_regex_case_sensitive_match(self):\n cursor = self.dbh.cursor()\n try:\n expr = self.dbh.get_regex_clause(\"'abc'\", 'a.*')\n qry = self.dbh.get_expr_exec_format() % \"'TRUE'\"\n qry += ' WHERE ' + expr\n\n cursor.execute(qry)\n\n self.assertEqual(cursor.fetchone()[0], 'TRUE')\n finally:\n self.dbh.rollback()\n cursor.close()"
]
| [
"0.7469122",
"0.738512",
"0.71618766",
"0.71604025",
"0.7111361",
"0.7002698",
"0.69867164",
"0.6560411",
"0.6547104",
"0.6484008",
"0.638041",
"0.630823",
"0.616465",
"0.6079208",
"0.59992486",
"0.599845",
"0.5911614",
"0.58510184",
"0.58022",
"0.57422745",
"0.5732638",
"0.5730074",
"0.5701056",
"0.569439",
"0.5672273",
"0.5648896",
"0.56236017",
"0.5545577",
"0.5545116",
"0.54966074"
]
| 0.74337596 | 1 |
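The entry above shows only the test; below is a minimal sketch of a handle_wildcard() that would satisfy those assertions. The function name, signature and list return type are assumptions inferred from the test: an 'N' in the nucleotide sequence is treated as the wildcard, mapped to the SQL LIKE single-character wildcard '_', with the whole pattern wrapped in '%'.

def handle_wildcard(sequence):
    """Translate 'N' wildcards in a nucleotide sequence into a SQL LIKE pattern."""
    if "N" not in sequence:
        # no wildcard: the sequence can be matched literally
        return [sequence]
    # each 'N' becomes the single-character wildcard '_', and the pattern is
    # wrapped in '%' so it matches anywhere inside the stored sequence
    return ["%" + sequence.replace("N", "_") + "%"]

With this sketch, handle_wildcard('ATNG') returns ['%AT_G%'] and handle_wildcard('NNCN') returns ['%__C_%'], matching the assertions above.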
Built the error Message from a key with specific params | def built_error_message(self, key: str, params: List[str]) -> str:
if key in self.errors:
error_msg = self.errors[key]
error_msg = re.sub("{..}", "", error_msg)
return error_msg.format(*params)
else:
return "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_error(self, key: str, **kwargs) -> ValidationError:\n try:\n msg = self.error_messages[key]\n except KeyError as error:\n class_name = self.__class__.__name__\n message = (\n \"ValidationError raised by `{class_name}`, but error key `{key}` does \"\n \"not exist in the `error_messages` dictionary.\"\n ).format(class_name=class_name, key=key)\n raise AssertionError(message) from error\n if isinstance(msg, (str, bytes)):\n msg = msg.format(**kwargs)\n return ValidationError(msg)",
"def build_error_msg(error_body: dict) -> str:\n ret_error_msg = ''\n for error in error_body:\n if error.get('field'):\n ret_error_msg += 'field ' + error['field']\n if error.get('message'):\n ret_error_msg += ' ' + error['message']\n return ret_error_msg",
"def key_error_message(self,key):\n if not key:\n return 'key is blank.'\n elif any(map(lambda s: s in key,space_chars)):\n return '\"{}\" contains whitespace.'.format(key)\n elif any(map(lambda s: s in key,bad_chars)):\n return '\"{}\" contains special characters.'.format(key)",
"def error(self, key, **kwargs):\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n raise AssertionError('Error with key={} is not found for class={}'.format(key, class_name))\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string, code=key)",
"def format_error(invalid, doc_type):\n # using string for checking is probably not ideal,\n # but voluptuous does not have specific sub error\n # types for these errors\n if invalid.error_message == 'extra keys not allowed':\n msg = \"Key '{}' is not allowed\".format(invalid.path[0])\n elif invalid.error_message == 'required key not provided':\n msg = \"{} '{}' is missing\".format(doc_type, invalid.path[0])\n else:\n msg = invalid.message\n return {'message': msg, 'field': str(invalid.path[0])}",
"def buildErrorMessage(self, test, err):\n\n errorMessage = \"\"\n errorMessage += test.id()\n errorMessage += \"\\n\\n\"\n\n errorMessage += traceback.format_exc() + \"\\n\"\n return errorMessage",
"def _add_error(self, key, message):\n if key not in self._error_key_list:\n self._error_key_list.append(key)\n self.add_error(key, str(message))",
"def error(self, key, value, context, errorclass=InvalidDataError, **values):\n msg_template = self.message_for_key(key, context)\n raise errorclass(msg_template % values, value, key=key, context=context)",
"def creation_error(src_dict: Dict[str, List[str]], e: str):\n return \"LED Group error in %s: %s\\n)\" % (json.dumps(src_dict), e)",
"def _get_error_text(self, result: dict) -> str:\n try:\n return result[self._FIELD_TEXT]\n except KeyError:\n return self._DEFAULT_ERROR_MSG",
"def construct_err(err_name='DEFAULT', err_language='english'):\n if err_language not in errors.keys():\n err_language = 'english'\n if err_name not in errors[err_language].keys():\n err_name = 'DEFAULT'\n\n error_obj = errors[err_language][err_name]\n\n return json.dumps({\n 'message': error_obj['message'],\n 'status_code': error_obj['status_code']\n }), error_obj['status_code']",
"def sfherrormessage(func, *args, **kwargs):\n def wrapper(*args, **kwargs):\n e_message = \"DEBUGGING ASSISTANT: make sure the parameters_dict contains \" \\\n \"all the necessary parameters spelt correctly. \" \\\n \"Accepted parameters are: 'tau', 'T0', 'constant', 'alpha', 'beta'\"\n try:\n func(*args, **kwargs)\n except KeyError as e:\n raise HokiKeyError(\n f\"{e} has not been defined and I need it. \"+e_message)\n return wrapper",
"def error_message(iden: int | None, code: str, message: str) -> dict[str, Any]:\n return {\n \"id\": iden,\n \"type\": const.TYPE_RESULT,\n \"success\": False,\n \"error\": {\"code\": code, \"message\": message},\n }",
"def gen_error(error_id, *args):\n errors = {\n 'generic': {'status': 400, 'error': 'generic', 'description': 'A unspecified error occurred'},\n 'invalid_pagetype': {'status': 400, 'description': 'Invalid pagetype \"{}\"'},\n }\n\n if error_id in errors.keys():\n error = dict(**errors[error_id])\n error['description'] = error['description'].format(*args)\n error['error'] = error_id\n return json.dumps({**error, 'success': False}), error['status']\n\n return json.dumps(errors['generic']), errors['generic']['status']",
"def get_error_ext(error_type, key):\n extension = _ERROR % (error_type, key)\n return extension",
"def get_message(self, *args, **kwargs):\n\n message = ''\n message += ', '.join([str(key) + ': ' + str(val) for key, val in kwargs.items()]) + '; ' if kwargs else ''\n message += ', '.join(str(val) for val in args) if args else ''\n\n return message",
"def get_error(msg, prefix, default_value='Error:'):\n \n if prefix is not default_value:\n prefix = default_value + prefix\n\n error_msg = prefix + ' ' + msg\n return error_msg",
"def error_message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"error_message\")",
"def parsed_error_msg(self):\r\n # Translates the category names and messages into something more human readable\r\n message_dict = {\r\n (\"photoIdReasons\", \"Not provided\"): _(\"No photo ID was provided.\"),\r\n (\"photoIdReasons\", \"Text not clear\"): _(\"We couldn't read your name from your photo ID image.\"),\r\n (\"generalReasons\", \"Name mismatch\"): _(\"The name associated with your account and the name on your ID do not match.\"),\r\n (\"userPhotoReasons\", \"Image not clear\"): _(\"The image of your face was not clear.\"),\r\n (\"userPhotoReasons\", \"Face out of view\"): _(\"Your face was not visible in your self-photo\"),\r\n }\r\n\r\n try:\r\n msg_json = json.loads(self.error_msg)\r\n msg_dict = msg_json[0]\r\n\r\n msg = []\r\n for category in msg_dict:\r\n # find the messages associated with this category\r\n category_msgs = msg_dict[category]\r\n for category_msg in category_msgs:\r\n msg.append(message_dict[(category, category_msg)])\r\n return u\", \".join(msg)\r\n except (ValueError, KeyError):\r\n # if we can't parse the message as JSON or the category doesn't\r\n # match one of our known categories, show a generic error\r\n log.error('PhotoVerification: Error parsing this error message: %s', self.error_msg)\r\n return _(\"There was an error verifying your ID photos.\")",
"def _build_rpc_error(self, id, error, exception, keep_null_id=False):\n if id is None and not keep_null_id:\n return None\n\n message = RPC_ERROR_MESSAGES.get(error, str(exception))\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'error': {\n 'code': error.value,\n 'message': message,\n 'data': {\n 'stacktrace': str(exception) + '\\n' + '\\n'.join(traceback.format_tb(exception.__traceback__))\n }\n }\n }",
"def get_expected_type_error_message(key, val, expected_type):\n\n return \"Invalid type at key '%s'. Expected '%s' got '%s'.\" \\\n % (str(key), str(expected_type), str(type(val)))",
"def get_or_raise(self, key: str, error_message: str = None) -> str:\n v = self.get_or_default(key, None)\n if v is None:\n if error_message is None:\n print(\"Error, '\" + key + \"' is required.\")\n else:\n print(error_message)\n raise CLIMissingKeyError(error_message)\n\n else:\n return v",
"def err(error_dictionary):\n return {'error': error_dictionary['message']}, error_dictionary['code']",
"def creation_error(src_dict: Dict[str, List[Union['Repeater', 'Step']]], e: str) -> str:\n return \"Sequencer error in %s: %s\\n\" % (json.dumps(src_dict), e)",
"def lanzarError(msg, status, title, typee):\n d = {}\n d[\"detail\"] = msg\n d[\"error\"] = status\n d[\"title\"] = title\n d[\"type\"] = typee\n return d",
"def get_errors(*args, **kwargs):\n\n prefix = kwargs.get('prefix', 'Error:')\n if prefix is not 'Error:':\n prefix = 'Error: '+ prefix\n\n msg = ' '.join(args)\n \n error_msg = prefix + ' ' + msg\n return error_msg",
"def _raise_error_with_context(context):\n context_str = \" \"\n for c in context:\n if isinstance(c, str):\n context_str = context_str + f\"in key {c} \"\n elif isinstance(c, int):\n context_str = context_str + f\"in index {c} \"\n raise ValueError(f\"Value{context_str}is required\")",
"def raise_validation_error(\n problematic_key_set,\n problem_message,\n exception_class\n):\n stringified_keys = '{0} '.format(linesep).join(sorted(problematic_key_set))\n\n tags_error_message = '{problem_message}{linesep}{linesep} {stringified_keys}{linesep}'.format(\n problem_message=problem_message,\n linesep=linesep,\n stringified_keys=stringified_keys\n )\n\n raise exception_class(tags_error_message)",
"def format_errordict(self, errordict):\n errormsg = f'Comparison between {self.ad1.filename} and {self.ad2.filename}'\n for k, v in errordict.items():\n errormsg += f'\\nComparison failure in {k}'\n errormsg += '\\n' + ('-' * (22 + len(k))) + '\\n'\n errormsg += '\\n '.join(v)\n return errormsg",
"def __str__(self):\n base_message = self.base_message.format(filename=self.yaml_file_path)\n error_message = ERROR_MESSAGE.format(key=self.key, expected=self.expected)\n return base_message + error_message"
]
| [
"0.6951925",
"0.672945",
"0.6627752",
"0.65712",
"0.6533963",
"0.64329624",
"0.6227141",
"0.6193963",
"0.6119053",
"0.6087474",
"0.60702777",
"0.6059164",
"0.60467255",
"0.5873384",
"0.58444184",
"0.58381826",
"0.5819411",
"0.5790388",
"0.5787507",
"0.5787152",
"0.57777953",
"0.5754967",
"0.57405925",
"0.5726129",
"0.57084936",
"0.56904215",
"0.5658912",
"0.5654798",
"0.5650686",
"0.55968744"
]
| 0.86145705 | 0 |
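A minimal usage sketch of built_error_message()'s formatting behaviour follows; the error key, template and parameters are hypothetical and not taken from the entry above — only the stripping of two-character "{xy}" placeholders and the positional formatting come from the method shown.

import re

# hypothetical template: "{xy}" is a two-character marker that the method
# strips with re.sub("{..}", "", ...) before filling the remaining "{}" slots
errors = {"min_length": "too short {xy}: expected {} chars, got {}"}
msg = re.sub("{..}", "", errors["min_length"]).format(*["8", "3"])
# msg == "too short : expected 8 chars, got 3"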
Takes a dataframe and adds the top 3 tags associated with the LDA model prediction over a corpus of interest. | def add_tags(df,lda,corpus,pmin):
    vector = []
    for doc in corpus:
        prediccion = lda[doc]
        # sort this document's topic predictions by probability, highest first
        prediccion.sort(reverse=True, key=lambda x: x[1])
        # keep only the predictions whose probability exceeds pmin
        prediccion = filtro_probs(prediccion, pmin)
        vector.append(prediccion)
    # topic id of the best, second-best and third-best prediction per document
    M1_glob = [item[0] for item in vector]
    M1_final = [item[0] for item in M1_glob]
    M2_glob = [item[1] for item in vector]
    M2_final = [item[0] for item in M2_glob]
    M3_glob = [item[2] for item in vector]
    M3_final = [item[0] for item in M3_glob]
    df["Pred_M1"] = M1_final
    df["Pred_M2"] = M2_final
    df["Pred_M3"] = M3_final
    return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tag_pred(model, vectorized_input, feature_names, top_t, nb_tag_pred=10,\n threshold=0.15):\n\n # Topic df -----------------------------------------------------------------\n topic_df = display_topics2(model, feature_names)\n # associate each topic with a list of tags\n topics_kwords_df = topic_df.T #(topic_df.isin(top_t)*topic_df).T\n topic2tags_d = {}\n # tags_per_topic = []\n for topic in topics_kwords_df:\n tag_list = []\n for e in topics_kwords_df.loc[:, topic]:\n if e is not \"\":\n tag_list.append(e)\n topic2tags_d[topic] = tag_list\n\n # Create Document Vs Topic df ----------------------------------------------\n import numpy as npy\n model_output = model.transform(vectorized_input)\n topicnames = [\"Topic\" + str(i) for i in range(model.components_.shape[0])]\n docnames = [\"Post\" + str(i) for i in range(vectorized_input.shape[0])]\n df_document_topic = pd.DataFrame(npy.round(model_output, 2),\n columns=topicnames,\n index=docnames)\n\n # Tag predictions ----------------------------------------------------------\n tag_pred_l = []\n for post in df_document_topic.index:\n tags_post = []\n topics_proba = df_document_topic.loc[post, :]\n mask = topics_proba >= threshold\n topic_pred = list(df_document_topic.loc[post, mask].index)\n tot_proba = topics_proba[topic_pred].sum()\n\n # if no major topic in this post, propose just top 10 tags\n if len(topic_pred) == 0:\n tags_post = tags_post + top_t[0:nb_tag_pred].copy()\n else:\n for topic in topic_pred:\n # pic number of top elements ~ to proba of the topic\n nb_elements = int(round(topics_proba[topic]*10/tot_proba,0))\n tags_post = tags_post + topic2tags_d[topic][0:nb_elements].copy()\n tag_pred_l.append(tags_post)\n\n return tag_pred_l",
"def get_top_keywords_from_articles(self, kwords_list):\n _all_keywords = []\n for a in kwords_list:\n if a != []:\n for w in a:\n _all_keywords.append([w['keyword'],w['weight'],w['label']])\n _df_g = pd.DataFrame(_all_keywords, columns=[\"Keyword\", \"Count\",\"Label\"])\n _df_g.sort_values(by=\"Count\", inplace=True, ascending=False)\n _df_g.reset_index(drop=True, inplace=True)\n _df_g.to_csv('test.csv')\n print(len(_df_g))\n\n _df_g['Keyword'] = _df_g['Keyword'].apply(self.remove_repeat_words)\n _df_g.dropna(axis=0, inplace=True)\n p1,p2 = self.pos_taggers(_df_g)\n _df_g['c_POS'] = p1\n _df_g['s_POS'] = p2\n _df_g['c_POS_score'] = _df_g['c_POS'].apply(self.combine_pos_score)\n _df_g['s_POS_score'] = _df_g['s_POS'].apply(self.specific_pos_score)\n _df_g['Count'] = _df_g['Count'] + _df_g['c_POS_score'] + _df_g['s_POS_score'] \n print(len(_df_g))\n _df_g.sort_values(by='Count',inplace=True, ascending=False)\n print(len(_df_g))\n _df_g = _df_g.reset_index(drop=True)\n _df_g = _df_g[:10]\n response_dict = dict()\n response_dict['nc'] = \", \".join(_df_g['Keyword'].to_list())\n return response_dict",
"def lda_predict_df(df, col_name, lda_model, dictionary, lda_topic_name_dict=None, only_best_prediction=True):\n# for index, score in sorted(LDAmodel_lang[bow_vector], key=lambda tup: -1*tup[1]):\n# print(\"Score: {}\\t Topic: {}\".format(score, lda_model.print_topic(index, 5)))\n cols = list(df.columns)\n df['bow'] = list(map(lambda doc: dictionary.doc2bow(doc), df[col_name]))\n if only_best_prediction:\n if lda_topic_name_dict is None:\n df['prediction'] = df['bow'].apply(PredictTopicFromBOW,lda_model=lda_model)\n df[['pred_probability','pred_index']] = pd.DataFrame(df.prediction.values.tolist(), index= df.index)\n else:\n df['prediction'] = df['bow'].apply(PredictTopicFromBOW,lda_model=lda_model, lda_topic_name_dict=lda_topic_name_dict)\n df[['pred_probability','pred_index','pred_label']] = pd.DataFrame(df.prediction.values.tolist(), index= df.index)\n df.drop(['prediction'], axis=1)\n else:\n num_topics = len(lda_model.get_topics())\n for i in range(num_topics):\n df[i] = df['bow'].apply(PredictTopicFromBOW,lda_model=lda_model, prediction_index=i)\n# df[['pred_probability','pred_index']] = pd.DataFrame(df.prediction.values.tolist(), index= df.index)\n\n # Unpivot values, and split predictions\n values = [i for i in range(num_topics)]\n df = pd.melt(df, id_vars=cols, value_vars=values)\n df = df[df['value'].isnull()==False].sort_values(by=[col_name])\n df.rename(columns={'variable':'index','value':'prediction'}, inplace=True)\n df[['pred_probability','pred_index']] = pd.DataFrame(df.prediction.values.tolist(), index=df.index)\n \n return df",
"def predicted_tags(classification): \n # translate classification into tag_ids and weights\n try:\n doc = [[tag_id, int(weight/classification_threshold)]\n for tag_id, weight in enumerate(classification)\n if weight > classification_threshold]\n\n # add contribution from all terms in all similar LDA topics\n tag_suggestions = defaultdict(int)\n for topic, weight in lda[doc]:\n for weight, term in lda.show_topic(topic):\n if \"class:\" not in term:\n tag_suggestions[term] += weight\n\n # turn weights into actual suggestions and take topN values\n return [tag for tag in sorted(tag_suggestions,\n key=tag_suggestions.get,\n reverse=True)\n if tag_suggestions[tag] > suggestion_threshold][:topN]\n except IndexError:\n return []",
"def convert_data(test_data,params,list_dict,rational_present=True,topk=2):\n \"\"\"input: params -- input dict, list_dict -- previous predictions containing rationals\n rational_present -- whether to keep rational only or remove them only\n topk -- how many words to select\"\"\"\n \n temp_dict={}\n for ele in list_dict:\n temp_dict[ele['annotation_id']]=ele['rationales'][0]['soft_rationale_predictions']\n \n test_data_modified=[]\n \n for index,row in tqdm(test_data.iterrows(),total=len(test_data)):\n try:\n attention=temp_dict[row['Post_id']]\n except KeyError:\n continue\n topk_indices = sorted(range(len(attention)), key=lambda i: attention[i])[-topk:]\n new_text =[]\n new_attention =[]\n if(rational_present):\n if(params['bert_tokens']):\n new_attention =[0]\n new_text = [101]\n for i in range(len(row['Text'])):\n if(i in topk_indices):\n new_text.append(row['Text'][i])\n new_attention.append(row['Attention'][i])\n if(params['bert_tokens']):\n new_attention.append(0)\n new_text.append(102)\n else:\n for i in range(len(row['Text'])):\n if(i not in topk_indices):\n new_text.append(row['Text'][i])\n new_attention.append(row['Attention'][i])\n test_data_modified.append([row['Post_id'],new_text,new_attention,row['Label']])\n\n df=pd.DataFrame(test_data_modified,columns=test_data.columns)\n return df",
"def _preprocess_data(df, use_preprocessdata=False, save_path=None):\n data = _load_data(df, use_preprocessdata, save_path)\n X = []\n X2 = []\n X3 = []\n X4 = []\n for i, (words, indexes) in enumerate(data):\n X.append(\n _vectorise_bag_of_pos_with_position(words, indexes, DEFAULT_WINDOW_SIZE,\n targets=[df['Pronoun'][i], df['A'][i], df['B'][i]]))\n X2.append(_vectorise_bag_of_pos_with_dependency(words, indexes))\n X3.append(_get_dependency_labels(words, indexes, targets=[df['Pronoun'][i], df['A'][i], df['B'][i]]))\n X4.append(_get_gpt2_likelihood(words, indexes))\n\n X5 = _bert_attentions(df, data)\n X5 = np.array(X5)\n\n X = np.array(X)\n X2 = np.array(X2)\n featur_len = int(X.shape[1] / 3)\n featur_len2 = int(X2.shape[1] / 3)\n X_pr = X[:, 0:featur_len]\n X_a = X[:, featur_len:featur_len*2]\n X_b = X[:, featur_len*2:featur_len*3]\n X2_pr = X2[:, 0:featur_len2]\n X2_a = X2[:, featur_len2:featur_len2*2]\n X2_b = X2[:, featur_len2*2:featur_len2*3]\n X = np.concatenate((\n X_pr - X_a,\n X_pr - X_b,\n X_pr * X_a,\n X_pr * X_b,\n X2_pr - X2_a,\n X2_pr - X2_b,\n X2_pr * X2_a,\n X2_pr * X2_b,\n X3,\n X5,\n (df['Pronoun-offset'] - df['A-offset']).values.reshape(len(X), 1),\n (df['Pronoun-offset'] - df['B-offset']).values.reshape(len(X), 1)\n ), axis=1)\n Y = _get_classify_labels(df)\n return X, Y",
"def display_topics2(model, feature_names, n_top_words=25):\n word_dict = {};\n for topic_idx, topic in enumerate(model.components_):\n word_dict[\"Topic%d\" % (topic_idx)] = [feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]\n return pd.DataFrame(word_dict).T",
"def print_top3_topics(theta_matrix: pandas.DataFrame, model_artm: artm.artm_model.ARTM) -> None:\n top3_topics = theta_matrix[theta_matrix.columns[0]].nlargest(3)\n topic_names = list(top3_topics.index)\n for i, topic_name in enumerate(topic_names):\n print(topic_name, top3_topics[i],\n model_artm.score_tracker[\"TopTokensScore\"].last_tokens[topic_name])",
"def preprocess(self, df, maxlen = 169):\n \n vocabs = self.tk.word_index.keys()\n \n df1 = self.treat_na(df)\n df2 = self.remove_punc_sw(df1)\n df3 = self.remove_numbers(df2)\n df4 = self.lemma_pos(df3)\n df5 = self.bigram(df4)\n df6 = self.combine_bigrams(df5)\n \n new_docs = []\n \n for word_list in df6:\n \n if len(word_list) == 2 and word_list[0].lower() == 'noinfo' and word_list[1].lower() == 'noinfo':\n new_docs.append(list(np.zeros(maxlen)))\n \n else:\n new_word_list = []\n for word in word_list:\n if word not in vocabs:\n word = 'UNKNOWN_TOKEN'\n new_word_list.append(word)\n \n sequence = \" \".join(new_word_list)\n vectors = self.tk.texts_to_sequences([sequence])\n padded_vectors = pad_sequences(vectors, maxlen=maxlen, padding='post', truncating='post')\n \n new_docs.append(list(padded_vectors[0]))\n \n return new_docs",
"def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics",
"def pos_taggers(self, df):\n p2 = []\n post_process = df['Keyword'].tolist() \n p1 = nltk.pos_tag(post_process)\n for i in post_process:\n p2.append(nltk.pos_tag([i]))\n return p1,p2",
"def rank_pre_extract(self, mention_data, predictions):\n mdata = pd.DataFrame(mention_data)\n\n\n\n\n pass",
"def top_cat(df_, feature, top=10):\n alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n labels = alphabet[:top]\n other = alphabet[top + 1]\n top_violation_codes = df_.groupby(feature)[feature].count().sort_values(ascending=False).head(\n top).index.tolist()\n map_values = {k: l for k, l in (zip(top_violation_codes, labels))} # [::-1]\n key_others = set(map_values.keys()) ^ (set(df_.loc[:, feature].values))\n map_others = {k: other for k in key_others}\n map_all = {**map_others, **map_values}\n df_.loc[:, feature] = df_.loc[:, feature].replace(map_all).astype('category')\n return df_",
"def top_cat(df_, feature, top=10):\n alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n labels = alphabet[:top]\n other = alphabet[top + 1]\n top_violation_codes = df_.groupby(feature)[feature].count().sort_values(ascending=False).head(\n top).index.tolist()\n map_values = {k: l for k, l in (zip(top_violation_codes, labels))} # [::-1]\n key_others = set(map_values.keys()) ^ (set(df_.loc[:, feature].values))\n map_others = {k: other for k in key_others}\n map_all = {**map_others, **map_values}\n df_.loc[:, feature] = df_.loc[:, feature].replace(map_all).astype('category')\n return df_",
"def _load_labels_3d(self, results):\n results[\"gt_labels_3d\"] = results[\"ann_info\"][\"gt_labels_3d\"]\n return results",
"def explore_topic_nouns(topic_number, topn=25, model=10):\n #\n if model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n if dfff[dfff['nouns']==term].empty: ## dfff is loaded from pilot_path/bow_nouns.csv\n pass\n else:\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)",
"def prepare_class_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n test_tweets = dataframe.iloc[:, [0, 1, 2]]\r\n\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n feature_X_user = pd.DataFrame\r\n emo_X_test_dict = {}\r\n\r\n\r\n for emotion, model_prop in model_dict.items():\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n print(emotion + 'TRAIN', train_vect.shape)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.concat([test_vect_df, feature_X_user], axis=1)\r\n emo_X_test_dict[emotion] = X_test\r\n print(emotion + 'TEST', test_vect_df.shape, X_test.shape)\r\n return emo_X_test_dict",
"def textTagger():\r\n df_results=pd.DataFrame()\r\n \r\n #for word in medications:\r\n ## print (m_word)\r\n # med_list.append(m_word)\r\n # se=[]\r\n # sentence=[]\r\n counter=0\r\n results=[]\r\n for s_word in surprise_words:\r\n s_word=sent_tokenize(str(s_word))\r\n s_word=\"\".join(s_word)\r\n surprise_list.append(s_word)\r\n \r\n for row in df['full_text']:\r\n sentence=[]\r\n sentence.append(sent_tokenize(str(row)))\r\n #print (row)\r\n time.sleep(1)\r\n results=[]\r\n seen=[]\r\n word_list=[]\r\n found=[]\r\n for word in word_tokenize(str(row)):\r\n word_list.append(word)\r\n #for med in word_tokenize(str(medications)):\r\n # med_list.append(med)\r\n for n in word_list:\r\n \r\n for a in word_tokenize(str(medications)):\r\n #a=re.sub('[\\W_]+', '', a)\r\n #print (a) \r\n if a == n and a not in seen: \r\n found.append(a)\r\n tokens=word_tokenize(str(row))\r\n text=nltk.text.ConcordanceIndex(tokens)\r\n #print (a,n,text)\r\n results.append(concordance(text,str(a)))\r\n seen.append(a)\r\n print (results)\r\n continue\r\n else:\r\n continue\r\n counter+=1\r\n print (counter)\r\n \r\n \r\n df_results['Tag-Word']=found\r\n df_results['Sentence']=pd.Series(results)\r\n df_results.dropna()\r\n df_results.index.name='Index'\r\n #print(df_results)\r\n df_results.to_csv('med_tagged_step2.csv', sep='|')\r\n return df_results",
"def tag_info_df(spark):\n language_list = [\"abap\", \"abc\", \"actionscript\", \"ada\", \"algol\", \"algol 58\", \"algol 60\", \"algol w\", \"algol 68\",\n \"alice\", \"amiga e\", \"apex\", \"apl\", \"applescript\", \"argh!\", \"aargh!\", \"assembly\",\n \"assembly language\", \"autolisp\", \"autolt\", \"awk\",\n \"b\", \"bash\", \"basic\", \"ballerina\", \"bbc basic\", \"bc\", \"bcpl\", \"blitz basic\", \"bourne shell\",\n \"brainfuck\",\n \"c\", \"c++\", \"c#\", \"cfml\", \"cl\", \"classic visual basic\", \"clean\", \"clipper\", \"clojure\", \"cobol\",\n \"comal\", \"common lisp\", \"coffeescript\", \"crystal\", \"c shell\", \"ct\",\n \"d\", \"darkbasic\", \"dart\", \"decimal basic\", \"delphi\", \"delta cobol\", \"div games studio\",\n \"egl\", \"eiffel\", \"elixir\", \"elm\", \"emacs lisp\", \"erlang\", \"euphoria\",\n \"f#\", \"factor\", \"fenix project\", \"forth\", \"fortran\", \"foxpro\",\n \"gambas\", \"gcl\", \"gml\", \"go\", \"grasshopper\", \"groovy\",\n \"hack\", \"haskell\", \"hypertalk\",\n \"icon\", \"inform\", \"io\", \"ironpython\",\n \"j\", \"just another language\", \"java\", \"javascript\", \"just basic\", \"jscript\", \"julia\",\n \"korn shell\", \"kotlin\",\n \"labview\", \"ladder logic\", \"leet\", \"liberty basic\", \"lisp\", \"logo\", \"lua\",\n \"m4\", \"machine\", \"machine language\", \"malbolge\", \"maple\", \"matlab\", \"m-code\", \"mercury\", \"ml\",\n \"modula-2\", \"mondrian\", \"mql4\", \"msl\",\n \"natural\",\n \"oberon\", \"objective-c\", \"objectpal\", \"object pascal\", \"ocaml\", \"opencl\", \"openedge abl\", \"oz\",\n \"pascal\", \"pawn\", \"perl\", \"php\", \"piet\", \"pl/1\", \"pl/i\", \"pl/sql\", \"pl/pgsql\", \"postscript\",\n \"powerbasic\", \"powerbuilder\", \"powershell\", \"processing\", \"progress\", \"prolog\", \"providex\",\n \"purebasic\", \"python\",\n \"q#\", \"qbasic\",\n \"r\", \"raku\", \"rexx\", \"ring\", \"rpg\", \"ruby\", \"rust\",\n \"sas\", \"scala\", \"sed\", \"scheme\", \"scratch\", \"scratch jr.\", \"seed7\", \"self\", \"simula\", \"smalltalk\",\n \"smallbasic\", \"snobol\", \"solidity\", \"spark\", \"spss\", \"sql\", \"stata\", \"swift\",\n \"tcl\", \"tex\", \"ti-basic\", \"transact-sql\", \"t-sql\", \"turbobasic\", \"turbo c\", \"turbo pascal\",\n \"typescript\",\n \"ubasic\",\n \"vala\", \"vala/genie\", \"vb\", \"vbs\", \"vbscript\", \"verilog\", \"vhdl\", \"visual basic\", \"visual c\",\n \"visual foxpro\", \"visual objects\", \"vbscripts\", \"whitespace\",\n \"xslt\", \"xquery\",\n \"yaml\"]\n language_list_col = array(*[lit(x) for x in language_list])\n platform_list = [\"arthur\", \"arx\", \"mos\", \"risc-ix\", \"risc-os\", \"amigaos\", \"amigaos-1.0-3.9\", \"amigaos-4\",\n \"amiga-unix\", \"amsdos\", \"contiki\", \"cp-m-2.2\", \"cp-m-plus\", \"symbos\", \"apple-ii\", \"apple-dos\",\n \"apple-pascal\", \"prodos\", \"gs-os\", \"gno-me\", \"apple-iii\", \"apple-sos\", \"apple-lisa\",\n \"apple-macintosh\", \"classic-mac-os\", \"a-ux\", \"copland\", \"mklinux\", \"pink\", \"rhapsody\", \"macos\",\n \"macos-server\", \"apple-network-server\", \"ibm-aix\", \"apple-messagepad\", \"newton-os\", \"iphone\",\n \"ios\", \"ipad\", \"ipados\", \"apple-watch\", \"watchos\", \"apple-tv\", \"tvos\", \"a-rose\", \"ipod-software\",\n \"netbsd\", \"domain-os\", \"atari-dos\", \"atari-tos\", \"atari-multitos\", \"xts-400\", \"beos\", \"beia\",\n \"beos-r5.1d0\", \"magnussoft-zeta\", \"unix\", \"unix-time-sharing-system-v6\", \"pwb-unix\", \"cb-unix\",\n \"unix-time-sharing-system-v7\", \"unix-system-iii\", \"unix-system-v\", 
\"unix-time-sharing-system-v8\",\n \"unix-time-sharing-system-v9\", \"unix-time-sharing-system-v10\", \"besys\", \"plan-9-from-bell-labs\",\n \"inferno\", \"burroughs-mcp\", \"chippewa-operating-system\", \"kronos\", \"nos\", \"scope\", \"puffin-os\",\n \"convergent-technologies-operating-system\", \"cromemco-dos\", \"cromix\", \"aos\", \"dg-ux\", \"rdos\",\n \"datapoint-2200\", \"datapoint\", \"deos\", \"heartos\", \"cp-m\", \"personal-cp-m\", \"cp-m-68k\", \"cp-m-8000\",\n \"cp-m-86\", \"cp-m-86-plus\", \"personal-cp-m-86\", \"mp-m\", \"mp-m-ii\", \"mp-m-86\", \"mp-m-8-16\",\n \"concurrent-cp-m\", \"concurrent-cp-m-86\", \"concurrent-cp-m-8-16\", \"concurrent-cp-m-68k\", \"dos\",\n \"concurrent-dos\", \"concurrent-pc-dos\", \"concurrent-dos-8-16\", \"concurrent-dos-286\",\n \"concurrent-dos-xm\", \"concurrent-dos-386\", \"concurrent-dos-386-mge\", \"concurrent-dos-68k\",\n \"flexos\", \"flexos-186\", \"flexos-286\", \"siemens-s5-dos-mt\", \"ibm-4680-os\", \"ibm-4690-os\",\n \"toshiba-4690-os\", \"flexos-386\", \"flexos-68k\", \"multiuser-dos\", \"cci-multiuser-dos\",\n \"datapac-multiuser-dos\", \"datapac-system-manager\", \"ims-multiuser-dos\", \"real-32\", \"real-ng\",\n \"dos-plus\", \"dr-dos\", \"palmdos\", \"star-trek\", \"novell-dos\", \"opendos\", \"batch-11-dos-11\", \"hp-ux\",\n \"multi-programming-executive\", \"nonstop\", \"os-8\", \"rsts-e\", \"rsx-11\", \"rt-11\", \"tops-10\", \"tenex\",\n \"tops-20\", \"digital-unix\", \"ultrix\", \"vms\", \"waits\", \"ose\", \"towns-os\", \"os-iv\", \"msp\", \"msp-ex\",\n \"real-time-multiprogramming-operating-system\", \"gcos\", \"multics\", \"chromium-os\", \"chrome-os\",\n \"container-optimized-os\", \"android\", \"glinux\", \"fuchsia\", \"integrity\", \"integrity-178b\",\n \"u-velosity\", \"vulcan-o-s\", \"harris-unix\", \"hdos\", \"ht-11\", \"hp-multi-programming-executive\",\n \"nonstop-os\", \"cp-6\", \"harmony-os\", \"irmx\", \"isis\", \"compatible-time-sharing-system\",\n \"gm-os-&-gm-naa-i-o\", \"ibsys\", \"ijmon\", \"share-operating-system\",\n \"university-of-michigan-executive-system\", \"os-360-and-successors\", \"os-360\", \"mft\", \"mft-ii\",\n \"mvt\", \"system-370\", \"os-vs1\", \"multiple-virtual-storage\", \"mvs-xa\", \"mvs-esa\", \"os-390\",\n \"phoenix-mvs\", \"z-os\", \"dos-360-and-successors\", \"bos-360\", \"tos-360\", \"dos-360\", \"dos-vs\",\n \"dos-vse\", \"vse-sp\", \"z-vse\", \"cp-cms\", \"cp-40\", \"cp-67\", \"vm-370\", \"vm-xa\", \"virtual-machine\",\n \"z-vm\", \"acp\", \"tpf\", \"z-tpf\", \"unix-like\", \"aix-370\", \"aix-esa\", \"opensolaris-for-system-z\",\n \"uts\", \"linux-on-ibm-z\", \"mts\", \"tss-360\", \"music-sp\", \"orvyl-and-wylbur\", \"pc-dos\", \"os-2\",\n \"os-2-warp\", \"ecomstation\", \"arcaos\", \"aix\", \"ibm-series-1\", \"edx\", \"rps\", \"cps\", \"serix\",\n \"ibm-1130\", \"dms\", \"ibm-1800\", \"tsx\", \"mpx\", \"ibm-8100\", \"dpcx\", \"dppx\", \"ibm-system-3\",\n \"ibm-system-34\", \"ibm-system-38\", \"cpf\", \"ibm-system-88\", \"stratus-vos\", \"as-400\", \"os-400\",\n \"i5-os\", \"ibm-i\", \"workplace-os\", \"k42\", \"dynix\", \"j\", \"george\", \"executive\", \"tme\", \"icl-vme\",\n \"vme-k\", \"remix-os\", \"lynxos\", \"microc-os-ii\", \"microc-os-iii\", \"xenix\", \"msx-dos\", \"ms-dos\",\n \"dos-v\", \"windows\", \"windows-1.0\", \"windows-2.0\", \"windows-3.0\", \"windows-3.1x\",\n \"windows-for-workgroups-3.1\", \"windows-3.2\", \"windows-for-workgroups-3.11\", \"windows-95\",\n \"windows-98\", \"windows-millennium-edition\", \"windows-nt\", \"windows-nt-3.1\", 
\"windows-nt-3.5\",\n \"windows-nt-3.51\", \"windows-nt-4.0\", \"windows-2000\", \"windows-xp\", \"windows-server-2003\",\n \"windows-fundamentals-for-legacy-pcs\", \"windows-vista\", \"windows-azure\", \"windows-home-server\",\n \"windows-server-2008\", \"windows-7\", \"windows-phone-7\", \"windows-server-2008-r2\",\n \"windows-home-server-2011\", \"windows-server-2012\", \"windows-8\", \"windows-phone-8\", \"windows-8.1\",\n \"windows-phone-8.1\", \"windows-server-2012-r2\", \"xbox-one-system-software\", \"windows-10\",\n \"windows-10-mobile\", \"windows-server-2016\", \"windows-server-2019\", \"windows-ce\", \"windows-ce-3.0\",\n \"windows-ce-5.0\", \"windows-ce-6.0\", \"windows-embedded-compact-7\", \"windows-embedded-compact-2013\",\n \"windows-mobile\", \"singularity\", \"midori\", \"xbox-360-system-software\", \"azure-sphere\", \"threadx\",\n \"altair-dos\", \"mobilinux\", \"tmx\", \"imos\", \"vrx\", \"es\", \"nextstep\", \"netware\", \"unixware\",\n \"novell-supernos\", \"novell-corsair\", \"novell-expose\", \"open-enterprise-server\", \"rtxc-quadros\",\n \"time-sharing-operating-system\", \"dspnano-rtos\", \"bada\", \"tizen\", \"sco-unix\", \"sco-open-desktop\",\n \"sco-openserver\", \"berkeley-timesharing-system\", \"pikeos\", \"trsdos\", \"color-basic\", \"newdos-80\",\n \"deskmate\", \"edos\", \"ti-rtos-kernel\", \"tron\", \"t-kernel\", \"exec-i\", \"exec-ii\", \"exec-8\", \"vs-9\",\n \"wps\", \"ois\", \"vxworks\", \"z80-rio\", \"zorin-os\", \"lisp-machines--inc.\", \"symbolics\",\n \"texas-instruments\", \"xerox\", \"andos\", \"csi-dos\", \"mk-dos\", \"pilot\", \"perq\", \"elbrus\", \"eos\",\n \"elxsi\", \"mai-basic-four\", \"michigan-terminal-system\", \"es-evm\", \"pc-mos-386\", \"buran\",\n \"sintran-iii\", \"skyos\", \"soda\", \"theos\", \"tsx-32\", \"dx10\", \"aegis\", \"coherent\", \"dc-osx\", \"dnix\",\n \"helios\", \"interactive-unix\", \"irix\", \"meikos\", \"os-9\", \"os-9000\", \"osf-1\", \"openstep\", \"qnx\",\n \"rmx\", \"sinix\", \"solaris\", \"sunos\", \"super-ux\", \"system-v\", \"system-v-at--386\", \"trusted-solaris\",\n \"uniflex\", \"unicos\", \"zenix\", \"minix\", \"bsd\", \"freebsd\", \"dragonflybsd\", \"midnightbsd\", \"ghostbsd\",\n \"trueos\", \"openbsd\", \"bitrig\", \"darwin\", \"gnu\", \"linux\", \"redox\", \"android-x86\",\n \"cray-linux-environment\", \"opensolaris\", \"illumos\", \"openindiana\", \"nexenta-os\", \"smartos\",\n \"rtems\", \"haiku\", \"syllable-desktop\", \"vsta\", \"plurix\", \"tunis\", \"dahliaos\", \"cosmos\", \"freedos\",\n \"genode\", \"ghost-os\", \"its\", \"osfree\", \"osv\", \"phantom-os\", \"reactos\", \"sharpos\", \"templeos\",\n \"visopsys\", \"research-unix\", \"amoeba\", \"croquet\", \"eros\", \"capros\", \"harmony\", \"helenos\", \"house\",\n \"ilios\", \"l4\", \"mach\", \"nemesis\", \"spring\", \"the-multiprogramming-system\", \"thoth\", \"v\", \"verve\",\n \"xinu\", \"86-dos\", \"dr-dos-startrek\", \"dr-dos-winbolt\", \"pts-dos\", \"turbodos\", \"desqview\",\n \"x-windowing\", \"banyan-vines\", \"cambridge-ring\", \"cisco-ios\", \"cisco-nx-os\", \"ctos\", \"data-ontap\",\n \"extremeware\", \"extremexos\", \"fabric-os\", \"junos\", \"network-operating-system\",\n \"novell-open-enterprise-server\", \"plan-9\", \"blis-cobol\", \"bluebottle\", \"bs1000\", \"bs2000\",\n \"bs3000\", \"flex9\", \"gem\", \"geos\", \"javaos\", \"jnode\", \"jx\", \"kernal\", \"merlin\", \"morphos\",\n \"fujitsu\", \"oberon-(operating-system)\", \"osd-xc\", \"pick\", \"primos\", \"sinclair-qdos\", \"ssb-dos\",\n \"symobi\", \"tripos\", 
\"ucsd-p-system\", \"vos\", \"vos3\", \"vm2000\", \"visi-on\", \"vps-vm\", \"aros\",\n \"atheos\", \"baremetal\", \"dexos\", \"emutos\", \"lse-os\", \"menuetos\", \"kolibrios\", \"toaruos\", \"ponyos\",\n \"serenityos\", \"dip-dos\", \"embedded-linux\", \"replicant\", \"lineageos\",\n \"list-of-custom-android-distributions\", \"firefox-os\", \"angstrom-distribution\", \"familiar-linux\",\n \"maemo\", \"openzaurus\", \"webos\", \"access-linux-platform\", \"openmoko-linux\", \"ophone\", \"meego\",\n \"moblin\", \"motomagx\", \"qt-extended\", \"sailfish-os\", \"ubuntu-touch\", \"postmarketos\", \"magic-cap\",\n \"palm-os\", \"pen-geos\", \"penpoint-os\", \"pvos\", \"symbian-os\", \"epoc\", \"pocket-pc\", \"windows-phone\",\n \"ipodlinux\", \"iriver-clix\", \"rockbox\", \"blackberry-os\", \"symbian-platform\", \"blackberry-10\",\n \"catos\", \"ios-xr\", \"lancom-systems\", \"openwrt\", \"dd-wrt\", \"lede\", \"gargoyle\", \"librecmc\",\n \"zeroshell\", \"rtos\", \"m0n0wall\", \"opnsense\", \"pfsense\", \"apache-mynewt\", \"chibios-rt\",\n \"erika-enterprise\", \"ecos\", \"nucleus-rtos\", \"nuttx\", \"ncos\", \"freertos--openrtos-and-safertos\",\n \"openembedded\", \"psos\", \"rex-os\", \"riot\", \"rom-dos\", \"tinyos\", \"rt-thread\", \"windows-iot\",\n \"windows-embedded\", \"wombat-os\", \"zephyr\", \"brickos\", \"lejos\", \"cambridge-cap-computer\",\n \"flex-machine\", \"hydra\", \"keykos\"] # generated from util/platform_list.rb\n platform_list_col = array(*[lit(x) for x in platform_list])\n\n df = spark.read.parquet(\"/user/***REMOVED***/StackOverflow/PostHistory.parquet\") \\\n .select([\"_PostId\", \"_Text\", '_PostHistoryTypeId']) \\\n .filter(col(\"_PostHistoryTypeId\") == 3) \\\n .withColumn(\"_Tags\", expr(\"substring(_Text, 2, length(_Text) - 2)\")) \\\n .withColumn(\"_Tags\", split(col(\"_Tags\"), \"><\")) \\\n .withColumn(\"#tags\", when(size(\"_Tags\") < 0, 0).otherwise(size(\"_Tags\"))) \\\n .withColumn(\"contains_language_tag\", size(array_intersect(\"_Tags\", language_list_col)) > 0) \\\n .withColumn(\"contains_platform_tag\", size(array_intersect(\"_Tags\", platform_list_col)) > 0) \\\n .drop(\"_Tags\", \"_PostHistoryTypeId\", \"_Text\") \\\n .withColumnRenamed('_PostId', '_Id')\n\n return df",
"def store_predictions(df):\n ts = df[df.columns[1]]\n base = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted '+ df.columns[1])\n base.index = df['year']\n base = base.append(pd.DataFrame(preds), sort = True)\n for col in df.columns[2:]:\n ts = df[col]\n temp = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted ' + col)\n temp.index = df['year']\n temp = temp.append(pd.DataFrame(preds), sort = True)\n base = base.join(temp)\n return base",
"def bestThreshold3(lda_imlementation, vectorized_coded_revs, column, best_tops,\n coded_reviews_df):\n cat_array = lda_imlementation.transform(vectorized_coded_revs)\n\n topic_array = []\n for i in range(lda_imlementation.n_topics):\n coded_reviews_df['topic_'+str(i)] = cat_array[:, i]\n topic_array += ['topic_'+str(i)]\n\n # Compute ROC curve point\n y_score = sum([coded_reviews_df[top] for top in best_tops])\n precision, recall, thresh = precision_recall_curve(\n coded_reviews_df[column], y_score)\n\n\n prc_thresh = zip(precision, recall, thresh)\n prc_df = pd.DataFrame(prc_thresh, columns=['prec', 'recall', 'threshold'])\n prc_df2 = prc_df[prc_df.recall > .5]\n return prc_df2.threshold.iloc[prc_df2.prec.argmax()]",
"def construct_df_topics(self, n_words=20):\n\n self.check_model()\n topic_keywords = []\n keywords = array(self.vectorizer.get_feature_names())\n\n for topic_weights in self.model.components_:\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\n topic_keywords.append(keywords.take(top_keyword_locs))\n\n self.df_topic_keywords = pd.DataFrame(topic_keywords)\n self.df_topic_keywords.columns = ['Word ' + str(i) for i in range(self.df_topic_keywords.shape[1])]\n self.df_topic_keywords.index = ['Topic ' + str(i) for i in range(self.df_topic_keywords.shape[0])]",
"def topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df",
"def model_topics(df):\n\n data = df.text.values.tolist()\n data_words = list(sent_to_words(data))\n\n # Build the bigram and trigram models\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n # Faster way to get a sentence clubbed as a trigram/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n\n # Remove Stop Words\n data_words_nostops = remove_stopwords(data_words)\n\n # Form Bigrams\n data_words_bigrams = make_bigrams(data_words_nostops,bigram_mod)\n\n # Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n nlp = spacy.load('en', disable=['parser', 'ner'])\n\n # Do lemmatization keeping only noun, adj, vb, adv\n data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_lemmatized)\n\n # Create Corpus\n texts = data_lemmatized\n\n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n # Perform Topic Modeling for number of topics ranging from 5 to 50 in steps of 5\n model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=5, limit=50, step=5)\n\n return model_list,coherence_values,corpus,id2word",
"def get_keywords_text_rank(data, top_k) -> pd.DataFrame:\n id_list, title_list, abstract_list = data['id'], data['title'], data['abs']\n ids, titles, keys = [], [], []\n # load custom stopwords\n jieba.analyse.set_stop_words(\"data/stopWord.txt\")\n for index in range(len(id_list)):\n # concat title and abstract\n text = '%s。%s' % (title_list[index], abstract_list[index])\n print('process {} by text rank:'.format(title_list[index]))\n keywords = jieba.analyse.textrank(text, topK=top_k, allowPOS=('n', 'nz', 'v', 'vd', 'vn', 'l', 'a', 'd'))\n word_split = ' '.join(keywords)\n print('keywords is {}'.format(word_split))\n keys.append(word_split.encode('utf-8'))\n ids.append(id_list[index])\n titles.append(title_list[index])\n\n result = pd.DataFrame({\"id\": ids, \"key\": keys}, columns=['id', 'key'])\n return result",
"def topic_wordcloud(top_model):\n\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'\n\n cloud = WordCloud(stopwords=stop_words,\n background_color='white',\n width=2500,\n height=1800,\n max_words=20,\n colormap='tab10',\n color_func=lambda *args, **kwargs: cols[i],\n prefer_horizontal=1.0)\n\n topics = top_model.show_topics(formatted=False)\n\n fig, axes = plt.subplots(3, 3, figsize=(10,10), sharex=True, sharey=True)\n\n for i, ax in enumerate(axes.flatten()):\n fig.add_subplot(ax)\n topic_words = dict(topics[i][1])\n cloud.generate_from_frequencies(topic_words, max_font_size=300)\n plt.gca().imshow(cloud)\n plt.gca().set_title('Topic ' + str(i), fontdict=dict(size=16))\n plt.gca().axis('off')\n\n\n plt.subplots_adjust(wspace=0, hspace=0)\n plt.axis('off')\n plt.margins(x=0, y=0)\n plt.tight_layout()\n plt.show()",
"def data_preprocessing(dat: pd.DataFrame, art='C', y=None, logger=None, remove=True):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n logger.info('Start data preprocessing')\n # replace original indeices with default ones\n dat = dat.reset_index(drop=True)\n\n if art == 'C':\n logger.info('Start to label target feature y for classification task')\n dat.iloc[:, -1] = LabelEncoder().fit_transform(dat.iloc[:, -1])\n logger.info('End with label encoding the target feature')\n if remove:\n # remove columns with more than 1/2 na\n dat = dat.loc[:, dat.isna().sum()/len(dat) < .5]\n logger.info('Following features are removed from the dataframe because half of their value are NA: %s' %\n (dat.columns[dat.isna().sum()/len(dat) > .5].to_list()))\n # Encoding\n oe = OneHotEncoder(drop='first')\n # get categorical columns\n if y:\n dat_y = dat[[y]]\n cols = dat.columns.to_list()\n cols.remove(y)\n dat_x = dat[cols]\n else:\n dat_y = dat[[dat.columns[-1]]]\n dat_x = dat[dat.columns[:-1]]\n dat_categ = dat_x.select_dtypes(include=['object'])\n # get kterm of categ features\n for i in dat_categ.columns:\n # save output to dat\n tmp = dat_x[i].value_counts()\n dat_x[i + '_kterm'] = dat_x[i].map(lambda x: tmp[x] if x in tmp.index else 0)\n # float columns including the k term cols\n dat_numeric = dat_x.select_dtypes(include=['float32', 'float64', 'int32', 'int64'])\n # onehot encoding and label encoding\n dat_categ_onehot = dat_categ.iloc[:, dat_categ.apply(lambda x: len(x.unique())).values < 8]\n dat_categ_label = dat_categ.iloc[:, dat_categ.apply(lambda x: len(x.unique())).values >= 8]\n flag_onehot = False\n flag_label = False\n # oe\n if dat_categ_onehot.shape[1] > 0:\n logger.info('Start to do onehot to the following categoric features: %s' %\n (str(dat_categ_onehot.columns.to_list())))\n dat_onehot = pd.DataFrame(oe.fit_transform(dat_categ_onehot.astype(str)).toarray(),\n columns=oe.get_feature_names(dat_categ_onehot.columns))\n logger.info('End with onehot')\n flag_onehot = True\n else:\n dat_onehot = None\n # le\n if dat_categ_label.shape[1] > 0:\n logger.info('Start to do label encoding to the following categoric features: %s' %\n (str(dat_categ_label.columns.to_list())))\n dat_categ_label = dat_categ_label.fillna('NULL')\n dat_label = pd.DataFrame(columns=dat_categ_label.columns)\n for i in dat_categ_label.columns:\n dat_label[i] = LabelEncoder().fit_transform(dat_categ_label[i].astype(str))\n flag_label = True\n logger.info('End with label encoding')\n else:\n dat_label = None\n # scaling\n # combine\n dat_new = pd.DataFrame()\n if flag_onehot and flag_label:\n dat_new = pd.concat([dat_numeric, dat_onehot, dat_label], axis=1)\n elif flag_onehot:\n dat_new = pd.concat([dat_numeric, dat_onehot], axis=1)\n elif flag_label:\n dat_new = pd.concat([dat_numeric, dat_label], axis=1)\n else:\n dat_new = dat_numeric\n dat_new = pd.concat([dat_new, dat_y], axis=1)\n # imputation\n dat_new = dat_new.dropna(axis=1, how='all')\n if dat_new.isna().sum().sum() > 0:\n logger.info('Nan value exist, start to fill na with iterative imputer: ' +\n str(dat_new.isna().sum().sum()))\n # include na value, impute with iterative Imputer or simple imputer\n columns = dat_new.columns\n imp = IterativeImputer(max_iter=10, random_state=0)\n # imp = SimpleImputer(missing_values=np.nan, strategy='mean')\n dat_new = imp.fit_transform(dat_new)\n dat_new = pd.DataFrame(dat_new, columns=columns)\n 
dat_numeric = dat_new.iloc[:, :-1].select_dtypes(include=['float32', 'float64', 'int32', 'int64'])\n logger.info('End with fill nan')\n return dat_new, dat_numeric.columns",
"def predict(self, df):\n results = [] \n _ds = pdfds.DataFrameDataset(df, self.fields) \n _iter = BucketIterator(_ds, batch_size=16, sort_key=lambda x: len(x.text),\n train=False, sort=True, sort_within_batch=True)\n self.odel.eval()\n with torch.no_grad():\n for (labels, text), _ in _iter:\n labels = labels.type(torch.LongTensor)\n text = text.type(torch.LongTensor)\n _, output = self.model(text, labels)\n sm = torch.nn.Softmax(dim=1)\n results.extend( sm(output).tolist()[1] )\n return results",
"def explore_topic(topic_number, topn=25, model=10):\n #\n if model==25:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_25'))\n topicname=topic_names_25[topic_number]\n gensimSTR=''\n elif model==15:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_15'))\n topicname=topic_names_15[topic_number]\n gensimSTR=''\n elif model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)\n ##",
"def predict(self, text, top_num=5):\n text = Suggest_Util.clean_data(text)\n encoded = Suggest_Util.text_to_id(text, self.word_to_id)\n encoded_padded = tf.keras.preprocessing.sequence.pad_sequences([encoded], maxlen=self.input_length, padding='pre')\n\n prediction = self.model.predict(encoded_padded)\n prediction = tf.squeeze(prediction, 0)\n predict_words = np.argsort(prediction[self.input_length-1, :])[::-1][:top_num]\n \n return [self.id_to_word[str(word)] for word in predict_words]"
]
| [
"0.6544521",
"0.615879",
"0.6092302",
"0.5765394",
"0.56590444",
"0.5627526",
"0.5603541",
"0.55920047",
"0.5513635",
"0.54929227",
"0.54802614",
"0.5461552",
"0.54266167",
"0.54266167",
"0.53978384",
"0.5396371",
"0.53603154",
"0.5324885",
"0.5318619",
"0.5304675",
"0.52756804",
"0.5273923",
"0.52255464",
"0.5207087",
"0.52046067",
"0.51954794",
"0.5176221",
"0.5163675",
"0.51575583",
"0.51559484"
]
| 0.68351144 | 0 |