query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4-10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Make a copy of this instance. Copies the local data stored as simple types and copies the client attached to this instance.
|
def copy(self):
new_client = self._client.copy()
return self.__class__(self.instance_id, new_client,
self._cluster_location_id,
display_name=self.display_name)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def copy(self) -> \"WOQLClient\":\n return copy.deepcopy(self)",
"def copy(self):\n return self.__class__(self.items, self.is_cloud)",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n pass",
"def copy(self):\n from copy import deepcopy\n return deepcopy(self)",
"def copy(self):\n return self.__class__(self.value, self.is_cloud)",
"def copy(self):\n return self.from_builder(self)",
"def copy(self):\n new = object.__new__(type(self))\n new.approximate_online_count = self.approximate_online_count\n new.approximate_user_count = self.approximate_user_count\n new.description = self.description\n new.discovery_splash_hash = self.discovery_splash_hash\n new.discovery_splash_type = self.discovery_splash_type\n new.emojis = self.emojis.copy()\n features = self.features\n if (features is not None):\n features = (*features,)\n new.features = features\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = self.id\n new.invite_splash_hash = self.invite_splash_hash\n new.invite_splash_type = self.invite_splash_type\n new.stickers = self.stickers.copy()\n new.name = self.name\n return new",
"def _copy_(self):\n return copy.copy(self)",
"def copy (self):\n import copy\n return copy.copy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n return deepcopy(self)",
"def copy(self):\n \n return deepcopy(self)",
"def copy(self):\n\n return deepcopy(self)",
"def copy(self):\n\n return deepcopy(self)",
"def copy(self):\n\n return deepcopy(self)"
] |
[
"0.6870998",
"0.67256683",
"0.6523121",
"0.6523121",
"0.6523121",
"0.6512271",
"0.6481088",
"0.6477343",
"0.6454667",
"0.6447611",
"0.64095336",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6341214",
"0.6337191",
"0.6312455",
"0.6312455",
"0.6312455"
] |
0.6868304
|
1
|
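The metadata column above declares a single "triplet" objective over (query, document, negatives). Below is a minimal sketch of how one row could be unpacked into training triplets, assuming the row is available as a plain Python dict keyed by the column names shown in the header; the score-based filter is an illustrative extra, not something the metadata prescribes.

```python
def row_to_triplets(row):
    """Yield (query, positive, negative) triplets from one dataset row."""
    query = row["query"]
    positive = row["document"]
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        # Scores are stored as strings in the preview, so cast before comparing.
        # Keeping only negatives scored below the positive document is an
        # optional, illustrative filter (assumption, not part of the metadata).
        if float(score) < float(row["document_score"]):
            yield (query, positive, negative)
```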
Crawl the IMDB top 250 movies, save CSV with their information.
|
def main():
# Create a directory to store copies of all the relevant HTML files (those
# will be used in testing).
print('Setting up backup dir if needed ...')
create_dir(BACKUP_DIR)
# Make backup of the IMDB top 250 movies page
print('Access top 250 page, making backup ...')
top_250_html = simple_get(TOP_250_URL)
top_250_dom = BeautifulSoup(top_250_html, "lxml")
make_backup(os.path.join(BACKUP_DIR, 'index.html'), top_250_html)
# extract the top 250 movies
print('Scraping top 250 page ...')
url_strings = scrape_top_250(top_250_dom)
# grab all relevant information from the 250 movie web pages
rows = []
for i, url in enumerate(url_strings): # Enumerate, a great Python trick!
print('Scraping movie %d ...' % i)
# Grab web page
movie_html = simple_get(url)
# Extract relevant information for each movie
movie_dom = BeautifulSoup(movie_html, "lxml")
rows.append(scrape_movie_page(movie_dom))
# Save one of the IMDB's movie pages (for testing)
if i == 83:
html_file = os.path.join(BACKUP_DIR, 'movie-%03d.html' % i)
make_backup(html_file, movie_html)
# Save a CSV file with the relevant information for the top 250 movies.
print('Saving CSV ...')
save_csv(os.path.join(SCRIPT_DIR, 'top250movies.csv'), rows)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_csv(outfile, movies):\n fieldnames = ['title', 'rating', 'year', 'actors', 'runtime']\n with open('movies.csv', 'w') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=fieldnames)\n writer.writeheader()\n for line in movies:\n writer.writerow(line)\n\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK",
"def save_csv(outfile, movies):\n writer = csv.writer(outfile)\n writer.writerow(['Title', 'Rating', 'Year', 'Actors', 'Runtime'])\n for movie in movies:\n writer.writerow(movie)\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK",
"def scrape_top_250(soup):\n # to save the urls\n movie_urls = []\n\n # take the part where all the movies are\n content = soup.find(\"tbody\", \"lister-list\").find_all(\"tr\")\n\n # for every movie take the absolute url\n for title in content:\n url = \"http://www.imdb.com\" + title.find(\"td\", \"titleColumn\").a[\"href\"]\n movie_urls.append(url)\n\n return movie_urls",
"def get_imdb_movies(max_movies: int):\n from imdb.database import index_movie_to_database\n movie_count = 0\n for url in crawl_imdb_listing_page(TOP_PAGE):\n if movie_count == max_movies:\n break\n movie_data = parse_movie_page(url)\n if index_movie_to_database(movie_data):\n movie_count += 1",
"def unite_imdb_profiles(verbose):\n if verbose:\n print(\"Uniting IMDB movie profiles to one csv file...\")\n if not os.path.exists(_IMDB_DIR_PATH):\n print(\"No IMDB profiles to unite!\")\n return\n profiles = []\n profile_files = os.listdir(_IMDB_DIR_PATH)\n if verbose:\n profile_files = tqdm(profile_files)\n for profile_file in profile_files:\n if verbose:\n profile_files.set_description('Reading {}'.format(profile_file))\n file_path = os.path.join(_IMDB_DIR_PATH, profile_file)\n _, ext = os.path.splitext(file_path)\n if ext == '.json':\n with open(file_path, 'r') as json_file:\n profiles.append(json.load(json_file))\n df = pd.DataFrame(profiles)\n df = _decompose_dict_column(df, 'avg_rating_per_demo', _DEMOGRAPHICS)\n df = _decompose_dict_column(df, 'votes_per_demo', _DEMOGRAPHICS)\n df = _decompose_dict_column(\n df, 'rating_freq', [str(i) for i in range(1, 11)])\n df = _dummy_list_column(df, 'genres')\n unison_fpath = os.path.join(\n _get_dataset_dir_path(), 'imdb_dataset.csv')\n df.to_csv(unison_fpath, index=False)",
"def download_data(self):\n content = requests.get(self.TOP_250_LIST)\n soup = BeautifulSoup(content.content, 'lxml')\n movies = soup.select('tbody.lister-list tr')\n for m in movies:\n title_column = m.select('td.titleColumn')\n link = self.format_link(title_column[0].a['href'])\n title = self.format_title(title_column[0].a.string.encode('utf-8'))\n path = 'pages/{}.html'.format(title)\n if os.path.isfile(path):\n continue\n response = requests.get(link)\n with open(path, 'wr') as f:\n f.write(response.content)",
"def extract_movies(dom):\n\n movie_csv = []\n for movie in find_div(dom):\n title = find_title(movie)\n rating = find_rating(movie)\n year = find_year(movie)\n actors = find_actors(movie)\n runtime = find_runtime(movie)\n movie_list = append_movie(title, rating, year, actors, runtime)\n movie_csv.append(movie_list)\n return movie_csv # REPLACE THIS LINE AS WELL IF APPROPRIsATE",
"def movie_spider(self, movieTag):\n index = 0\n logging.info(\"Start crawling tag: %s\" % movieTag)\n while index < self.MAX_NUM:\n root = \"https://movie.douban.com/tag/%s?start=%d&type=T\" % (movieTag, index)\n result = {}\n try:\n html = requests.get(root, headers=random.choice(self.headers)).content\n tree = etree.HTML(html.decode('utf-8'))\n items = tree.xpath(\"//table/tr[@class='item']\")\n if len(items) == 0:\n break\n index += len(items)\n for item in items:\n itemURL = item.xpath(\"td/a[@class='nbg']/@href\")[0].strip()\n itemHTML = requests.get(itemURL, headers=random.choice(self.headers)).content\n itemTree = etree.HTML(itemHTML.decode('utf-8'))\n title = itemTree.xpath(\"//h1/span[@property='v:itemreviewed']/text()\")[0].strip()\n info = itemTree.xpath(\"//div[@class='subject clearfix']/div[@id='info']\")[0]\n director = info.xpath(\".//a[@rel='v:directedBy']/text()\")\n scriptor = info.xpath(\"span\")[1].xpath(\"span/a/text()\") # scriptor is not well formatted\n actors = info.xpath(\".//a[@rel='v:starring']/text()\")\n genre = info.xpath(\".//span[@property='v:genre']/text()\")\n initDate = info.xpath(\".//span[@property='v:initialReleaseDate']/text()\")\n runtime = info.xpath(\".//span[@property='v:runtime']/text()\")\n rating = itemTree.xpath(\"//strong[@property='v:average']/text()\")[0].strip()\n \n result['title'] = title\n result['director'] = '/'.join(director[:])\n result['scriptor'] = '/'.join(scriptor[:])\n result['actors'] = '/'.join(actors[:])\n result['genre'] = '/'.join(genre[:])\n result['initDate'] = '/'.join(initDate[:])\n result['runtime'] = '/'.join(runtime[:])\n result['rating'] = rating\n\n self._movie_list.append(result)\n result = {}\n\n except Exception as e:\n logging.exception(\"Error while crawling tag: %s\" % movieTag)",
"def main():\n\n # open links.csv in order to access IMDB id numbers\n ifile = open('movie-countries.csv', \"rb\")\n reader = csv.reader(ifile)\n \n # writer for csv with countries\n ofile = open('country_stats.csv', \"wb\")\n writer = csv.writer(ofile)\n\n # deal with headers\n reader.next() # skip first line\n writer.writerow(['country', 'number of movies', 'number of primary movies'])\n\n # one dictionary for all mention of a country, one dictionary for if the country was the first one listed\n country_count_dict = {}\n country_count_primary_dict= {}\n\n # iterate through data\n for row in reader:\n # get the countries column\n countries = row[3]\n\n # add to dicionary of countries\n for country in countries.split(\"|\"):\n country_count_dict[country] = country_count_dict.get(country, 0) + 1\n\n # if it's the primary country\n if country == countries.split(\"|\")[0]:\n country_count_primary_dict[country] = country_count_primary_dict.get(country, 0) + 1\n\n # write to the file\n for key, value in country_count_dict.iteritems():\n writer.writerow([key , str(value), country_count_primary_dict.get(key, \"0\")])\n\n ifile.close()\n ofile.close()",
"def save_movies(self, filename):\n out_file = open('{}'.format(filename), 'w')\n for movie in self.movies:\n out_file.write(\"{}\\n\".format(movie))\n out_file.close()",
"def scrape_top_250(self):\n\n page = requests.get(self.url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(class_=\"lister-list\")\n films = results.find_all(\"tr\")\n page.close()\n\n film_list = []\n for film in films:\n film_list.append(film)\n\n return film_list",
"def crawl_by_title(movie_name, verbose, year=None, parent_pbar=None):\n def _print(msg):\n if verbose:\n if parent_pbar is not None:\n parent_pbar.set_description(msg)\n parent_pbar.refresh()\n sys.stdout.flush()\n tqdm()\n else:\n print(msg)\n\n os.makedirs(_IMDB_DIR_PATH, exist_ok=True)\n file_name = _parse_name_for_file_name(movie_name) + '.json'\n file_path = os.path.join(_IMDB_DIR_PATH, file_name)\n if os.path.isfile(file_path):\n _print('{} already processed'.format(movie_name))\n return _result.EXIST\n\n # _print(\"Extracting a profile for {} from IMDB...\".format(movie_name))\n try:\n props = crawl_movie_profile(movie_name, year)\n # _print(\"Profile extracted succesfully\")\n # _print(\"Saving profile for {} to disk...\".format(movie_name))\n with open(file_path, 'w+') as json_file:\n # json.dump(props, json_file, cls=_RottenJsonEncoder, indent=2)\n json.dump(props, json_file, indent=2)\n _print(\"Done saving a profile for {}.\".format(movie_name))\n return _result.SUCCESS\n except Exception as exc:\n _print(\"Extracting a profile for {} failed\".format(movie_name))\n # traceback.print_exc()\n return _result.FAILURE\n # print(\"Extracting a profile for {} failed with:\".format(movie_name))\n # raise exc",
"def get_top_1000_movie_links():\n movie_links = []\n for start in range (1, 1000, 50):\n imdb_movie_list_page = requests.get(f'https://www.imdb.com/search/title/?groups=top_1000&view=simple&sort=user_rating,desc&start={start}')\n soup = BeautifulSoup(imdb_movie_list_page.text, 'html.parser')\n\n movie_list_div = soup.find('div', attrs={'class': 'lister list detail sub-list'})\n movie_item_spans = movie_list_div.find_all('span', attrs={'class':'lister-item-header'})\n links = [item.find('a').attrs['href'] for item in movie_item_spans]\n\n movie_links += links\n\n return [f'https://www.imdb.com{l}fullcredits/' for l in movie_links]",
"def _do_action_import_movie_info(self):\n self._run_express_job(\n \"org.kiji.tutorial.load.MovieInfoImporter\",\n options=\"--movie-info ml-100k/u.item\"\n )\n self._scan_table(\"movies\")",
"def douban_movies(root):\n start = time.time()\n task_path = assert_dirs(root, 'chinese_reviews_douban_movies')\n url_json = 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_douban_movies/chinese_reviews_douban_movies.json'\n url_movies = 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_douban_movies/movies.txt'\n url_ratings = 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_douban_movies/ratings.txt'\n rq.json(url_json, path_join(task_path, 'chinese_reviews_douban_movies.json'))\n rq.table(url_movies, path_join(task_path, 'movies.txt'))\n l = [url_ratings[:-4]+str(i)+url_ratings[-4:] for i in range(13)]\n with concurrent.futures.ProcessPoolExecutor() as excutor:\n data = pd.concat(excutor.map(_request_txt, l))\n data.to_csv(path_join(task_path, 'ratings.txt'), index=False)\n print('chinese_reviews_douban_movies dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))\n return task_path",
"def load_movies_reviews():\n data = pd.read_csv(CSV_PATH + MOVIES_REVIEWS_CSV_NAME).T.to_dict()\n for i in range(len(data)):\n movie_id = Movies.query.filter(Movies.title == data[i]['Title'].strip()).first().id\n review = data[i]['Reviews'].strip()\n rating = float(data[i]['Rating'])*100000\n review_exist = Reviews.query.filter(Reviews.review == review).first()\n if not review_exist:\n db.session.add(Reviews(movie_id=movie_id, review=review, rating=int(rating)))\n db.session.commit()\n db.session.close()\n db.session.close()",
"def load_movies_details():\n data = pd.read_csv(CSV_PATH + MOVIES_DETAILS_CSV_NAME).T.to_dict()\n #Insert directors to database\n for i in range(len(data)):\n director_name = data[i]['Director']\n director_exist = Director.query.filter(Director.name == director_name).first()\n if not director_exist:\n db.session.add(Director(name=director_name))\n db.session.commit()\n #Insert movies to database\n title = data[i]['Title']\n year = data[i]['Year']\n summary = data[i]['Movie summary']\n director = Director.query.filter(Director.name == director_name).first()\n new_movie = Movies(title=title, year=year, director=director.id, summary=summary)\n movie_exist = Movies.query.filter(Movies.title == new_movie.title and Movies.year == new_movie.year and Movies.director == new_movie.director).first()\n if not movie_exist:\n db.session.add(new_movie)\n db.session.commit()\n #Insert ranking to database\n date = datetime.date(datetime.now())\n movie_id = Movies.query.filter(Movies.title == title and Movies.year == year).first().id\n ranking_exist = Ranking.query.filter(Ranking.movie_id == movie_id, Ranking.date == date).first()\n if not ranking_exist:\n db.session.add(Ranking(movie_id=movie_id, ranking=data[i]['Ranking Position'], date=date))\n db.session.commit()\n db.session.close()\n db.session.close()",
"def main(api_key, output_file):\n tmdb.API_KEY = api_key\n \n # find 100 valid movie_id\n i=1\n movie_id=[]\n while (len(movie_id)<100): # keep trying until it works\n try: # only things that might cause HTTP error belong in the \"try\"\n movie = tmdb.Movies(i) # this is one request\n response=movie.info()\n reviews = movie.reviews()\n review_list = reviews['results']\n if (review_list!=[]):\n movie_id.append(i) # only append those with reviews\n i=i+1\n except requests.HTTPError:\n print(\"invalid movie id\")\n i=i+1\n time.sleep(0.3) # now we have a list of valid movie id\n \n f= open(output_file,\"w+\",encoding=\"utf-8\")\n i=0\n for i in range(0,len(movie_id)):\n while True: # keep trying until it works\n try: # only things that might cause HTTP error belong in the \"try\"\n movie = tmdb.Movies(movie_id[i]) # this is one request\n reviews = movie.reviews() # one request\n break\n except requests.HTTPError:\n print(\"HTTPError, waiting 3 seconds\")\n time.sleep(0.3)\n review_list = reviews['results']\n for review_dict in review_list:\n auth = review_dict['author']\n text = review_dict['content']\n text=re.sub(r'\\n',' ',text)\n text=re.sub(r'\\t',' ',text)\n text=re.sub(r'\\r',' ',text)\n f.write(f\"{movie_id[i]}\\t{auth}\\t{text}\\n\")\n i=i+1\n f.close()",
"def crawl_by_file(file_path, verbose, year=None):\n results = {res_type : 0 for res_type in _result.ALL_TYPES}\n titles = _titles_from_file(file_path)\n if verbose:\n print(\"Crawling over all {} IMDB movies in {}...\".format(\n len(titles), file_path))\n movie_pbar = tqdm(titles, miniters=1, maxinterval=0.0001,\n mininterval=0.00000000001, total=len(titles))\n for title in movie_pbar:\n res = crawl_by_title(title, verbose, year, movie_pbar)\n results[res] += 1\n print(\"{} IMDB movie profiles crawled.\".format(len(titles)))\n for res_type in _result.ALL_TYPES:\n print('{} {}.'.format(results[res_type], res_type))",
"def get_movielens():\n\n filename = os.path.join(_download.LOCAL_CACHE_DIR, \"movielens_20m.hdf5\")\n if not os.path.isfile(filename):\n log.info(\"Downloading dataset to '%s'\", filename)\n _download.download_file(URL, filename)\n else:\n log.info(\"Using cached dataset at '%s'\", filename)\n\n with h5py.File(filename, 'r') as f:\n m = f.get('movie_user_ratings')\n plays = csr_matrix((m.get('data'), m.get('indices'), m.get('indptr')))\n return np.array(f['movie']), plays",
"def query_all_movies():\n result = session.query(Movie).all()\n print(\"total movies: %s\" % len(result))\n for movie in result:\n print(\"movie poster: %s\" % movie.poster)\n print(\"%s trailer:%s genre:%s user_id:%s\" %\n (movie.name, movie.trailer_url, movie.genre, movie.user_id))\n print(\"-------------------------------------------------\")",
"def extract_movies(dom):\n\n # extract data per movie\n movies = dom.find_all('div', class_ = 'lister-item mode-advanced')\n\n # list to store scraped data\n movielist = []\n\n for movie in movies:\n\n # append extracted data to this dict\n moviedict = {}\n\n # scrape titles and add to dict\n moviedict['title'] = movie.h3.a.text\n\n # scrape ratings and add to dict\n moviedict['rating'] = float(movie.strong.text)\n\n # scrape year of release and add to dict\n year = movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')\n moviedict['year'] = re.findall('\\d+', year.text.strip('()'))[0]\n\n # scrape actors and add to dict\n actors = movie.find_all(href=re.compile(\"adv_li_st\"))\n actorlist = []\n for actor in actors:\n actorlist.append(actor.text)\n actorstring = ', '.join(actorlist)\n moviedict['actors'] = actorstring\n\n # scrape runtime and add to dict\n moviedict['runtime'] = movie.p.find('span', class_ = 'runtime').text.split(' ')[0]\n movielist.append(moviedict)\n\n\n # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT.\n\n return movielist # REPLACE THIS LINE AS WELL IF APPROPRIATE",
"def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )",
"def movies(catalog):\n movies_url = list()\n errors = 0\n for ix, critic_profile in enumerate(catalog, 1):\n try:\n checker = fetch(website, critic_profile).find_all(\"h2\", {\"class\": \"panel-heading js-review-type\"})\n if len(checker) > 0:\n if checker[0].text == \"Movie Reviews Only\":\n for td in fetch(website, critic_profile).find_all(\"td\",\n {\"class\": \"col-xs-12 col-sm-6 critic-review-table__title-column\"}):\n for a in td.find_all(\"a\"):\n if a['href'] not in movies_url:\n movies_url.append(a['href'])\n except:\n errors += 1\n # print('\\r2/4 — {:.2%} of movie URLs scraped. Error rate: {:.2%}'.format(ix/len(catalog),\n # errors/ix), end=' ')\n # print('\\r{} movie URLs successfully scraped. Error rate: {:.2%}'.format(len(movies_url)-errors, errors/ix), end='\\n')\n return movies_url",
"def load_movies():\n filepath = \"./seed_data/u.item\"\n movies = open(filepath)\n\n for movie in movies:\n movie = movie.rstrip().split('|')\n title = movie[1][:-7]\n title = title.decode(\"latin-1\")\n if movie[2]:\n date = datetime.strptime(movie[2], '%d-%b-%Y')\n else:\n date = None\n db_movie = Movie(\n movie_id = movie[0], title = title, \n released_at = date, imdb_url = movie[4])\n db.session.add(db_movie)\n\n db.session.commit()",
"def scrape_movie_page(dom):\n # to save the information\n info = []\n\n # find the information block needed\n header = dom.find(\"div\", \"title_wrapper\")\n\n # find the title and strip the string\n name_dom = header.h1.get_text().encode(\"utf-8\")\n name = str(name_dom)[2:-16]\n info.append(name)\n\n # find the year and strip the year\n year_dom = header.h1.span.get_text().encode(\"utf-8\")\n year = str(year_dom)[3:-2]\n info.append(year)\n\n # find the duration and strip the string\n duration_dom = dom.find(\"time\", itemprop=\"duration\").get_text().encode(\"utf-8\")\n duration = str(duration_dom)[28:-23]\n info.append(duration)\n\n # find all the genres and strip the string\n genre_dom = dom.find(\"div\", itemprop=\"genre\").a.get_text().encode(\"utf-8\")\n genre = find_genres(genre_dom, dom)\n info.append(genre)\n\n # find all the directors and strip the string\n director_dom = dom.find(\"span\", itemprop=\"director\").get_text().encode(\"utf-8\")\n director = find_directors(director_dom, dom)\n info.append(director)\n\n # find all the writers and strip the string\n writer_dom = dom.find(\"span\", itemprop=\"creator\").a.get_text().encode(\"utf-8\")\n writer = find_writers(writer_dom, dom)\n info.append(writer)\n\n # find all the actors and strip the string\n actor_dom = dom.find(\"span\", itemprop=\"actors\").a.get_text().encode(\"utf-8\")\n actor = find_actors(actor_dom, dom)\n info.append(actor)\n\n # find the rating and strip the string\n rating_dom = dom.find(\"span\", itemprop=\"ratingValue\").get_text().encode(\"utf-8\")\n rating = str(rating_dom)[2:-1]\n info.append(rating)\n\n # find the number of ratings and strip the string\n number_ratings_dom = dom.find(\"span\", itemprop=\"ratingCount\").get_text().encode(\"utf-8\")\n number_ratings = str(number_ratings_dom)[2:-1]\n info.append(number_ratings)\n\n return info",
"def getTopMovies(endpoint, date, count=10):\n\n try:\n response = urlreq.urlopen(endpoint.format(date))\n soup = BeautifulSoup(response.read(), \"html.parser\")\n table = soup.find('table', border=\"0\", cellpadding=\"5\", cellspacing=\"1\")\n tdata = []\n\n for i, row in enumerate(table.find_all('tr')[1:], start=1):\n if i > count:\n break\n\n cells = row.find_all('td')\n tdict = {}\n\n tdict['rank'] = i\n tdict['title'] = cells[2].text.strip()\n tdict['daily_gross'] = int(re.sub(r'[^\\d]', '', cells[4].text))\n tdict['theaters'] = int(re.sub(r'[^\\d]', '', cells[7].text))\n tdict['todate_gross'] = int(re.sub(r'[^\\d]', '', cells[9].text))\n tdict['release_day'] = int(cells[10].text)\n\n tdata.append(tdict)\n\n tdata = pd.DataFrame(tdata)\n tdata['gross_date'] = date\n return tdata\n\n except urlerr.URLError as err:\n print(\"\\nThere was an error retrieving daily revenue information\")\n print(err)\n return None\n except Exception:\n print(\"\\nThere's something wrong with the BOMojo daily revenue page\")\n return None",
"def _read_dataframes_20M(path):\n import pandas\n\n ratings = pandas.read_csv(os.path.join(path, \"ratings.csv\"))\n movies = pandas.read_csv(os.path.join(path, \"movies.csv\"))\n\n return ratings, movies",
"def top_ten_movies(path):\n content = open(path, \"r\")\n topten = []\n for x in content:\n topten.append(x) \n return topten",
"def load_movies():\n print \"Movies\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Movie.query.delete()\n\n # Read u.item file and insert data\n for row in open(\"seed_data/u.item\"):\n row =row.rstrip()\n\n movie_id, title_long, released_string, imdb_url = row.split(\"|\")[:4]\n #we modified the datetime format changed released_string into \n #new format by using datetim.strptime to convert it. \n print row\n if released_string: \n release_at = datetime.strptime(released_string, \"%d-%b-%Y\")\n else: \n release_at = None \n\n #here we stripped the title of the (xxxx) year and parenthesis\n #using the slice method. \n title = title_long[:-7]\n\n print movie_id, title_long, released_string, imdb_url\n\n #assign the return values from our for loop to a new variable\n movie = Movie(movie_id=movie_id, title=title, released_at=release_at,\n imdb_url=imdb_url)\n \n # We need to add to the session or it won't ever be stored\n db.session.add(movie)\n\n #Once we're done, we should commit our work\n db.session.commit()"
] |
[
"0.69534105",
"0.6842434",
"0.67165273",
"0.6698219",
"0.6382748",
"0.6283574",
"0.62063265",
"0.6099753",
"0.6017572",
"0.599738",
"0.59611505",
"0.5958461",
"0.59397745",
"0.59288883",
"0.5927518",
"0.5907681",
"0.5907066",
"0.58562887",
"0.5839268",
"0.5821602",
"0.5808227",
"0.5788526",
"0.57557243",
"0.5693522",
"0.56858486",
"0.56588304",
"0.5653675",
"0.564636",
"0.5640401",
"0.5609768"
] |
0.68514156
|
1
|
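The main() document in the row above calls a save_csv helper that is not shown in the cell. Here is a hedged sketch of what such a helper might look like, with the column names borrowed from the save_csv candidates in the negatives column; both the signature and the field order are assumptions.

```python
import csv

def save_csv(outpath, movies):
    """Write scraped movie rows to a CSV file (hypothetical helper)."""
    # Field names are an assumption taken from the candidate snippets above;
    # the real helper used by main() may differ.
    fieldnames = ["title", "rating", "year", "actors", "runtime"]
    with open(outpath, "w", newline="") as outfile:
        writer = csv.writer(outfile)
        writer.writerow(fieldnames)
        writer.writerows(movies)
```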
Scrape the IMDB top 250 movies index page.
|
def scrape_top_250(soup):
# to save the urls
movie_urls = []
# take the part where all the movies are
content = soup.find("tbody", "lister-list").find_all("tr")
# for every movie take the absolute url
for title in content:
url = "http://www.imdb.com" + title.find("td", "titleColumn").a["href"]
movie_urls.append(url)
return movie_urls
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_imdb_movies(max_movies: int):\n from imdb.database import index_movie_to_database\n movie_count = 0\n for url in crawl_imdb_listing_page(TOP_PAGE):\n if movie_count == max_movies:\n break\n movie_data = parse_movie_page(url)\n if index_movie_to_database(movie_data):\n movie_count += 1",
"def get_top_1000_movie_links():\n movie_links = []\n for start in range (1, 1000, 50):\n imdb_movie_list_page = requests.get(f'https://www.imdb.com/search/title/?groups=top_1000&view=simple&sort=user_rating,desc&start={start}')\n soup = BeautifulSoup(imdb_movie_list_page.text, 'html.parser')\n\n movie_list_div = soup.find('div', attrs={'class': 'lister list detail sub-list'})\n movie_item_spans = movie_list_div.find_all('span', attrs={'class':'lister-item-header'})\n links = [item.find('a').attrs['href'] for item in movie_item_spans]\n\n movie_links += links\n\n return [f'https://www.imdb.com{l}fullcredits/' for l in movie_links]",
"def movie_spider(self, movieTag):\n index = 0\n logging.info(\"Start crawling tag: %s\" % movieTag)\n while index < self.MAX_NUM:\n root = \"https://movie.douban.com/tag/%s?start=%d&type=T\" % (movieTag, index)\n result = {}\n try:\n html = requests.get(root, headers=random.choice(self.headers)).content\n tree = etree.HTML(html.decode('utf-8'))\n items = tree.xpath(\"//table/tr[@class='item']\")\n if len(items) == 0:\n break\n index += len(items)\n for item in items:\n itemURL = item.xpath(\"td/a[@class='nbg']/@href\")[0].strip()\n itemHTML = requests.get(itemURL, headers=random.choice(self.headers)).content\n itemTree = etree.HTML(itemHTML.decode('utf-8'))\n title = itemTree.xpath(\"//h1/span[@property='v:itemreviewed']/text()\")[0].strip()\n info = itemTree.xpath(\"//div[@class='subject clearfix']/div[@id='info']\")[0]\n director = info.xpath(\".//a[@rel='v:directedBy']/text()\")\n scriptor = info.xpath(\"span\")[1].xpath(\"span/a/text()\") # scriptor is not well formatted\n actors = info.xpath(\".//a[@rel='v:starring']/text()\")\n genre = info.xpath(\".//span[@property='v:genre']/text()\")\n initDate = info.xpath(\".//span[@property='v:initialReleaseDate']/text()\")\n runtime = info.xpath(\".//span[@property='v:runtime']/text()\")\n rating = itemTree.xpath(\"//strong[@property='v:average']/text()\")[0].strip()\n \n result['title'] = title\n result['director'] = '/'.join(director[:])\n result['scriptor'] = '/'.join(scriptor[:])\n result['actors'] = '/'.join(actors[:])\n result['genre'] = '/'.join(genre[:])\n result['initDate'] = '/'.join(initDate[:])\n result['runtime'] = '/'.join(runtime[:])\n result['rating'] = rating\n\n self._movie_list.append(result)\n result = {}\n\n except Exception as e:\n logging.exception(\"Error while crawling tag: %s\" % movieTag)",
"def parse_top_movies(html: str) -> ResultSet:\n\n soup = BeautifulSoup(html, \"html.parser\")\n return soup.find_all(\"div\", class_=\"lister-item-content\")",
"def scrape_top_250(self):\n\n page = requests.get(self.url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(class_=\"lister-list\")\n films = results.find_all(\"tr\")\n page.close()\n\n film_list = []\n for film in films:\n film_list.append(film)\n\n return film_list",
"def main():\n\n # Create a directory to store copies of all the relevant HTML files (those\n # will be used in testing).\n print('Setting up backup dir if needed ...')\n create_dir(BACKUP_DIR)\n\n # Make backup of the IMDB top 250 movies page\n print('Access top 250 page, making backup ...')\n top_250_html = simple_get(TOP_250_URL)\n top_250_dom = BeautifulSoup(top_250_html, \"lxml\")\n\n make_backup(os.path.join(BACKUP_DIR, 'index.html'), top_250_html)\n\n # extract the top 250 movies\n print('Scraping top 250 page ...')\n url_strings = scrape_top_250(top_250_dom)\n\n # grab all relevant information from the 250 movie web pages\n rows = []\n for i, url in enumerate(url_strings): # Enumerate, a great Python trick!\n print('Scraping movie %d ...' % i)\n\n # Grab web page\n movie_html = simple_get(url)\n\n # Extract relevant information for each movie\n movie_dom = BeautifulSoup(movie_html, \"lxml\")\n rows.append(scrape_movie_page(movie_dom))\n\n # Save one of the IMDB's movie pages (for testing)\n if i == 83:\n html_file = os.path.join(BACKUP_DIR, 'movie-%03d.html' % i)\n make_backup(html_file, movie_html)\n\n # Save a CSV file with the relevant information for the top 250 movies.\n print('Saving CSV ...')\n save_csv(os.path.join(SCRIPT_DIR, 'top250movies.csv'), rows)",
"def get_movies(iurl):\n movies = []\n \n if iurl[-3:] == '?s=':\n search_text = GetSearchQuery('WatchOnlineMovies')\n search_text = urllib.quote_plus(search_text)\n iurl += search_text\n\n html = requests.get(iurl, headers=mozhdr).text\n mlink = SoupStrainer('div', {'class':re.compile('postbox')})\n items = BeautifulSoup(html, parseOnlyThese=mlink)\n plink = SoupStrainer('div', {'class':'wp-pagenavi'})\n Paginator = BeautifulSoup(html, parseOnlyThese=plink)\n\n for item in items:\n title1 = item.h2.text\n try:\n title2 = title1.replace(\"Full Movie\", \"\")\n except:\n title2 = title1.replace(\"Watch Online\", \"\")\n try:\n title3 = title2.replace(\"Watch Online Placeholdernt\", \"\")\n except:\n title3 = title2.replace(\".\", \"\")\n try:\n title4 = title3.replace(\".\", \"\")\n except:\n title4 = title3.replace(\"Watch Online Placeholder\",\"\")\n try:\n title5 = title4.replace(\"Watch Online\", \"\")\n except:\n title5 = title4.replace(\"Download\",\"\")\n try:\n title6 = title5.replace(\"Watch Onlin\", \"\")\n except:\n title6 = title5.replace(\"Placeholder\",\"\")\n try:\n title7 = title6.replace(\"HD Pri\", \"\")\n except:\n title7 = title6.replace(\"Placeholder\",\"\")\n try:\n title8 = title7.replace(\" Watch On\", \"\")\n except:\n title8 = title7.replace(\"Placeholder\",\"\")\n try:\n title9 = title8.replace(\" Watch\", \"\")\n except:\n title9 = title8.replace(\"Placeholder\",\"\")\n try:\n title10 = title9.replace(\"Free Down\", \"\")\n except:\n title10 = title9.replace(\"Placeholder\",\"\")\n try:\n title11 = title10.replace(\"Free D\", \"\")\n except:\n title11 = title10.replace(\"Placeholder\",\"\")\n try:\n title12 = title11.replace(\"Free\", \"\")\n except:\n title12 = title11.replace(\"Placeholder\",\"\")\n try:\n title13 = title12.replace(\" F\", \"\")\n except:\n title13 = title12.replace(\"Placeholder\",\"\")\n try:\n title14 = title13.replace(\" Fr\", \"\")\n except:\n title14 = title13.replace(\"Placeholder\",\"\")\n try:\n title15 = title14.replace(\" Fre\", \"\")\n except:\n title15 = title14.replace(\"Placeholder\",\"\")\n try:\n title16 = title15.replace(\" HD\", \"\")\n except:\n title16 = title15.replace(\"Placeholder\",\"\")\n try:\n title17 = title16.replace(\" H\", \"\")\n except:\n title17 = title16.replace(\"Placeholder\",\"\")\n try:\n title18 = title17.replace(\" HD P\", \"\")\n except:\n title18 = title17.replace(\"Placeholder\",\"\")\n try:\n title19 = title18.replace(\" re\", \"\")\n except:\n title19 = title18.replace(\"Placeholder\",\"\")\n try:\n title120 = title19.replace(\" r\", \"\")\n except:\n title120 = title19.replace(\"Placeholder\",\"\")\n # Coloring Years\n try:\n title21 = title120.replace(\"(2018)\", \"[COLOR yellow](2018)[/COLOR]\")\n except:\n title21 = title120.replace(\"Placeholder\",\"\")\n try:\n title22 = title21.replace(\"(2016)\", \"[COLOR lightsalmon](2016)[/COLOR]\")\n except:\n title22 = title21.replace(\"Placeholder\",\"\")\n try:\n title23 = title22.replace(\"(2015)\", \"[COLOR lime](2016)[/COLOR]\")\n except:\n title23 = title22.replace(\"Placeholder\",\"\")\n # Language\n try:\n title24 = title23.replace(\"Hindi\", \"[COLOR green]Hindi[/COLOR]\")\n except:\n title24 = title23.replace(\"Placeholder\",\"\")\n try:\n title25 = title24.replace(\"Dubbed\", \"[COLOR cyan]Dubbed[/COLOR]\")\n except:\n title25 = title24.replace(\"Placeholder\",\"\")\n\n # Continued\n try:\n title26 = title25.replace(\" nt o\", \"\")\n except:\n title26 = title25.replace(\"Placeholder\",\"\")\n try:\n title27 = title26.replace(\" nt F\", 
\"\")\n except:\n title27 = title26.replace(\"Placeholder\",\"\")\n try:\n title28 = title27.replace(\" nt\", \"\")\n except:\n title28 = title27.replace(\"Placeholder\",\"\")\n try:\n title = title28.replace(\" Pr\", \"\")\n except:\n title = title28.replace(\"Placeholder\",\"\")\n\n url = item.h2.find('a')['href']\n try:\n thumb = item.find('img')['src'].strip()\n except:\n thumb = _icon\n movies.append((title, thumb, url))\n \n if 'next' in str(Paginator):\n\n nextli = Paginator.find('a', {'class':re.compile('page larger')})\n\n purl = nextli.get('href')\n pages = Paginator.findAll('span', {'class':re.compile('pages')})\n lastpg = pages[len(pages)-1].text\n title = 'Next Page.. (Currently in %s)' % (lastpg)\n movies.append((title, _icon, purl))\n \n return movies",
"def getTopMovies(endpoint, date, count=10):\n\n try:\n response = urlreq.urlopen(endpoint.format(date))\n soup = BeautifulSoup(response.read(), \"html.parser\")\n table = soup.find('table', border=\"0\", cellpadding=\"5\", cellspacing=\"1\")\n tdata = []\n\n for i, row in enumerate(table.find_all('tr')[1:], start=1):\n if i > count:\n break\n\n cells = row.find_all('td')\n tdict = {}\n\n tdict['rank'] = i\n tdict['title'] = cells[2].text.strip()\n tdict['daily_gross'] = int(re.sub(r'[^\\d]', '', cells[4].text))\n tdict['theaters'] = int(re.sub(r'[^\\d]', '', cells[7].text))\n tdict['todate_gross'] = int(re.sub(r'[^\\d]', '', cells[9].text))\n tdict['release_day'] = int(cells[10].text)\n\n tdata.append(tdict)\n\n tdata = pd.DataFrame(tdata)\n tdata['gross_date'] = date\n return tdata\n\n except urlerr.URLError as err:\n print(\"\\nThere was an error retrieving daily revenue information\")\n print(err)\n return None\n except Exception:\n print(\"\\nThere's something wrong with the BOMojo daily revenue page\")\n return None",
"def top_controversial(self, n):\n return top_movies",
"def scrape_movie(url):\n soup = get_soup(url)\n\n if soup:\n # scrape all of the sections\n soup_sections = soup.find('section', {'class': 'article listo content-advisories-index'})\n\n # scrape for the specific sections required\n soup_certificates = soup_sections.find('section', {'id': 'certificates'})\n soup_nudity = soup_sections.find('section', {'id': 'advisory-nudity'})\n soup_profanity = soup_sections.find('section', {'id': 'advisory-profanity'})\n\n # further scrape the sections above\n ratings = parse_certificates(soup_certificates)\n nudity, nudity_comments = parse_section(soup_nudity)\n profanity, profanity_comments = parse_section(soup_profanity)\n\n # here is where we actually format and show the results\n display_ratings(ratings)\n display_section('nudity', nudity, nudity_comments)\n display_section('profanity', profanity, profanity_comments)\n else:\n display_error()",
"def getMoviesInfor(pages=1, proxy=1):\n\n moiveList = []\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.98 Safari/537.36'}\n proxies = None\n if proxy == 1:\n proxies = get_proxy.get_workable_ip()\n if proxies is None:\n print(\"Didn't find a workable ip, spider will use your true ip to get data.\")\n\n session = requests.Session()\n\n # Get movies.\n try:\n for i in range(0, pages):\n r = session.get(\n \"https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&\"\n \"page_limit=20&page_start=\" + str(i * 20),\n headers=headers, proxies=proxies)\n jsondatum = json.loads(r.text)\n for movie in jsondatum['subjects']:\n moiveList.append(\n {'title': movie['title'], 'rate': movie['rate'], 'id': int(movie['id'])})\n # print(moiveList)\n # print(len(moiveList))\n except AttributeError as e:\n print(\"Limited by website, please change your proxy.爬虫好像受到网站的限制,请更换代理。\")\n\n return moiveList",
"def check_ratings(self):\n\n self.browser.get('https://www.imdb.com/')\n\n for title in self.titles:\n input_bar = self.browser.find_element_by_id('navbar-query')\n input_bar.clear()\n\n input_bar.send_keys(title)\n input_bar.send_keys(Keys.RETURN)\n\n time.sleep(3)\n\n # Click on the first suggestion\n css_selector = \"div.findSection:nth-child(3) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(2) > a:nth-child(1)\"\n self.browser.find_element_by_css_selector(css_selector).click()\n time.sleep(3)\n\n # Pull details that will always be available\n score = str(self.browser.find_element_by_class_name('ratingValue').text)\n score = score.split('/10')[0].replace(',', '.')\n\n time.sleep(3)\n\n summary = str(self.browser.find_element_by_class_name('summary_text').text)\n subtext = str(self.browser.find_element_by_class_name('subtext').text)\n\n # Pull details that differ between movies and series\n try:\n duration = str(self.browser.find_element_by_class_name('bp_sub_heading').text) # Only for series\n if 'episodes' not in duration:\n duration = 'Some episodes'\n except Exception:\n # bp_sub_heading won't be found on a movie page\n duration = 'movie'\n\n if subtext[0].isdigit():\n # Split up the details from the subtext\n subtext_list = subtext.split(' | ')\n else:\n # Some movies' subtext starts with 'R' / 'PG-13'\n subtext_list = subtext.split(' | ')\n del subtext_list[0]\n\n # Duration\n if duration == 'movie':\n show_type = 'Movie'\n duration = subtext_list[0]\n try:\n year = datetime.datetime.strptime(subtext_list[2].split(' (')[0], '%d %B %Y').strftime('%Y')\n except ValueError:\n year = str(subtext_list[2].split(' (')[0][-4:])\n\n else: # series\n show_type = 'Serie'\n # Retrieve last season and its release date\n season_tab = str(self.browser.find_element_by_class_name('seasons-and-year-nav').text).strip()\n\n numbers = re.findall('[0-9]+', season_tab)\n latest_season = int(numbers[0])\n latest_year = int(max(numbers, key=lambda x: int(x)))\n\n duration += ' (%d Seasons in %d), %s per episode' % (latest_season, latest_year, subtext_list[0])\n\n year = re.findall('[0-9]+', subtext_list[2])[0]\n\n # Pull some more data out from the subtext\n genres = subtext_list[1].split(', ')\n\n # Pull details that are not always available\n creds_list = []\n creds = self.browser.find_elements_by_class_name('credit_summary_item')\n for c in creds:\n temp = str(c.text)\n if '|' in temp:\n temp = temp.split('|')[0]\n\n creds_list.append(temp)\n\n self.data_dict[title] = {\n 'score': score,\n 'summary': summary,\n 'duration': duration,\n 'credits': creds_list,\n 'genres': genres,\n 'released': year,\n 'type': show_type,\n }",
"def get_top_movies(genre: Genre) -> List[Movie]:\n\n html = download_top_movies(genre)\n soup = parse_top_movies(html)\n return [extract_movie(result) for result in soup]",
"def download_data(self):\n content = requests.get(self.TOP_250_LIST)\n soup = BeautifulSoup(content.content, 'lxml')\n movies = soup.select('tbody.lister-list tr')\n for m in movies:\n title_column = m.select('td.titleColumn')\n link = self.format_link(title_column[0].a['href'])\n title = self.format_title(title_column[0].a.string.encode('utf-8'))\n path = 'pages/{}.html'.format(title)\n if os.path.isfile(path):\n continue\n response = requests.get(link)\n with open(path, 'wr') as f:\n f.write(response.content)",
"def run_search(term, imdb_page, debug = False):\n\n # confirm function call\n if debug:\n print(\"run_search()\")\n\n # scrub search term for imdb\n formatted_term = \"+\".join(term.split())\n\n # add page information to search term\n if imdb_page > 0:\n page_specifier = f\"&start={ (imdb_page * 50) + 1 }\"\n else:\n page_specifier = \"\"\n\n # get BeautifulSoup data for search term\n search_string = \"https://www.imdb.com/search/title?title=\" + formatted_term + \"&title_type=tv_series\" + page_specifier\n if debug:\n print(f\"search_string: {search_string}\")\n search_soup = bs4.BeautifulSoup(requests.get(search_string).text, features=\"html.parser\")\n\n #get max page\n if imdb_page < 1:\n\n # identify element that states range and number of results\n desc = search_soup.select(\".desc\")[0]\n span = desc.select(\"span\")[0].contents[0][0:-8]\n\n # get number of results\n if span[:8] == \"1-50 of \":\n span = span[8:]\n try:\n result_num = float(span)\n except:\n result_num = 0\n\n # calculate max_pages\n max_pages = int(ceil(result_num / 5))\n if debug:\n print(result_num)\n print(max_pages)\n\n else:\n max_pages = None;\n\n # get valid pages for no_results\n low = imdb_page * 10;\n high = low + 9\n page_range = [low, high]\n\n # cultivate return list\n links = search_soup.select(\"h3 > a\")\n\n if debug:\n print(links)\n\n search_results = []\n\n print(len(links))\n\n for i in range(len(links)):\n if debug:\n print(f\"result: {i}\")\n\n try:\n show_div = links[i]\n except:\n break\n s = (show_div.contents[0], show_div.get(\"href\"))\n search_results.append(s)\n\n if debug:\n print(f\"search results length: {len(search_results)}\")\n\n return {\"results\": search_results, \"max\": max_pages, \"range\": page_range}",
"def top_by_num_of_ratings(self, n):\n return top_movies",
"def extract_movies(dom):\n\n # extract data per movie\n movies = dom.find_all('div', class_ = 'lister-item mode-advanced')\n\n # list to store scraped data\n movielist = []\n\n for movie in movies:\n\n # append extracted data to this dict\n moviedict = {}\n\n # scrape titles and add to dict\n moviedict['title'] = movie.h3.a.text\n\n # scrape ratings and add to dict\n moviedict['rating'] = float(movie.strong.text)\n\n # scrape year of release and add to dict\n year = movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')\n moviedict['year'] = re.findall('\\d+', year.text.strip('()'))[0]\n\n # scrape actors and add to dict\n actors = movie.find_all(href=re.compile(\"adv_li_st\"))\n actorlist = []\n for actor in actors:\n actorlist.append(actor.text)\n actorstring = ', '.join(actorlist)\n moviedict['actors'] = actorstring\n\n # scrape runtime and add to dict\n moviedict['runtime'] = movie.p.find('span', class_ = 'runtime').text.split(' ')[0]\n movielist.append(moviedict)\n\n\n # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT.\n\n return movielist # REPLACE THIS LINE AS WELL IF APPROPRIATE",
"def main():\n the_movie_db = TheMovieDB(API_KEY)\n success, movies = the_movie_db.discover_movies()\n if not success:\n print \"something went wrong with the api, please check\"\n exit(1)\n\n fresh_potatoes = _convert_to_movies(the_movie_db, movies)\n fresh_tomatoes.open_movies_page(fresh_potatoes)",
"def run_scraping(max_actors, max_movies, starting_url):\n\tactors = []\n\tmovies = []\n\tseen_actors = set() # keep track of actor urls we have already read\n\tseen_movies = set() # keep track of movie urls we have already read\n\t# attempt to parse starting page as actor\n\tactor = parse_actor_page(starting_url)\n\t# parsing as actor was successful\n\tif actor != None:\n\t\tactors.append(actor)\n\t\tseen_actors.add(starting_url[24:])\n\t# parsing as actor was unsuccessful, try as a movie instead\n\telse:\n\t\tlogging.warning('' + starting_url + 'was unable to be parsed as an actor, will attempt as a movie')\n\t\tmovie = parse_movie_page(starting_url)\n\t\t# if we could not parse as movie either, just end\n\t\tif movie == None:\n\t\t\tlogging.error('' + starting_url + 'was unable to be parsed as a movie as well, application must terminate')\n\t\t\treturn\n\t\tmovies.append(movie)\n\t\tseen_movies.add(starting_url[24:])\n\n\tbaseurl = 'https://en.wikipedia.org' # all URLS in dictionaries are relative, need this.\n\ti = 0 # keep track where in actor list we currently are\n\tj = 0 # keep track where in movie list we currently are\n\t# continue scraping until enough actors and movies found\n\twhile len(actors) < max_actors or len(movies) < max_movies:\n\t\t# traverse over new actors (since last iteration)\n\t\tfor actor in actors[i:]:\n\t\t\t# if actor exists, attempt to read pages for movies they are in.\n\t\t\tif actor != None and len(movies) < max_movies + 1:\n\t\t\t\tfor movieurl in actor['movieurls'][-20:]:\n\t\t\t\t\t# if we already read this movie, skip it\n\t\t\t\t\tif movieurl in seen_movies:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tmovie = parse_movie_page(baseurl + movieurl)\n\t\t\t\t\tseen_movies.add(movieurl)\n\t\t\t\t\t# if parsing was successful add it\n\t\t\t\t\tif movie != None:\n\t\t\t\t\t\tmovies.append(movie)\n\t\t# update where in actor list we are\n\t\ti = max(0,len(actors) - 1)\n\t\t# traverse over new movies (since last iteration)\n\t\tfor movie in movies[j:]:\n\t\t\tif movie != None and len(actors) < max_actors +1:\n\t\t\t\tfor actorurl in movie['actorurls'][:20]:\n\t\t\t\t\t# if we already read this actor, skip it\n\t\t\t\t\tif actorurl in seen_actors:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tactor = parse_actor_page(baseurl + actorurl)\n\t\t\t\t\tseen_actors.add(actorurl)\n\t\t\t\t\t# if parsing was successful, add it\n\t\t\t\t\tif actor != None:\n\t\t\t\t\t\tactors.append(actor)\n\t\t# update current position in movie list\n\t\tj = max(0,len(movies) - 1)\n\n\t# convert python dictionaries to JSON and dump to file\n\twith open('data/actors_and_movies7.json', 'w') as f:\n\t\tjson.dump({'actors': actors, 'movies': movies}, f)",
"def scrape_movie_page(dom):\n # to save the information\n info = []\n\n # find the information block needed\n header = dom.find(\"div\", \"title_wrapper\")\n\n # find the title and strip the string\n name_dom = header.h1.get_text().encode(\"utf-8\")\n name = str(name_dom)[2:-16]\n info.append(name)\n\n # find the year and strip the year\n year_dom = header.h1.span.get_text().encode(\"utf-8\")\n year = str(year_dom)[3:-2]\n info.append(year)\n\n # find the duration and strip the string\n duration_dom = dom.find(\"time\", itemprop=\"duration\").get_text().encode(\"utf-8\")\n duration = str(duration_dom)[28:-23]\n info.append(duration)\n\n # find all the genres and strip the string\n genre_dom = dom.find(\"div\", itemprop=\"genre\").a.get_text().encode(\"utf-8\")\n genre = find_genres(genre_dom, dom)\n info.append(genre)\n\n # find all the directors and strip the string\n director_dom = dom.find(\"span\", itemprop=\"director\").get_text().encode(\"utf-8\")\n director = find_directors(director_dom, dom)\n info.append(director)\n\n # find all the writers and strip the string\n writer_dom = dom.find(\"span\", itemprop=\"creator\").a.get_text().encode(\"utf-8\")\n writer = find_writers(writer_dom, dom)\n info.append(writer)\n\n # find all the actors and strip the string\n actor_dom = dom.find(\"span\", itemprop=\"actors\").a.get_text().encode(\"utf-8\")\n actor = find_actors(actor_dom, dom)\n info.append(actor)\n\n # find the rating and strip the string\n rating_dom = dom.find(\"span\", itemprop=\"ratingValue\").get_text().encode(\"utf-8\")\n rating = str(rating_dom)[2:-1]\n info.append(rating)\n\n # find the number of ratings and strip the string\n number_ratings_dom = dom.find(\"span\", itemprop=\"ratingCount\").get_text().encode(\"utf-8\")\n number_ratings = str(number_ratings_dom)[2:-1]\n info.append(number_ratings)\n\n return info",
"def top_ten_movies(path):\n content = open(path, \"r\")\n topten = []\n for x in content:\n topten.append(x) \n return topten",
"def content_model(movie_list,top_n):\r\n # Vectorise content for each movie in list_title\r\n input_matrix = vectoriser.transform(movies[movies['title'].isin(movie_list)].content)\r\n \r\n # Initiate list to store indeces of input movies\r\n m_idx = []\r\n \r\n for title in movie_list:\r\n for id in movies.movieId[movies['title']==title]:\r\n m_idx.append(indices[id])\r\n \r\n # Create list of similarities between each input movie and every other movie in the dataset \r\n sim = list(enumerate(cosine_similarity(content_matrix,\r\n input_matrix))) \r\n\r\n # Sort the list by the average similarity of the movies\r\n sim_scores = sorted(sim, key=lambda x: x[1].mean(), reverse=True)\r\n \r\n # Select the top-k values for recommendation\r\n sim_scores = sim_scores[0:20]\r\n\r\n # Select the indices of the top-k movies\r\n movie_indices = [i[0] for i in sim_scores if i[0] not in m_idx]\r\n \r\n # Return a list of the movie titles\r\n return movies.iloc[movie_indices].title[:top_n]",
"def download_top_movies(genre: Genre) -> str:\n\n page_url = create_page_url(genre)\n response = requests.get(page_url)\n response.raise_for_status()\n return response.text",
"def crawl_imdb_listing_page(url: str) -> Generator:\n next_url = url\n while next_url:\n print(f\"CRAWLING URL {next_url}\")\n listing_page = get_soup_for_page(next_url)\n for movies_element in listing_page.find_all(\"div\", class_=\"lister-item mode-simple\"):\n a_link = movies_element.find(\"span\", class_=\"lister-item-header\").find(\"a\")\n yield IMDB_HOST + a_link[\"href\"]\n next_url = listing_page.find(\"a\", class_=\"lister-page-next next-page\")\n next_url = IMDB_HOST + next_url[\"href\"] if next_url else None",
"def top_by_ratings(self, n, metric=average):\n return top_movies",
"def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )",
"def top_ten(request):\n if request.method == 'GET':\n movies = Movie.objects.filter(date_of_release__lte=datetime.date.today())\n movies = movies.order_by('-rating')[:10]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)",
"def content_model(movie_list,top_n=10):\n # Initializing the empty list of recommended movies\n recommended_movies = []\n data = data_preprocessing(27000)\n # Instantiating and generating the count matrix\n count_vec = CountVectorizer()\n count_matrix = count_vec.fit_transform(data['keyWords'])\n indices = pd.Series(data['title'])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n # Getting the index of the movie that matches the title\n idx_1 = indices[indices == movie_list[0]].index[0]\n idx_2 = indices[indices == movie_list[1]].index[0]\n idx_3 = indices[indices == movie_list[2]].index[0]\n # Creating a Series with the similarity scores in descending order\n rank_1 = cosine_sim[idx_1]\n rank_2 = cosine_sim[idx_2]\n rank_3 = cosine_sim[idx_3]\n # Calculating the scores\n score_series_1 = pd.Series(rank_1).sort_values(ascending = False)\n score_series_2 = pd.Series(rank_2).sort_values(ascending = False)\n score_series_3 = pd.Series(rank_3).sort_values(ascending = False)\n # Getting the indexes of the 10 most similar movies\n listings = score_series_1.append(score_series_1).append(score_series_3).sort_values(ascending = False)\n\n # Store movie names\n recommended_movies = []\n # Appending the names of movies\n top_50_indexes = list(listings.iloc[1:50].index)\n # Removing chosen movies\n top_indexes = np.setdiff1d(top_50_indexes,[idx_1,idx_2,idx_3])\n for i in top_indexes[:top_n]:\n recommended_movies.append(list(movies['title'])[i])\n return recommended_movies",
"def query_all_movies():\n result = session.query(Movie).all()\n print(\"total movies: %s\" % len(result))\n for movie in result:\n print(\"movie poster: %s\" % movie.poster)\n print(\"%s trailer:%s genre:%s user_id:%s\" %\n (movie.name, movie.trailer_url, movie.genre, movie.user_id))\n print(\"-------------------------------------------------\")",
"def get_movie_info(page: str, verbose:bool = True):\n\n def add_scoreInfo(pattern, raw_text, keyName):\n \"\"\"inner helper function to help add score information\n :param pattern: pattern to match\n :param raw_text: html text\n :param keyName: key name to be append to the dict\n \"\"\"\n match_pat = re.search(pattern, raw_text)\n if match_pat is None:\n info[keyName] = None\n else:\n info[keyName] = match_pat.group(1)\n\n info = dict() \n \n # verbose option\n if verbose:\n print('scraping main page')\n print('scraping url: ' + page)\n \n # make soup\n soup = _make_soup(page)\n \n if soup == '':\n return None\n \n else:\n ### extraction ###\n # movie id\n movieId = soup.find('a', href=re.compile('movieId=[0-9]+'))\n if movieId is None:\n info['movie_link'] = None\n else:\n movieId = re.search('movieId=([0-9]+)$', movieId[\"href\"])\n info['movie_link'] = '/m/'+ movieId.group(1)\n \n movieInfo= soup.find('script', type=\"application/ld+json\")\n if movieInfo is None:\n print('No movie information for this movie.')\n else:\n # movie name\n movieName = re.search('\"name\":\"?(.+?)\"?,\"', movieInfo.get_text())\n if movieName is None:\n info['movie_name'] = None\n else:\n info['movie_name'] = movieName.group(1)\n \n # rating\n rating = re.search('\"contentRating\":\"?(.+?)\"?,\"',movieInfo.get_text())\n if rating is None:\n info['rating'] = None\n else:\n info['rating'] = rating.group(1)\n \n # genre \n genre = re.search('\"genre\":\\[\"(.+?)\"\\]', movieInfo.get_text())\n if genre is None:\n info['genre'] = None\n else:\n info['genre'] = genre.group(1).replace('\"','')\n \n # directors\n directors = re.search('\"director\":(.+?),\"author\"', movieInfo.get_text())\n if directors is None:\n info['directors'] = None\n else:\n info['directors'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', directors.group(1)))\n \n # writers\n writers = re.search('\"director\":.+?\"author\":(.+?),\"genre\"', movieInfo.get_text())\n if writers is None:\n info['writers'] = None\n else:\n info['writers'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', writers.group(1)))\n \n # movie synopsis\n movieSyno = soup.find('div', id=re.compile('movieSynopsis'))\n if movieSyno is None:\n info['movie_info'] = None\n else:\n info['movie_info'] = movieSyno.get_text().strip()\n \n # poster_image\n poster_img = soup.find('meta', property = re.compile('image$'))\n if poster_img is None:\n info['poster_image'] = None\n else:\n info['poster_image'] = poster_img[\"content\"]\n \n # cast\n casts = soup.find_all('div', class_=re.compile('^cast-item'))\n if casts is None:\n info['casts'] = None\n else:\n info['casts'] = ','.join([cast.find('span').get_text().strip() for cast in casts])\n \n # in_theaters_date\n in_theaters_date = soup.find('div', text=re.compile(\"In Theaters\"))\n if in_theaters_date is None:\n info['in_theaters_date'] = None\n else:\n info['in_theaters_date'] = in_theaters_date.find_next_sibling('div').find('time').get_text().strip()\n \n # on_streaming_date\n on_streaming_date = soup.find('div', text=re.compile(\"On Disc/Streaming:\"))\n if on_streaming_date is None:\n info['on_streaming_date'] = None\n else:\n info['on_streaming_date'] = on_streaming_date.find_next_sibling('div').find('time').get_text().strip()\n \n # runtime_in_minutes\n runtime_in_minutes = soup.find('div', text=re.compile(\"Runtime:\"))\n if runtime_in_minutes is None:\n info['runtime_in_minutes'] = None\n else:\n info['runtime_in_minutes'] = 
re.search('[0-9]+',runtime_in_minutes.find_next_sibling('div').find('time').get_text().strip()).group(0)\n # studio_name\n studio_name = soup.find('div', text=re.compile(\"Studio:\"))\n if studio_name is None:\n info['studio_name'] = None\n else:\n info['studio_name'] = studio_name.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n # Extra: box office\n box_office = soup.find('div', text=re.compile(\"Box Office:\"))\n if box_office is None:\n info['box_office'] = None\n else:\n info['box_office'] = box_office.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n scoreInfo = soup.find('script', type=\"text/javascript\")\n if scoreInfo is None:\n print('No score information for this movie.')\n else:\n pat_head1 = 'root.RottenTomatoes.context.scoreInfo.+?'\n pat_keywrd = '\"consensus\":'\n pat_tail1 = '\"?(.+?)\"?,\"'\n pat_tail2 = '\"?([0-9]+?)\"?,\"'\n pat_tail3 = '\"?([0-9\\.]+?)\"?,\"'\n # critics_consensus\n criticsCns_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(criticsCns_pat, scoreInfo.get_text(), 'critics_consensus')\n \n # tomatometer_status\n pat_keywrd ='\"tomatometerState\":'\n tmtStatus_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(tmtStatus_pat, scoreInfo.get_text(), 'tomatometer_status')\n\n # tomatometer_rating\n pat_keywrd = '\"score\":'\n tmtRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtRating_pat, scoreInfo.get_text(), 'tomatometer_rating')\n\n # tomatometer_count\n pat_keywrd ='\"numberOfReviews\":'\n tmtCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtCnt_pat, scoreInfo.get_text(), 'tomatometer_count')\n \n # audience_status\n audStatus_pat = 'root.RottenTomatoes.context.popcornMeterState.+?\"(.+?)\";'\n add_scoreInfo(audStatus_pat, scoreInfo.get_text(), 'audience_status')\n\n # Extra: audience_want_to_see\n audWantToSee_pat = 'root.RottenTomatoes.context.wantToSeeData.+?\"wantToSeeCount\":' + pat_tail2\n add_scoreInfo(audWantToSee_pat, scoreInfo.get_text(), 'audience_want_to_see_count')\n \n # audience_rating\n pat_keywrd = '\"audienceAll\".+?\"score\":'\n audRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRating_pat, scoreInfo.get_text(), 'audience_rating')\n\n # audience_count\n pat_keywrd = '\"audienceAll\".+?\"ratingCount\":'\n audCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audCnt_pat, scoreInfo.get_text(), 'audience_count')\n\n # audience_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"numberOfReviews\":'\n audTopCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audTopCritics_pat, scoreInfo.get_text(), 'audience_top_critics_count')\n \n # audience_fresh_critics_count\n pat_keywrd = '\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_critics_count')\n \n # audience_rotten_critics_count\n pat_keywrd = '\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_critics_count')\n\n # Extra: audience_fresh_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_top_critics_count')\n\n # Extra: audience_rotten_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n 
add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_rotten_critics_count')\n \n # Extra: tomatometer_avg_rating\n pat_keywrd = '\"avgScore\":'\n tmtAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(tmtAvgRating_pat, scoreInfo.get_text(), 'tomatometer_avg_rating')\n\n # Extra: audience_top_critics_avg_rating\n pat_keywrd = '\"tomatometerTopCritics\".+?\"avgScore\":'\n audTopCriticsAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audTopCriticsAvgRating_pat, scoreInfo.get_text(), 'audience_top_critics_avg_rating')\n\n # Extra: Score Sentiment\n pat_keywrd = '\"scoreSentiment\":'\n scoreSentiment_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(scoreSentiment_pat, scoreInfo.get_text(), 'score_sentiment')\n\n # Extra: audience_avg_rating\n pat_keywrd = '\"averageRating\":'\n audienceAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audienceAvgRating_pat, scoreInfo.get_text(), 'audience_avg_rating')\n print('done scraping movie info')\n return info"
] |
[
"0.76122737",
"0.666392",
"0.66290194",
"0.65015304",
"0.6491648",
"0.64602834",
"0.6427198",
"0.6267651",
"0.625287",
"0.60673034",
"0.601147",
"0.6007559",
"0.59912735",
"0.5976503",
"0.5973458",
"0.5959234",
"0.59561735",
"0.59081995",
"0.5846302",
"0.5815787",
"0.57971823",
"0.5787768",
"0.5771099",
"0.5762316",
"0.575711",
"0.57460195",
"0.5738727",
"0.5721568",
"0.5720274",
"0.56995535"
] |
0.73371065
|
1
|
Scrape the IMDB page for a single movie
|
def scrape_movie_page(dom):
# to save the information
info = []
# find the information block needed
header = dom.find("div", "title_wrapper")
# find the title and strip the string
name_dom = header.h1.get_text().encode("utf-8")
name = str(name_dom)[2:-16]
info.append(name)
# find the year and strip the year
year_dom = header.h1.span.get_text().encode("utf-8")
year = str(year_dom)[3:-2]
info.append(year)
# find the duration and strip the string
duration_dom = dom.find("time", itemprop="duration").get_text().encode("utf-8")
duration = str(duration_dom)[28:-23]
info.append(duration)
# find all the genres and strip the string
genre_dom = dom.find("div", itemprop="genre").a.get_text().encode("utf-8")
genre = find_genres(genre_dom, dom)
info.append(genre)
# find all the directors and strip the string
director_dom = dom.find("span", itemprop="director").get_text().encode("utf-8")
director = find_directors(director_dom, dom)
info.append(director)
# find all the writers and strip the string
writer_dom = dom.find("span", itemprop="creator").a.get_text().encode("utf-8")
writer = find_writers(writer_dom, dom)
info.append(writer)
# find all the actors and strip the string
actor_dom = dom.find("span", itemprop="actors").a.get_text().encode("utf-8")
actor = find_actors(actor_dom, dom)
info.append(actor)
# find the rating and strip the string
rating_dom = dom.find("span", itemprop="ratingValue").get_text().encode("utf-8")
rating = str(rating_dom)[2:-1]
info.append(rating)
# find the number of ratings and strip the string
number_ratings_dom = dom.find("span", itemprop="ratingCount").get_text().encode("utf-8")
number_ratings = str(number_ratings_dom)[2:-1]
info.append(number_ratings)
return info
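
A minimal driver sketch for the function above, assuming the requests, bs4 and csv libraries and that the movie page still exposes the old itemprop-based IMDB markup the parser expects; the URL argument, output path and helper name are hypothetical, not part of the original scraper.

import csv
import requests
from bs4 import BeautifulSoup

def save_movie_row(movie_url, out_path="movie.csv"):
    # fetch and parse the movie page into the dom the scraper expects
    html = requests.get(movie_url).text
    dom = BeautifulSoup(html, "lxml")
    # scrape_movie_page returns a flat list: name, year, duration, genre,
    # director, writer, actor, rating, number of ratings
    row = scrape_movie_page(dom)
    with open(out_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["title", "year", "runtime", "genre", "director",
                         "writer", "actor", "rating", "rating_count"])
        writer.writerow(row)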
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def movie_spider(self, movieTag):\n index = 0\n logging.info(\"Start crawling tag: %s\" % movieTag)\n while index < self.MAX_NUM:\n root = \"https://movie.douban.com/tag/%s?start=%d&type=T\" % (movieTag, index)\n result = {}\n try:\n html = requests.get(root, headers=random.choice(self.headers)).content\n tree = etree.HTML(html.decode('utf-8'))\n items = tree.xpath(\"//table/tr[@class='item']\")\n if len(items) == 0:\n break\n index += len(items)\n for item in items:\n itemURL = item.xpath(\"td/a[@class='nbg']/@href\")[0].strip()\n itemHTML = requests.get(itemURL, headers=random.choice(self.headers)).content\n itemTree = etree.HTML(itemHTML.decode('utf-8'))\n title = itemTree.xpath(\"//h1/span[@property='v:itemreviewed']/text()\")[0].strip()\n info = itemTree.xpath(\"//div[@class='subject clearfix']/div[@id='info']\")[0]\n director = info.xpath(\".//a[@rel='v:directedBy']/text()\")\n scriptor = info.xpath(\"span\")[1].xpath(\"span/a/text()\") # scriptor is not well formatted\n actors = info.xpath(\".//a[@rel='v:starring']/text()\")\n genre = info.xpath(\".//span[@property='v:genre']/text()\")\n initDate = info.xpath(\".//span[@property='v:initialReleaseDate']/text()\")\n runtime = info.xpath(\".//span[@property='v:runtime']/text()\")\n rating = itemTree.xpath(\"//strong[@property='v:average']/text()\")[0].strip()\n \n result['title'] = title\n result['director'] = '/'.join(director[:])\n result['scriptor'] = '/'.join(scriptor[:])\n result['actors'] = '/'.join(actors[:])\n result['genre'] = '/'.join(genre[:])\n result['initDate'] = '/'.join(initDate[:])\n result['runtime'] = '/'.join(runtime[:])\n result['rating'] = rating\n\n self._movie_list.append(result)\n result = {}\n\n except Exception as e:\n logging.exception(\"Error while crawling tag: %s\" % movieTag)",
"def get_movie_info(page: str, verbose:bool = True):\n\n def add_scoreInfo(pattern, raw_text, keyName):\n \"\"\"inner helper function to help add score information\n :param pattern: pattern to match\n :param raw_text: html text\n :param keyName: key name to be append to the dict\n \"\"\"\n match_pat = re.search(pattern, raw_text)\n if match_pat is None:\n info[keyName] = None\n else:\n info[keyName] = match_pat.group(1)\n\n info = dict() \n \n # verbose option\n if verbose:\n print('scraping main page')\n print('scraping url: ' + page)\n \n # make soup\n soup = _make_soup(page)\n \n if soup == '':\n return None\n \n else:\n ### extraction ###\n # movie id\n movieId = soup.find('a', href=re.compile('movieId=[0-9]+'))\n if movieId is None:\n info['movie_link'] = None\n else:\n movieId = re.search('movieId=([0-9]+)$', movieId[\"href\"])\n info['movie_link'] = '/m/'+ movieId.group(1)\n \n movieInfo= soup.find('script', type=\"application/ld+json\")\n if movieInfo is None:\n print('No movie information for this movie.')\n else:\n # movie name\n movieName = re.search('\"name\":\"?(.+?)\"?,\"', movieInfo.get_text())\n if movieName is None:\n info['movie_name'] = None\n else:\n info['movie_name'] = movieName.group(1)\n \n # rating\n rating = re.search('\"contentRating\":\"?(.+?)\"?,\"',movieInfo.get_text())\n if rating is None:\n info['rating'] = None\n else:\n info['rating'] = rating.group(1)\n \n # genre \n genre = re.search('\"genre\":\\[\"(.+?)\"\\]', movieInfo.get_text())\n if genre is None:\n info['genre'] = None\n else:\n info['genre'] = genre.group(1).replace('\"','')\n \n # directors\n directors = re.search('\"director\":(.+?),\"author\"', movieInfo.get_text())\n if directors is None:\n info['directors'] = None\n else:\n info['directors'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', directors.group(1)))\n \n # writers\n writers = re.search('\"director\":.+?\"author\":(.+?),\"genre\"', movieInfo.get_text())\n if writers is None:\n info['writers'] = None\n else:\n info['writers'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', writers.group(1)))\n \n # movie synopsis\n movieSyno = soup.find('div', id=re.compile('movieSynopsis'))\n if movieSyno is None:\n info['movie_info'] = None\n else:\n info['movie_info'] = movieSyno.get_text().strip()\n \n # poster_image\n poster_img = soup.find('meta', property = re.compile('image$'))\n if poster_img is None:\n info['poster_image'] = None\n else:\n info['poster_image'] = poster_img[\"content\"]\n \n # cast\n casts = soup.find_all('div', class_=re.compile('^cast-item'))\n if casts is None:\n info['casts'] = None\n else:\n info['casts'] = ','.join([cast.find('span').get_text().strip() for cast in casts])\n \n # in_theaters_date\n in_theaters_date = soup.find('div', text=re.compile(\"In Theaters\"))\n if in_theaters_date is None:\n info['in_theaters_date'] = None\n else:\n info['in_theaters_date'] = in_theaters_date.find_next_sibling('div').find('time').get_text().strip()\n \n # on_streaming_date\n on_streaming_date = soup.find('div', text=re.compile(\"On Disc/Streaming:\"))\n if on_streaming_date is None:\n info['on_streaming_date'] = None\n else:\n info['on_streaming_date'] = on_streaming_date.find_next_sibling('div').find('time').get_text().strip()\n \n # runtime_in_minutes\n runtime_in_minutes = soup.find('div', text=re.compile(\"Runtime:\"))\n if runtime_in_minutes is None:\n info['runtime_in_minutes'] = None\n else:\n info['runtime_in_minutes'] = 
re.search('[0-9]+',runtime_in_minutes.find_next_sibling('div').find('time').get_text().strip()).group(0)\n # studio_name\n studio_name = soup.find('div', text=re.compile(\"Studio:\"))\n if studio_name is None:\n info['studio_name'] = None\n else:\n info['studio_name'] = studio_name.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n # Extra: box office\n box_office = soup.find('div', text=re.compile(\"Box Office:\"))\n if box_office is None:\n info['box_office'] = None\n else:\n info['box_office'] = box_office.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n scoreInfo = soup.find('script', type=\"text/javascript\")\n if scoreInfo is None:\n print('No score information for this movie.')\n else:\n pat_head1 = 'root.RottenTomatoes.context.scoreInfo.+?'\n pat_keywrd = '\"consensus\":'\n pat_tail1 = '\"?(.+?)\"?,\"'\n pat_tail2 = '\"?([0-9]+?)\"?,\"'\n pat_tail3 = '\"?([0-9\\.]+?)\"?,\"'\n # critics_consensus\n criticsCns_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(criticsCns_pat, scoreInfo.get_text(), 'critics_consensus')\n \n # tomatometer_status\n pat_keywrd ='\"tomatometerState\":'\n tmtStatus_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(tmtStatus_pat, scoreInfo.get_text(), 'tomatometer_status')\n\n # tomatometer_rating\n pat_keywrd = '\"score\":'\n tmtRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtRating_pat, scoreInfo.get_text(), 'tomatometer_rating')\n\n # tomatometer_count\n pat_keywrd ='\"numberOfReviews\":'\n tmtCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtCnt_pat, scoreInfo.get_text(), 'tomatometer_count')\n \n # audience_status\n audStatus_pat = 'root.RottenTomatoes.context.popcornMeterState.+?\"(.+?)\";'\n add_scoreInfo(audStatus_pat, scoreInfo.get_text(), 'audience_status')\n\n # Extra: audience_want_to_see\n audWantToSee_pat = 'root.RottenTomatoes.context.wantToSeeData.+?\"wantToSeeCount\":' + pat_tail2\n add_scoreInfo(audWantToSee_pat, scoreInfo.get_text(), 'audience_want_to_see_count')\n \n # audience_rating\n pat_keywrd = '\"audienceAll\".+?\"score\":'\n audRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRating_pat, scoreInfo.get_text(), 'audience_rating')\n\n # audience_count\n pat_keywrd = '\"audienceAll\".+?\"ratingCount\":'\n audCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audCnt_pat, scoreInfo.get_text(), 'audience_count')\n\n # audience_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"numberOfReviews\":'\n audTopCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audTopCritics_pat, scoreInfo.get_text(), 'audience_top_critics_count')\n \n # audience_fresh_critics_count\n pat_keywrd = '\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_critics_count')\n \n # audience_rotten_critics_count\n pat_keywrd = '\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_critics_count')\n\n # Extra: audience_fresh_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_top_critics_count')\n\n # Extra: audience_rotten_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n 
add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_rotten_critics_count')\n \n # Extra: tomatometer_avg_rating\n pat_keywrd = '\"avgScore\":'\n tmtAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(tmtAvgRating_pat, scoreInfo.get_text(), 'tomatometer_avg_rating')\n\n # Extra: audience_top_critics_avg_rating\n pat_keywrd = '\"tomatometerTopCritics\".+?\"avgScore\":'\n audTopCriticsAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audTopCriticsAvgRating_pat, scoreInfo.get_text(), 'audience_top_critics_avg_rating')\n\n # Extra: Score Sentiment\n pat_keywrd = '\"scoreSentiment\":'\n scoreSentiment_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(scoreSentiment_pat, scoreInfo.get_text(), 'score_sentiment')\n\n # Extra: audience_avg_rating\n pat_keywrd = '\"averageRating\":'\n audienceAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audienceAvgRating_pat, scoreInfo.get_text(), 'audience_avg_rating')\n print('done scraping movie info')\n return info",
"def find_movie_from_api(imdb_id):\n url = \"http://www.omdbapi.com/?i=\" + imdb_id + \"&apikey=\" + API_KEY\n response = requests.request(\"GET\", url)\n data = json.loads(response.text)\n\n return data",
"def movie(response):\n\n response = response.json()\n\n if response.get(\"Error\"):\n raise NotFoundError(response[\"Error\"])\n\n if response[\"Type\"] != \"movie\":\n raise NotFoundError(\"Type is {}, should be movie\".format(response[\"Type\"]))\n\n return [OrderedDict([(\"Title\", response[\"Title\"]),\n (\"ID\", response[\"imdbID\"]),\n (\"Rating\", response[\"imdbRating\"]),\n (\"Year\", response[\"Year\"].split(u\"\\u2013\")[0])])]",
"def parse_movie(self, res):\n url = res.css(SELECTORS['MOVIE_URL'])\n obj = {\n 'id': int(url.re_first(r'[/]([0-9]{1,})[/]')),\n 'title': SelectHelper.get(res, SELECTORS['MOVIE_TITLE']),\n 'description': SelectHelper.get(res, SELECTORS['MOVIE_DESCRIPTION'])[12:-10],\n 'advisory': SelectHelper.get_array(res, SELECTORS['MOVIE_ADVISORY']),\n 'image': SelectHelper.get(res, SELECTORS['MOVIE_IMAGE']),\n 'url': BASE_URL + url.extract_first(),\n }\n return Movie(obj)",
"def parse_movie_page(movie_url: str) -> Dict[str, str]:\n movie_page = get_soup_for_page(movie_url)\n\n # title and id\n movie_id = movie_url.split(\"/\")[-2]\n title = movie_page.find(\"div\", class_=\"title_wrapper\").find(\"h1\").get_text(\";\", strip=True).split(\";\")[0]\n\n # director and stars\n credit_summary_elements = movie_page.find_all(\"div\", class_=\"credit_summary_item\")\n director = credit_summary_elements[0].find(\"a\").text if len(credit_summary_elements) > 0 else \"\"\n if len(credit_summary_elements) > 2:\n stars_links = credit_summary_elements[2].find_all(\"a\")\n stars = [str(elem.text) for elem in stars_links[:-1]]\n else:\n stars = []\n movie_data = {\n \"id\": movie_id,\n \"title\": title,\n \"director\": director,\n \"stars\": stars,\n }\n print(movie_data)\n return movie_data",
"def query_imdb(movie_title):\n base_url = \"http://omdbapi.com/?t=\" # Only submitting Title\n response = urllib.urlopen(base_url + movie_title)\n if response.getcode() == 200: # HTTP status is OK\n imdb_data = json.loads(response.read()) # Deserialize into dictionary\n return imdb_data\n else: # HTTP error\n return {\"Response\" : \"False\"}",
"def get_movie_details(self):\n\n if self.isValidURL(self.url) == False:\n return None\n url = self.formatURL(self.url)\n\n response = requests.get(url)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n film = self.getJson(soup)\n more = self.getAdditionalDetails(soup)\n self.setMovieDetails(film, soup)\n self.create_mov_dict()\n return self.film",
"def scrape_movie(url):\n soup = get_soup(url)\n\n if soup:\n # scrape all of the sections\n soup_sections = soup.find('section', {'class': 'article listo content-advisories-index'})\n\n # scrape for the specific sections required\n soup_certificates = soup_sections.find('section', {'id': 'certificates'})\n soup_nudity = soup_sections.find('section', {'id': 'advisory-nudity'})\n soup_profanity = soup_sections.find('section', {'id': 'advisory-profanity'})\n\n # further scrape the sections above\n ratings = parse_certificates(soup_certificates)\n nudity, nudity_comments = parse_section(soup_nudity)\n profanity, profanity_comments = parse_section(soup_profanity)\n\n # here is where we actually format and show the results\n display_ratings(ratings)\n display_section('nudity', nudity, nudity_comments)\n display_section('profanity', profanity, profanity_comments)\n else:\n display_error()",
"def extract_movies(dom):\n\n # extract data per movie\n movies = dom.find_all('div', class_ = 'lister-item mode-advanced')\n\n # list to store scraped data\n movielist = []\n\n for movie in movies:\n\n # append extracted data to this dict\n moviedict = {}\n\n # scrape titles and add to dict\n moviedict['title'] = movie.h3.a.text\n\n # scrape ratings and add to dict\n moviedict['rating'] = float(movie.strong.text)\n\n # scrape year of release and add to dict\n year = movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')\n moviedict['year'] = re.findall('\\d+', year.text.strip('()'))[0]\n\n # scrape actors and add to dict\n actors = movie.find_all(href=re.compile(\"adv_li_st\"))\n actorlist = []\n for actor in actors:\n actorlist.append(actor.text)\n actorstring = ', '.join(actorlist)\n moviedict['actors'] = actorstring\n\n # scrape runtime and add to dict\n moviedict['runtime'] = movie.p.find('span', class_ = 'runtime').text.split(' ')[0]\n movielist.append(moviedict)\n\n\n # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT.\n\n return movielist # REPLACE THIS LINE AS WELL IF APPROPRIATE",
"def get_movie_info(movie_url):\n # 指定電影資訊的 CSS 選擇器\n rating_css = \"strong span\"\n genre_css = \".subtext a\"\n poster_css = \".poster img\"\n cast_css = \".primary_photo+ td a\"\n \n movie_doc = pq(movie_url)\n # 擷取資訊\n rating_elem = movie_doc(rating_css)\n movie_rating = float(rating_elem.text())\n genre_elem = movie_doc(genre_css)\n movie_genre = [x.text.replace(\"\\n\", \"\").strip() for x in genre_elem]\n movie_genre.pop()\n movie_poster_elem = movie_doc(poster_css)\n movie_poster = movie_poster_elem.attr('src')\n movie_cast_elem = movie_doc(cast_css)\n movie_cast = [x.text.replace(\"\\n\", \"\").strip() for x in movie_cast_elem]\n \n # 回傳資訊\n movie_info = {\n \"rating\": movie_rating,\n \"genre\": movie_genre,\n \"poster\": movie_poster,\n \"cast\": movie_cast\n }\n return movie_info",
"def get_movies(iurl):\n movies = []\n \n if iurl[-3:] == '?s=':\n search_text = GetSearchQuery('WatchOnlineMovies')\n search_text = urllib.quote_plus(search_text)\n iurl += search_text\n\n html = requests.get(iurl, headers=mozhdr).text\n mlink = SoupStrainer('div', {'class':re.compile('postbox')})\n items = BeautifulSoup(html, parseOnlyThese=mlink)\n plink = SoupStrainer('div', {'class':'wp-pagenavi'})\n Paginator = BeautifulSoup(html, parseOnlyThese=plink)\n\n for item in items:\n title1 = item.h2.text\n try:\n title2 = title1.replace(\"Full Movie\", \"\")\n except:\n title2 = title1.replace(\"Watch Online\", \"\")\n try:\n title3 = title2.replace(\"Watch Online Placeholdernt\", \"\")\n except:\n title3 = title2.replace(\".\", \"\")\n try:\n title4 = title3.replace(\".\", \"\")\n except:\n title4 = title3.replace(\"Watch Online Placeholder\",\"\")\n try:\n title5 = title4.replace(\"Watch Online\", \"\")\n except:\n title5 = title4.replace(\"Download\",\"\")\n try:\n title6 = title5.replace(\"Watch Onlin\", \"\")\n except:\n title6 = title5.replace(\"Placeholder\",\"\")\n try:\n title7 = title6.replace(\"HD Pri\", \"\")\n except:\n title7 = title6.replace(\"Placeholder\",\"\")\n try:\n title8 = title7.replace(\" Watch On\", \"\")\n except:\n title8 = title7.replace(\"Placeholder\",\"\")\n try:\n title9 = title8.replace(\" Watch\", \"\")\n except:\n title9 = title8.replace(\"Placeholder\",\"\")\n try:\n title10 = title9.replace(\"Free Down\", \"\")\n except:\n title10 = title9.replace(\"Placeholder\",\"\")\n try:\n title11 = title10.replace(\"Free D\", \"\")\n except:\n title11 = title10.replace(\"Placeholder\",\"\")\n try:\n title12 = title11.replace(\"Free\", \"\")\n except:\n title12 = title11.replace(\"Placeholder\",\"\")\n try:\n title13 = title12.replace(\" F\", \"\")\n except:\n title13 = title12.replace(\"Placeholder\",\"\")\n try:\n title14 = title13.replace(\" Fr\", \"\")\n except:\n title14 = title13.replace(\"Placeholder\",\"\")\n try:\n title15 = title14.replace(\" Fre\", \"\")\n except:\n title15 = title14.replace(\"Placeholder\",\"\")\n try:\n title16 = title15.replace(\" HD\", \"\")\n except:\n title16 = title15.replace(\"Placeholder\",\"\")\n try:\n title17 = title16.replace(\" H\", \"\")\n except:\n title17 = title16.replace(\"Placeholder\",\"\")\n try:\n title18 = title17.replace(\" HD P\", \"\")\n except:\n title18 = title17.replace(\"Placeholder\",\"\")\n try:\n title19 = title18.replace(\" re\", \"\")\n except:\n title19 = title18.replace(\"Placeholder\",\"\")\n try:\n title120 = title19.replace(\" r\", \"\")\n except:\n title120 = title19.replace(\"Placeholder\",\"\")\n # Coloring Years\n try:\n title21 = title120.replace(\"(2018)\", \"[COLOR yellow](2018)[/COLOR]\")\n except:\n title21 = title120.replace(\"Placeholder\",\"\")\n try:\n title22 = title21.replace(\"(2016)\", \"[COLOR lightsalmon](2016)[/COLOR]\")\n except:\n title22 = title21.replace(\"Placeholder\",\"\")\n try:\n title23 = title22.replace(\"(2015)\", \"[COLOR lime](2016)[/COLOR]\")\n except:\n title23 = title22.replace(\"Placeholder\",\"\")\n # Language\n try:\n title24 = title23.replace(\"Hindi\", \"[COLOR green]Hindi[/COLOR]\")\n except:\n title24 = title23.replace(\"Placeholder\",\"\")\n try:\n title25 = title24.replace(\"Dubbed\", \"[COLOR cyan]Dubbed[/COLOR]\")\n except:\n title25 = title24.replace(\"Placeholder\",\"\")\n\n # Continued\n try:\n title26 = title25.replace(\" nt o\", \"\")\n except:\n title26 = title25.replace(\"Placeholder\",\"\")\n try:\n title27 = title26.replace(\" nt F\", 
\"\")\n except:\n title27 = title26.replace(\"Placeholder\",\"\")\n try:\n title28 = title27.replace(\" nt\", \"\")\n except:\n title28 = title27.replace(\"Placeholder\",\"\")\n try:\n title = title28.replace(\" Pr\", \"\")\n except:\n title = title28.replace(\"Placeholder\",\"\")\n\n url = item.h2.find('a')['href']\n try:\n thumb = item.find('img')['src'].strip()\n except:\n thumb = _icon\n movies.append((title, thumb, url))\n \n if 'next' in str(Paginator):\n\n nextli = Paginator.find('a', {'class':re.compile('page larger')})\n\n purl = nextli.get('href')\n pages = Paginator.findAll('span', {'class':re.compile('pages')})\n lastpg = pages[len(pages)-1].text\n title = 'Next Page.. (Currently in %s)' % (lastpg)\n movies.append((title, _icon, purl))\n \n return movies",
"def parse_top_movies(html: str) -> ResultSet:\n\n soup = BeautifulSoup(html, \"html.parser\")\n return soup.find_all(\"div\", class_=\"lister-item-content\")",
"def get_imdb_movies(max_movies: int):\n from imdb.database import index_movie_to_database\n movie_count = 0\n for url in crawl_imdb_listing_page(TOP_PAGE):\n if movie_count == max_movies:\n break\n movie_data = parse_movie_page(url)\n if index_movie_to_database(movie_data):\n movie_count += 1",
"def fetch(self, movie_id: str) -> AVInfo:\n movie_id = movie_id.upper()\n content = requests.get(f'https://javbus.com/{movie_id}')\n content.raise_for_status()\n tree = BeautifulSoup(content.content, features='html.parser')\n info = AVInfo(movie_id=movie_id)\n\n # fill information\n info.title = tree.select_one('div.container > h3').text\n info_table = tree.select_one('div.container > div.row.movie > div.info')\n info_keys_mapping = {\n '識別碼': 'movie_id',\n '發行日期': 'premiered',\n '導演': 'director',\n '製作商': 'studio',\n '發行商': 'publisher',\n '系列': 'series',\n '長度': 'movie_length',\n }\n last_row = ''\n for info_row in info_table.select('p'):\n if last_row:\n # last row is a header, parse the content accordingly\n if last_row == 'categories':\n tags = [e.text.strip() for e in info_row.select('span.genre')]\n info.tags = [s for s in tags if s]\n elif last_row == 'actors':\n actors = [e.text.strip() for e in info_row.select('span.genre')]\n info.actors = [s for s in actors if s]\n last_row = ''\n else:\n # last row is not a header, parse it\n row_items = info_row.text.strip().split(':')\n if len(row_items) == 2:\n # \"key: value\"\n raw_key, raw_value = info_row.text.split(':')\n raw_key = raw_key.strip()\n raw_value = raw_value.strip()\n if raw_key in info_keys_mapping and raw_value:\n setattr(info, info_keys_mapping[raw_key], raw_value)\n elif raw_key == '類別':\n last_row = 'categories'\n elif raw_key == '演員':\n last_row = 'actors'\n\n # fill fanart images\n fanart_image = tree.select_one('div.container > div.row.movie a.bigImage')\n fanart_image = AVInfoImage(\n file=fanart_image['href'],\n thumbnail=fanart_image.select_one('img')['src'],\n )\n if fanart_image.thumbnail == fanart_image.file:\n fanart_image.thumbnail = None\n info.fanart_images = [fanart_image]\n\n # fill screenshot images\n screenshot_images = tree.select('div.container > div#sample-waterfall > a.sample-box')\n if screenshot_images:\n info.screenshot_images = []\n for screenshot_image in screenshot_images:\n uri = screenshot_image['href']\n thumbnail_uri = screenshot_image.select_one('img')['src']\n if thumbnail_uri == uri:\n thumbnail_uri = None\n info.screenshot_images.append(AVInfoImage(file=uri, thumbnail=thumbnail_uri))\n\n info.info_born_time = time.time()\n return info",
"def parse_filmography_actor(soup, pageurl):\n\tmovies = []\n\tmovieurls = []\n\t# find the filmography section\n\tfilmography = soup.findAll('div', {'class': 'div-col columns column-count column-count-3'})\n\tfilmography +=\tsoup.findAll('div', {'class': 'div-col columns column-count column-count-2'})\n\t# if filmography div exists, search each row.\n\tif len(filmography) != 0:\n\t\tmovie_list = filmography[0].find_next('ul')\n\t\tfor movie in movie_list.findAll('a'):\n\t\t\tmovieurls.append(movie.get('href'))\n\t\t\tmovies.append(movie.get('title'))\n\telse:\n\t\t# filmography may be stored in table format instead\n\t\tfilmography = soup.findAll('span', {'id': 'Filmography'})\n\t\tif len(filmography) == 0:\n\t\t\tlogging.warning('' + pageurl + ' does not contain a filmography for the actor and will not be parsed')\n\t\t\treturn None, None\n\t\tmovie_table = filmography[0].find_next('table')\n\t\t# if no table either, nowhere we can get filmography\n\t\tif movie_table == None:\n\t\t\tlogging.warning('' + pageurl + ' does not contain a filmography for the actor and will not be parsed')\n\t\t\treturn None, None\n\t\t# if table exits, read each row of it\n\t\tfor row in movie_table.findAll('tr'):\n\t\t\tcol1 = row.find_next('td')\n\t\t\t# table should have at least 1 column\n\t\t\tif col1 == None:\n\t\t\t\tlogging.warning('' + pageurl + ' has filmography table in irregular format, parsing will terminate')\n\t\t\t\treturn None, None\n\t\t\t# table should have at least 2 columns\n\t\t\tcol2 = col1.find_next('td')\n\t\t\tif col2 == None:\n\t\t\t\tlogging.warning('' + pageurl + ' has filmography table in irregular format, parsing will terminate')\n\t\t\t\treturn None, None\n\t\t\t# find the link in the second column\n\t\t\tmovie = col2.find('a')\n\t\t\t# if this link is nonempty, get href and title\n\t\t\tif movie != None:\n\t\t\t\tmovieurls.append(movie.get('href'))\n\t\t\t\tmovies.append(movie.get('title'))\n\t\t# address case of external filmography\n\t\tif len(movieurls) == 1:\n\t\t\tlogging.warning('' + pageurl + ' has an external filmography, parsing will terminate')\n\t\t\treturn None, None\n\treturn movies, movieurls",
"def get_movie_by_id(movie_id):\n search_url = 'https://api.themoviedb.org/3/movie/%s?api_key=%s' %(movie_id, api_key)\n print(search_url)\n with urllib.request.urlopen(search_url) as url:\n get_movie_data = url.read()\n get_movie_result = json.loads(get_movie_data)\n\n movie_idnum = get_movie_result.get('id')\n movie_name = get_movie_result.get('original_title')\n movie_overview = get_movie_result.get('overview')\n movie_backdrop = get_movie_result.get('backdrop_path')\n movie_average = get_movie_result.get('vote_average')\n movie_count = get_movie_result.get('vote_count')\n\n movie_object = Movie(movie_idnum, movie_name, movie_overview, movie_backdrop, movie_average, movie_count)\n\n return movie_object",
"def test_get_movie_by_id(self):\n # insert a movie so there is one to be found -- need executive producer\n # token\n res = self.client().post('/movies', headers={\n 'Authorization': \"Bearer {}\".format(self.executive_producer_token)\n }, json=self.VALID_NEW_MOVIE)\n # find movie inserted above with id\n res = self.client().get('/movies/1', headers={\n 'Authorization': \"Bearer {}\".format(self.casting_assistant_token)\n })\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertIn('movie', data)\n self.assertIn('imdb_rating', data['movie'])\n self.assertIn('duration', data['movie'])",
"def test_IMDB_Search_TC_003_see_movie_details(self):\n # to load a given URL in browser window\n self.driver.get(self.base_url) \n # to enter search term, we need to locate the search textbox\n searchTextBox=self.driver.find_element_by_id(\"suggestion-search\")\n # to clear any text in the search textbox\n searchTextBox.clear()\n # to enter the search term in the search textbox via send_keys() function\n searchTextBox.send_keys(self.search_term)\n # to search for the entered search term\n searchTextBox.send_keys(Keys.RETURN)\n # to click on movie title \n searchMovieBox = self.driver.find_element_by_link_text(\"Silicon Valley\").click()\n #self.driver.implicitly_wait(5)\n self.driver.find_element_by_xpath('//head/title[1]')\n # to verify if the search results page loaded\n self.assertIn(\"Silicon Valley (TV Series 2014–2019) - IMDb\",self.driver.title)\n # to verify if the search results page contains any results or no results were found.\n self.assertNotIn(\"No results found.\",self.driver.page_source)",
"def check_ratings(self):\n\n self.browser.get('https://www.imdb.com/')\n\n for title in self.titles:\n input_bar = self.browser.find_element_by_id('navbar-query')\n input_bar.clear()\n\n input_bar.send_keys(title)\n input_bar.send_keys(Keys.RETURN)\n\n time.sleep(3)\n\n # Click on the first suggestion\n css_selector = \"div.findSection:nth-child(3) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(2) > a:nth-child(1)\"\n self.browser.find_element_by_css_selector(css_selector).click()\n time.sleep(3)\n\n # Pull details that will always be available\n score = str(self.browser.find_element_by_class_name('ratingValue').text)\n score = score.split('/10')[0].replace(',', '.')\n\n time.sleep(3)\n\n summary = str(self.browser.find_element_by_class_name('summary_text').text)\n subtext = str(self.browser.find_element_by_class_name('subtext').text)\n\n # Pull details that differ between movies and series\n try:\n duration = str(self.browser.find_element_by_class_name('bp_sub_heading').text) # Only for series\n if 'episodes' not in duration:\n duration = 'Some episodes'\n except Exception:\n # bp_sub_heading won't be found on a movie page\n duration = 'movie'\n\n if subtext[0].isdigit():\n # Split up the details from the subtext\n subtext_list = subtext.split(' | ')\n else:\n # Some movies' subtext starts with 'R' / 'PG-13'\n subtext_list = subtext.split(' | ')\n del subtext_list[0]\n\n # Duration\n if duration == 'movie':\n show_type = 'Movie'\n duration = subtext_list[0]\n try:\n year = datetime.datetime.strptime(subtext_list[2].split(' (')[0], '%d %B %Y').strftime('%Y')\n except ValueError:\n year = str(subtext_list[2].split(' (')[0][-4:])\n\n else: # series\n show_type = 'Serie'\n # Retrieve last season and its release date\n season_tab = str(self.browser.find_element_by_class_name('seasons-and-year-nav').text).strip()\n\n numbers = re.findall('[0-9]+', season_tab)\n latest_season = int(numbers[0])\n latest_year = int(max(numbers, key=lambda x: int(x)))\n\n duration += ' (%d Seasons in %d), %s per episode' % (latest_season, latest_year, subtext_list[0])\n\n year = re.findall('[0-9]+', subtext_list[2])[0]\n\n # Pull some more data out from the subtext\n genres = subtext_list[1].split(', ')\n\n # Pull details that are not always available\n creds_list = []\n creds = self.browser.find_elements_by_class_name('credit_summary_item')\n for c in creds:\n temp = str(c.text)\n if '|' in temp:\n temp = temp.split('|')[0]\n\n creds_list.append(temp)\n\n self.data_dict[title] = {\n 'score': score,\n 'summary': summary,\n 'duration': duration,\n 'credits': creds_list,\n 'genres': genres,\n 'released': year,\n 'type': show_type,\n }",
"def movie_page(movie_id):\n\n current_movie = Movie.query.filter_by(movie_id=movie_id).first()\n title = current_movie.title\n released = current_movie.released_at\n url = current_movie.imdb_url\n thing = current_movie.movie_id\n\n movie_rating = db.session.query(Rating.score).join(Movie).filter(\n Movie.movie_id==thing).all()\n\n return render_template('movie_page.html', current_movie=current_movie, \n title=title, released=released, url=url, movie_rating=movie_rating)",
"def parse_movie_page(pageurl):\n\tlogging.info('attempt to parse page ' + pageurl + 'as a movie')\n\tmovie = {}\n\t# attempt to load the wikipedia page and parse with beautifulsoup\n\tsoup = load_page(pageurl)\n\tif soup == None:\n\t\treturn None\n\t# attempt to retrieve name from page tree\n\tname = parse_name_movie(soup, pageurl)\n\tif name == None:\n\t\treturn None\n\t# attempt to retrieve movie year and gross from page tree\n\tyear, gross = parse_infotable_movie(soup, pageurl)\n\tif year == None or gross == None:\n\t\treturn None\n\t# attempt to retrieve actor names and urls from page tree\n\tactors, actorurls = parse_actors_movie(soup, pageurl)\n\tif actors == None:\n\t\treturn None\n\t# build dictionary to return (will be converted to JSON before returned to user)\n\tmovie['name'] = name\n\tmovie['year'] = year\n\tmovie['gross'] = gross\n\tmovie['actors'] = actors\n\tmovie['actorurls'] = actorurls\n\tlogging.info('' + pageurl + ' was successfully parsed as a movie')\n\treturn movie",
"def fetch_movie(title: str) -> Union[None, dict]:\n\n url = '{base_url}?apikey={api_key}&type=movie&t={title}'.format(\n base_url=settings.OMDB_API_URL,\n api_key=settings.OMDB_API_KEY,\n title=title,\n )\n\n try:\n response = requests.get(\n url,\n headers={'user-agent': 'Movie DB'},\n timeout=1.5,\n )\n if not response.ok:\n return None\n\n data = response.json()\n except (requests.RequestException, JSONDecodeError):\n return None\n\n serializer = MovieSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return serializer.data\n\n return None",
"def movie(bot, trigger):\n if not trigger.group(2):\n return\n word = trigger.group(2).rstrip()\n uri = \"http://www.imdbapi.com/?t=\" + word\n u = web.get(uri, 30)\n data = json.loads(u) # data is a Dict containing all the information we need\n if data['Response'] == 'False':\n if 'Error' in data:\n message = '[MOVIE] %s' % data['Error']\n else:\n LOGGER.warning(\n 'Got an error from the imdb api, search phrase was %s; data was %s',\n word, str(data))\n message = '[MOVIE] Got an error from imdbapi'\n else:\n message = '[MOVIE] Title: ' + data['Title'] + \\\n ' | Year: ' + data['Year'] + \\\n ' | Rating: ' + data['imdbRating'] + \\\n ' | Genre: ' + data['Genre'] + \\\n ' | IMDB Link: http://imdb.com/title/' + data['imdbID']\n bot.say(message)",
"def download_top_movies(genre: Genre) -> str:\n\n page_url = create_page_url(genre)\n response = requests.get(page_url)\n response.raise_for_status()\n return response.text",
"def fetch(self, movie_id: str) -> AVInfo:\n raise NotImplementedError()",
"def getMovieInfo(endpoint, title, year):\n\n params = {'t': title, 'y': year, 'plot':'short', 'r':'json', 'tomatoes':'true'}\n response = requests.get(endpoint, params=params)\n\n try:\n response.raise_for_status()\n response = response.json()\n\n if 'Error' in response.keys():\n raise LookupError\n\n results = {}\n strkeys = ['Actors', 'Director', 'Genre', 'Plot', 'Rated', 'Released', 'imdbID', 'tomatoConsensus']\n intkeys = ['Runtime', 'Metascore', 'imdbVotes', 'tomatoMeter', 'tomatoReviews']\n fltkeys = ['imdbRating']\n\n for key in strkeys:\n results[key] = response[key] if response[key] != 'N/A' else None\n for key in intkeys:\n results[key] = int(re.sub(r'[^\\d]', '', response[key])) if response[key] != 'N/A' else None\n for key in fltkeys:\n results[key] = float(re.sub(r'[^\\d]', '', response[key])) if response[key] != 'N/A' else None\n return results\n\n except requests.exceptions.HTTPError:\n print(\"There was a problem with the HTTP request: {0}\".format(response.status_code))\n except requests.exceptions.Timeout:\n print(\"The HTTP request timed out\")\n except LookupError:\n pass\n return None",
"async def handle_movie(ws, movie):\n year = int(movie['year'])\n payload_kp = {'kp_query': '{0} {1}'.format(movie['title'], year)}\n content_kp = await get_content(URL_KP, payload_kp, CACHE_KP)\n kp_movie = parse_kinopoisk_page(content_kp, year)\n if kp_movie['id_kinopoisk']:\n if kp_movie['title_eng'] == '':\n kp_movie['imdb'] = ''\n kp_movie['rating_imdb'] = 0\n else:\n year = int(movie['year'])\n payload_imdb = {\n 'title': kp_movie['title_eng'],\n 'title_type': 'feature',\n 'release_date': f'{year-1},{year+1}',\n }\n content_imdb = await get_content(URL_IMDB, payload_imdb, CACHE_IMDB)\n kp_movie.update(parse_imdb_page(content_imdb, movie['year']))\n movie.update(kp_movie)\n await ws.send_message(json.dumps(movie))",
"def get_movie(id):\n if id and request.headers['accept'] == \"application/json\":\n film = mongo_mgr.get_film_by_id (id)\n # handling template movies\n # if the requested film is still a template (name is None), the source\n # and the source_id will be used to get the full data\n if film['name'] is None:\n film = mediator.update_template_film (film)\n user_has_movie = mongo_mgr.user_has_movie(id, aaa.current_user.id)\n film['my_movie'] = user_has_movie\n return json.loads(dumps(film))\n else:\n return template(\"details.html\", user=aaa.current_user.username)",
"def getMovieInfo(url):\n infobox = getInfoBox(url)\n if infobox:\n infoDict = {}\n title = getTitle(infobox)\n infoDict[\"Title\"] = title\n for label in getLabels(infobox):\n infoDict[label] = getContents(infobox, label=label)\n # Adding IMDB and Meta scores\n omdbObject = getOmdbInfo(title)\n if omdbObject:\n infoDict[\"ImdbScore\"] = omdbObject.get(\"imdbRating\", \"N/A\")\n infoDict[\"Metascore\"] = omdbObject.get(\"Metascore\", \"N/A\")\n else:\n infoDict[\"ImdbScore\"] = None\n infoDict[\"Metascore\"] = None\n\n return infoDict\n else:\n pass"
] |
[
"0.7362223",
"0.71684545",
"0.71325827",
"0.6993874",
"0.6979878",
"0.69582427",
"0.6947582",
"0.6888327",
"0.6886808",
"0.6821694",
"0.6817886",
"0.67733526",
"0.67380553",
"0.6733104",
"0.6632344",
"0.6556607",
"0.643643",
"0.6433405",
"0.6427055",
"0.6404059",
"0.63820803",
"0.6373815",
"0.63733405",
"0.63673383",
"0.6354176",
"0.6353194",
"0.63334674",
"0.63329774",
"0.63104105",
"0.63076687"
] |
0.71959716
|
1
|
takes the first genre as input and looks if there are more genres.
|
def find_genres(genre_dom, dom):
# take the first genre and turn it into a string
genre = str(genre_dom)[3:-1]
# see if there are more genres to a movie
next_genre = dom.find("div", itemprop="genre").a.find_next_sibling("a")
# add the new genres to the string
while(next_genre):
temp = next_genre.get_text().encode("utf-8")
genre = genre + "; " + "" + str(temp)[3:-1]
next_genre = next_genre.find_next_sibling("a")
return genre
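
A tiny self-contained check of the sibling walk above (an assumption for illustration, not part of the original scraper): the [3:-1] slice expects one leading whitespace character inside each genre <a> tag, as on the old IMDB layout, so the toy markup below includes that space.

from bs4 import BeautifulSoup

toy_html = '<div itemprop="genre"><a> Crime</a><a> Drama</a></div>'
toy_dom = BeautifulSoup(toy_html, "lxml")
# first genre, encoded the same way scrape_movie_page passes it in
first_genre = toy_dom.find("div", itemprop="genre").a.get_text().encode("utf-8")
print(find_genres(first_genre, toy_dom))  # -> Crime; Drama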
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def search_genres(self, needle):\n return self._genre_search.search(searchable(needle))",
"def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()",
"def chatbot_genre_query(self, genres: list): #-> cursor object\n if not self.client:\n self.connect()\n return self.db.find({\"$query\": { \"genre\": { \"$in\": genres }}, \"$orderby\": { \"avg_vote\" : -1 }}).limit(25)",
"def get_movies_by_genre(self, genre: str):\n raise NotImplementedError",
"def search_for_books(self, query):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT genre_id FROM genres WHERE ' + query):\n books.extend(self.get_books(row[0]))\n\n return books",
"def spotifySearch(request,genre):\n\tif genre in genre_artist.keys():\n\t\ttracks = top_tracks(genre)\n\t\tif tracks:\n\t\t\treturn HttpResponse(json.dumps(tracks))\n\t\telse:\n\t\t\tresponse ={\"message\":\"Artist/track is not found.\", \"error\":True}\n\t\t\treturn HttpResponse(json.dumps(response))\n\telse:\n\t\tresponse = {\"message\": \"Please give an existed genre as a parameter. Genres are: rock, alternative rock, pop, blues, country, electronic, jazz, r&b, rap, reggae.\", \"error\":True}\n\t\treturn HttpResponse(json.dumps(response))",
"def find_genre_playlists(data):\n playlists = []\n\n if data['genre']:\n playlists += data['genre']\n\n if data['comments']:\n playlists += data['comments']\n\n matches = re.findall('\\(\\s*(cover|live|unplugged|acoustic|remix|instrumental)', data['title'].lower())\n if matches:\n if 'cover' in matches:\n matches.remove('cover')\n matches += ['covers']\n\n if 'acoustic' in matches:\n matches.remove('acoustic')\n matches += ['unplugged']\n\n if 'remix' in matches:\n matches.remove('remix')\n matches += ['remix']\n\n if 'instrumental' in matches:\n matches.remove('instrumental')\n matches += ['instrumental']\n\n playlists += matches\n\n return set([x for x in playlists if x != 'none'])",
"def get_genre_similarity(self):\n genre_words = []\n for w in self.target_movie.genres.split('|'):\n w = w.strip('- ,:(){}[]')\n genre_words.append(w)\n\n print(genre_words)\n\n res = self.db.query(Movie).filter(\n Movie.movie_id != self.target_movie.movie_id).filter(\n Movie.movie_id.in_(self.recommendation_pool.keys())\n ).filter(or_(\n Movie.genres.ilike(r'%' + gw + r'%') for gw in genre_words\n )).all()\n\n print(\"%i records from partial genres match\" % len(res))\n GSW = self.GENRES_SIMILARITY_WEIGHT\n for rec in res:\n smid = rec.movie_id\n self.recommendation_pool[smid]['genres_similarity'] = \\\n jaccard_index(self.target_movie.genres, rec.genres, '|') * GSW",
"def get_genres(self) -> List[Genre]:\n raise NotImplementedError",
"def recommendation_genre_seeds(self) -> List[str]:\n return self._get('recommendations/available-genre-seeds')['genres']",
"def matchGenres(toPredictGenresString, toCompareGenresString):\n\n #Get the sets of genres\n toPredictGenres = str(toPredictGenresString).split(\"|\")\n toCompareGenres = str(toCompareGenresString).split(\"|\")\n\n toCompareGenresSet = set(toCompareGenres)\n\n commonCount = 0\n\n #Count how many are common to the two sets\n for genre in toPredictGenres:\n if genre in toCompareGenresSet:\n commonCount += 1\n\n #Return 100 times the proportion in both\n return 100 * commonCount/len(toPredictGenres)",
"def random_by_genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"random_by_genre_track_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)",
"def test(self, songs, genres):\n logging.info('Starting testing.')\n num_matches = 0\n confusion_matrix = ConfusionMatrix(genres)\n for song, actual_genre in zip(songs, genres):\n predicted_genre = self.classify(song)\n logging.info('Actual genre: {}, predicted genre: {}'.format(actual_genre, predicted_genre))\n confusion_matrix.add_genres(actual_genre, predicted_genre)\n if actual_genre == predicted_genre:\n num_matches += 1\n return num_matches, confusion_matrix",
"def fetch_genre(self, gid: str):\n self.logging.info(f\"fetching genre: {gid}\")\n return self.sess.query(Genre).filter(Genre.gid == gid).one()",
"def test_get_songs_by_genre(self, track_elms, service_config, request):\n genre_id = uuid.UUID(avalon.compat.to_uuid_input('c12d2a49-d086-43d6-953d-b870deb24228'))\n service_config.track_store.get_by_genre.return_value = track_elms\n service_config.id_cache.get_genre_id.return_value = genre_id\n request.args['genre'] = 'Genre'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_genre.assert_called_with(genre_id)",
"def get_movies(genre: str):\n with MongoClient(uri) as client:\n movie_collection = client[DB][MSG_COLLECTION]\n msg_list = movie_collection.find({\"genres\": genre}).limit(100)\n movie_title_list = []\n for msg in msg_list:\n movie_title_list.append(msg[\"title\"])\n return movie_title_list",
"def test_get_genres_no_params(self, id_name_elms, service_config, request):\n service_config.genre_store.get_all.return_value = id_name_elms\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_genres(params)\n\n assert results == id_name_elms, 'Expected all genres returned'",
"def search_loop(max_generations, individuals, grammar, replacement, selection, fitness_function):\n #Evaluate initial population\n evaluate_fitness(individuals, grammar, fitness_function)\n best_ever = max(individuals)\n individuals.sort(reverse=True)\n print_stats(1, individuals)\n for generation in range(2, (max_generations+1)):\n individuals, best_ever = step(\n individuals, grammar, replacement, selection, fitness_function, best_ever)\n print_stats(generation, individuals)\n return best_ever",
"def addGenre(self, genre):\n if isinstance(genre, Genre):\n pass\n elif isinstance(genre, str):\n try:\n genre_name = genre[:genre.index('(')].strip().lower().replace(' ', '_')\n genre_id = int(genre[genre.index('(')+1 : genre.index(')')])\n except:\n print(\"Error: incorrectly formatted genre {}. Ignoring.\".format(genre))\n return\n genre = Genre(genre_name = genre_name, genre_id = genre_id)\n if genre not in self.anime_genres:\n self.anime_genres.append(genre)",
"def parse_genres(genres):\n\tgenre_list = []\n\tfor genre in genres:\n\t\tgenre_list.append(genre.name)\n\n\treturn \", \".join(genre_list)",
"def get_genius_song(song_name, artist_name, genius):\n song_search = song_name\n for i in range(0, 2):\n song = genius.search_song(song_search, artist_name)\n if isinstance(song, type(None)) or not match(\n\t (song_search, artist_name), (song.title, song.artist)\n ):\n if i:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius\")\n return\n else:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius trying cleaning\")\n song_search = clean(song_search)\n else:\n if i:\n log(f\"Found match for '{song_search}' by '{artist_name}'\")\n break\n\n return song",
"def get_genre(id_genre):\n genre = factory.get_elem_solo(Genre, id_genre)\n return genre",
"def get_genre(self, gen: str) -> Genre:\n self.logging.log(15, f\"getting genre: {gen}\")\n return self.sess.query(Genre).filter(Genre.genre == gen).one()",
"def recommendation_genre_seeds(self, **kwargs):\n return self._get(API.RECOMMENDATIONS_GENRES.value, **kwargs)",
"def genres(self):\n if \"genres\" in self._prop_dict:\n return self._prop_dict[\"genres\"]\n else:\n return None",
"async def get_genres(self) -> APIReturn:\n return await self._request(\"GET\", \"/getGenres\")",
"def get_random_game_from_genre(console_name: str = None, genre: str = None, sales_min: float = 0.) -> (str, str, str):\n if not console_name:\n console_name = df.sample().reset_index()['Platform'][0]\n\n e = df.loc[df['Platform'].str.lower() == console_name]\n\n if not genre:\n genre = e.sample().reset_index()['Genre'][0]\n\n name = e.loc[(e['Genre'].str.lower() == genre) & (e['Global_Sales'] > sales_min)].sample().reset_index()['Name'][0]\n\n return name, console_name, genre",
"def add_genre(self, gid: str, gen: str):\n if self.sess.query(exists().where(Genre.genre_id == gid or Genre.genre == gen)).scalar():\n return\n self.logging.info(f\"adding genre: {gen} with id {gid}\")\n genre = Genre(gid=uuid4().hex,\n genre_id=gid,\n genre=gen)\n self.sess.add(genre)\n self.sess.commit()",
"def query_all_genres():\n result = session.query(Genre).all()\n for genre in result:\n print(\"genre: %s\\ndescription: %s\\nposter: %s\\nuser_id:%s\" %\n (genre.name, genre.description, genre.poster, genre.user_id))\n print(\"**************************\")",
"def getGenres(movieInfo):\n if \"genres\" in movieInfo:\n return [ _format(genre[\"name\"]) for genre in movieInfo[\"genres\"] ]\n else:\n raise AttributeError(\"%s instance has no attribute genre\" % movieInfo)"
] |
[
"0.660926",
"0.59218156",
"0.5887331",
"0.58336353",
"0.5639189",
"0.5631359",
"0.55921984",
"0.53470975",
"0.5322265",
"0.53138363",
"0.5299636",
"0.5262009",
"0.52536136",
"0.52508396",
"0.52483845",
"0.52240586",
"0.51974875",
"0.5196372",
"0.51822656",
"0.5175137",
"0.5168775",
"0.51686627",
"0.5167074",
"0.5160488",
"0.5150721",
"0.51457334",
"0.51280725",
"0.510741",
"0.5067378",
"0.5055718"
] |
0.6475148
|
1
|
takes the first writer as input and looks if there are more writers.
|
def find_writers(writer_dom, dom):
# take the first writer and turn it into a string
writer = str(writer_dom)[2:-1]
# see if there are more writers
next_writer = dom.find("span", itemprop="creator").find_next_sibling("span",
itemprop="creator")
# add all the writers to the string
while(next_writer):
temp = next_writer.a.get_text().encode("utf-8")
writer = writer + "; " + str(temp)[2:-1]
next_writer = next_writer.find_next_sibling("span", itemprop="creator")
return writer
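
The same kind of hedged check for the writer walk (here the [2:-1] slice assumes no leading whitespace inside the <a> tag, unlike find_genres); the markup and names below are made up for illustration.

from bs4 import BeautifulSoup

toy_html = ('<div><span itemprop="creator"><a>Writer One</a></span>'
            '<span itemprop="creator"><a>Writer Two</a></span></div>')
toy_dom = BeautifulSoup(toy_html, "lxml")
# first writer, encoded the same way scrape_movie_page passes it in
first_writer = toy_dom.find("span", itemprop="creator").a.get_text().encode("utf-8")
print(find_writers(first_writer, toy_dom))  # -> Writer One; Writer Two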
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def search_genres(self, needle):\n return self._genre_search.search(searchable(needle))",
"def find_genres(genre_dom, dom):\n # take the first genre and turn it into a string\n genre = str(genre_dom)[3:-1]\n\n # see if there are more genres to a movie\n next_genre = dom.find(\"div\", itemprop=\"genre\").a.find_next_sibling(\"a\")\n\n # add the new genres to the string\n while(next_genre):\n temp = next_genre.get_text().encode(\"utf-8\")\n genre = genre + \"; \" + \"\" + str(temp)[3:-1]\n next_genre = next_genre.find_next_sibling(\"a\")\n return genre",
"def get_desired_gene(j, k, l):\n unwanted_gene_list = j\n full_gene_file = open(k)\n desired_genes = open(l, 'wt')\n counter = 0\n\n for gene in full_gene_file:\n with open(unwanted_gene_list) as j:\n if gene not in j.read():\n desired_genes.write(gene)\n else:\n counter += 1\n\n\n print(\"Filtered sequences: \" + str(counter))\n full_gene_file.close()\n desired_genes.close()",
"def get_writers():\n \n #get all movies from db\n movies_df = movie_helper.get_movies_df() \n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows():\n \n #if imdbid exists user it to look up the API\n if (row['imdbId']):\n movie = ia.get_movie(str(row['imdbId']))\n \n #get list of writers\n writers = movie.get('writer')\n if (writers != None) :\n for writer in writers:\n #first check if the person exists\n imdb_id = writer.personID \n person_df = database_helper.select_query(\"people\", {'imdbId' : imdb_id})\n if (person_df.empty):\n database_helper.insert_data(\"people\", {\"imdbId\": imdb_id, \"fullName\": writer[\"name\"]}) \n \n #add movie director link\n database_helper.insert_data(\"writers\", {\"p_imdbId\" : imdb_id, \"m_imdbId\" : row['imdbId']})\n \n pbar.update(1)",
"def _generator():\n filename_1 = 'gene.txt'\n filename_2 = 'geneSynonym.txt'\n gene_set_1 = gene_names(filename_1)\n gene_syn = gene_names(filename_2, complete=False)\n genes = gene_set_1 | gene_syn\n return genes",
"def find_reader_relations():\n for reader in readers:\n d100 = random.randint(1, 100)\n if d100 <= 50:\n reader_favourite_book[readers[reader]] = random.choice(list(book_ids.values()))\n\n d100 = random.randint(1, 100)\n if d100 <= 5:\n #TODO: fix so that you cannot be friend of yourself\n reader_knows[readers[reader]] = [random.choice(list(authors.values()))] + [random.choice(list(readers.values()))]\n elif d100 > 5 and d100 <= 10:\n reader_knows[readers[reader]] = [random.choice(list(authors.values()))]\n elif d100 > 10 and d100 <= 25:\n reader_knows[readers[reader]] = [random.choice(list(readers.values()))] + [random.choice(list(readers.values()))]\n elif d100 > 25 and d100 <= 50:\n reader_knows[readers[reader]] = [random.choice(list(readers.values()))]",
"def generate_movies(nr):\n for n in range(int(nr)):\n d6 = random.randint(1, 6)\n #A movie cannot be generated with the same name in different genres due to the nature of the data.\n if d6 <= 4:\n movie = generate_fantasy_title()\n movie_genres[movie] = \"FANTASY\"\n #if movie is a book, take author of book as producer\n if movie in books:\n movie_producers[movie] = authors[books[movie][0]]\n else:\n d20 = random.randint(1, 20)\n if d20 == 1:\n movie_producers[movie] = None\n elif d20 > 1:\n movie_producers[movie] = random.choice(list(readers.values()))\n elif d6 == 5:\n movie = generate_romance_title()\n movie_genres[movie] = \"ROMANCE\"\n d20 = random.randint(1, 20)\n if d20 == 1:\n movie_producers[movie] = None\n elif d20 > 1:\n movie_producers[movie] = random.choice(list(readers.values()))\n elif d6 == 6:\n movie = generate_horror_title()\n movie_genres[movie] = \"HORROR\"\n d20 = random.randint(1, 20)\n if d20 == 1:\n movie_producers[movie] = None\n elif d20 > 1:\n movie_producers[movie] = random.choice(list(readers.values()))\n movies.append(movie)\n counter = 1\n for movie in movies:\n movie_ids[movie] = f'Movie/{counter}'\n counter += 1",
"def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()",
"def gene_finder(dna, threshold):\n finder = []\n twoStrands = find_all_ORFs_both_strands(dna) #this calls the function that finds the compliment of dna and finds all ORFs \n print twoStrands \n for k in range(len(twoStrands)): #go through the list \"twoStrands\"\n if twoStrands[k]>threshold: #if the length of \n print twoStrands[k]\n print len(twoStrands[k])\n finder.append(twoStrands[k])\n return finder",
"def genres(self):\n if \"genres\" in self._prop_dict:\n return self._prop_dict[\"genres\"]\n else:\n return None",
"def getReadOnGeneFile(rnameList, len_param):\n log.info(\"Select reads that are on genes\")\n for ch in rnameList:\n tcount = 0\n \n geneS = {}#gene start\n geneE = {}#gene end\n g_direct = {}#gene direction\n readS = {}#read start\n readE = {}#read End\n readDic = {}#readDic[id] = read\n sortGeneId = {}\n sortReadId = {}\n genefile = os.path.join(working_dir, 'removeOverlap.'+ch+'.gff')\n readfile = os.path.join(working_dir, 'MappedRead.'+ch+'.sam')\n rgfile = os.path.join(working_dir, 'ReadOnGeneList.'+ch+'.tab')\n log.info(\"Generate \" + rgfile)\n f=open(rgfile, \"w\") \n \n geneS, geneE, g_direct = getGFFStartEnd(genefile, len_param)\n sortGeneId = sortId(geneS)\n \n readS, readE,readDic = getSAMStartEnd(readfile)\n sortReadId = sortId(readS)\n ys = 0\n \n for x in range(len(sortGeneId)):\n \n gID = sortGeneId[x]#gene id\n gs = geneS.get(gID)#gene start\n ge = geneE.get(gID)#gene end\n gd = g_direct.get(gID)\n glineList = []\n sameG = False\n \n for y in range(ys,len(sortReadId)):\n rID = sortReadId[y]\n rs = readS.get(rID)\n re = readE.get(rID)\n if rs >= gs:\n if re <= ge:\n f.write(gID)\n f.write('\\t')\n f.write(str(gs))\n f.write('\\t')\n f.write(str(ge))\n f.write('\\t')\n f.write(gd)\n f.write('\\t')\n f.write(rID)\n f.write('\\t')\n f.write(str(rs))\n f.write('\\t')\n f.write(str(re))\n f.write('\\t')\n f.write(readDic.get(rID))\n elif re > ge:\n ys = y\n break\n elif rs > ge:\n ys = y\n break\n f.close()",
"def search_loop(max_generations, individuals, grammar, replacement, selection, fitness_function):\n #Evaluate initial population\n evaluate_fitness(individuals, grammar, fitness_function)\n best_ever = max(individuals)\n individuals.sort(reverse=True)\n print_stats(1, individuals)\n for generation in range(2, (max_generations+1)):\n individuals, best_ever = step(\n individuals, grammar, replacement, selection, fitness_function, best_ever)\n print_stats(generation, individuals)\n return best_ever",
"def is_done(self):\n return_val = False\n for name in os.listdir(self.results_dir_path):\n if name.startswith('top_genes_per_phenotype'):\n return_val = True\n return return_val",
"async def get_genres(self) -> APIReturn:\n return await self._request(\"GET\", \"/getGenres\")",
"def include_parents():\n suffix = uuid4().hex\n\n click.secho('*** Creating Genres for Movie...', fg='green')\n _horror = _make_document('genre', name='Horror - %s' % suffix)\n click.secho(json.dumps(_horror, indent=2, sort_keys=True), fg='yellow')\n\n _monster = _make_document('genre', name='Monster - %s' % suffix, parent=_horror['_id'])\n click.secho(json.dumps(_monster, indent=2, sort_keys=True), fg='yellow')\n\n _vampire = _make_document('genre', name='Vampire - %s' % suffix, parent=_monster['_id'])\n click.secho(json.dumps(_vampire, indent=2, sort_keys=True), fg='yellow')\n\n _werewolf = _make_document('genre', name='Werewolf - %s' % suffix, parent=_monster['_id'])\n click.secho(json.dumps(_werewolf, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating Movie with genres `Werewolf` and `Vampire`, parent genres should be auto-filled...', fg='green')\n twilight = _make_document('movie', title='Twilight', genres=[_vampire['_id'], _werewolf['_id']])\n click.secho(json.dumps(twilight, indent=2, sort_keys=True), fg='yellow')",
"def greedy_search(self, check=False):\n\n rewire_bool = True\n rewire_count = 0\n\n if len(self.reactions) <= 1:\n print(\"Commodity only takes one reaction to product, rewiring is not possible\")\n\n else:\n\n while rewire_bool:\n\n g = random.sample(self.reactions, 2)\n rewire_count += 1\n\n g1_str = str(g[0])\n g2_str = str(g[1])\n\n if \"*\" not in g1_str and \"*\" not in g2_str:\n\n # 2 reactons dictionaries as instance. class objects\n g1_dict = g[0].__dict__\n g2_dict = g[1].__dict__\n\n # left right strings\n g1_right2 = g1_dict.get(\"right2\")\n g1_left2 = g1_dict.get(\"left2\")\n\n g2_left2 = g2_dict.get(\"left2\")\n g2_right2 = g2_dict.get(\"right2\")\n g2_right = g2_dict.get(\"right\")\n\n # cut index i\n g1_i = g1_dict.get(\"i\")\n g2_i = g2_dict.get(\"i\")\n\n # Finding reactions which can be rewired\n if g1_dict.get(\"i\") <= g2_dict.get(\"i\"):\n if self.recombination == Recomb_1:\n r_waste = g1_right2\n r1_bool = True\n\n g1_product = g1_right2[g2_i]\n g2_product = g2_left2[g2_i]\n\n desired_product = g2_left2\n\n if g1_product == g2_product:\n rewire_bool = False\n\n if check:\n print(\"candidate rewiring found!\")\n print(str(g[0]))\n print(str(g[1]))\n else:\n continue\n\n elif self.recombination == Recomb_2:\n r_waste = g1_left2\n r1_bool = False\n\n desired_product = g2_right2\n n = len(desired_product)\n\n if g1_left2[-g2_i] == g2_right[g2_i]:\n rewire_bool = False\n\n if check:\n print(\"candidate rewiring found!\")\n print(str(g[0]))\n print(str(g[1]))\n\n else:\n continue\n\n # Rewiring reactions and replacing them in reaction set\n r_n = \"{}*\".format(g2_dict.get(\"reaction_n\"))\n\n r = self.recombination(r_n, g2_dict.get(\"left\"), r_waste, g2_dict.get(\"i\"))\n\n if check:\n print(str(r))\n\n # (2) remove and replace\n g1 = g[0]\n g2 = g[1]\n\n # check same product is made\n rewire_dict = r.__dict__\n\n if r1_bool:\n rewire_product = rewire_dict.get(\"left2\")\n elif r1_bool is False:\n rewire_product = rewire_dict.get(\"right2\")\n\n if rewire_product == desired_product:\n\n if check:\n print(\"Rewire accepted and old reaction replaced\")\n\n rewire_bool = False\n\n for obj in self.reactions:\n if obj == g2:\n self.reactions.remove(obj)\n self.reactions.add(r)\n\n elif rewire_count >= 1000:\n if check:\n print(\"Rewire max attempts reached, {} total\".format(rewire_count))\n\n rewire_bool = False\n\n return self.reactions",
"def get_all_genres(self):\n self.cursor.execute(\"select * from genres\")\n self.connection.commit()\n return self.cursor.fetchall()",
"def some(args):\n from jcvi.utils.cbook import gene_name\n\n p = OptionParser(some.__doc__)\n p.add_option(\n \"--exclude\",\n default=False,\n action=\"store_true\",\n help=\"Output sequences not in the list file\",\n )\n p.add_option(\n \"--no_strip_names\",\n default=False,\n action=\"store_true\",\n help=\"Do not strip alternative splicing (e.g. At5g06540.1 -> At5g06540)\",\n )\n p.add_option(\n \"--uniprot\", default=False, action=\"store_true\", help=\"Header is from uniprot\"\n )\n\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(p.print_help())\n\n strip_names = not opts.no_strip_names\n fastafile, listfile, outfastafile = args\n outfastahandle = must_open(outfastafile, \"w\")\n qualfile = get_qual(fastafile)\n\n names = set(open(listfile).read().split())\n if qualfile:\n outqualfile = outfastafile + \".qual\"\n outqualhandle = open(outqualfile, \"w\")\n parser = iter_fasta_qual(fastafile, qualfile)\n else:\n parser = SeqIO.parse(fastafile, \"fasta\")\n\n recs = []\n seen = set()\n for rec in parser:\n name = rec.id\n if strip_names:\n name = gene_name(name)\n\n if name in seen: # Only report one instance\n continue\n\n if opts.uniprot:\n name = name.split(\"|\")[-1]\n\n if opts.exclude:\n if name in names:\n continue\n else:\n if name not in names:\n continue\n\n recs.append(rec)\n seen.add(name)\n\n for rec in recs:\n SeqIO.write([rec], outfastahandle, \"fasta\")\n if qualfile:\n SeqIO.write([rec], outqualhandle, \"qual\")\n\n logging.debug(\"A total of %d records written to `%s`\" % (len(recs), outfastafile))",
"def wingrep(self):\n for folder, files_ in self.walk():\n listed_files = self.list_appro_files(folder, files_)\n for file_o in self.open_files(listed_files=listed_files):\n self.search_in(file_o)",
"def run(self, generations=1000):\n gcount = 0\n \n while gcount<=generations:\n try:\n print \"Gen: \"+str(gcount),\n self.population = zip (self.population, [self.target]*len(self.population))\n self.population = self.pool.map(f, self.population)\n except:\n pass\n for i in self.population:\n print i[0],i[1]\n self.population = [organism.Organism(x[0], x[1]) for x in self.population]\n self.population.sort()\n print \" Max fitness: \"+str(self.population[::-1][1].fitness)\n try:\n if self.population[0] <= self.ppop[0]:\n self.ppop = self.population[::-1][0:10] # The top ten organisms\n else:\n self.population = self.ppop # We got worse! go back!\n except:\n self.ppop = self.population\n self.population = self.population[::-1][0:10]\n try:\n self.breed()\n except:\n print \"Breeding error\"\n gcount+=1",
"def run(filts, jgen, argsj={}, verbose=False):\n if len(filts) == 0:\n return jgen\n\n filt0 = filts[0]\n cls = {\n \"commenter\": Commenter,\n \"setup\": SetupFilter,\n }[filt0]\n return run(filts[1:], cls(argsj).run(jgen), argsj=argsj, verbose=verbose)",
"def found_needed_docstr(self):\n self.needed += 1\n self.found += 1",
"def voorbeeld():\n os.system(\"rm Twogs.txt cogs.txt\")\n os.system(\"touch Twogs.txt cogs.txt\")\n orgslijst = [y[:-1] for y in openfile(\"orglijst.txt\") if \"\\n\" in y]\n for x in range(len(orgslijst)):\n org1 = orgslijst[x]\n for y in range(len(orgslijst)):\n org2 = orgslijst[y]\n if x > y:\n schrijfnaartwog(org1, org2)\n print(org1)\n prots, twogs, cogs = openneededfiles(org1)\n if x >= 2:\n searchforcog(cogs, twogs, prots)",
"def get_genius_song(song_name, artist_name, genius):\n song_search = song_name\n for i in range(0, 2):\n song = genius.search_song(song_search, artist_name)\n if isinstance(song, type(None)) or not match(\n\t (song_search, artist_name), (song.title, song.artist)\n ):\n if i:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius\")\n return\n else:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius trying cleaning\")\n song_search = clean(song_search)\n else:\n if i:\n log(f\"Found match for '{song_search}' by '{artist_name}'\")\n break\n\n return song",
"def update_genres(source_item: Dict, target_item: Dict) -> None:\n for genre in target_item.get('genre', []):\n for item in source_item['highlight'].get('genres', []):\n if genre['name'].strip() in remove_html_tags(item):\n genre['name'] = item",
"def test_search_metadata_with_generator(integrated_ff):\n url = integrated_ff['ff_key']['server'] + '/'\n\n # helper to validate generator\n def validate_gen(gen, expected):\n found = 0\n for _ in gen:\n found += 1\n assert found == expected\n\n # do limit = 10 search, iterate through generator, should have 10 results\n search_gen = ff_utils.search_metadata(url + 'search/?limit=10&type=File',\n key=integrated_ff['ff_key'],\n is_generator=True)\n validate_gen(search_gen, 10)\n # do limit = 7 search, iterate through generator, should have 7 results\n search_gen = ff_utils.search_metadata(url + 'search/?limit=7&type=File',\n key=integrated_ff['ff_key'],\n is_generator=True)\n validate_gen(search_gen, 7)\n # do limit = 3 search on users\n search_gen = ff_utils.search_metadata(url + 'search/?limit=3&type=User',\n key=integrated_ff['ff_key'],\n is_generator=True)\n validate_gen(search_gen, 3)",
"def get_genre_similarity(self):\n genre_words = []\n for w in self.target_movie.genres.split('|'):\n w = w.strip('- ,:(){}[]')\n genre_words.append(w)\n\n print(genre_words)\n\n res = self.db.query(Movie).filter(\n Movie.movie_id != self.target_movie.movie_id).filter(\n Movie.movie_id.in_(self.recommendation_pool.keys())\n ).filter(or_(\n Movie.genres.ilike(r'%' + gw + r'%') for gw in genre_words\n )).all()\n\n print(\"%i records from partial genres match\" % len(res))\n GSW = self.GENRES_SIMILARITY_WEIGHT\n for rec in res:\n smid = rec.movie_id\n self.recommendation_pool[smid]['genres_similarity'] = \\\n jaccard_index(self.target_movie.genres, rec.genres, '|') * GSW",
"def get_genres(self) -> List[Genre]:\n raise NotImplementedError",
"def fn_GetMovieWriters(self, details):\n\n # If the custom url was not actually defined and we had no cached\n # data, then there is nothing to do.\n #\n if details is None:\n return\n\n dom = parseString(details)\n e = dom.firstChild\n credit = first_child(e, \"credits\")\n while credit:\n self.writers.append(credit.firstChild.data)\n credit = next_sibling(credit, \"credits\")\n dom.unlink()\n return",
"def results_aggregator(self, names):\n\t\tfor name in names:\n\t\t\tresult = self.main(name)\n\t\t\tself.results.append(result)\n\t\t\tprint(\"'%s' has been written to the file.\" % result[0])\n\t\t\t\"\"\"result is formatted name, number, rating, review count\"\"\""
] |
[
"0.6010571",
"0.5308337",
"0.5180943",
"0.5133908",
"0.50772184",
"0.49792778",
"0.4967971",
"0.49632066",
"0.49230486",
"0.49229807",
"0.48883152",
"0.48799068",
"0.48754558",
"0.48731622",
"0.47359285",
"0.4730448",
"0.47194862",
"0.46617717",
"0.46156126",
"0.46125743",
"0.45775852",
"0.457107",
"0.45694095",
"0.45589077",
"0.4553424",
"0.45421815",
"0.45418915",
"0.45375338",
"0.45294553",
"0.4526527"
] |
0.5384775
|
1
|
Returns list of product "mask" type variable names
|
def getMaskVariableNames(self, product):
    h = product.getSceneRasterHeight()
    # 10m resolution
    if h == 10980:
        return self.return_available_variables(product, MASK_VARIABLE_NAMES_10m)
    # 20m resolution
    elif h == 5490:
        return self.return_available_variables(product, MASK_VARIABLE_NAMES_20m)
    # 60m resolution
    elif h == 1830:
        return self.return_available_variables(product, MASK_VARIABLE_NAMES_60m)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getMaskVariableNames(self, product):\r\n return []",
"def getMaskVariables(self, product):\r\n mask_variable_names = self.getMaskVariableNames(product)\r\n mask_variables = [self.createMaskVariable(product, n) for n in mask_variable_names]\r\n mask_variables = [self.editMaskVariable(product, v) for v in mask_variables]\r\n\r\n return mask_variables",
"def unmask(self, mask, verbose=0):\n if isinstance(mask, str): # Woodstock-style string mask format\n mask = tuple(re.sub('\\s+', ' ', mask).lower().split(' '))\n assert len(mask) == self.nthemes # must be bad mask if wrong theme count\n else:\n try:\n assert isinstance(mask, tuple) and len(mask) == self.nthemes\n except:\n print(len(mask), type(mask), mask)\n assert False\n dtype_keys = copy.copy(list(self.dtypes.keys())) # filter this\n for ti, tac in enumerate(mask):\n if tac == '?': continue # wildcard matches all\n tacs = self._expand_theme(self._themes[ti], tac, verbose=verbose) if tac in self._themes[ti] else []\n dtype_keys = [dtk for dtk in dtype_keys if dtk[ti] in tacs] # exclude bad matches\n return dtype_keys",
"def getMeteorologicalVariableNames(self, product):\r\n return []",
"def _variable_types(self):\n return self._variable_single_types + self._variable_array_types",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def _variable_types_in_byte(types_byte) -> List[ZMachineOperandTypes]:\n operand_types = []\n for _ in range(4):\n # Mask off all but bits 7 and 6\n type_bits = types_byte & 0xc0\n\n if type_bits == 0:\n operand_types.append(ZMachineOperandTypes.LARGE_CONSTANT)\n elif type_bits == 0b0100_0000:\n operand_types.append(ZMachineOperandTypes.SMALL_CONSTANT)\n elif type_bits == 0b1000_0000:\n operand_types.append(ZMachineOperandTypes.VARIABLE)\n elif type_bits == 0b1100_0000:\n operand_types.append(ZMachineOperandTypes.OMITTED)\n else:\n raise RuntimeError('No idea how this happened')\n\n types_byte <<= 2\n\n return operand_types",
"def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result",
"def getDatasetTypes(self):\n\n list = []\n for attr in dir(self):\n if attr.startswith(\"map_\"):\n list.append(attr[4:])\n return list",
"def getInfoVariableNames(self, product):\r\n return []",
"def vars(*tensor_types):\n return map(var, tensor_types)",
"def get_mask(self, nodetype):\n print(\"Deprecation warning: Do it yourself.\")\n assert nodetype in [PAPER_TYPE, SUBJECT_TYPE, AUTHOR_TYPE], \"Unknown node type\"\n return (self.ndata.type == nodetype).values",
"def getMeteorologicalVariableNames(self, product):\r\n\r\n meteorological_variable_names = []\r\n\r\n return meteorological_variable_names",
"def getVariableNames(self, product):\r\n\r\n variable_names = self.getDataVariableNames(product) + \\\r\n self.getMaskVariableNames(product) + \\\r\n self.getMeteorologicalVariableNames(product) + \\\r\n self.getSensorVariableNames(product) + \\\r\n self.getInfoVariableNames(product)\r\n\r\n return variable_names",
"def input_mask(self):\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)",
"def _fixed_masks_arg(mask):\n return [\"NULL\", mask]",
"def variables(self):\n return {u for u in self if u.type == 'var'}",
"def getDataVariableNames(self, product):\r\n return []",
"def ntypes(self): # -> list[str]:\n ...",
"def core_mask(self):\n mask = []\n for atom in self.atoms:\n if \"shell\" not in atom.atom_type.label:\n mask.append(True)\n else:\n mask.append(False)\n return mask",
"def createMaskVariable(self, product, variable_name):\r\n\r\n mask_variable_dict = {'name': variable_name,\r\n 'dtype': None,\r\n 'vtype': 'mask',\r\n 'units': None,\r\n 'ndims': None,\r\n 'shape': None}\r\n\r\n mask_variable = Variable(mask_variable_dict)\r\n\r\n return mask_variable",
"def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]",
"def mask(self):\n return list(self._mask_generator())",
"def _MaskedImage_getArrays(self):\n return (self.getImage().getArray() if self.getImage() else None,\n self.getMask().getArray() if self.getMask() else None,\n self.getVariance().getArray() if self.getVariance() else None)",
"def data_variable_names(self):\n data_names = []\n mesh = self.mesh_names()\n prefix = mesh[0]+'_'\n for vname in self.nc.variables.keys():\n if vname.startswith(prefix):\n if self.nc.dimensions.has_key(vname):\n continue\n if hasattr(self.nc.variables[vname],'cf_role'):\n continue\n data_names.append( vname[len(prefix):] )\n return data_names",
"def get_ntype_featnames(ntype_name, schema_map):\n node_data = schema_map[constants.STR_NODE_DATA]\n feats = node_data.get(ntype_name, {})\n return [feat for feat in feats]",
"def mask(self) -> list[int]:\n return self._mask",
"def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]",
"def type_list():\n for type_ in orm.DataFlagType.select():\n click.echo(type_.name)",
"def getVariableList(dataset):\n variables = [v for v in dataset.variables.keys() if v not in dataset.dimensions.keys()]\n for d in dataset.dimensions.keys():\n try:\n variables.pop(variables.index(dataset.variables[d].getncattr(\"bounds\")))\n except:\n pass\n return variables"
] |
[
"0.8208737",
"0.6958801",
"0.64847463",
"0.6349178",
"0.6011559",
"0.5973692",
"0.59298366",
"0.5911321",
"0.5737965",
"0.57031",
"0.5660973",
"0.564376",
"0.5616179",
"0.5612669",
"0.5611548",
"0.56073886",
"0.55775",
"0.55704635",
"0.556705",
"0.5525116",
"0.5524691",
"0.5501768",
"0.54871124",
"0.5459241",
"0.54539675",
"0.5436423",
"0.5435774",
"0.5416339",
"0.5393178",
"0.5382875"
] |
0.6996702
|
1
|
Returns list of product "sensor" type variable names
|
def getSensorVariableNames(self, product):
    return []
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getSensorVariableNames(self, product):\r\n return []",
"def getSensorVariables(self, product):\r\n sensor_variable_names = self.getSensorVariableNames(product)\r\n sensor_variables = [self.createSensorVariable(product, n) for n in sensor_variable_names]\r\n sensor_variables = [self.editSensorVariable(product, v) for v in sensor_variables]\r\n\r\n return sensor_variables",
"def getVariableNames(self, product):\r\n\r\n variable_names = self.getDataVariableNames(product) + \\\r\n self.getMaskVariableNames(product) + \\\r\n self.getMeteorologicalVariableNames(product) + \\\r\n self.getSensorVariableNames(product) + \\\r\n self.getInfoVariableNames(product)\r\n\r\n return variable_names",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def sensor_types():\n sensors = FetchandStore.get_data(\"https://tie.digitraffic.fi/api/v1/data/weather-data\")\n sensornames = [sensor[\"name\"] for sensor in sensors ]\n sensornames = list(set(sensornames))\n for index, sensorname in enumerate(sorted(sensornames)):\n print(index, sensorname)",
"def getDataVariableNames(self, product):\r\n return []",
"def getInfoVariableNames(self, product):\r\n return []",
"def get_sensed_prop_names(self) -> Mapping[str, Type]:\n return copy(self._sensor_vars)",
"def get_measurement_types():\n\n all_measures = ['temperature', 'humidity', 'pressure']\n\n ####################\n return all_measures\n ####################",
"def getInfoVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_60m)\r\n return []",
"def getMeteorologicalVariableNames(self, product):\r\n return []",
"def _variable_types(self):\n return self._variable_single_types + self._variable_array_types",
"def getDataVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_60m)",
"def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result",
"def getMeteorologicalVariableNames(self, product):\r\n\r\n meteorological_variable_names = []\r\n\r\n return meteorological_variable_names",
"def _get_sensor_type(self) -> list[str | None]:\n pres = self.gateway.const.Presentation\n set_req = self.gateway.const.SetReq\n\n _sensor_type = SENSORS.get(set_req(self.value_type).name, [None, None, None])\n if isinstance(_sensor_type, dict):\n sensor_type = _sensor_type.get(\n pres(self.child_type).name, [None, None, None]\n )\n else:\n sensor_type = _sensor_type\n return sensor_type",
"def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]",
"def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names",
"def sensors(self) -> List[dict]:\n return self.items_by_domain(\"sensor\")",
"def data_variable_names(self):\n data_names = []\n mesh = self.mesh_names()\n prefix = mesh[0]+'_'\n for vname in self.nc.variables.keys():\n if vname.startswith(prefix):\n if self.nc.dimensions.has_key(vname):\n continue\n if hasattr(self.nc.variables[vname],'cf_role'):\n continue\n data_names.append( vname[len(prefix):] )\n return data_names",
"def getMaskVariableNames(self, product):\r\n return []",
"def ntypes(self): # -> list[str]:\n ...",
"def readVariables(self, product):\r\n data_variables = self.getDataVariables(product)\r\n mask_variables = self.getMaskVariables(product)\r\n meteorological_variables = self.getMeteorologicalVariables(product)\r\n sensor_variables = self.getSensorVariables(product)\r\n info_variables = self.getInfoVariables(product)\r\n\r\n return data_variables + mask_variables + meteorological_variables + sensor_variables + info_variables",
"def get_variable_names(self):\n return [var[1] for var in self.variables]",
"def get_field_names(self, product_name=None):\n if product_name is None:\n types = self.types.get_all()\n else:\n types = [self.types.get_by_name(product_name)]\n\n out = set()\n for type_ in types:\n out.update(type_.metadata_type.dataset_fields)\n return out",
"def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist",
"def get_datatypes(self, tid):\n return self._parametersdict[\"DATATYPES\"].get(tid)",
"def variables(self):\n return [i.name for i in self.inputs + self.outputs]",
"def get_variable_names(self):\n varNames = []\n for var in self.variables:\n # EstimationVariable\n varNames.append(var.name)\n return varNames",
"def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]"
] |
[
"0.8339771",
"0.7041706",
"0.6947138",
"0.69332373",
"0.69057155",
"0.68343633",
"0.682075",
"0.67683005",
"0.67416674",
"0.6687287",
"0.6636566",
"0.65286463",
"0.64218646",
"0.64184695",
"0.6377054",
"0.6244942",
"0.60869145",
"0.6007184",
"0.6000745",
"0.59992135",
"0.5956186",
"0.5940129",
"0.5934118",
"0.5899876",
"0.58997846",
"0.5895243",
"0.58915097",
"0.5891325",
"0.58879435",
"0.588697"
] |
0.83588517
|
0
|
Returns list of product "info" type variable names
|
def getInfoVariableNames(self, product):
    h = product.getSceneRasterHeight()
    # 10m resolution
    if h == 10980:
        return self.return_available_variables(product, INFO_VARIABLE_NAMES_10m)
    # 20m resolution
    elif h == 5490:
        return self.return_available_variables(product, INFO_VARIABLE_NAMES_20m)
    # 60m resolution
    elif h == 1830:
        return self.return_available_variables(product, INFO_VARIABLE_NAMES_60m)
    return []
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getInfoVariableNames(self, product):\r\n return []",
"def getInfoVariables(self, product):\r\n info_variable_names = self.getInfoVariableNames(product)\r\n info_variables = [self.createInfoVariable(product, n) for n in info_variable_names]\r\n info_variables = [self.editInfoVariable(product, v) for v in info_variables]\r\n\r\n return info_variables",
"def getDataVariableNames(self, product):\r\n return []",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def getVariableNames(self, product):\r\n\r\n variable_names = self.getDataVariableNames(product) + \\\r\n self.getMaskVariableNames(product) + \\\r\n self.getMeteorologicalVariableNames(product) + \\\r\n self.getSensorVariableNames(product) + \\\r\n self.getInfoVariableNames(product)\r\n\r\n return variable_names",
"def ntypes(self): # -> list[str]:\n ...",
"def info(obj):\n if type(obj) is tuple:\n return '({})'.format(', '.join(map(TypeTool.info,obj)))\n elif type(obj) is list:\n return 'List[{}]'.format(TypeTool.info(obj[0]))\n else:\n ctype_name = type(obj).__name__\n if ctype_name == 'ndarray': return '{}[{}]{}'.format(ctype_name,obj.dtype, obj.shape)\n elif ctype_name == 'str': return 'string'\n elif ctype_name == 'bytes': return 'List[byte]'\n else: return ctype_name",
"def getSensorVariableNames(self, product):\r\n return []",
"def getSensorVariableNames(self, product):\r\n\r\n return []",
"def _variable_types(self):\n return self._variable_single_types + self._variable_array_types",
"def info(self):\n\t\timport inspect\n\t\n\t\tmessage = \"All variables available for star ID %i\" % self.ID\t\t\n\t\tprint message\n\t\tprint '-'*len(message)\n\t\tattributes = inspect.getmembers(self, lambda a:not(inspect.isroutine(a)))\n\t\tfor a in attributes:\n\t\t\tif (a[0].startswith('__') and a[0].endswith('__')): continue\n\t\t\tprint a[0], \"=\", a[1]",
"def getMeteorologicalVariableNames(self, product):\r\n return []",
"def getInfo(self):\n return self.name + \" [\" + self.target_type + \"]\"",
"def info_types(self) -> Optional[List['outputs.PreventionInspectTemplateInspectConfigInfoType']]:\n return pulumi.get(self, \"info_types\")",
"def info(self):\n return (self.kind, self.value)",
"def getVariableInfo(self, variables, name):\r\n\r\n return [var.return_variable_dict() for var in variables if var.name == name][0]",
"def info_types(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetInfoType']:\n return pulumi.get(self, \"info_types\")",
"def get_field_names(self, product_name=None):\n if product_name is None:\n types = self.types.get_all()\n else:\n types = [self.types.get_by_name(product_name)]\n\n out = set()\n for type_ in types:\n out.update(type_.metadata_type.dataset_fields)\n return out",
"def ntypes(self): # -> list[None]:\n ...",
"def info() -> Dict[str, Any]:",
"def product_types(self):\n return list(self._product_type_plugins.keys())",
"def etypes(self): # -> list[str]:\n ...",
"def variables(self):\n return {u for u in self if u.type == 'var'}",
"def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))",
"def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))",
"def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result",
"def get_variables_of_type(self, variable_type):\n if isinstance(variable_type,str):\n variable_key = variable_type\n else:\n #it is a class\n variable_key = variable_type.__name__\n return self._var_kinds[variable_key]",
"def product_types(self):\n return self._product_types",
"def variable_types(self, data_key, only_type=None):\r\n if self[data_key].meta['columns'] is None:\r\n return 'No meta attached to data_key: %s' %(data_key)\r\n else:\r\n types = {\r\n 'int': [],\r\n 'float': [],\r\n 'single': [],\r\n 'delimited set': [],\r\n 'string': [],\r\n 'date': [],\r\n 'time': [],\r\n 'array': []\r\n }\r\n not_found = []\r\n for col in self[data_key].data.columns:\r\n if not col in ['@1', 'id_L1', 'id_L1.1']: \r\n try:\r\n types[\r\n self[data_key].meta['columns'][col]['type']\r\n ].append(col)\r\n except:\r\n not_found.append(col) \r\n for mask in self[data_key].meta['masks'].keys():\r\n types[self[data_key].meta['masks'][mask]['type']].append(mask)\r\n if not_found:\r\n print '%s not found in meta file. Ignored.' %(not_found)\r\n if only_type:\r\n return types[only_type]\r\n else:\r\n return types",
"def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]"
] |
[
"0.8265886",
"0.7063334",
"0.6817584",
"0.66157585",
"0.6581945",
"0.65705514",
"0.65406626",
"0.65016574",
"0.6487752",
"0.6485825",
"0.6484793",
"0.645135",
"0.641339",
"0.63766634",
"0.61927",
"0.618237",
"0.61796445",
"0.6175805",
"0.61612755",
"0.6157979",
"0.6153227",
"0.6059466",
"0.60364884",
"0.6033323",
"0.6033323",
"0.6032389",
"0.59842676",
"0.59781545",
"0.5942692",
"0.59268546"
] |
0.715711
|
1
|
Test that the default manager is correct.
|
def test_default_manager(self):
    self.assertIsInstance(FlatPage._default_manager, UrlNodeManager)
    self.assertIsInstance(FlatPage.objects.all(), UrlNodeQuerySet)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_manager(self):\n manager = ISubscriptionManager(self.root.document, None)\n self.assertNotEqual(manager, None)\n self.assertTrue(verifyObject(ISubscriptionManager, manager),)\n\n manager = ISubscriptionManager(self.root, None)\n self.assertNotEqual(manager, None)\n self.assertTrue(verifyObject(ISubscriptionManager, manager),)\n\n # They are not available on asset\n manager = ISubscriptionManager(self.root.file, None)\n self.assertEqual(manager, None)",
"def test_default_manager(self):\n\n class Book(RestObject):\n pass\n\n class Author(RestObject):\n pass\n \n self.assertTrue(isinstance(Book.objects, RestManager))\n self.assertTrue(Book.objects.object_class, Book)\n\n self.assertTrue(isinstance(Author.objects, RestManager))\n self.assertTrue(Author.objects.object_class, Author)\n\n self.assertNotEqual(Book.objects, Author.objects)\n \n book = Book()\n # Cannot test AttributeError with self.assertRaises\n try:\n book.objects.all()\n except AttributeError, e:\n self.assertEqual('%s' % e, 'Manager is not accessible via Book instances')",
"def test_sm_initial_attrs(self):\n\n storage_group_mgr = self.console.storage_groups\n\n assert isinstance(storage_group_mgr, StorageGroupManager)\n\n # Verify all public properties of the manager object\n assert storage_group_mgr.resource_class == StorageGroup\n assert storage_group_mgr.session == self.session\n assert storage_group_mgr.parent == self.console\n assert storage_group_mgr.console == self.console",
"def setUp(self) -> None:\n self.manager = Manager()",
"def test_add_team_manager_to_team(self):\n pass",
"def manager():\n pass",
"def testSingleton(self):\r\n self.assertEqual(id(self.res_mgr), id(ReservationManager()))",
"def test_default_manager(self):\n partner = PartnerFactory(status=Partner.AVAILABLE)\n partner2 = PartnerFactory(status=Partner.NOT_AVAILABLE)\n partner3 = PartnerFactory(status=Partner.WAITLIST)\n\n all_partners = Partner.objects.all()\n assert partner in all_partners\n assert partner2 not in all_partners\n assert partner3 in all_partners\n\n # assertQuerysetEqual compares a queryset to a list of representations.\n # Sigh.\n self.assertQuerysetEqual(Partner.objects.all(),\n map(repr, Partner.even_not_available.filter(\n status__in=\n [Partner.WAITLIST, Partner.AVAILABLE])))",
"def test_defaults(self):\n persistence_helper = PersistenceHelper()\n self.assertEqual(persistence_helper.use_riak, False)\n self.assertEqual(persistence_helper.is_sync, False)",
"def test_get_riak_manager_sync(self):\n persistence_helper = self.add_helper(\n PersistenceHelper(use_riak=True, is_sync=True))\n manager = persistence_helper.get_riak_manager()\n self.add_cleanup(manager.close_manager)\n self.assertIsInstance(manager, self._RiakManager)\n self.assertEqual(persistence_helper._riak_managers, [manager])",
"def test_get_redis_manager_sync(self):\n persistence_helper = self.add_helper(PersistenceHelper(is_sync=True))\n manager = persistence_helper.get_redis_manager()\n self.add_cleanup(manager.close_manager)\n self.assertIsInstance(manager, self._RedisManager)\n self.assertEqual(persistence_helper._redis_managers, [manager])",
"def get_manager():\n return __manager__",
"def test_get_riak_manager_with_config(self):\n persistence_helper = self.add_helper(PersistenceHelper(use_riak=True))\n manager = persistence_helper.get_riak_manager({\"bucket_prefix\": \"foo\"})\n self.add_cleanup(manager.close_manager)\n self.assertEqual(manager.bucket_prefix, \"vumitestfoo\")",
"def test_rasterized_manager(self):\n rasterized_registry = self.manager._rasterized_registry()\n self.assertEqual(set(self.manager.registry), set(rasterized_registry))\n rast_manager = Manager(rasterized_registry)\n self.assertEqual(set(self.manager.registry), set(rast_manager.registry))\n self.assertEqual(self.manager.synonyms, rast_manager.synonyms)\n for prefix in self.manager.registry:\n with self.subTest(prefix=prefix):\n self.assertEqual(\n self.manager.is_deprecated(prefix),\n rast_manager.is_deprecated(prefix),\n )\n self.assertEqual(\n self.manager.get_example(prefix),\n rast_manager.get_example(prefix),\n )\n self.assertEqual(\n self.manager.get_uri_format(prefix),\n rast_manager.get_uri_format(prefix),\n )\n self.assertEqual(\n self.manager.get_name(prefix),\n rast_manager.get_name(prefix),\n )\n self.assertEqual(\n self.manager.get_pattern(prefix),\n rast_manager.get_pattern(prefix),\n )\n self.assertEqual(\n self.manager.get_preferred_prefix(prefix) or prefix,\n rast_manager.get_preferred_prefix(prefix),\n )\n self.assertEqual(\n self.manager.get_synonyms(prefix),\n rast_manager.get_synonyms(prefix),\n )\n self.assertEqual(\n self.manager.get_depends_on(prefix),\n rast_manager.get_depends_on(prefix),\n )\n self.assertEqual(\n self.manager.get_appears_in(prefix),\n rast_manager.get_appears_in(prefix),\n )\n self.assertEqual(\n self.manager.get_provides_for(prefix),\n rast_manager.get_provides_for(prefix),\n )\n self.assertEqual(\n self.manager.get_provided_by(prefix),\n rast_manager.get_provided_by(prefix),\n )\n self.assertEqual(\n self.manager.get_has_canonical(prefix),\n rast_manager.get_has_canonical(prefix),\n )\n self.assertEqual(\n self.manager.get_canonical_for(prefix),\n rast_manager.get_canonical_for(prefix),\n )\n self.assertEqual(\n self.manager.get_part_of(prefix),\n rast_manager.get_part_of(prefix),\n )\n self.assertEqual(\n self.manager.get_has_parts(prefix),\n rast_manager.get_has_parts(prefix),\n )",
"def test_get_system(self):\n pass",
"def test_get_riak_manager_unpatched(self):\n persistence_helper = PersistenceHelper()\n err = self.assertRaises(Exception, persistence_helper.get_riak_manager)\n self.assertTrue('setup() must be called' in str(err))",
"def test_default(self):\r\n self.assertEqual(self.option.default, '/tmp')",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def test_get_riak_manager_no_riak(self):\n persistence_helper = self.add_helper(PersistenceHelper())\n err = self.assertRaises(\n RuntimeError, persistence_helper.get_riak_manager)\n self.assertTrue(\n 'Use of Riak has been disabled for this test.' in str(err))",
"def test_update_system(self):\n pass",
"def _setManager(self, mgr: \"StrategyManager\") -> None:",
"def test_init_default(self):\n self._test_init_default()",
"def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n # arrange, act\n # instantiating the model manager class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))",
"def test_lockmgr(self):\n with LockMgr('test_lockmgr') as lck:\n lck.lock('test_lockmgr2')\n # Attempt to lock test_lockmgr again, which should cause a Locked exception\n with self.assertRaises(Locked, msg=\"getting lock test_lockmgr should raise Locked\"):\n lck.lock('test_lockmgr')\n self.assertTrue(is_locked('test_lockmgr2'), msg=\"is_locked('test_lockmgr2') == True\")\n # Confirm that both test_lockmgr + test_lockmgr2 are unlocked after the with statement.\n self.assertFalse(is_locked('test_lockmgr'), msg=\"is_locked('test_lockmgr') == False\")\n self.assertFalse(is_locked('test_lockmgr2'), msg=\"is_locked('test_lockmgr2') == False\")",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')",
"def test_style_guide_manager():\n formatter = mock.create_autospec(base.BaseFormatter, instance=True)\n options = create_options()\n guide = style_guide.StyleGuideManager(options, formatter=formatter)\n assert guide.default_style_guide.options is options\n assert len(guide.style_guides) == 1",
"def test_without_manager_defined():\n try:\n process = subprocess.check_output(['python', 'runserver.py'],\n env={},\n stderr=subprocess.STDOUT,\n shell=True)\n except subprocess.CalledProcessError as error:\n assert error.returncode != 0\n assert 'KeyError: None' in process.output\n assert 'JOB_MANAGER_IMPLEMENTATION' in process.output",
"def test_get_riak_manager_async(self):\n persistence_helper = self.add_helper(PersistenceHelper(use_riak=True))\n manager = persistence_helper.get_riak_manager()\n self.add_cleanup(manager.close_manager)\n self.assertIsInstance(manager, self._TxRiakManager)\n self.assertEqual(persistence_helper._riak_managers, [manager])",
"def test_access_sales_manager(self):\n SaleOrder = self.env['sale.order'].with_context(tracking_disable=True)\n # Manager can see the SO which is assigned to another salesperson\n self.order.read()\n # Manager can change a salesperson of the SO\n self.order.write({'user_id': self.company_data['default_user_salesman'].id})\n # Manager can create the SO for other salesperson\n sale_order = SaleOrder.create({\n 'partner_id': self.partner_a.id,\n 'user_id': self.company_data['default_user_salesman'].id\n })\n self.assertIn(sale_order.id, SaleOrder.search([]).ids, 'Sales manager should be able to create the SO of other salesperson')\n # Manager can confirm the SO\n sale_order.action_confirm()\n # Manager can not delete confirmed SO\n with self.assertRaises(UserError):\n sale_order.unlink()\n # Manager can delete the SO of other salesperson if SO is in 'draft' or 'cancel' state\n self.order.unlink()\n self.assertNotIn(self.order.id, SaleOrder.search([]).ids, 'Sales manager should be able to delete the SO')\n\n # Manager can create a Sales Team\n india_channel = self.env['crm.team'].with_context(tracking_disable=True).create({\n 'name': 'India',\n })\n self.assertIn(india_channel.id, self.env['crm.team'].search([]).ids, 'Sales manager should be able to create a Sales Team')\n # Manager can edit a Sales Team\n india_channel.write({'name': 'new_india'})\n self.assertEqual(india_channel.name, 'new_india', 'Sales manager should be able to edit a Sales Team')\n # Manager can delete a Sales Team\n india_channel.unlink()\n self.assertNotIn(india_channel.id, self.env['crm.team'].search([]).ids, 'Sales manager should be able to delete a Sales Team')",
"def test_default(self):\r\n self.assertEqual(self.option.default, 1234)"
] |
[
"0.71595335",
"0.70388806",
"0.66866904",
"0.64357024",
"0.6430612",
"0.64073193",
"0.63974637",
"0.6350312",
"0.63089633",
"0.6293002",
"0.62254477",
"0.61976516",
"0.61641526",
"0.61507624",
"0.61332285",
"0.61205125",
"0.6085805",
"0.6080796",
"0.6075874",
"0.603002",
"0.6012836",
"0.6004731",
"0.59974235",
"0.59952885",
"0.59935087",
"0.5986523",
"0.59805506",
"0.594707",
"0.59437716",
"0.59348285"
] |
0.7431091
|
0
|
Creates the Turtle and the Screen with the map background and coordinate system set to match latitude and longitude.
|
def irma_setup():
    import tkinter
    turtle.setup(965, 600) # set size of window to size of map
    wn = turtle.Screen()
    wn.title("Hurricane Irma")
    # kludge to get the map shown as a background image,
    # since wn.bgpic does not allow you to position the image
    canvas = wn.getcanvas()
    turtle.setworldcoordinates(-90, 0, -17.66, 45) # set the coordinate system to match lat/long
    map_bg_img = tkinter.PhotoImage(file="images/atlantic-basin.gif")
    # additional kludge for positioning the background image
    # when setworldcoordinates is used
    canvas.create_image(-1175, -580, anchor=tkinter.NW, image=map_bg_img)
    t = turtle.Turtle()
    t.speed(1)
    wn.register_shape("images/hurricane.gif")
    t.shape("images/hurricane.gif")
    return (t, wn, map_bg_img)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)",
"def setup_screen():\n screen = Screen()\n screen.setup(width=600, height=600)\n screen.bgcolor(\"black\")\n screen.title(\"My Snake Game\")\n screen.tracer(0)\n return screen",
"def draw_environment():\n rect(screen, LIGHT_GRAY, (0, 0, 800, 450)) # grey sky\n rect(screen, WHITE, (0, 450, 800, 1000)) # white ground",
"def draw_final_screen(self):\r\n root = Tk()\r\n MapGUI(root, self)\r\n root.geometry('710x540')\r\n root.mainloop()",
"def main():\n draw_sun()\n draw_pavement()\n draw_building()\n martin.goto(12, 40) # lines 171, 173, and 175 move the turtle down to space out the windows on the building.\n draw_windows()\n martin.goto(12, 0)\n draw_windows()\n martin.goto(12, -40)\n draw_windows()\n draw_door()\n draw_doorknob()",
"def init():\n turtle.setworldcoordinates(-WINDOW_WIDTH / 2, -WINDOW_WIDTH / 2,\n WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n\n turtle.up()\n turtle.setheading(0)\n turtle.title('squares')\n pass",
"def main():\n # parse command-line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--no-export\", action=\"store_true\",\n help=\"Don't export an .eps file of the drawing\")\n parser.add_argument(\"--fast\", action=\"store_true\",\n help=\"Add triangles directly to the Tkinter canvas for speed\")\n parser.add_argument(\"--birds-eye\", action=\"store_true\",\n help=\"Show a bird's eye view of the entire terrain\")\n parser.add_argument(\"--random-terrain\", action=\"store_true\",\n help=\"Use a random seed for the terrain heightmap\")\n parser.add_argument(\"--random-color-offset\", action=\"store_true\",\n help=\"Use a random seed for the color offset heightmap\")\n args = parser.parse_args()\n \n # set up turtle parameters\n print(\"Setting up...\")\n turtle.setup(9999, 9999)\n win_scale = min(turtle.window_width()//22, turtle.window_height()//17)\n turtle.setup(win_scale*22, win_scale*17) # the largest 11x8.5 window possible\n turtle.title(\"Submission by Quinn Tucker\")\n turtle.tracer(0, 0)\n turtle.setundobuffer(None)\n turtle.hideturtle()\n turtle.penup()\n \n # fill the background with the sky gradient\n print(\"Filling the sky...\")\n fill_sky_gradient(256, 0.58)\n \n # set up the lights and camera\n lights = [\n #DirectionalLight(SUNLIGHT_DIRECTION, SUNLIGHT_COLOR, dot_clip=0.0),\n DirectionalLight(AMBIENT_LIGHT_DIRECTION, AMBIENT_LIGHT_COLOR, dot_clip=-0.0),\n ]\n if args.birds_eye:\n camera = Camera((0, 6.0, -2.4), math.pi*0.34, 0, 0, zoom=3.4, fog_factor=0, lights=lights, fast_draw=args.fast)\n else:\n camera = Camera((0, 0.07, -0.001), 0, 0, 0, zoom=1.2, fog_factor=FOG_FACTOR, lights=lights, fast_draw=args.fast)\n \n # generate and draw the terrain\n print(\"Generating terrain...\")\n if args.random_color_offset:\n color_offset_seed = random.getrandbits(32)\n print(f\" Color offset seed = {color_offset_seed}\")\n else:\n color_offset_seed = 3038607546\n random.seed(color_offset_seed)\n color_offset = Terrain(recursion_depth=9, noise_depth=4, scale=0.35)\n \n if args.random_terrain:\n terrain_seed = random.getrandbits(32)\n print(f\" Terrain seed = {terrain_seed}\")\n else:\n terrain_seed = 129477298\n random.seed(terrain_seed)\n terrain = Terrain(recursion_depth=9, noise_depth=7, scale=0.10,\n snow_height=0.025, tree_height=-0.015, color_offset_heightmap=color_offset)\n \n terrain.draw(camera)\n print(\"Updating the screen...\")\n turtle.update()\n \n # export the drawing to a file\n if not args.no_export:\n OUTPUT_FILE = \"output.eps\"\n print(f\"Exporting {OUTPUT_FILE}...\")\n turtle.getcanvas().postscript(file=OUTPUT_FILE, colormode=\"color\", pagewidth=\"11i\")\n \n # wait for the user to close the window\n print(\"Done!\")\n turtle.mainloop()",
"def make_window(colr, ttle):\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(ttle)\n w.setup(width=1800, height=600)\n return w",
"def init_turtle():\n turtle.up()\n turtle.home()",
"def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance",
"def _prepare_turtle():\n turtle.setup(width=screen_width)\n turtle.shape(turtle_shape)\n turtle.title(title)",
"def create_board_window():\n wn = turtle.Screen()\n wn.setworldcoordinates(0, 0, WIDTH+1, HEIGHT+1)\n t = turtle.Turtle()\n t.pensize(1)\n t.speed(0)\n t.hideturtle()\n return (wn, t)",
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def draw_objects():\n\n # Disable the turtle animation, and erase the scren.\n turtle.tracer(False)\n turtle.hideturtle()\n turtle.clear()\n\n # Draw all the parts of the scene.\n draw_ball()\n draw_target()\n draw_bounds()\n draw_pins()\n\n show_status()\n\n # Now show the screen, after everything has been drawn\n turtle.tracer(True)",
"def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10",
"def Screen():\n if Myturtle._screen is None:\n Myturtle._screen = _Screen()\n return Myturtle._screen",
"def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer",
"def make_window(colr, ttle):\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(ttle)\n return w",
"def draw_ground():\n for i in range(3):\n groundturtle.forward(1450)\n groundturtle.left(90)\n groundturtle.forward(25)\n groundturtle.left(90)\n groundturtle.forward(1450)\n groundturtle.right(90)\n groundturtle.forward(25)\n groundturtle.right(90)",
"def draw_sun():\n lisandro.penup()\n lisandro.goto(40, 90)\n lisandro.begin_fill()\n lisandro.circle(150) # draws out a circle with a radius of 150 for the sun.\n lisandro.end_fill()\n lisandro.hideturtle()",
"def background():\n sky_color = (66, 170, 255) # color of the sky\n grass_color = (0, 128, 0) # color of the grass\n\n rect(screen, sky_color, (0, 0, 500, 250), 0) # sky\n rect(screen, grass_color, (0, 250, 500, 250), 0) # grass",
"def create_screen(self, width, height):",
"def __init__(self):\n #Screen settings\n self.screen_width= 1200\n self.screen_height = 800\n self.bg_color = (230,230,230)",
"def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 5)\n star4.setFill('white')\n star4.draw(self.win)",
"def init(width, height):\n\tglClearColor(0.0, 0.0, 1.0, 0.0) #blue bg\n\tglMatrixMode(GL_PROJECTION)\n\tglLoadIdentity()\n\tglOrtho(-0.5, 2.5, -1.5, 1.5, -1.0, 1.0)",
"def _setup(self, width=turtle._CFG[\"width\"], height=turtle._CFG[\"height\"],\n startx=turtle._CFG[\"leftright\"], starty=turtle._CFG[\"topbottom\"]):\n if not hasattr(self._root, \"set_geometry\"):\n return\n \n sw = self._root.win_width()\n sh = self._root.win_height()\n if isinstance(width, float) and 0 <= width <= 1:\n width = sw*width\n if startx is None:\n startx = (sw - width) / 2\n if isinstance(height, float) and 0 <= height <= 1:\n height = sh*height\n if starty is None:\n starty = (sh - height) / 2\n self._root.set_geometry(width, height, startx, starty)\n self.update()",
"def __init__(self):\n\t\tself.screen_width = 600\n\t\tself.screen_height = 500\n\t\tself.bg_color = (0, 0, 255)",
"def draw(self, screen):\n self.draw_left_zone(screen)\n self.draw_middle_zone(screen)\n self.draw_right_zone(screen)",
"def drawCoordinatePlane_region():\r\n turtle2 = t.Screen()\r\n turtle2.title(\"Life Expectancy versus Region\")\r\n t2.speed(0)\r\n t3.speed(0)\r\n setTurtle(t0)\r\n setTurtle(t1)\r\n setTurtle(t2)\r\n setTurtle(t3)\r\n drawAxes(t0)\r\n t1.left(90)\r\n drawAxes(t1)\r\n t0.pu()\r\n t0.fd(-80)\r\n t0.lt(90)\r\n drawlabels(t0, t1)\r\n drawPoints(t0, t1)\r\n t0.pu()\r\n t1.pu()\r\n t2.pu()\r\n t3.pu()\r\n t0.goto(initialCoordinates())\r\n t1.goto(initialCoordinates())\r\n t2.goto(initialCoordinates())\r\n t3.goto(initialCoordinates())\r\n t1.lt(90)",
"def __init__(self):\n # Screen settings\n self.screen_width = 400\n self.screen_height = 300\n self.bg_color = (230, 230, 230)\n\n self.rocket_speed_factor= 1.5"
] |
[
"0.6898329",
"0.6388394",
"0.6379641",
"0.6363494",
"0.6310737",
"0.62915754",
"0.6289173",
"0.62512696",
"0.62480205",
"0.6170483",
"0.6141268",
"0.61327434",
"0.60947466",
"0.6038608",
"0.6035277",
"0.6034598",
"0.6033603",
"0.602872",
"0.597623",
"0.592445",
"0.58940214",
"0.5880404",
"0.5833782",
"0.58260614",
"0.5814398",
"0.5785128",
"0.57768935",
"0.5774621",
"0.5754416",
"0.5726228"
] |
0.68916774
|
1
|
Create a cartonActivity. Inserts a new cartonActivity using the specified data. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def add_carton_activity(self, body, **kwargs):
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_carton_activity" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_carton_activity`")
resource_path = '/beta/cartonActivity'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CartonActivity',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
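
Usage sketch (not taken from the source): assuming `api` is an instance of the client class that defines add_carton_activity above, the call blocks until the response arrives unless a `callback` keyword is supplied, in which case the request runs in the background and the callback receives the CartonActivity response. The payload fields and the `api` name are assumptions.

# Hedged sketch; `api`, `on_done`, and the payload fields are assumptions.
# Only the add_carton_activity method and its `callback` kwarg come from the code above.
def on_done(response):
    # Called with the CartonActivity response once the async request finishes.
    print(response)

payload = {"name": "Example carton activity"}  # hypothetical request body

result = api.add_carton_activity(payload)                    # synchronous call
handle = api.add_carton_activity(payload, callback=on_done)  # asynchronous call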
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def delete_carton_activity(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def add_carton_activity_audit(self, carton_activity_id, carton_activity_audit, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_audit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_audit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_audit`\")\n # verify the required parameter 'carton_activity_audit' is set\n if ('carton_activity_audit' not in params) or (params['carton_activity_audit'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_audit` when calling `add_carton_activity_audit`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/audit/{cartonActivityAudit}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_audit' in params:\n path_params['cartonActivityAudit'] = params['carton_activity_audit']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def create_activity(self, created_user, source, action,\n privacy=Privacy.PRIVATE, **kwargs):\n Activity = get_activity_model()\n return Activity.objects.create(\n about=self,\n action=action,\n created_user=created_user,\n source=source,\n privacy=privacy,\n **kwargs\n )",
"def create_activity(request: Request, activity_type: str, msg_context: dict, object_id: UUID, user: User):\n dbsession = Session.object_session(user)\n\n stream = Stream.get_or_create_user_stream(user)\n\n a = Activity()\n a.object_id = object_id\n a.activity_type = activity_type\n a.msg_context = msg_context\n\n stream.activities.append(a)\n dbsession.flush()\n\n return a",
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def create(self, validated_data):\n address_data = validated_data.pop('address')\n address = AddressSerializer.create(AddressSerializer(), validated_data=address_data)\n activity, created = Activity.objects.update_or_create(address=address, **validated_data)\n return activity",
"def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def create_cart():\n r = requests.post(self.carts_service_host + '/carts')\n return (jsonify(dict(total=0.0, cart=r.json())),\n 201)",
"def update_carton_activity(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `update_carton_activity`\")\n\n resource_path = '/beta/cartonActivity'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def log_create(action, *args, **kw):\n from olympia.activity.models import ActivityLog\n\n return ActivityLog.create(action, *args, **kw)",
"def create_catalog_item(\n self,\n request: catalog_service.CreateCatalogItemRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> catalog.CatalogItem:\n # Create or coerce a protobuf request object.\n\n request = catalog_service.CreateCatalogItemRequest(request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.create_catalog_item,\n default_timeout=None,\n client_info=_client_info,\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)\n\n # Done; return the response.\n return response",
"def create_campaign(account, row, name, acc_type):\n country = None\n if acc_type == Account.COUNTRY:\n country_name = row['LOCATION']\n country = Country.objects.filter(name__iexact=country_name).first()\n if not country:\n logging.getLogger('peacecorps.sync_accounting').warning(\n \"%s: Country does not exist: %s\",\n row['PROJ_NO'], row['LOCATION'])\n return\n\n account.save()\n summary = clean_description(row['SUMMARY'])\n campaign = Campaign.objects.create(\n name=name, account=account, campaigntype=acc_type,\n description=json.dumps({\"data\": [{\"type\": \"text\",\n \"data\": {\"text\": summary}}]}),\n country=country)\n if acc_type == Account.SECTOR:\n # Make sure we remember the sector this is marked as\n SectorMapping.objects.create(pk=row['SECTOR'], campaign=campaign)",
"def test_activity_creation(self):\n\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Shop in', str(res.data))",
"def create(self):\n c = Contest.objects.create(name=self.name,\n site_sport=self.site_sport,\n prize_structure=self.prize_structure,\n start=self.start,\n end=self.end,\n skill_level=self.skill_level)\n logger.info('Contest created: %s' % c)\n return c",
"def post(self):\n data = request.json\n\n now = datetime.utcnow().isoformat()\n data['created_time'] = now\n\n # Our type is already registered within the DB, so generate a\n # model object that looks like what we'll be interacting with\n try:\n activity = Activity(data)\n except KeyError:\n raise BadRequest(\"payload validation failed: {}\".format(data))\n\n activity.save()\n log.debug(\"Wrote activity: \" + str(activity._to_dict()))\n return activity._to_dict(), 201",
"def rpc_campaign_new(self, name):\n\t\tsession = db_manager.Session()\n\t\tcampaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)\n\t\tsession.add(campaign)\n\t\tsession.commit()\n\t\treturn campaign.id",
"def add_activity(self,id_user, content, type):\n\n self.execute(TABELLE['activity']['insert'],(id_user,content,type))",
"def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)",
"def createNewActivityAssistant(courseID):\n try:\n c = Course.objects.get(courseId=courseID)\n assistant = NewActivityCreated.objects.create(course=c)\n assistant.save()\n return assistant\n except Exception:\n return None",
"def test_create_success(self, acme_id, response_headers, args, request_params):\n\n # Setup the mocked response\n responses.add(responses.POST, self.api_url, headers=response_headers,\n match=[responses.json_params_matcher(request_params)],\n status=201)\n\n acme = ACMEAccount(client=self.client)\n response = acme.create(*args)\n\n self.assertEqual(response, {\"id\": acme_id})",
"def get_carton_activity_by_filter(self, **kwargs):\n\n all_params = ['filter', 'page', 'limit', 'sort']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_filter\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/beta/cartonActivity/search'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n if 'page' in params:\n query_params['page'] = params['page']\n if 'limit' in params:\n query_params['limit'] = params['limit']\n if 'sort' in params:\n query_params['sort'] = params['sort']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[CartonActivity]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def do_charge_purchase_create(cs, args):\n purchase = cs.charge_purchases.create(args.purchase_name)\n utils.print_dict(purchase._info)",
"def CreateTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def create_customer(cls, api, **data):\n return api.create_customer(**data)",
"def test_successful_activity_creation(self):\n result = self.app.create_activity(1)\n expected = {4: {'shoplist_id': 1, 'title': 'apples', 'description': 'Fresh Green Apples', 'status': True}}\n self.assertEqual(expected, result)",
"def create_cab(self):\n cab = Cab()\n cab.type = self.info['type']\n cab.driver_name = self.info.get('driver_name')\n cab.rc_number = self.info['rc_number']\n cab.city_id = self.info['city_id']\n cab.company_name = self.info['company_name']\n cab.model_name = self.info['model_name']\n cab.update_time = datetime.utcnow().replace(microsecond=0)\n self.cab_id = self.save(cab)\n\n # we can do asynchronously\n self.create_cab_state()\n return self.cab_id",
"def create(self, request, *args, **kwargs):\n atm_serializer = self.get_serializer()\n atm = atm_serializer.create(request.data)\n\n return Response(atm)"
] |
[
"0.65059453",
"0.5947981",
"0.5774275",
"0.5743377",
"0.5573552",
"0.5534408",
"0.5441946",
"0.5287218",
"0.5261877",
"0.5095724",
"0.49159083",
"0.48912296",
"0.48800573",
"0.4859543",
"0.48262998",
"0.48246723",
"0.4767928",
"0.46715868",
"0.465052",
"0.46045247",
"0.4596962",
"0.45950326",
"0.45888123",
"0.4582361",
"0.45671242",
"0.45025387",
"0.44946724",
"0.4493238",
"0.447097",
"0.446947"
] |
0.6539719
|
0
|
Add new audit for a cartonActivity. Adds an audit to an existing cartonActivity. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def add_carton_activity_audit(self, carton_activity_id, carton_activity_audit, **kwargs):
all_params = ['carton_activity_id', 'carton_activity_audit']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_carton_activity_audit" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'carton_activity_id' is set
if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):
raise ValueError("Missing the required parameter `carton_activity_id` when calling `add_carton_activity_audit`")
# verify the required parameter 'carton_activity_audit' is set
if ('carton_activity_audit' not in params) or (params['carton_activity_audit'] is None):
raise ValueError("Missing the required parameter `carton_activity_audit` when calling `add_carton_activity_audit`")
resource_path = '/beta/cartonActivity/{cartonActivityId}/audit/{cartonActivityAudit}'.replace('{format}', 'json')
path_params = {}
if 'carton_activity_id' in params:
path_params['cartonActivityId'] = params['carton_activity_id']
if 'carton_activity_audit' in params:
path_params['cartonActivityAudit'] = params['carton_activity_audit']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
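
Illustration (an assumption, not shown in the source): the two path parameters above are meant to be substituted into the templated resource_path by api_client.call_api. Ignoring URL-encoding, which the real client handles, the substitution amounts to the following, with hypothetical values.

# Hypothetical values; the real client also URL-encodes each path segment.
resource_path = '/beta/cartonActivity/{cartonActivityId}/audit/{cartonActivityAudit}'
path_params = {'cartonActivityId': 12345, 'cartonActivityAudit': 'relabeled'}
url = resource_path.format(**path_params)
print(url)  # -> /beta/cartonActivity/12345/audit/relabeled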
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):",
"def add_carton_activity(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `add_carton_activity`\")\n\n resource_path = '/beta/cartonActivity'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def add_audit(self, user, objs):\n self._add(self.audit, user, objs)",
"async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))",
"def log_activity(self, log_entry):\n # open log file in \"append mode\"\n with open(self.log_filename, mode='a') as log_file:\n writer = csv.DictWriter(log_file, fieldnames=LogEntry.ENTRY_ORDER)\n # add a row to the log: the attributes of log_entry, in fieldnames order\n writer.writerow(log_entry.__dict__)",
"def add_shipment_audit(self, shipment_id, shipment_audit, **kwargs):\n\n all_params = ['shipment_id', 'shipment_audit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_shipment_audit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'shipment_id' is set\n if ('shipment_id' not in params) or (params['shipment_id'] is None):\n raise ValueError(\"Missing the required parameter `shipment_id` when calling `add_shipment_audit`\")\n # verify the required parameter 'shipment_audit' is set\n if ('shipment_audit' not in params) or (params['shipment_audit'] is None):\n raise ValueError(\"Missing the required parameter `shipment_audit` when calling `add_shipment_audit`\")\n\n resource_path = '/beta/shipment/{shipmentId}/audit/{shipmentAudit}'.replace('{format}', 'json')\n path_params = {}\n if 'shipment_id' in params:\n path_params['shipmentId'] = params['shipment_id']\n if 'shipment_audit' in params:\n path_params['shipmentAudit'] = params['shipment_audit']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def audit_event(activity, origin_id=None, component_name=\"maintain-frontend\",\n business_service=\"LLC Maintain Frontend\", trace_id=None, supporting_info=None):\n\n if not origin_id:\n if hasattr(g, 'session'):\n origin_id = g.session.user.id\n else:\n origin_id = \"maintain-frontend\"\n if not trace_id:\n trace_id = g.trace_id\n\n event = {'activity': activity,\n 'activity_timestamp': datetime.now(timezone.utc).isoformat(),\n 'origin_id': origin_id,\n 'component_name': component_name,\n 'business_service': business_service,\n 'trace_id': trace_id}\n\n host_ip = socket.gethostbyname(socket.gethostname())\n\n if supporting_info:\n extra_info = copy.copy(supporting_info)\n extra_info['machine_ip'] = host_ip\n event['supporting_info'] = extra_info\n else:\n supporting_info = {'machine_ip': host_ip}\n event['supporting_info'] = supporting_info\n\n try:\n current_app.logger.info(\"Sending event to audit api\")\n response = g.requests.post('{}/records'.format(AUDIT_API_URL),\n data=json.dumps(event),\n headers={'Content-Type': 'application/json'})\n except Exception:\n current_app.logger.error(\"Error occurred performing audit\")\n raise ApplicationError(500)\n\n if response.status_code != 201:\n raise ApplicationError(500)",
"def create_test_audit(context, **kw):\n audit = get_test_audit(context, **kw)\n audit.create()\n return audit",
"def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def create_audit(selenium, program, **kwargs):\n audit = entities_factory.AuditsFactory().create(**kwargs)\n audits_service = webui_service.AuditsService(selenium)\n audits_service.create_obj_via_tree_view(program, audit)\n audit.url = audits_service.open_widget_of_mapped_objs(\n program).tree_view.tree_view_items()[0].url()\n return audit",
"def get_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def update_carton_activity(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `update_carton_activity`\")\n\n resource_path = '/beta/cartonActivity'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def add_activity(self, activity, table):\n week = self.t.timeline[\"week\" + str(self.week)]\n self.t.add_activity(week, activity)\n self.clear_frame(table)\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table)",
"def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)",
"def add_order_source_audit(self, order_source_id, order_source_audit, **kwargs):\n\n all_params = ['order_source_id', 'order_source_audit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_order_source_audit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'order_source_id' is set\n if ('order_source_id' not in params) or (params['order_source_id'] is None):\n raise ValueError(\"Missing the required parameter `order_source_id` when calling `add_order_source_audit`\")\n # verify the required parameter 'order_source_audit' is set\n if ('order_source_audit' not in params) or (params['order_source_audit'] is None):\n raise ValueError(\"Missing the required parameter `order_source_audit` when calling `add_order_source_audit`\")\n\n resource_path = '/beta/orderSource/{orderSourceId}/audit/{orderSourceAudit}'.replace('{format}', 'json')\n path_params = {}\n if 'order_source_id' in params:\n path_params['orderSourceId'] = params['order_source_id']\n if 'order_source_audit' in params:\n path_params['orderSourceAudit'] = params['order_source_audit']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def delete_carton_activity(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def add_activity(self, activity):\n if (activity.start < self.start) or (activity.end > self.end):\n raise ActivityCollisionException(\n \"Activity cannot start before event start or end\",\n \"after event end.\"\n )\n\n activity.event = self\n self.activities.append(activity)",
"def create_audit_records(self, status_records, session_key):\n uri = '/services/receivers/simple'\n getargs = {'index': '_audit', 'sourcetype': 'incident_review', 'output_mode': 'json'}\n # Double list-comprehension:\n # a. Comma-separate the fields in each record, replacing \"None\" with the\n # empty string\n # b. Newline-separate the records so that the incident_review sourcetype\n # can pick up the individual audit records via SHOULD_LINEMERGE=false.\n data = '\\n'.join([','.join([str(getattr(r, k)) if getattr(r, k) is not None else '' for k in self.DEFAULT_AUDIT_FIELD_ORDER]) for r in status_records])\n\n response, content = splunk.rest.simpleRequest(uri,\n sessionKey=session_key,\n method='POST',\n getargs=getargs,\n jsonargs=data)\n\n if response['status'] != str(httplib.OK):\n logger.error('HTTP error when auditing notable events: response=\"%s\"', response)\n return False\n else:\n parsed_content = json.loads(content)\n if len(data) != parsed_content['bytes']:\n # Some audit data was not received.\n logger.error('Audit records could not be created for some notable event updates: content=\"%s\"', content)\n return False\n\n return True",
"def audit(self, message):\n channel = self.config.get('AUDIT_CHANNEL', False)\n log_file = self.config.get('AUDIT_FILE', False)\n if channel: outputs.append([channel, message])\n if log_file:\n with open(log_file, 'a') as f: f.write(message)\n logging.warning('AUDIT: ' + message)",
"def addAdminContextActivity(context, request):\n rest_params = {'actor': request.actor,\n 'verb': 'post'}\n\n # Initialize a Activity object from the request\n newactivity = Activity()\n newactivity.fromRequest(request, rest_params=rest_params)\n\n # Search if there's any activity from the same user with\n # the same actor in the last minute\n mmdb = MADMaxDB(context.db)\n query = {\n 'actor.url': request.actor.url,\n 'object.content': newactivity['object']['content'],\n 'published': {'$gt': newactivity.published - timedelta(minutes=1)}\n }\n duplicated = mmdb.activity.search(query)\n\n if duplicated:\n code = 200\n newactivity = duplicated[0]\n else:\n code = 201\n activity_oid = newactivity.insert()\n newactivity['_id'] = activity_oid\n\n handler = JSONResourceEntity(newactivity.flatten(), status_code=code)\n return handler.buildResponse()",
"def user_audit_create(sender, user, request, **kwargs):\n\n audit_key = get_hashed(request.session.session_key)\n try:\n audit = UserAudit.objects.get(audit_key=audit_key)\n except UserAudit.DoesNotExist:\n data = {\n 'user': request.user,\n 'audit_key': audit_key,\n 'user_agent': request.META.get('HTTP_USER_AGENT', 'Unknown'),\n 'ip_address': get_ip_address_from_request(request),\n 'referrer': request.META.get('HTTP_REFERER', 'Unknown'),\n 'last_page': request.path or '/',\n }\n audit = UserAudit(**data)\n logger.info(_('User {} logged in'.format(request.user.username)))\n audit.save()\n request.session[constants.USERWARE_AUDIT_KEY] = audit_key\n request.session.modified = True\n cleanup_user_audits(request.user)",
"def save_new_user_activities(self, user_id: int, activity: Activity):\n activity.user_id = user_id\n db.session.add(activity)\n db.session.commit()",
"def start_connectivity_audit(self, context, audit_uuid, masters, hosts,\n providernet, segments, extra_data):\n cctxt = self.client.prepare(topic=self.topic_pnet_connectivity_cast,\n fanout=True)\n cctxt.cast(context, 'start_connectivity_audit', audit_uuid=audit_uuid,\n masters=masters, hosts=hosts, providernet=providernet,\n segments=segments, extra_data=extra_data)",
"def upsert_activity(self, activity_log):\n if self.has_activity(activity_log[\"activity_log_id\"]):\n self._db.update('activity_log', activity_log)\n else:\n self._db.insert('activity_log', activity_log)",
"def set_activity(self, activity_name: str, activity_timestamp: datetime) -> None:\n activity = Activity(activity_name, activity_timestamp)\n self.activities.append(activity)",
"def add_activity(self, sources, render_function,\n timestamp_accessor=operator.attrgetter('timestamp')):\n renderer = get_renderer(render_function)\n for source in sources:\n self.activity.append(ActivitySource(\n source=source,\n timestamp=timestamp_accessor(source),\n renderer=renderer,\n ))",
"def enable_activity_log(self):\n self.add_payload('createActivityLog', 'true')",
"def activity(self, activity):\n if activity is None:\n raise ValueError(\"Invalid value for `activity`, must not be `None`\") # noqa: E501\n\n self._activity = activity"
] |
[
"0.5893889",
"0.56795913",
"0.5599321",
"0.5464882",
"0.5209918",
"0.50052756",
"0.49850735",
"0.49730384",
"0.49699482",
"0.49107304",
"0.4882419",
"0.47739702",
"0.47686157",
"0.46770212",
"0.45531315",
"0.45512614",
"0.452703",
"0.45114335",
"0.44965294",
"0.44539297",
"0.44446015",
"0.44285405",
"0.43990806",
"0.43902814",
"0.43855652",
"0.43629202",
"0.43148774",
"0.4296414",
"0.42733657",
"0.4261158"
] |
0.7834779
|
0
|
Add new tags for a cartonActivity. Adds a tag to an existing cartonActivity. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):
all_params = ['carton_activity_id', 'carton_activity_tag']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_carton_activity_tag" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'carton_activity_id' is set
if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):
raise ValueError("Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`")
# verify the required parameter 'carton_activity_tag' is set
if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):
raise ValueError("Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`")
resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')
path_params = {}
if 'carton_activity_id' in params:
path_params['cartonActivityId'] = params['carton_activity_id']
if 'carton_activity_tag' in params:
path_params['cartonActivityTag'] = params['carton_activity_tag']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
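
Usage sketch (assumed, as above): both path parameters are required, and the method raises ValueError before issuing any HTTP request if either is missing or None. The `api` instance and the id/tag values are made up for illustration.

# Hedged sketch; `api` and the id/tag values are assumptions.
api.add_carton_activity_tag(carton_activity_id=12345, carton_activity_tag="damaged")

try:
    api.add_carton_activity_tag(carton_activity_id=12345, carton_activity_tag=None)
except ValueError as err:
    print(err)  # Missing the required parameter `carton_activity_tag` ...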
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def tags_add(self, item, tags):\n self._createTagAction(item, \"tags_add\", tags)",
"def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def tags_add(self, item_id, tags, **params):\n\n if isinstance(tags, basestring):\n tags = tags.split(',')\n\n self.queue('tags_add', item_id=item_id, tags=tags, **params)",
"def add_tag(self, tag):\n self.tags.append(tag)",
"def add_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"POST\", url, headers=headers, data=payload)",
"def add_tag(self, tag, attributes, extent):\n self.tags.append((tag, attributes, extent))",
"def create_tag(self, session, tags):\n self._tag(session.put, tags=tags, session=session)",
"def add_tag(self, session, tag):\n self._tag(session.put, key=tag, session=session)",
"def add_carton_activity(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `add_carton_activity`\")\n\n resource_path = '/beta/cartonActivity'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def run_addtags(options):\n addtags.addtags(\n bam=options.bam,\n tagfile=options.tagfile,\n trim_suffix=options.trim_suffix,\n output=options.output,\n sam=options.sam,\n nproc=options.nproc,\n mode=options.mode,\n )",
"def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value",
"def add_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.append(tag)\n self.write_tag_index(list(set(tags)))",
"def add_tags(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n for item in self.iteritems():\n item.add_tags(tags)",
"def add(self, tag):\n self.tags[tag.name] = tag",
"def add_tags(ResourceArn=None, Tags=None):\n pass",
"def add_tags(self, tags):\n for tag in tags:\n self.add_tag(tag)\n\n return self",
"def add_tags(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n self.tags = self.tags.union(tags)",
"def add_tag(self, tag):\n cp = self.copy()\n cp.tags.add(tag)\n return cp",
"def add_tags_to_resource(ResourceId=None, Tags=None):\n pass",
"def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)",
"def create_a_tag(self, tag_id, contact_id):\n data = {\"contactTag\":{\"contact\":str(contact_id),\"tag\":str(tag_id)}}\n\n return self.client._post(\"/contactTags\", json=data)",
"def add_new_tag(self, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.add_new_tag_with_http_info(body, **kwargs)\n else:\n (data) = self.add_new_tag_with_http_info(body, **kwargs)\n return data",
"def _add_tags(self):\n\n if self.version != 'live':\n return\n\n tags = [t.strip() for t in self.tags_text.split(',')]\n tags = list(set(tags))\n\n for tag_name in tags:\n tag_slug = slugify(tag_name)\n if tag_slug:\n try:\n tag = Tag.objects.get(blog=self.blog, slug=tag_slug)\n except Tag.DoesNotExist:\n tag = Tag( blog = self.blog,\n name = tag_name,\n slug = tag_slug)\n\n tag.increment()\n tag.save()\n\n self.tags.add(tag)",
"def testAddTag(self):\n project = self.session.create_project()\n\n project.add_tag(\"test\")\n self.assertEqual(project.tags, [\"test\"], \"Can add a tag to a project.\")\n\n json_str = project.to_json()\n doc = json.loads(json_str)\n\n self.assertEqual(doc['meta']['tags'], [\"test\"],\n \"JSON representation had correct tags after add_tag().\")\n\n # Try adding the same tag yet again, shouldn't get a duplicate\n with self.assertRaises(ValueError):\n project.add_tag(\"test\")\n\n json_str = project.to_json()\n doc2 = json.loads(json_str)\n\n self.assertEqual(doc2['meta']['tags'], [\"test\"],\n \"JSON document did not end up with duplicate tags.\")",
"def add_tags(event):\n\n add_tags_from_presets()",
"def add_tag(session, tag_name, user_id=None, username='system_user'):\n session = validate_session(session)\n date_created=datetime.now()\n try:\n add_tag = TagInfo(tag_name, date_created, user_id)\n session.add(add_tag)\n session.commit()\n return(True, \"Tag %s added\" % (tag_name), add_tag)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s failed to add\" % (tag_name))",
"def push_tag(self, tag):\n _tag_entity('task', self.task_id, tag)",
"def add_tag(self, cr, uid, ids, code=None, name=None, create=False, context=None):\n tag_obj = self.pool.get('res.tag')\n tag_ids = tag_obj.get_tag_ids(cr, uid, self._name, code=code, name=name, context=context)\n if not tag_ids and create:\n model_id = self.pool.get('res.tag.model').search(cr, uid, [('model', '=', self._name)])[0]\n tag_ids = [tag_obj.create(cr, uid, {'name': name, 'code': code, 'model_id': model_id}, context=context)]\n\n if tag_ids:\n self.write(cr, uid, ids, {'tag_ids': [(4, tid) for tid in tag_ids]}, context=context)\n\n return bool(tag_ids)",
"def addItemTag(self, item, tag):\r\n if self.inItemTagTransaction:\r\n # XXX: what if item's parent is not a feed?\r\n if not tag in self.addTagBacklog:\r\n self.addTagBacklog[tag] = [] \r\n self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})\r\n return \"OK\"\r\n else:\r\n return self._modifyItemTag(item.id, 'a', tag)"
] |
[
"0.6755179",
"0.5814125",
"0.56261075",
"0.5556964",
"0.54720086",
"0.5454889",
"0.5432447",
"0.5414081",
"0.5406628",
"0.5325057",
"0.5270936",
"0.5174351",
"0.5162913",
"0.5153116",
"0.5111313",
"0.5092139",
"0.50879234",
"0.5066982",
"0.50359213",
"0.5025138",
"0.5021132",
"0.5003088",
"0.5001756",
"0.49944663",
"0.49739608",
"0.4945665",
"0.49373707",
"0.49261612",
"0.48977992",
"0.4879194"
] |
0.73234785
|
0
|
Delete a cartonActivity Deletes the cartonActivity identified by the specified id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def delete_carton_activity(self, carton_activity_id, **kwargs):
    all_params = ['carton_activity_id']
    all_params.append('callback')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_carton_activity" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'carton_activity_id' is set
    if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):
        raise ValueError("Missing the required parameter `carton_activity_id` when calling `delete_carton_activity`")

    resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')
    path_params = {}
    if 'carton_activity_id' in params:
        path_params['cartonActivityId'] = params['carton_activity_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = ['api_key']

    response = self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def delete_adcampaign(self, campaign_id, batch=False):\n path = '%s' % campaign_id\n return self.make_request(path, 'DELETE', batch=batch)",
"def delete_campaign(self, campaignId, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), campaignId), params=kwargs)",
"def delete_activity(recipe_id, activity_id):\n if 'name' in session:\n PLAN.users[session['name']].delete_activity(recipe_id, activity_id)\n return redirect(url_for('view_activities', recipe_id=recipe_id))\n return redirect(url_for('log_in'))",
"def rpc_campaign_delete(self, campaign_id):\n\t\tsession = db_manager.Session()\n\t\tsession.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id))\n\t\tsession.commit()\n\t\tsession.close()\n\t\treturn",
"def delete_activity():\n pass",
"def delete(self, campaign_id):\n campaign = Campaign.query.get(campaign_id)\n if campaign is None:\n return {\"message\": \"Campaign could not be found.\"}, HTTPStatus.NOT_FOUND\n db.session.delete(campaign)\n db.sessioin.commit()\n return {}, HTTPStatus.NO_CONTENT",
"def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def remove_by_activity_id(self, a_id):\r\n self.__repo.remove_appointment_by_activity_id(a_id)",
"def team_members_id_team_billing_card_delete(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_billing_card_delete_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_team_billing_card_delete_with_http_info(id, **kwargs)\n return data",
"def delete(self, id):\n return self._post(\n request=ApiActions.DELETE.value,\n uri=ApiUri.ACTIONS.value,\n params={'id': id}\n )",
"def delete(self, id):\n return self._post(\n request=ApiActions.DELETE.value,\n uri=ApiUri.ACTIONS.value,\n params={'id': id}\n )",
"def delete_activity(self, activity_log_id):\n self._db.execute(\"\"\"\n DELETE FROM exception_log\n WHERE activity_log = ?\"\"\", (activity_log_id, ))",
"def delete_phone_asset(self, asset_id):\n return self.delete_asset(asset_id, 'PHONE')",
"def delete(self, id):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('delete', url)",
"def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')",
"def delete(self, id):\n transacao = Transacoes.get_transacao(id)\n if not transacao:\n api.abort(404, 'Transacao not found')\n\n Transacoes.delete_transacao(transacao)\n return {\"msg\": \"Transacao deleted.\"}, 200",
"def delete_customer(cls, api, id, **params):\n return api.delete_customer(id, **params)",
"def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)",
"def delete(self, context, artifact_id):\n session = api.get_session()\n api.delete(context, artifact_id, session)",
"def delete(self, id):\n delete_entry(id)\n return None, 204",
"def team_members_id_team_billing_card_delete_with_http_info(self, id, **kwargs):\n\n all_params = ['id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_billing_card_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_billing_card_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/billing/card'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Billing',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)",
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def team_members_id_requested_design_exports_delete(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_requested_design_exports_delete_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_requested_design_exports_delete_with_http_info(id, **kwargs)\n return data",
"def delete_action(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/DeleteActionV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"DeleteActionV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )",
"def deleteCustomer(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('delete_customer', params)",
"def team_members_id_team_permission_delete(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_permission_delete_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_team_permission_delete_with_http_info(id, **kwargs)\n return data",
"def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)",
"def delete(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to delete')\n\n # Send DELETE request\n return requests.delete(self.REQUEST_URL + str(self.args.id))"
] |
[
"0.68737936",
"0.57801306",
"0.5770761",
"0.5651873",
"0.5592482",
"0.54830134",
"0.5461021",
"0.54573774",
"0.53889877",
"0.5284232",
"0.52449757",
"0.5211619",
"0.5211619",
"0.51212776",
"0.5116634",
"0.5096319",
"0.50580716",
"0.5016581",
"0.49912858",
"0.49603426",
"0.49587122",
"0.493806",
"0.49171758",
"0.49022827",
"0.48699403",
"0.48370254",
"0.48358864",
"0.48318705",
"0.4826975",
"0.48155978"
] |
0.80336636
|
0
|
Delete a tag for a cartonActivity. Deletes an existing cartonActivity tag using the specified data. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):
    all_params = ['carton_activity_id', 'carton_activity_tag']
    all_params.append('callback')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_carton_activity_tag" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'carton_activity_id' is set
    if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):
        raise ValueError("Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`")
    # verify the required parameter 'carton_activity_tag' is set
    if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):
        raise ValueError("Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`")

    resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')
    path_params = {}
    if 'carton_activity_id' in params:
        path_params['cartonActivityId'] = params['carton_activity_id']
    if 'carton_activity_tag' in params:
        path_params['cartonActivityTag'] = params['carton_activity_tag']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = ['api_key']

    response = self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_carton_activity(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)",
"def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)",
"def delete_a_tag(self, contact_tag_id):\n return self.client._delete(f\"/contactTags/{str(contact_tag_id)}\")",
"def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)",
"def remove_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"DELETE\", url, headers=headers, data=payload)",
"def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()",
"def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)",
"def delete_tag(tag):\n tag.destroy()",
"def delete_tag_by_id(self,\r\n access_token,\r\n tag_id):\r\n\r\n # Prepare query URL\r\n _url_path = '/tags/{tag_id}'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, { \r\n 'tag_id': tag_id\r\n })\r\n _query_builder = Configuration.base_uri\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'access_token': access_token\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.delete(_query_url)\r\n CustomQueryAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 0:\r\n raise APIException('Unexpected error.', _context)\r\n self.validate_response(_context)",
"def delete_tag_with_http_info(self, tag_id, **kwargs):\n\n all_params = ['tag_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'tag_id' is set\n if ('tag_id' not in params) or (params['tag_id'] is None):\n raise ValueError(\"Missing the required parameter `tag_id` when calling `delete_tag`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'tag_id' in params:\n path_params['tag_id'] = params['tag_id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['X-Token']\n\n return self.api_client.call_api('/tags/{tag_id}', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)",
"def DeleteForTag(cls, tag):\n parent_key = cls._GetParentKeyFromTag(tag)\n frontend_job = cls.query(ancestor=parent_key).get(keys_only=True)\n if frontend_job:\n frontend_job.delete()",
"def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def delete_tag(self, tag_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_tag_with_http_info(tag_id, **kwargs)\n else:\n (data) = self.delete_tag_with_http_info(tag_id, **kwargs)\n return data",
"def subject_tag_delete(context, subject_id, value, session=None):\n _check_subject_id(subject_id)\n session = session or get_session()\n query = session.query(models.SubjectTag).filter_by(\n subject_id=subject_id).filter_by(\n value=value).filter_by(deleted=False)\n try:\n tag_ref = query.one()\n except sa_orm.exc.NoResultFound:\n raise exception.NotFound()\n\n tag_ref.delete(session=session)",
"def delete_tag(self, *tags: TagReference) -> None:\n return TagReference.delete(self, *tags)",
"def delete_activity():\n pass",
"def delete_adcampaign(self, campaign_id, batch=False):\n path = '%s' % campaign_id\n return self.make_request(path, 'DELETE', batch=batch)",
"def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]",
"def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)",
"async def delete(message: discord.Message, tag: Annotate.Content):\n tag = tag_arg(tag)\n assert tag in time_cfg.data[\"countdown\"], \"Countdown with tag `{}` does not exist.\".format(tag)\n\n author_id = time_cfg.data[\"countdown\"][tag][\"author\"]\n assert message.author.id == author_id, \"You are not the author of this tag ({}).\".format(\n getattr(discord.utils.get(client.get_all_members(), id=author_id), \"name\", None) or \"~~Unknown~~\")\n\n del time_cfg.data[\"countdown\"][tag]\n time_cfg.save()\n await client.say(message, \"Countdown with tag `{}` removed.\".format(tag))",
"async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())",
"async def delete(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"aliases deleted\")\n except:\n await ctx.send(\"Alias unsuccessfully deleted\")\n elif not tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"Tag and all aliases deleted\")\n except:\n await ctx.send(\"Tag unsuccessfully deleted\")\n else:\n await ctx.send(\"No Tag with that name found\")",
"def delete_tag(self, *,\n id: str,\n tag: str,\n tag_type: str = 'default',\n resource_type: ResourceType = ResourceType.Table) -> None:\n LOGGER.info(f'Delete tag {tag} for {id} with type {tag_type} and resource_type: {resource_type.name}')\n\n resource_table = f'{resource_type.name.lower()}_tag'\n resource_model = self._get_model_from_table_name(resource_table)\n if not resource_model:\n raise NotImplementedError(f'{resource_type.name} is not defined!')\n\n resource_key = f'{resource_type.name.lower()}_rk'\n resource_attr = getattr(resource_model, resource_key)\n tag_attr = getattr(resource_model, 'tag_rk')\n try:\n with self.client.create_session() as session:\n session.query(resource_model).filter(resource_attr == id, tag_attr == tag).delete()\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to delete tag {tag} for {id}')\n raise e",
"def remove_by_activity_id(self, a_id):\r\n self.__repo.remove_appointment_by_activity_id(a_id)",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect('/tags')",
"def delete(self, id):\n return self._post(\n request=ApiActions.DELETE.value,\n uri=ApiUri.TAGS.value,\n params={'id': id}\n )"
] |
[
"0.65332395",
"0.62648904",
"0.6206653",
"0.60998005",
"0.60190386",
"0.5695426",
"0.5627239",
"0.5621899",
"0.55221844",
"0.53584176",
"0.53337467",
"0.5285021",
"0.52439374",
"0.5210847",
"0.5196048",
"0.5188383",
"0.5146715",
"0.50971574",
"0.50823796",
"0.50794446",
"0.50653726",
"0.5042972",
"0.50406855",
"0.5012714",
"0.50023186",
"0.49984276",
"0.49214324",
"0.4916321",
"0.49131438",
"0.48677444"
] |
0.7673404
|
0
|
Search cartonActivitys by filter Returns the list of cartonActivitys that match the given filter. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def get_carton_activity_by_filter(self, **kwargs):
    all_params = ['filter', 'page', 'limit', 'sort']
    all_params.append('callback')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_carton_activity_by_filter" % key
            )
        params[key] = val
    del params['kwargs']

    resource_path = '/beta/cartonActivity/search'.replace('{format}', 'json')
    path_params = {}

    query_params = {}
    if 'filter' in params:
        query_params['filter'] = params['filter']
    if 'page' in params:
        query_params['page'] = params['page']
    if 'limit' in params:
        query_params['limit'] = params['limit']
    if 'sort' in params:
        query_params['sort'] = params['sort']

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = ['api_key']

    response = self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[CartonActivity]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def search_activity(conn, request):\n\n c = conn.cursor()\n search_query = \"SELECT * FROM Activity T1 WHERE T1.Name LIKE ?\"\n c.execute(search_query, (request,))\n result = c.fetchall()\n return result",
"def activities(self):\r\n return v3.Activities(self)",
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def activitySearch (listAct,activity):\n \n for act in listAct:\n if (act.name == activity.name): \n return True",
"def collect_activities(self, user_id, release, params=None):\n params = params or {}\n filter_params = {'user_id': user_id, 'release': release}\n filter_params.update(params)\n activities = []\n while True:\n resp = requests.get(self.url, filter_params)\n content = json.loads(resp.content)\n activities.extend(content['activity'])\n filter_params['start_record'] += self.page_size\n if len(content['activity']) == 0:\n break\n return activities",
"def get_activities_by_session_id(self, session_id):\n return self._db.get_all(\"\"\"\n SELECT * FROM activity_log\n WHERE session_id = ?\"\"\", (session_id, ))",
"def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()",
"def get_activities(ts_activity, access_token):\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response",
"def get_activities(self, activity_ids=None, max_records=50):\r\n return self.connection.get_all_activities(self, activity_ids, max_records)",
"def get_activities_dictionary(self):\r\n activities_dict_list = list()\r\n activities = self.get_specific_node_list('activity')\r\n for activity in activities:\r\n activities_dict = dict()\r\n activity_name = None\r\n category = None\r\n for key, val in activity.attrib.iteritems():\r\n if \"}name\" in key:\r\n activity_name = val.split(\".\")[-1]\r\n break\r\n if activity_name:\r\n intent_filter_node = self.get_specific_node_list('intent-filter', root_node=activity)\r\n if len(intent_filter_node) == 1:\r\n categories_nodes = self.get_specific_node_list('category', root_node=intent_filter_node[0])\r\n category = self.get_category_value(categories_nodes)\r\n else:\r\n category = None\r\n activities_dict[\"name\"] = activity_name\r\n activities_dict[\"category\"] = category\r\n activities_dict_list.append(activities_dict)\r\n return activities_dict_list",
"def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def find_all_activities(manifest_path):\n tree = ET.parse(manifest_path)\n root = tree.getroot()\n\n package = root.attrib['package']\n\n application = [child for child in root if child.tag == 'application']\n activities = [x for x in application[0] if x.tag == 'activity']\n\n result = []\n\n for activity in activities:\n is_launcher_activity = False\n\n intent_filters = [x for x in activity if x.tag == 'intent-filter']\n for intent_filter in intent_filters:\n if intent_filter.attrib != {}:\n for category in intent_filter:\n if category.tag == 'category':\n if ('{http://schemas.android.com/apk/res/android}name' in category.attrib\n and category.attrib['{http://schemas.android.com/apk/res/android}name'] == \"android.intent.category.LAUNCHER\"):\n is_launcher_activity = True\n\n result.append( (get_activity_name(activity), is_launcher_activity) )\n return result",
"def __ui_list_all_activities(self):\n activities_list = self.__activity_service.service_get_list_of_activities()\n if len(activities_list) == 0:\n print(\"The list of activities is empty!\\n\")\n else:\n for activity in activities_list:\n print(activity)\n print(\"\")",
"def search_with_activitystream(query):\n request = requests.Request(\n method=\"GET\",\n url=settings.ACTIVITY_STREAM_API_URL,\n data=query).prepare()\n\n auth = Sender(\n {\n 'id': settings.ACTIVITY_STREAM_API_ACCESS_KEY,\n 'key': settings.ACTIVITY_STREAM_API_SECRET_KEY,\n 'algorithm': 'sha256'\n },\n settings.ACTIVITY_STREAM_API_URL,\n \"GET\",\n content=query,\n content_type='application/json',\n ).request_header\n\n # Note that the X-Forwarded-* items are overridden by Gov PaaS values\n # in production, and thus the value of ACTIVITY_STREAM_API_IP_WHITELIST\n # in production is irrelivant. It is included here to allow the app to\n # run locally or outside of Gov PaaS.\n request.headers.update({\n 'X-Forwarded-Proto': 'https',\n 'X-Forwarded-For': settings.ACTIVITY_STREAM_API_IP_WHITELIST,\n 'Authorization': auth,\n 'Content-Type': 'application/json'\n })\n\n return requests.Session().send(request)",
"def activities(self, activities):\n \n self._activities = activities",
"def get_activities(self, type=None):\n return flattrclient._get_query_dict(type=type)",
"def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise NotImplementedError()",
"def getUserActivities(context, request):\n mmdb = MADMaxDB(context.db)\n query = {}\n query['actor.username'] = request.actor['username']\n query['verb'] = 'post'\n chash = request.params.get('context', None)\n if chash:\n query['contexts.hash'] = chash\n\n is_head = request.method == 'HEAD'\n activities = mmdb.activity.search(query, sort=\"_id\", keep_private_fields=False, flatten=1, count=is_head, **searchParams(request))\n\n handler = JSONResourceRoot(activities, stats=is_head)\n return handler.buildResponse()",
"def activities(self, activities):\n\n self._activities = activities",
"def activities(self):\n return self._activities",
"def filter(self):\n\t\tparameters = {}\n\n\t\tif self.keywords:\n\t\t\tparameters['track'] = ','.join(self.keywords)\n\n\t\tif self.locations:\n\t\t\tparameters['locations'] = ','.join([','.join([str(latlong) for latlong in loc]) for loc in self.locations])\n\n\t\tif self.usernames:\n\t\t\tparameters['follow'] = ','.join([str(u) for u in self.usernames])\n\n\t\tself.launch('statuses/filter.json', parameters)",
"def add_carton_activity(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `add_carton_activity`\")\n\n resource_path = '/beta/cartonActivity'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def activities_list(self):\n self.__load_activities_from_file_into_memory()\n return self._activities_list",
"def find_launcher_activities(manifest_path):\n tree = ET.parse(manifest_path)\n root = tree.getroot()\n\n package = root.attrib['package']\n\n application = [child for child in root if child.tag == 'application']\n activities = [x for x in application[0] if x.tag == 'activity']\n\n activities_with_intent_filter = []\n\n for activity in activities:\n # print activity\n intent_filters = [x for x in activity if x.tag == 'intent-filter']\n for intent_filter in intent_filters:\n #for action in [x for x in intent_filter if x.tag == 'action']:\n #if ('{http://schemas.android.com/apk/res/android}name' in action.attrib\n #and action.attrib['{http://schemas.android.com/apk/res/android}name'] == \"android.intent.action.MAIN\"):\n #activities_with_intent_filter.append(get_activity_name(activity))\n\n if intent_filter.attrib != {}:\n for category in intent_filter:\n if category.tag == 'category':\n if ('{http://schemas.android.com/apk/res/android}name' in category.attrib\n and category.attrib['{http://schemas.android.com/apk/res/android}name'] == \"android.intent.category.LAUNCHER\"):\n activities_with_intent_filter.append(get_activity_name(activity))\n return list(set(activities_with_intent_filter))",
"def get_activities(cls):\n objs = cls.objects\n return objs"
] |
[
"0.53114",
"0.5290665",
"0.5160244",
"0.51501334",
"0.5136461",
"0.5019022",
"0.50118244",
"0.4994216",
"0.49656084",
"0.49583882",
"0.49430656",
"0.48703286",
"0.48623845",
"0.48623845",
"0.48623845",
"0.48623845",
"0.48564547",
"0.485406",
"0.4849172",
"0.48109105",
"0.48000395",
"0.47079924",
"0.46991488",
"0.46850494",
"0.46350762",
"0.45743567",
"0.45685405",
"0.45484006",
"0.45366526",
"0.45163885"
] |
0.75492764
|
0
|
Get a cartonActivity by id Returns the cartonActivity identified by the specified id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def get_carton_activity_by_id(self, carton_activity_id, **kwargs):
    all_params = ['carton_activity_id']
    all_params.append('callback')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_carton_activity_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'carton_activity_id' is set
    if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):
        raise ValueError("Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`")

    resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')
    path_params = {}
    if 'carton_activity_id' in params:
        path_params['cartonActivityId'] = params['carton_activity_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = ['api_key']

    response = self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='CartonActivity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def delete_carton_activity(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n return activity",
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_activity_with_id(cls, motion_id):\n obj = cls.objects(motion_id=motion_id).first()\n return obj",
"def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)",
"def get_carton_activity_by_filter(self, **kwargs):\n\n all_params = ['filter', 'page', 'limit', 'sort']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_filter\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/beta/cartonActivity/search'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n if 'page' in params:\n query_params['page'] = params['page']\n if 'limit' in params:\n query_params['limit'] = params['limit']\n if 'sort' in params:\n query_params['sort'] = params['sort']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[CartonActivity]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def sms_get_campaign_info(self, id):\n if not id:\n return self.__handle_error(\"Empty campaign id\")\n\n logger.info(\"Function call: sms_get_campaign_info from: {}\".format(id, ))\n return self.__handle_result(self.__send_request('/sms/campaigns/info/{}'.format(id, )))",
"def activity(self, activity_id):\r\n return resources.Activity(self, activity_id)",
"def get_campaign_info(self, id):\n logger.info(\"Function call: get_campaign_info from: {}\".format(id, ))\n return self.__handle_error(\"Empty campaign id\") if not id else self.__handle_result(self.__send_request('campaigns/{}'.format(id, )))",
"def get_performed_activity_by_id(session, id:int):\n performed_activity = session.query(Performed_Activity).filter_by(id=id).first()\n return performed_activity",
"def get_cart_by_id(cls, cart_id):\n\n cart = Cart.query.filter_by(cart_id=cart_id).one()\n\n return cart",
"def add_carton_activity_audit(self, carton_activity_id, carton_activity_audit, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_audit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_audit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_audit`\")\n # verify the required parameter 'carton_activity_audit' is set\n if ('carton_activity_audit' not in params) or (params['carton_activity_audit'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_audit` when calling `add_carton_activity_audit`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/audit/{cartonActivityAudit}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_audit' in params:\n path_params['cartonActivityAudit'] = params['carton_activity_audit']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def test_api_get_activity_by_id(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n # get activity created\n activity_created = json.loads(res.data.decode())\n # get activity by its ID\n res = self.client().get('/bucketlist/1/activities/{}'.format(activity_created['id']),\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)\n self.assertIn('Shop in', str(res.data))",
"def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_campaign(self, campaign_id: str) -> dict:\n return self.http_request(\"GET\", f'/campaign/{campaign_id}')",
"def get_campaign(self, campaignId, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), campaignId), params=kwargs)",
"def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_cart(id):\n url = carts_service_host + '/cart/' + id\n cart = requests.get(url).json()\n total = self._get_cart_total(cart['items'])\n return (jsonify(dict(total=total, cart=cart)),\n 200)",
"async def fetch_clan(self, id: utils.Intable) -> Clan | None:\n id64 = make_id64(id=id, type=Type.Clan)\n return await self._connection.fetch_clan(id64)",
"def get(self, id):\n return Contacts().get_one(id)",
"def add_carton_activity(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `add_carton_activity`\")\n\n resource_path = '/beta/cartonActivity'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_cart_invoice(id):\n cart = requests.get(self.carts_service_host +\n '/cart/' + id).json()['items']\n r = self._request_cart_invoice(cart)\n return (jsonify(r.json()), 200)",
"def asset_activity(self, asset_id):\n response = self._client.get('workbenches/assets/%(asset_id)s/activity',\n path_params={'asset_id': asset_id})\n return AssetActivityList.from_json(response.text)",
"def get_clan(self, id: utils.Intable) -> Clan | None:\n steam_id = SteamID(id=id, type=Type.Clan)\n return self._connection.get_clan(steam_id.id)",
"def get_activities_by_session_id(self, session_id):\n return self._db.get_all(\"\"\"\n SELECT * FROM activity_log\n WHERE session_id = ?\"\"\", (session_id, ))",
"def get(self, campaign_id):\n campaign = Campaign.query.filter_by(mailchimp_id=campaign_id).first()\n if campaign is None:\n return {\"message\": \"Campaign could not be found.\"}, HTTPStatus.NOT_FOUND\n return self.schema.dump(campaign), HTTPStatus.OK",
"def getXDMBatchConversionActivities(self,conversionId:str=None)->dict:\n if conversionId is None:\n raise ValueError(\"Require a conversion ID\")\n path = f\"/xdmBatchConversions/{conversionId}/activities\"\n res = self.connector.getData(self.endpoint+path)\n return res",
"def getactivity(self) -> Optional[ba.Activity]:\n stats = self._stats()\n if stats is not None:\n return stats.getactivity()\n return None",
"def get_by_id(c_id):\n return cr.get_by_id(c_id)"
] |
[
"0.6449085",
"0.6341287",
"0.63101023",
"0.5809725",
"0.5605849",
"0.559797",
"0.5246748",
"0.5207538",
"0.52007586",
"0.51091886",
"0.50567985",
"0.50264806",
"0.50061727",
"0.49880445",
"0.49682713",
"0.49606812",
"0.4948862",
"0.48991984",
"0.48133543",
"0.47327924",
"0.47246924",
"0.47184125",
"0.4713513",
"0.47057065",
"0.46684226",
"0.4634406",
"0.46330717",
"0.46116254",
"0.46056366",
"0.4591458"
] |
0.7842999
|
0
|
Get the tags for a cartonActivity. Get all existing cartonActivity tags. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def get_carton_activity_tags(self, carton_activity_id, **kwargs):
    all_params = ['carton_activity_id']
    all_params.append('callback')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_carton_activity_tags" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'carton_activity_id' is set
    if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):
        raise ValueError("Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`")
    resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')
    path_params = {}
    if 'carton_activity_id' in params:
        path_params['cartonActivityId'] = params['carton_activity_id']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])
    # Authentication setting
    auth_settings = ['api_key']
    response = self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_tags(self, session):\n result = self._tag(session.get, session=session)\n return result['tags']",
"def get_tags(self, *args, **kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]",
"def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_tags(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.get_tags_with_http_info(**kwargs)\n else:\n (data) = self.get_tags_with_http_info(**kwargs)\n return data",
"def get_tags(self):\n resp = self.get(_u.build_uri(\"tags\", domain=self.domain))\n return utils.handle_response(resp)",
"def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n tags = entitiesDAO.list_entity_tags(currency, entity)\n return tags",
"def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def list(self):\n return self._post(\n request='list',\n uri=ApiUri.TAGS.value,\n ).get('tags')",
"def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json",
"def ListTags(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_tags_of_type(self, sentence: str, tag_type: PosType):\n spacy_tags = SpacyWrapper.TAGS[tag_type]\n doc = self.model(sentence)\n # Token and Tag\n tags = [str(token) for token in doc if token.pos_ in spacy_tags]\n return tags",
"def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def do_list_tags(cs, args):\n resp, tags = cs.repositories.list_tags(args.repository)\n tags = [{\"Tag\": t} for t in tags]\n utils.print_list(tags, [\"Tag\"], sortby=\"Tag\")",
"def tags(self):\n return self._item.get(\"tags\")",
"def get_carton_activity_by_filter(self, **kwargs):\n\n all_params = ['filter', 'page', 'limit', 'sort']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_filter\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/beta/cartonActivity/search'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n if 'page' in params:\n query_params['page'] = params['page']\n if 'limit' in params:\n query_params['limit'] = params['limit']\n if 'sort' in params:\n query_params['sort'] = params['sort']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[CartonActivity]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_tags(requests_session: object, project_id: str, server_id: str) -> tuple:\n\n tags_response = requests_session.get(\n ECS_ENDPOINT_URL + ECS_API_URIS.get('tags_get').format(project_id=project_id, server_id=server_id))\n status_code = tags_response.status_code\n tags = []\n if status_code == 200:\n tags = tags_response.json()['tags']\n message = \"success\"\n else:\n message = tags_response.content\n return tags, status_code, message",
"def get_all_tags():\n try:\n tags = g.projects.distinct('tags')\n return jsonify(sorted(tags, key=str.lower))\n except Exception as err:\n raise ApiException(str(err), 500)",
"def get_tags(self):\n\n return self.tags",
"def get_tags(self):\n return self.tags",
"def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])",
"def tags(self) -> Sequence[str]:\n return pulumi.get(self, \"tags\")",
"def tags(self):\n return self._changeset.get('tags', None)",
"def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)",
"def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)",
"def get_activities(ts_activity, access_token):\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response",
"def pos_tags(self):\n \n msg(\"Getting POS tag list...\")\n tags = []\n \n # loop through sentences\n for sent in self.tagged_sents:\n \n # loop through tagged words\n for (word, pos) in sent:\n \n # add tag if it's not already in list\n if pos not in tags:\n tags.append(pos)\n\n msg(\"done\\n\")\n \n return tags",
"def tags(self):\n return self.get(\"tags\")"
] |
[
"0.5847432",
"0.58260983",
"0.5702042",
"0.56724346",
"0.5530561",
"0.5507581",
"0.5481185",
"0.54222554",
"0.5383679",
"0.5249438",
"0.5207172",
"0.51760936",
"0.5166908",
"0.51572615",
"0.51572615",
"0.51154923",
"0.50972337",
"0.508396",
"0.5074662",
"0.5072342",
"0.50515753",
"0.5048102",
"0.5043595",
"0.49802387",
"0.49538866",
"0.49193275",
"0.49193275",
"0.48975337",
"0.4891862",
"0.4881634"
] |
0.7763086
|
0
|
Get a duplicated cartonActivity by id. Returns a duplicated cartonActivity identified by the specified id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):
    all_params = ['carton_activity_id']
    all_params.append('callback')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_duplicate_carton_activity_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'carton_activity_id' is set
    if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):
        raise ValueError("Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`")
    resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')
    path_params = {}
    if 'carton_activity_id' in params:
        path_params['cartonActivityId'] = params['carton_activity_id']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])
    # Authentication setting
    auth_settings = ['api_key']
    response = self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='CartonActivity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)",
"def delete_carton_activity(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_duplicate_shipment_by_id(self, shipment_id, **kwargs):\n\n all_params = ['shipment_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_shipment_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'shipment_id' is set\n if ('shipment_id' not in params) or (params['shipment_id'] is None):\n raise ValueError(\"Missing the required parameter `shipment_id` when calling `get_duplicate_shipment_by_id`\")\n\n resource_path = '/beta/shipment/duplicate/{shipmentId}'.replace('{format}', 'json')\n path_params = {}\n if 'shipment_id' in params:\n path_params['shipmentId'] = params['shipment_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Shipment',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_carton_activity_tags(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_tags`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n return activity",
"def get_activity_with_id(cls, motion_id):\n obj = cls.objects(motion_id=motion_id).first()\n return obj",
"def activity(self, activity_id):\r\n return resources.Activity(self, activity_id)",
"def add_carton_activity_audit(self, carton_activity_id, carton_activity_audit, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_audit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_audit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_audit`\")\n # verify the required parameter 'carton_activity_audit' is set\n if ('carton_activity_audit' not in params) or (params['carton_activity_audit'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_audit` when calling `add_carton_activity_audit`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/audit/{cartonActivityAudit}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_audit' in params:\n path_params['cartonActivityAudit'] = params['carton_activity_audit']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def test_api_get_activity_by_id(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n self.assertEqual(res.status_code, 201)\n # get activity created\n activity_created = json.loads(res.data.decode())\n # get activity by its ID\n res = self.client().get('/bucketlist/1/activities/{}'.format(activity_created['id']),\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)\n self.assertIn('Shop in', str(res.data))",
"def get_performed_activity_by_id(session, id:int):\n performed_activity = session.query(Performed_Activity).filter_by(id=id).first()\n return performed_activity",
"def get_carton_activity_by_filter(self, **kwargs):\n\n all_params = ['filter', 'page', 'limit', 'sort']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_filter\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/beta/cartonActivity/search'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n if 'page' in params:\n query_params['page'] = params['page']\n if 'limit' in params:\n query_params['limit'] = params['limit']\n if 'sort' in params:\n query_params['sort'] = params['sort']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[CartonActivity]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_duplicate_order_source_by_id(self, order_source_id, **kwargs):\n\n all_params = ['order_source_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_order_source_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'order_source_id' is set\n if ('order_source_id' not in params) or (params['order_source_id'] is None):\n raise ValueError(\"Missing the required parameter `order_source_id` when calling `get_duplicate_order_source_by_id`\")\n\n resource_path = '/beta/orderSource/duplicate/{orderSourceId}'.replace('{format}', 'json')\n path_params = {}\n if 'order_source_id' in params:\n path_params['orderSourceId'] = params['order_source_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OrderSource',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def find_activity(self, searched_activity_id):\n self.__load_activities_from_file_into_memory()\n return super().find_activity(searched_activity_id)",
"def get_prospecting_campaign_id(self, acc_id):\n account = AdAccount(acc_id).get_campaigns()\n for camp in account:\n campaign = Campaign(camp['id'])\n campaign.remote_read(fields=[\n Campaign.Field.name,\n ])\n if campaign['name'] == 'Prospecting':\n return camp['id']",
"def get_activities_by_session_id(self, session_id):\n return self._db.get_all(\"\"\"\n SELECT * FROM activity_log\n WHERE session_id = ?\"\"\", (session_id, ))",
"def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_paste_by_id(self, id):\n if self.caching and id in self._cache:\n return self._cache[id]\n else:\n return self._get_paste_from_result(\n self._lodgeit.pastes.getPaste(id))",
"def get_activity_history_get(self, characterId, count, destinyMembershipId, membershipType, mode, page):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/{membershipType}/Account/{destinyMembershipId}/Character/{characterId}/Stats/Activities/\"))",
"def duplicate(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n raise exceptions.NotFound\n\n inherit_collection = request.data.get(\"inherit_collection\", False)\n ids = self.get_ids(request.data)\n queryset = Entity.objects.filter(id__in=ids).filter_for_user(request.user)\n actual_ids = queryset.values_list(\"id\", flat=True)\n missing_ids = list(set(ids) - set(actual_ids))\n if missing_ids:\n raise exceptions.ParseError(\n \"Entities with the following ids not found: {}\".format(\n \", \".join(map(str, missing_ids))\n )\n )\n\n task = queryset.duplicate(\n contributor=request.user, inherit_collection=inherit_collection\n )\n serializer = BackgroundTaskSerializer(task)\n return Response(serializer.data)",
"def get_last_activity(self):\n return Activity.objects.filter(campaign=self.campaign, status=\"P\", contact=self.contact).latest(\"id\")",
"def get_cart_by_id(cls, cart_id):\n\n cart = Cart.query.filter_by(cart_id=cart_id).one()\n\n return cart",
"def getXDMBatchConversionActivities(self,conversionId:str=None)->dict:\n if conversionId is None:\n raise ValueError(\"Require a conversion ID\")\n path = f\"/xdmBatchConversions/{conversionId}/activities\"\n res = self.connector.getData(self.endpoint+path)\n return res",
"def asset_activity(self, asset_id):\n response = self._client.get('workbenches/assets/%(asset_id)s/activity',\n path_params={'asset_id': asset_id})\n return AssetActivityList.from_json(response.text)",
"def test_activity_id(self):\n new_activity = self.app\n self.assertTrue(Activity.activity_id, 0)\n new_activity.create_activity(1)\n self.assertTrue(new_activity.activity_id, 1)\n for key in new_activity.activities:\n self.assertEqual(new_activity.activity_id, key)",
"def activity_type(self, type_id):\r\n return activities.ActivityType(self, type_id)",
"def get_continuous_activity(self):\n from .continuousactivity import DSSContinuousActivity\n return DSSContinuousActivity(self.client, self.project_key, self.recipe_name)",
"def get(self, id):\n return Contacts().get_one(id)",
"def get_by_id(c_id):\n return cr.get_by_id(c_id)"
] |
[
"0.68601924",
"0.59325856",
"0.57145596",
"0.5497775",
"0.5472108",
"0.54584974",
"0.5320074",
"0.5269935",
"0.50570655",
"0.48682854",
"0.48382714",
"0.47813615",
"0.47610122",
"0.47371498",
"0.47237444",
"0.4635629",
"0.45927307",
"0.45755008",
"0.45652983",
"0.45400804",
"0.45129225",
"0.44982818",
"0.44535685",
"0.44417202",
"0.43605176",
"0.4277224",
"0.4234389",
"0.42184561",
"0.41760868",
"0.41737425"
] |
0.8400414
|
0
|
Update a cartonActivity. Updates an existing cartonActivity using the specified data. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
|
def update_carton_activity(self, body, **kwargs):
    all_params = ['body']
    all_params.append('callback')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_carton_activity" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `update_carton_activity`")
    resource_path = '/beta/cartonActivity'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['api_key']
    response = self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
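A minimal usage sketch for the update call above (not part of the generated client): the instance name `carton_activity_api`, the example id, and the fetch-then-modify flow are illustrative assumptions; the required `body` parameter and the `callback` behaviour come from the code above.

# fetch an existing record, modify it, and send it back with a PUT
carton_activity = carton_activity_api.get_carton_activity_by_id(12345)
# ... change fields on `carton_activity` as needed ...
carton_activity_api.update_carton_activity(carton_activity)                            # synchronous
carton_activity_api.update_carton_activity(carton_activity, callback=lambda r: None)   # asynchronous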
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_carton_activity(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def put(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n\n return activity._update(request.json)",
"def update_activity(self, to_update_activity_id, updated_activity):\n self.__load_activities_from_file_into_memory()\n super().update_activity(to_update_activity_id, updated_activity)\n self.__save_activities_from_memory_to_file()",
"def get_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def update_activity(recipe_id, activity_id):\n if 'name' in session:\n if request.method == 'POST':\n PLAN.users[session['name']].update_activity(recipe_id,\n activity_id,\n request.form['name'],\n request.form['description'])\n return redirect(url_for('view_activities', recipe_id=recipe_id))\n activity = PLAN.users[session['name']].get_recipe_from_id(\n recipe_id).object_from_id(activity_id)\n return render_template('updateactivity.html', activity=activity)\n\n return redirect(url_for('log_in'))",
"def activity(self, activity):\n if activity is None:\n raise ValueError(\"Invalid value for `activity`, must not be `None`\") # noqa: E501\n\n self._activity = activity",
"def add_carton_activity(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `add_carton_activity`\")\n\n resource_path = '/beta/cartonActivity'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_duplicate_carton_activity_by_id(self, carton_activity_id, **kwargs):\n\n all_params = ['carton_activity_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_duplicate_carton_activity_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `get_duplicate_carton_activity_by_id`\")\n\n resource_path = '/beta/cartonActivity/duplicate/{cartonActivityId}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CartonActivity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def updateWorkflow(self, session, activity_code, order_item_id=None, \n batch_id=None, product_item_id=None, user_id=User.AUTO, \n comments=None):\n try:\n entity_name, entity_id = self._getEntityId(order_item_id, \n batch_id, \n product_item_id)\n except WorkflowError, e:\n # convert to update error\n entity_id = order_item_id or batch_id or product_item_id\n raise WorkflowUpdateError('Not found: %s' % entity_id)\n entity_type = entity_name[:-3] # strip _id\n if entity_name == 'order_item_id':\n workitem = self.getWorkflowItem(session, entity_id)\n self._updateWorkflowItem(session, activity_code, workitem, user_id)\n elif entity_name == 'product_item_id':\n orderitems = self.productItems.getProductItem(\n session, entity_id).order_items\n # loop through all order_items with given product_item_id\n for oi in orderitems:\n workitem = self.getWorkflowItem(session, oi.order_item_id)\n self._updateWorkflowItem(session, activity_code, \n workitem, user_id)\n elif entity_name == 'batch_id':\n batchitems = self.batches.getBatch(session, entity_id).items\n # loop through all order_items with given batch_id\n for bi in batchitems:\n if not bi.active: continue\n workitem = self.getWorkflowItem(session, bi.order_item_id)\n self._updateWorkflowItem(session, activity_code, \n workitem, user_id)\n # update history\n now = datetime.now()\n self.histories.updateHistory(session, entity_type, entity_id,\n activity_code, now, user_id, comments)",
"def add_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `add_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def update(self, **kwargs):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n # TODO: Verify that the passed kwargs are supported ones\n _response = self.connector.http_call(\"put\", _url, json_data=kwargs)\n\n # Update object\n self._update(_response.json())",
"def _updateWorkflowItem(self, session, activity_code, \n workitem, user_id=User.AUTO):\n activity_id = self.activities.getId(activity_code)\n if activity_id <> workitem.activity_id:\n msg = 'Current and update activities do not match'\n raise WorkflowValidationError(msg)\n # get next activity\n activity_code = self.getNextActivity(session, workitem)\n if activity_code:\n workitem.activity_id = self.activities.getId(activity_code)\n else: # finishing last activity\n workitem.state_id = State.COMPLETE\n workitem.user_id = user_id\n if activity_code in self.skip_activities[workitem.workflow.code]:\n if activity_code in workitem.skip_activities:\n raise WorkflowUpdateError('Attempting to repeat a one-time '\n 'action.')\n elif workitem.skip_activities:\n workitem.skip_activities += ',' + activity_code\n else:\n workitem.skip_activities += activity_code\n session.add(workitem)\n return True",
"def put(self, _id):\n payload = self.request.json\n # TODO: validate the json before updating the db\n self.app.db.jobs.update({'_id': int(_id)}, {'$set': {'status': payload.get('status'), 'activity': payload.get('activity')}})",
"def update(self, **kwargs):\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/links/{self.link_id}\"\n )\n\n # TODO: Verify that the passed kwargs are supported ones\n _response = self.connector.http_call(\"put\", _url, json_data=kwargs)\n\n # Update object\n self._update(_response.json())",
"def update_catalog_item(\n self,\n request: catalog_service.UpdateCatalogItemRequest = None,\n *,\n catalog_item: catalog.CatalogItem = None,\n update_mask: field_mask.FieldMask = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> catalog.CatalogItem:\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n if request is not None and any([catalog_item, update_mask]):\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = catalog_service.UpdateCatalogItemRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if catalog_item is not None:\n request.catalog_item = catalog_item\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if update_mask is not None:\n request.update_mask = update_mask\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method.wrap_method(\n self._transport.update_catalog_item,\n default_timeout=None,\n client_info=_client_info,\n )\n\n # Send the request.\n response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)\n\n # Done; return the response.\n return response",
"def update(self, request, pk=None):\n\n job = Job.objects.get(pk=pk)\n job.title = request.data[\"title\"]\n job.description = request.data[\"description\"]\n job.city = request.data[\"city\"]\n job.state = request.data[\"state\"]\n job.application = request.data[\"application\"]\n user = request.auth.user\n job.user = user\n job.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)",
"def update(self, catalog: Metadata, action: str):\n self._insert_request(self.update_queue, catalog, action)",
"def add_carton_activity_audit(self, carton_activity_id, carton_activity_audit, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_audit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_audit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_audit`\")\n # verify the required parameter 'carton_activity_audit' is set\n if ('carton_activity_audit' not in params) or (params['carton_activity_audit'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_audit` when calling `add_carton_activity_audit`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/audit/{cartonActivityAudit}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_audit' in params:\n path_params['cartonActivityAudit'] = params['carton_activity_audit']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def update_activity():\n pass",
"def update(self, request, pk=None, **kwargs):\n self.permission_classes.append(IsAuthorOrReadOnly)\n comment = get_object_or_404(Comment, pk=self.kwargs[\"id\"])\n self.check_object_permissions(self.request, comment)\n data = request.data\n serializer = self.serializer_class(comment, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"comment\" : serializer.data, \"Status\": \"Edited\" }, status=status.HTTP_201_CREATED)",
"def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)",
"def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)",
"def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)",
"def delete_carton_activity_tag(self, carton_activity_id, carton_activity_tag, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_carton_activity_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `delete_carton_activity_tag`\")\n # verify the required parameter 'carton_activity_tag' is set\n if ('carton_activity_tag' not in params) or (params['carton_activity_tag'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_tag` when calling `delete_carton_activity_tag`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/tag/{cartonActivityTag}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_tag' in params:\n path_params['cartonActivityTag'] = params['carton_activity_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def __ui_update_activity(self):\n to_update_activity_id = int(input(\"The ID of the activity you want to update: \"))\n existing_persons_ids = self.__person_service.get_existing_persons_ids()\n string_of_participants_ids = input(\n f\"New participants IDs (you can choose from the list: {existing_persons_ids})\\n > \")\n updated_list_of_participants_ids = self.__ui_convert_ids_string_to_list(string_of_participants_ids)\n updated_activity_description = input(\"Updated description: \")\n updated_activity_date = {\n \"year\": int(input(\"Updated year: \")),\n \"month\": int(input(\"Updated month: \")),\n \"day\": int(input(\"Updated day: \"))\n }\n updated_activity_time = int(input(\"Updated time: \"))\n self.__activity_service.service_update_activity(to_update_activity_id,\n updated_list_of_participants_ids,\n updated_activity_date,\n updated_activity_time,\n updated_activity_description)\n print(\"Activity successfully updated!\\n\")",
"def update_asset(cls, id, asset_data):\n\n return ph_base._update_record('asset', id, asset_data)",
"def update_interaction_model_catalog_v1(self, catalog_id, update_request, **kwargs):\n # type: (str, UpdateRequest_12e0eebe, **Any) -> Union[ApiResponse, object, StandardizedError_f5106a89, BadRequestError_f854b05]\n operation_name = \"update_interaction_model_catalog_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'catalog_id' is set\n if ('catalog_id' not in params) or (params['catalog_id'] is None):\n raise ValueError(\n \"Missing the required parameter `catalog_id` when calling `\" + operation_name + \"`\")\n # verify the required parameter 'update_request' is set\n if ('update_request' not in params) or (params['update_request'] is None):\n raise ValueError(\n \"Missing the required parameter `update_request` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills/api/custom/interactionModel/catalogs/{catalogId}/update'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'catalog_id' in params:\n path_params['catalogId'] = params['catalog_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'update_request' in params:\n body_params = params['update_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=None, status_code=204, message=\"No content, indicates the fields were successfully updated.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn't have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=404, message=\"There is no catalog defined for the catalogId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceed the permitted request limit. 
Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=None)\n\n if full_response:\n return api_response\n \n return None",
"def update(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to update')\n\n # Check URL validity\n if self.args.url is not None and self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send PUT request\n return requests.put(\n self.REQUEST_URL + str(self.args.id),\n {'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )",
"def updateCustomer(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('update_customer', params)",
"def update_contact(self, context, payload):\n\n if context.get('headers').get('api_key') is None or context.get('headers').get('app_id') is None:\n raise Exception(\"Please provide Api-Key and Api-Appid\")\n \n # Set headers\n headers = {\n \"Api-Key\": context.get('headers').get('api_key'),\n \"Api-Appid\": context.get('headers').get('app_id'),\n \"Content-Type\": \"application/json\"\n }\n payload[\"id\"] = payload.get(\"contact_id\")\n response = requests.request(\"PUT\", f'{self.url}Contacts', headers=headers, data=payload).text\n response = json.loads(response)\n response = response[\"data\"][\"attrs\"]\n return response"
] |
[
"0.5470711",
"0.53928757",
"0.536302",
"0.51352435",
"0.4850845",
"0.4835823",
"0.4745151",
"0.47427103",
"0.4730338",
"0.470771",
"0.46928594",
"0.46521118",
"0.4649748",
"0.4622234",
"0.45755666",
"0.45750722",
"0.4569502",
"0.45484254",
"0.45429483",
"0.4441744",
"0.44411325",
"0.44411325",
"0.44411325",
"0.44297063",
"0.44112095",
"0.42949182",
"0.42716533",
"0.42518005",
"0.42500195",
"0.42462435"
] |
0.64768636
|
0
|
Checks whether or not a player can take on the input color.
|
def validPlayerColor(color):
if color not in (RED, GREEN, BLUE, YELLOW):
return False
else:
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def color_in_check(self, mycolor):\n\n opponent = self.__players[self.get_opponent_color(mycolor)]\n\n x, y = None, None\n for (u, v) in self.__players[mycolor]:\n piece = self.get_piece(u, v)\n if not piece:\n raise ValueError()\n\n if self.get_piece(u, v).name == 'king':\n x, y = u, v\n break\n\n for (u, v) in opponent:\n if (x, y) in self._get_piece_moves(u, v):\n return True\n\n return False",
"def valid_color(self, color):\n valid = False\n if (isinstance(color, list) and len(color) == 3):\n valid = True\n for chan in color:\n valid = valid and (0 <= chan <= 15)\n if not valid:\n _LOGGER.warn(\"{0} was not a valid color\".format(color))\n return valid",
"def is_a_player_in_check(self, color):\n\n current_player = color\n if current_player == 'red':\n opposing_player = 'blue'\n else:\n opposing_player = 'red'\n\n if self.can_checkmate(opposing_player):\n return self.get_general(current_player)\n\n if self.can_checkmate(current_player):\n return self.get_general(opposing_player)\n\n return False",
"def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # verify that component colors are between _MIN and _MAX\n for c in color:\n if c < MIN or c > MAX:\n return False\n return True",
"def validColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW, EMPTY):\n return False\n else:\n return True",
"def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True",
"def can_play(self) -> bool:\n purple_card = self.game.board.purple\n return (\n self.game.current_player != self\n and purple_card is not None\n and purple_card.space > len(self.game.board.yellow[self])\n )",
"def isColor(self,color):\n return self.color==color",
"def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)",
"def color_check_mate(self, mycolor):\n\n if not self.color_in_check(mycolor):\n return False\n\n incheck = True\n for (x, y) in self.__players[mycolor]:\n moves = self._get_piece_moves(x, y)\n for to in moves:\n res, captured = self._make_move((x, y), to)\n if not self.color_in_check(mycolor):\n incheck = False\n\n self._unmake_move(to, (x, y), captured)\n if not incheck:\n return False\n\n return incheck",
"def check_win(self, color):\n if dijkstra(self, color) == 0:\n return True\n else:\n return False",
"def IsOk(*args, **kwargs):\n return _gdi_.Colour_IsOk(*args, **kwargs)",
"def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))",
"def can_checkmate(self, color):\n\n if color == 'red':\n opposing_color = 'blue'\n else:\n opposing_color = 'red'\n general_location = None\n\n for piece in self._active_pieces[opposing_color]:\n if type(piece) is General:\n general_location = self.translate_to_algebraic(piece.get_location())\n break\n for piece in self._active_pieces[color]:\n piece_location = self.translate_to_algebraic(piece.get_location())\n if piece.validate_move(piece_location, general_location, self._board):\n return True\n return False",
"def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()",
"def hasColor(self, color):\n if type(color) is not int:\n raise TypeError( \"Color must be of type int.\" )\n else:\n return color in self.__colordict__",
"def can_change_colors(self):\n return self.condition is None or self.get_state(self.condition[\"entity\"]) == self.condition[\"state\"]",
"def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)",
"def IsColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_IsColor(self, *args)",
"def checkWin(color_list):\n startcolor = color_list[0][0] #Saves color of [0][0] to variable for easy access\n for i in range(15):\n for k in range(25):\n if color_list[i][k] != startcolor: #If any color is not same as color on [0][0] stop and return False since game is not won\n return False\n return True #If all colors are the same as [0][0] the game ahs been won and return Tture",
"def _validate_color(color):\n if not isinstance(color, (list, tuple)):\n raise ValueError(\"Color has to be list, or tuple\")\n if len(color) != 3:\n raise ValueError(\"Color have to contain exactly 3 values: [R, G, B]\")\n for channel in color:\n validate_channel_value(channel)",
"def isRGB(color):\n if not(isinstance(color, list) or isinstance(color, tuple)):\n raise pgUIException(str(color) + ' is not a valid color',\n code = 20)\n if len(color) != 3:\n raise pgUIException(str(color) + ' color has to have three components',\n code = 21)\n if not(isinstance(color[0], int))\\\n or not(isinstance(color[1], int))\\\n or not(isinstance(color[2], int)):\n raise pgUIException(str(color) + ' color components have to be integers',\n code = 23)\n for c in color:\n if c < 0 or c > 255:\n raise pgUIException(str(color) +\n ' color components are to be in between 0 and 255',\n code = 22)\n return True",
"def validate_hair_color(passport: map) -> bool:\n if passport.get('hcl'):\n regex = re.compile('#[0-9a-f]{6}')\n match = regex.match(passport['hcl'])\n return bool(match)\n\n return False",
"def is_cue_color(color, tolerance = 100):\n return col_diff(color, CUE_COLOR) <= tolerance",
"def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok",
"def _is_color(cls, obj: Any) -> bool:\n\n return isinstance(obj, Color)",
"def is_valid_eye_color(eye_color: str) -> str:\n return eye_color in [\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"]",
"def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))",
"def is_color(self, color: ColorLike) -> bool:\n\n if isinstance(color, Color):\n return self.color == color\n elif isinstance(color, str):\n return str(self.color) == color\n elif isinstance(color, int):\n return int(self.color) == color\n return False",
"def test_is_valid_rgb_color(self):\n self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))\n self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))\n\n # invalid cases\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))\n self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))"
] |
[
"0.7221195",
"0.71365774",
"0.713204",
"0.7071138",
"0.702432",
"0.693353",
"0.6682949",
"0.6586236",
"0.6583911",
"0.6565734",
"0.6541181",
"0.6510639",
"0.6480027",
"0.6437859",
"0.6409153",
"0.63930285",
"0.639012",
"0.63468397",
"0.62909555",
"0.6262261",
"0.6245493",
"0.6235397",
"0.62092656",
"0.6189819",
"0.6099259",
"0.6097983",
"0.60918856",
"0.60880786",
"0.60632557",
"0.60513735"
] |
0.8037171
|
0
|
Check whether or not the input color is a valid color (including EMPTY).
|
def validColor(color):
if color not in (RED, GREEN, BLUE, YELLOW, EMPTY):
return False
else:
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))",
"def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # verify that component colors are between _MIN and _MAX\n for c in color:\n if c < MIN or c > MAX:\n return False\n return True",
"def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)",
"def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)",
"def valid_color(self, color):\n valid = False\n if (isinstance(color, list) and len(color) == 3):\n valid = True\n for chan in color:\n valid = valid and (0 <= chan <= 15)\n if not valid:\n _LOGGER.warn(\"{0} was not a valid color\".format(color))\n return valid",
"def _validate_color(color):\n if not isinstance(color, (list, tuple)):\n raise ValueError(\"Color has to be list, or tuple\")\n if len(color) != 3:\n raise ValueError(\"Color have to contain exactly 3 values: [R, G, B]\")\n for channel in color:\n validate_channel_value(channel)",
"def test_is_valid_rgb_color(self):\n self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))\n self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))\n\n # invalid cases\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))\n self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))",
"def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))",
"def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True",
"def invalid_colour(colour):\n error_message = f\"`{colour}` is not a valid RGB colour\"\n\n if not isinstance(colour, list):\n return error_message\n\n if not all([0 <= component <= 255 for component in colour]):\n return error_message\n\n return False",
"def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()",
"def isRGB(color):\n if not(isinstance(color, list) or isinstance(color, tuple)):\n raise pgUIException(str(color) + ' is not a valid color',\n code = 20)\n if len(color) != 3:\n raise pgUIException(str(color) + ' color has to have three components',\n code = 21)\n if not(isinstance(color[0], int))\\\n or not(isinstance(color[1], int))\\\n or not(isinstance(color[2], int)):\n raise pgUIException(str(color) + ' color components have to be integers',\n code = 23)\n for c in color:\n if c < 0 or c > 255:\n raise pgUIException(str(color) +\n ' color components are to be in between 0 and 255',\n code = 22)\n return True",
"def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok",
"def test_color__int_arg_invalid(self):\n with self.assertRaises(ValueError):\n color = pygame.Color(0x1FFFFFFFF)",
"def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"",
"def isRGB(color):\n try:\n if color[0:4] != 'rgb(':\n return False\n if color[-1:] != ')':\n return False\n if len(color[4:-1].split(',')) != 3:\n return False\n for i in color[4:-1].split(','):\n if i.replace(' ', '').isdigit() == False:\n return False\n if int(i.replace(' ', '')) < 0 or int(i.replace(' ', '')) > 255:\n return False\n return True\n except TypeError:\n return False",
"def is_valid_hair_color(hair_color: str) -> bool:\n return re.match(r'^#[a-f|0-9]{5}', hair_color)",
"def is_rgb_color(v):\n if hasattr(v, \"r\") and hasattr(v, \"g\") and hasattr(v, \"b\"):\n v = [v.r, v.g, v.b]\n if not isiterable(v) or len(v) < 3:\n return False\n try:\n return all([0 <= int(x) <= 255 for x in v[:3]])\n except (TypeError, ValueError):\n return False",
"def test_color(self):\n color = pygame.Color(0, 0, 0, 0)\n\n self.assertIsInstance(color, pygame.Color)",
"def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':\n return in_range(int('0x' + s[1:], 0))\n elif s[0:2] == '0x':\n return in_range(int(s, 0))\n elif len(s) == 6:\n return in_range(int('0x' + s, 0))\n except ValueError:\n return False",
"def assert_color(\n color: Union[ColorInputType, List[int]],\n warn_if_invalid: bool = True\n) -> ColorType:\n color = format_color(color, warn_if_invalid=warn_if_invalid)\n assert isinstance(color, VectorInstance), \\\n f'color must be a tuple or list, not type \"{type(color)}\"'\n assert 4 >= len(color) >= 3, \\\n 'color must be a tuple or list of 3 or 4 numbers'\n for i in range(3):\n assert isinstance(color[i], int), \\\n f'\"{color[i]}\" in element color {color} must be an integer, not type \"{type(color)}\"'\n assert 0 <= color[i] <= 255, \\\n f'\"{color[i]}\" in element color {color} must be an integer between 0 and 255'\n if len(color) == 4:\n assert isinstance(color[3], int), \\\n f'alpha channel must be an integer between 0 and 255, not type \"{type(color)}\"'\n assert 0 <= color[3] <= 255, \\\n f'opacity of color {color} must be an integer between 0 and 255; ' \\\n f'where 0 is fully-transparent and 255 is fully-opaque'\n return color",
"def __isValidRgbaColor(self, color):\n rgba = []\n \n parts = color.split(\",\")\n if len(parts) not in [3, 4]:\n return False, []\n \n for part in parts:\n try:\n c = int(part)\n except ValueError:\n return False, []\n \n if c < 0 or c > 255:\n return False, []\n \n rgba.append(c)\n \n return True, rgba",
"def isColor(self,color):\n return self.color==color",
"def test_color__rgba_int_args_invalid_value_without_alpha(self):\n self.assertRaises(ValueError, pygame.Color, 256, 10, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 256, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 256)",
"def _is_base_color(self, color):\n # is it a valid color\n if not self._is_color_valid(color):\n return False\n # general logic\n has_a_min = color[0] == MIN or color[1] == MIN or color[2] == MIN\n has_a_max = color[0] == MAX or color[1] == MAX or color[2] == MAX\n if has_a_min and has_a_max:\n # is a base color\n return True\n # not a base color\n return False",
"def validate_channel_value(value: int) -> None:\n if 0 <= value <= 255:\n pass\n else:\n raise ValueError(\"Color channel has to be in range [0; 255]\")",
"def hasColor(self, color):\n if type(color) is not int:\n raise TypeError( \"Color must be of type int.\" )\n else:\n return color in self.__colordict__",
"def is_red(node):\n if node is None:\n return False\n return node.colour is True",
"def is_color(self, color: ColorLike) -> bool:\n\n if isinstance(color, Color):\n return self.color == color\n elif isinstance(color, str):\n return str(self.color) == color\n elif isinstance(color, int):\n return int(self.color) == color\n return False",
"def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))"
] |
[
"0.7934704",
"0.7657638",
"0.7578412",
"0.74673444",
"0.7333226",
"0.71767724",
"0.7126696",
"0.7092184",
"0.7017223",
"0.7006884",
"0.69969285",
"0.69682324",
"0.68484133",
"0.6763875",
"0.67374766",
"0.6728577",
"0.6533575",
"0.65154064",
"0.64646447",
"0.6460852",
"0.6455157",
"0.645052",
"0.6361383",
"0.63550675",
"0.6352525",
"0.6331287",
"0.63263196",
"0.631492",
"0.63048595",
"0.6296004"
] |
0.827423
|
0
|
Get the string corresponding to a color.
|
def getColorString(color):
if type(color) is not int:
raise TypeError("The input to getColorString is not of type int.")
if color in COLOR_STRINGS:
return COLOR_STRINGS[color]
else:
raise ValueError("Input color not found.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_colorstring(color) -> str:\n return f\"#{int(color[0]*255):02x}{int(color[1]*255):02x}{int(color[2]*255):02x}\"",
"def get_color(self) -> str:\r\n return self.color",
"def get_color(self) -> str:\n return self.color",
"def _colorstr(self, color):\n if len(color) == 1:\n color = color[0]\n if isinstance(color, str):\n if self._iscolorstring(color) or color == \"\":\n return color\n else:\n raise TurtleGraphicsError(\"bad color string: %s\" % str(color))\n try:\n r, g, b = color\n except (TypeError, ValueError):\n raise TurtleGraphicsError(\"bad color arguments: %s\" % str(color))\n if self._colormode == 1.0:\n r, g, b = [round(255.0*x) for x in (r, g, b)]\n if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):\n raise TurtleGraphicsError(\"bad color sequence: %s\" % str(color))\n return \"#%02x%02x%02x\" % (r, g, b)",
"def get_coloured_text_string(text, colour):\n if colour==\"red\":\n return (\"\\033[31m\" + text + \"\\033[0m\")\n if colour==\"green\":\n return (\"\\033[32m\" + text + \"\\033[0m\")\n if colour==\"yellow\":\n return (\"\\033[33m\" + text + \"\\033[0m\")\n if colour==\"blue\":\n return (\"\\033[34m\" + text + \"\\033[0m\")\n if colour==\"purple\":\n return (\"\\033[35m\" + text + \"\\033[0m\")\n if colour==\"cyan\":\n return (\"\\033[36m\" + text + \"\\033[0m\")\n if colour==\"white\":\n return (\"\\033[37m\" + text + \"\\033[0m\")\n return text",
"def GetAsString(*args, **kwargs):\n return _gdi_.Colour_GetAsString(*args, **kwargs)",
"def get_color(self) -> Optional[str]:\n return self.color",
"def get_color(self, node: Node) -> str:\n\n idx = hash(node.get_kind_name()) % len(self.colors_)\n return self.colors_[idx]",
"def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"",
"def colored (string_, color, attrs):\n return string_",
"def __str__(self):\n value = COLOR_NAMES.get(self.color.upper())\n # if value:\n # return str(value)\n # else:\n # return \"Unknown\"\n return str(value) if value else \"Unknown\"",
"def getResourceName(color):\n if color == 1:\n return 'news'\n if color == 2:\n return 'cash'\n if color == 3:\n return 'hype'",
"def rgbString(red,green,blue):\n return chr(red)+chr(green)+chr(blue)",
"def colorize(text, color):\n return COLOR_DICT[color] + str(text) + COLOR_DICT['end']",
"def get_color(col, color):\n if color is None and col is None:\n return 'C0'\n if col is None:\n return color\n if not isinstance(col, int):\n raise ValueError(\"`col` must be an integer. Consider using `color` instead.\")\n return 'C{}'.format(col)",
"def _color_string(string, color):\n if color is None:\n return string\n else:\n return color + string + '\\033[0m'",
"def _colorstr(self, args):",
"def get_color(self):\n return \"yellow\"",
"def color(self):\n return self['color']",
"def color(name):\n\tif name not in colors:\n\t\traise ValueError('Bad color %s' % repr(name))\n\treturn u'§' + colors[name]",
"def color_string(self, data, type):\n\n # http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python\n\n if self.options['no_color']:\n return data\n\n CEND = '\\x1b[0m'\n\n if type == ColorType.ok:\n return '\\x1b[1;32;40m{0}{1}'.format(data, CEND)\n if type == ColorType.error:\n return '\\x1b[1;31;40m{0}{1}'.format(data, CEND)\n if type == ColorType.warning:\n return '\\x1b[1;36;40m{0}{1}'.format(data, CEND)\n if type == ColorType.info:\n return '\\x1b[1;34;40m{0}{1}'.format(data, CEND)\n\n return str",
"def get_colour(self) -> str:\n return self.colour",
"def color(value):\r\n return 'RGB({}, {}, {})'.format(value.red(), value.blue(), value.green())",
"def getrandomcolor(self) -> str:\n return self.tab10[random.randint(0, len(self.tab10)-1)]",
"def _color_info_text(self):\n\n t = ''\n for info in self.color_info:\n if info == 'rgbhex':\n t1 = tks.color_funcs.rgb_to_hex_string(self.rgb)\n elif info == 'rgb':\n t1 = tks.color_funcs.rgb_to_rgb_string(self.rgb, dp=2)\n elif info == 'hsv':\n t1 = tks.color_funcs.rgb_to_hsv_string(self.rgb, dp=2)\n elif info == 'hls':\n t1 = tks.color_funcs.rgb_to_hls_string(self.rgb, dp=2)\n\n t = t + '%s\\n' % t1\n\n return t",
"def color(self) -> Optional[str]:\n return self.colour",
"def getColor(k) :\n colors = [\"#862B59\",\"#A10000\",\"#0A6308\",\"#123677\",\"#ff8100\",\"#F28686\",\"#6adf4f\",\"#58ccdd\",\"#3a3536\",\"#00ab7c\"]\n return colors[k]",
"def color(self, s, fg=None, style=None):\n return LogStr(s, fg=fg, style=style)",
"def _get_color(self, color_name):\n if not color_name:\n return 0\n\n if color_name == 'ORANGE':\n color = self.COLOR_ORANGE\n else:\n color = getattr(curses, 'COLOR_' + color_name)\n return curses.color_pair(color)",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")"
] |
[
"0.83270156",
"0.77107364",
"0.75636035",
"0.7493946",
"0.74208045",
"0.7393732",
"0.7268761",
"0.7196634",
"0.71570015",
"0.71258074",
"0.7123171",
"0.7118092",
"0.7080076",
"0.7004553",
"0.6993604",
"0.69608295",
"0.6912071",
"0.6907833",
"0.6881702",
"0.68744445",
"0.6846003",
"0.6840833",
"0.682782",
"0.67866033",
"0.67730457",
"0.6730848",
"0.6701165",
"0.6700251",
"0.6669117",
"0.66669965"
] |
0.8094415
|
1
|
The concatenation uses AnnData.concatenate(); here we test the concatenation result on region, region_key, instance_key.
|
def test_concatenate_tables():
table0 = _get_table(region="shapes/circles", instance_key="instance_id")
table1 = _get_table(region="shapes/poly", instance_key="instance_id")
table2 = _get_table(region="shapes/poly2", instance_key="instance_id")
with pytest.raises(ValueError):
_concatenate_tables([])
assert len(_concatenate_tables([table0])) == len(table0)
assert len(_concatenate_tables([table0, table1, table2])) == len(table0) + len(table1) + len(table2)
table0.obs["annotated_element_merged"] = np.arange(len(table0))
c0 = _concatenate_tables([table0, table1])
assert len(c0) == len(table0) + len(table1)
d = c0.uns[TableModel.ATTRS_KEY]
d["region"] = sorted(d["region"])
assert d == {
"region": ["shapes/circles", "shapes/poly"],
"region_key": "region",
"instance_key": "instance_id",
}
table3 = _get_table(region="shapes/circles", region_key="annotated_shapes_other", instance_key="instance_id")
with pytest.raises(ValueError):
_concatenate_tables([table0, table3], region_key="region")
table4 = _get_table(
region=["shapes/circles1", "shapes/poly1"], region_key="annotated_shape0", instance_key="instance_id"
)
table5 = _get_table(
region=["shapes/circles2", "shapes/poly2"], region_key="annotated_shape0", instance_key="instance_id"
)
table6 = _get_table(
region=["shapes/circles3", "shapes/poly3"], region_key="annotated_shape1", instance_key="instance_id"
)
with pytest.raises(ValueError, match="`region_key` must be specified if tables have different region keys"):
_concatenate_tables([table4, table5, table6])
assert len(_concatenate_tables([table4, table5, table6], region_key="region")) == len(table4) + len(table5) + len(
table6
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def concatenate_data():",
"def test_concat_data(self):\n\n this_satellite_dict = satellite_io.concat_data(\n satellite_dicts=[\n SATELLITE_DICT_SUBSET_BY_INDEX, SATELLITE_DICT_SUBSET_BY_TIME\n ]\n )\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_CONCAT\n ))",
"def assemble_row(r1, r2):\n r1.extend(r2)\n return r1",
"def test_concatenate(self):\n header = BDFHeader.from_path(TestData.bdf_256)\n header2 = BDFHeader.from_path(TestData.bdf_256)\n assert header.nb_data_records == 60\n assert header.data_duration == 1\n assert header2.nb_data_records == 60\n assert header2.data_duration == 1\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header\n header.concatenate(header2)\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header\n assert header.nb_data_records == 120\n assert header.data_duration == 2\n assert header2.nb_data_records == 60\n assert header2.data_duration == 1\n header2.max_dimensions = [99999999] * header2.nb_channels\n header2.min_dimensions = [-9999999] * header2.nb_channels\n header2.max_digital = [99999999] * header2.nb_channels\n header2.min_digital = [-9999999] * header2.nb_channels\n header.concatenate(header2)\n assert header.nb_data_records == 180\n assert header.data_duration == 3\n assert header.max_dimensions == [99999999] * header2.nb_channels\n assert header.min_dimensions == [-9999999] * header2.nb_channels\n assert header.max_digital == [99999999] * header2.nb_channels\n assert header.min_digital == [-9999999] * header2.nb_channels\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header",
"def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2",
"def paired_interval_extend(uniq_fragment,fragment_cov,gtf_dic):\n out_dic = {}\n total_reads = 0\n for key in uniq_fragment.keys():\n chr_no = key[0]\n #print (frag_start,frag_end)\n frag_strand = key[3]\n interval_comp = uniq_fragment[key][0]\n complete_info = uniq_fragment[key][1]\n frag_cov = fragment_cov[key]\n total_reads += frag_cov\n geneNA = 'NA'\n geneType = 'NA'\n geneRegion = 'NA'\n flag = 0\n for trans in gtf_dic[(chr_no,frag_strand)]:\n frag_start,frag_end = key[1:3]\n # for trans in gtf_dic[('chr1','-')]:\n # if chr_no == 'chr1' and frag_strand == '-':\n if frag_start > trans[0] and frag_end < trans[1]:\n #print 'Hello!'\n # print (trans)\n geneNA = trans[4]\n geneType = trans[5]\n if geneType == 'protein_coding':\n CDS_start,CDS_end = trans[2:4]\n if frag_start >= CDS_start and frag_end <= CDS_end:\n geneRegion = 'CDS'\n elif frag_strand == '+':\n if frag_end <= CDS_start:\n geneRegion = '5UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = '5UTR-CDS'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = 'CDS-3UTR'\n elif frag_start >= CDS_end:\n geneRegion = '3UTR'\n elif frag_strand == '-':\n if frag_end <= CDS_start:\n geneRegion = '3UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = 'CDS-3UTR'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = '5UTR-CDS'\n elif frag_start >= CDS_end:\n geneRegion = '5UTR'\n else:\n geneRegion = 'Null'\n # print (frag_start,frag_end,CDS_start,CDS_end,geneNA,geneRegion)\n#------------------------------------------------------------------------------ intersect of fragments interval and exons interval\n frag_intersect = interval_comp & trans[-1]\n interval_comp_length = sum([interval_comp[a].upper- interval_comp[a].lower for a in range(0,len(interval_comp))])\n # print (interval_comp)\n # print (frag_intersect)\n#------------------------------------------------------------------------------ fragments located in introns\n if frag_intersect == P.empty(): \n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n if complete_info == 'complete':\n flag = 3\n #print interval_comp\n#------------------------------------------------------------------------------ reduce alignment noise\n frag_intersect_length = sum([frag_intersect[a].upper-frag_intersect[a].lower for a in range(0,len(frag_intersect))])\n absolute_diff = abs(frag_intersect_length-interval_comp_length)\n if absolute_diff == 0:\n#------------------------------------------------------------------------------ \n start_region = []\n length_region = []\n for region in frag_intersect:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n start_region = []\n length_region = []\n 
for region in interval_comp:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),geneNA,geneType,\\\n frag_strand,str(frag_start),str(frag_end),'intron-containing',str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n #print interval_comp\n #print frag_intersect\n#------------------------------------------------------------------------------ fragments boundaries located in exons\n #print frag_intersect[0][0],frag_start,frag_intersect[-1][1],frag_end\n #print abs_position\n # print (P.closedopen(frag_start,frag_end),trans[-1])\n interval_update = P.closedopen(frag_start,frag_end) & trans[-1]\n # print (interval_update)\n frag_trans_length = sum([interval_update[a].upper-interval_update[a].lower for a in range(0,len(interval_update))])\n absolute_diff = abs(frag_trans_length-interval_comp_length)\n #print absolute_diff\n #print geneRegion\n #print interval_comp\n #print abs_position\n if absolute_diff <= 300: #insert sequence length <=200nt\n #print frag_trans_length,interval_comp_length\n #print geneRegion\n flag = 2\n start_out = []\n length_out = []\n for interval_region in list(interval_update):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n # print (trans)\n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron-containing',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n if flag == 0:\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic[(chr_no,frag_start,frag_end,frag_strand)] = [(chr_no,str(frag_start),str(frag_end),'intergenic','intergenic',frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info)]\n print ('Total treated fragments: ' + str(total_reads))\n return out_dic",
"def _concatenate_features(features):\n pass",
"def merge_data_base_kvks(self):\n\n # create a data frame with all the unique kvk number/name combi\n df = self.url_df[[KVK_KEY, NAME_KEY]]\n df.set_index(KVK_KEY, inplace=True, drop=True)\n df = df[~df.index.duplicated()]\n\n # also create a data frame from the unique address kvk's\n name_key2 = NAME_KEY + \"2\"\n df2 = self.address_df[[KVK_KEY, NAME_KEY]]\n df2 = df2.rename(columns={NAME_KEY: name_key2})\n df2.set_index(KVK_KEY, inplace=True, drop=True)\n df2 = df2[~df2.index.duplicated()]\n\n # merge them on the outer, so we can create a combined kvk list\n df3 = pd.concat([df, df2], axis=1, join=\"outer\")\n\n # replace al the empty field in NAME_KEY with tih\n df3[NAME_KEY].where(~df3[NAME_KEY].isnull(), df3[name_key2], inplace=True)\n\n df3.drop(name_key2, inplace=True, axis=1)\n\n difference = df3.index.difference(df2.index)\n new_kvk_name = df3.loc[difference, :]\n\n n_before = self.address_df.index.size\n self.address_df.set_index(KVK_KEY, inplace=True)\n\n # append the new address to the address data base\n self.address_df = pd.concat([self.address_df, new_kvk_name], axis=0, sort=True)\n self.address_df.sort_index(inplace=True)\n self.address_df.reset_index(inplace=True)\n try:\n self.address_df.drop([\"index\"], axis=1, inplace=True)\n except KeyError as err:\n self.logger.info(err)\n\n n_after = self.address_df.index.size\n self.logger.info(\"Added {} kvk from url list to addresses\".format(n_after - n_before))",
"def _concatenate_batch(\n self, data_batch: Dict[str, List[str]]\n ) -> Tuple[List[Dict[str, any]], List[Dict[str, any]]]:\n concatenated_batch = []\n evidences_batch = []\n\n emotion_batch = data_batch[\"emotion\"]\n target_utterance_batch = data_batch[\"target_utterance\"]\n evidence_utterance_batch = data_batch[\"evidence_utterance\"]\n conversation_history_batch = data_batch[\"conversation_history\"]\n\n for i, (\n emotion,\n target_utterance,\n evidence_utterance,\n conversation_history,\n ) in enumerate(\n zip(\n emotion_batch,\n target_utterance_batch,\n evidence_utterance_batch,\n conversation_history_batch,\n )\n ):\n concatenated_qns = (\n \"The target utterance is \"\n + target_utterance\n + \"The evidence utterance is \"\n + evidence_utterance\n + \"What is the causal span from context that is relevant to the target utterance's emotion \"\n + emotion\n + \" ?\"\n )\n inputs = {\n \"id\": i,\n \"question\": concatenated_qns,\n \"answers\": [{\"text\": \" \", \"answer_start\": 0}],\n \"is_impossible\": False,\n }\n instance_dict = {\"context\": conversation_history, \"qas\": [inputs]}\n concatenated_batch.append(instance_dict)\n\n evidence = {\"id\": i, \"evidence\": evidence_utterance}\n evidences_batch.append(evidence)\n\n return concatenated_batch, evidences_batch",
"def testRegisterConcatenation(self):\n reg_one = ShiftRegister(2)\n reg_one.shift(\"a\")\n reg_one.shift(\"b\")\n reg_two = ShiftRegister(3)\n reg_two.shift(\"c\")\n reg_two.shift(\"d\")\n reg_two.shift(\"e\")\n reg_cat = reg_one.concatenate(reg_two)\n self.assertEqual(''.join(reg_cat), \"abcde\")",
"def test_merge_repl(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastLine', 'signature'), '')",
"def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def _resample_and_merge(ts, agg_dict):\n grouped = ts.group_serie(agg_dict['sampling'])\n existing = agg_dict.get('return')\n name = agg_dict.get(\"name\")\n resource = None if name is None else mock.Mock(id=str(uuid.uuid4()))\n metric = mock.Mock(id=str(uuid.uuid4()), name=name)\n agg_dict['return'] = (\n processor.MetricReference(metric, \"mean\", resource),\n carbonara.AggregatedTimeSerie.from_grouped_serie(\n grouped,\n carbonara.Aggregation(agg_dict['agg'],\n agg_dict['sampling'],\n None)))\n if existing:\n existing[2].merge(agg_dict['return'][2])\n agg_dict['return'] = existing",
"def _merge(self):\n raise NotImplementedError",
"def concatenate_observation_data(\n self, compartment, date, measurementmethod, orig_srid, origx, origy,\n parameter, property, quality, sampledevice, samplemethod, unit, value,\n ):\n # Convert to string before joining\n data = map(str, [\n compartment, date, measurementmethod, orig_srid, origx, origy,\n parameter, property, quality, sampledevice, samplemethod, unit,\n value\n ])\n return ''.join(data)",
"def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)",
"def mergeWith(self, others):",
"def merge_struct_arrays(self, data1, data2):\n data_final = np.concatenate((data1, data2))\n return data_final",
"def variant_add(v1: dict, v2: dict) -> Dict[str, Any]:\n left = set(v1.keys()).difference(v2.keys())\n right = set(v2.keys()).difference(v1.keys())\n joint = set(v1.keys()) & set(v2.keys())\n\n # deal with __migrator: ordering\n if \"__migrator\" in v2:\n ordering = v2[\"__migrator\"].get(\"ordering\", {})\n operation = v2[\"__migrator\"].get(\"operation\")\n # handle special operations\n if operation:\n return VARIANT_OP[operation](v1, v2)\n else:\n ordering = {}\n\n # special keys\n if \"__migrator\" in right:\n right.remove(\"__migrator\")\n\n # special keys in joint\n special_variants = {}\n if \"pin_run_as_build\" in joint:\n # For run_as_build we enforce the migrator's pin\n # TODO: should this just be a normal ordering merge, favoring more exact pins?\n joint.remove(\"pin_run_as_build\")\n special_variants[\"pin_run_as_build\"] = {\n **v1[\"pin_run_as_build\"],\n **v2[\"pin_run_as_build\"],\n }\n\n if \"zip_keys\" in joint:\n # zip_keys is a bit weird to join on as we don't have a particularly good way of identifying\n # a block. Longer term having these be named blocks would make life WAY simpler\n # That does require changes to conda-build itself though\n #\n # A zip_keys block is deemed mergeable if zkₛ,ᵢ ⊂ zkₘ,ᵢ\n zk_out = []\n zk_l = {frozenset(e) for e in v1[\"zip_keys\"]}\n zk_r = {frozenset(e) for e in v2[\"zip_keys\"]}\n\n for zk_r_i in sorted(zk_r, key=lambda x: -len(x)):\n for zk_l_i in sorted(zk_l, key=lambda x: -len(x)):\n # Merge the longest common zk first\n if zk_l_i.issubset(zk_r_i):\n zk_l.remove(zk_l_i)\n zk_r.remove(zk_r_i)\n zk_out.append(zk_r_i)\n break\n else:\n # Nothing to do\n pass\n\n zk_out.extend(zk_l)\n zk_out.extend(zk_r)\n zk_out = sorted(\n [sorted(zk) for zk in zk_out], key=lambda x: (len(x), str(x))\n )\n\n joint.remove(\"zip_keys\")\n special_variants[\"zip_keys\"] = zk_out\n\n joint_variant = {}\n for k in joint:\n v_left, v_right = ensure_list(v1[k]), ensure_list(v2[k])\n joint_variant[k] = variant_key_add(\n k, v_left, v_right, ordering=ordering.get(k, None)\n )\n\n out = {\n **toolz.keyfilter(lambda k: k in left, v1),\n **toolz.keyfilter(lambda k: k in right, v2),\n **joint_variant,\n **special_variants,\n }\n\n return out",
"def array_merge(a1, a2, inplace=False, empty_source=False): \n if inplace:\n out = a1\n else:\n out = copy.deepcopy(a1)\n if empty_source:\n for i in range(len(out)):\n out.pop()\n for k in a2:\n out[k] = a2[k]\n return out",
"def addAppRecordMerge (self, nodeList) :\n\t\tfor i in range(len(nodeList)) :\n\t\t\tnodeList[i].addAppData(\"id\",\"data \" + nodeList[i].instanceID , Node.ALL, Node.ALL)\n\t\t\tnodeList[i].serialize((Node.ALL, Node.ALL))",
"def ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def merge_ranges():",
"def test_aggregation_operation(self, space, time, expected):\n # Two regions, three intervals\n data = np.array([[333.333, 333.333, 333.333], [333.333, 333.333, 333.333]])\n intermediate = Adaptor.convert_with_coefficients(data, space, 0)\n actual = Adaptor.convert_with_coefficients(intermediate, time, 1)\n np.testing.assert_allclose(actual, expected, rtol=1e-2)",
"def brepalgo_ConcatenateWireC0(*args):\n return _BRepAlgo.brepalgo_ConcatenateWireC0(*args)",
"def concatenate_record(record):\n new_record = {}\n for k,v in record.items():\n if k in ['AB','FX','PA','TI','RP','ID']:\n new_v = ' '.join(v)\n \n if k == 'ID':\n new_v = new_v.split('; ')\n \n new_record[k] = new_v\n elif k == 'CR':\n previous_citation = ''\n new_citations = []\n for citation in v:\n if previous_citation.endswith('DOI'):\n new_citations[-1] += ' ' + citation\n previous_citation = new_citations[-1]\n else :\n new_citations.append(citation)\n previous_citation = citation\n \n new_record[k] = new_citations\n else :\n new_record[k] = v\n \n return new_record",
"def concatenate_result(keys, datalist, resultkey, resultdata):\n # append result row by row\n i = 0\n for row in datalist:\n row.append(resultdata[i])\n i += 1\n # append column name\n newkeys = list(keys)\n newkeys.append(resultkey)\n print(\"Display first 5 classification result: customer_id, gender(femal=0)\")\n for row in datalist[:5]:\n print(row[0], row[-1])\n return newkeys, datalist",
"def combine(new_data, raw_data):\n return pd.merge(new_data, raw_data, on=[\"location\", \"date\"], how=\"outer\")",
"def to_upsert():\n \n return (out['parameters']['dataset'], out['parameters']['timezone'], \n out['parameters']['rows'], out['parameters']['format'], \n out['parameters']['refine']['ano'], out['parameters']['refine']['mes'], \n out['parameters']['metadata']['fecha_ejecucion'], \n out['parameters']['metadata']['parametros_url'], \n out['parameters']['metadata']['ip_address'], \n out['parameters']['metadata']['usuario'], \n out['parameters']['metadata']['nombre_archivo'], \n out['parameters']['metadata']['ruta'])",
"def add_Longhurst_Province_raster_to_array(ds):\n import geopandas\n from rasterio import features\n from affine import Affine\n # Get the shape files\n provinces = geopandas.read_file('/work/home/ts551/data/longhurst_v4_2010')\n shapes = [(shape, n) for n, shape in enumerate(provinces.geometry)]\n # Now add the existing array\n ds_tmp = ds[list(ds.data_vars)[0]].copy().mean(dim='time')\n # Add raster the provinces onto this\n ds_tmp['LonghurstProvince'] = rasterize(shapes, ds_tmp.coords)\n # Then update the variable\n ds['LonghurstProvince'] = ds_tmp['LonghurstProvince']\n # Add Some attributes\n attrs = {\n 'Long name': 'Longhurst Provinces',\n 'data downloaded from': 'http://www.marineregions.org/downloads.php#longhurst',\n 'version': 'Version 4 - March 2010',\n 'Citations': \"Longhurst, A.R et al. (1995). An estimate of global primary production in the ocean from satellite radiometer data. J. Plankton Res. 17, 1245-1271 ; Longhurst, A.R. (1995). Seasonal cycles of pelagic production and consumption. Prog. Oceanogr. 36, 77-167 ; Longhurst, A.R. (1998). Ecological Geography of the Sea. Academic Press, San Diego. 397p. (IMIS) ; Longhurst, A.R. (2006). Ecological Geography of the Sea. 2nd Edition. Academic Press, San Diego, 560p.\",\n }\n ds['LonghurstProvince'].attrs = attrs\n return ds"
] |
[
"0.63169724",
"0.55213654",
"0.52088755",
"0.51720846",
"0.5145792",
"0.49678332",
"0.49579075",
"0.48907503",
"0.48401648",
"0.47977793",
"0.47825605",
"0.4763224",
"0.47080132",
"0.46887326",
"0.4682378",
"0.4668646",
"0.46574107",
"0.46545583",
"0.4639255",
"0.46378735",
"0.4629885",
"0.4608086",
"0.46067613",
"0.45870695",
"0.45715204",
"0.45658806",
"0.4549699",
"0.45402318",
"0.45183748",
"0.4514984"
] |
0.5730689
|
1
|
surface plot of the temperature in the whole room
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection

def plotWholeRoom(mesh):
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # build the coordinate grid covering the whole room
    X = np.arange(0, mesh.xLength+mesh.meshsize, mesh.meshsize)
    Y = np.arange(0, mesh.yLength+mesh.meshsize, mesh.meshsize)
    X, Y = np.meshgrid(X, Y)
    numberOfXNodes = mesh.x_res  # equals round(mesh.xLength/mesh.meshsize)+1
    numberOfYNodes = mesh.y_res  # equals round(mesh.yLength/mesh.meshsize)+1
    # collect the temperature value stored in every grid node
    Z = np.array([[mesh.grid[i,j].funcVal for i in range(numberOfYNodes)] for j in range(numberOfXNodes)])
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    # pass vmin/vmax (e.g. vmin=4, vmax=41) to plot_surface to pin the colour scale
    # set limits for the z-axis
    ax.set_zlim(np.amin(Z)-mesh.meshsize, np.amax(Z)+mesh.meshsize)
    # optional tick formatting for the z-axis:
    # ax.zaxis.set_major_locator(LinearLocator(10))
    # ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
    return fig
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()",
"def imshow_surface(self):\n plt.imshow(self.z)\n plt.colorbar()\n plt.show()",
"def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323",
"def plotSurface(X):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d import proj3d\n f=plt.figure()\n ax=f.add_subplot(111,projection='3d')\n xi=np.arange(10,14,0.05)\n yi=np.arange(12,16,0.05)\n z = matplotlib.mlab.griddata(X[:,0], X[:,1], X[:,2], xi, yi, interp='nn')\n x, y = np.meshgrid(xi, yi)\n ax.plot_surface(x, y, z)\n return f",
"def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()",
"def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()",
"def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) ) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) ]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)",
"def _plot_sr_surface(self, varname):\n\n fig = plt.figure()\n varname = self.layer_lookup[varname]\n data = self.node_data[varname]\n x_node_loc = np.arange(self.mins[0], self.maxs[0], self.min_grid_size) + self.min_grid_size/2\n y_node_loc = np.arange(self.mins[1], self.maxs[1], self.min_grid_size) + self.min_grid_size/2\n lon2d, lat2d = np.meshgrid(x_node_loc, y_node_loc)\n\n # mask NaN values\n data_m = np.ma.array(data, mask=np.isnan(data))\n plt.pcolormesh(lon2d, lat2d, data_m.T, vmin=data_m.min(), vmax=data_m.max())",
"def plot_phase(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,np.angle(self.psi)[:,:,math.floor(res/2)])\n plt.show()",
"def surfaceRender(nodal_mesh, focus, ax=None):\n\t# If no axes were passed, generate new set of axes\n\tif not ax:\n\t\tfig = mplt.figure()\n\t\tax = fig.add_subplot(111, projection='3d')\n\n\t# Sort the mesh by first 3 columns\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 0].argsort()]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 1].argsort(kind='mergesort')]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 2].argsort(kind='mergesort')]\n\t\n\t# Set up number of divisions and calculate e for each division (as a ratio)\n\tnum_div = 20\n\te = [i/num_div for i in range(num_div + 1)]\n\t# Convert angular values from degrees to radians\n\trads = math.pi/180\n\tnodal_mesh[:, 1:3] *= rads\n\t# Store the shapes and sizes of the mesh values\n\tm = nodal_mesh.shape[0]\n\tsize_nodal_nu = np.where(nodal_mesh[:, 2] == 0)[0].size\n\tsize_nodal_phi = m/size_nodal_nu\n\t# Get the mu and theta values from the mesh\n\tnodal_nu = nodal_mesh[:size_nodal_nu, 1]\n\tnodal_phi = nodal_mesh[::size_nodal_nu, 2]\n\t# Convert apex node from prolate to cartesian, then plot with scatter\n\tif min(nodal_nu) == 0:\n\t\tx, y, z = mathhelper.prolate2cart(nodal_mesh[0, 0], nodal_mesh[0, 1], nodal_mesh[0, 2], focus)\n\t\tax.scatter(z, y, -x)\n\t\tstart_nu = 1\n\telse:\n\t\tstart_nu = 0\n\t# Plot circumferential element boundaries\n\tfor i in range(start_nu, size_nodal_nu):\n\t\tfor j in range(int(size_nodal_phi)):\n\t\t\t# Define nodal values for interpolation\n\t\t\tif j == size_nodal_phi-1:\n\t\t\t\tind0 = i\n\t\t\t\tp0 = 2*math.pi\n\t\t\telse:\n\t\t\t\tind0 = (j+1)*size_nodal_nu + i\n\t\t\t\tp0 = nodal_phi[j+1]\n\t\t\tind1 = (j)*size_nodal_nu + i\n\t\t\tp1 = nodal_phi[j]\n\t\t\t# Get mu and dM/dm1\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 3]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 3]\n\t\t\t# Convert to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot the node\n\t\t\tax.scatter(n0z, n0y, -n0x)\n\t\t\t# Plot the arc segments\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine starting point to use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get theta\n\t\t\t\tp_here = p0 - e[k]*(p0 - p1)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, nodal_nu[i], p_here, focus)\n\t\t\t\t# Create vectors\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot segments\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t# Plot longitudinal element boundaries\n\tfor i in range(int(size_nodal_phi)):\n\t\tfor j in range(size_nodal_nu-1):\n\t\t\t# Define nodal values needeed for interpolation\n\t\t\tind0 = i*size_nodal_nu + j\n\t\t\tind1 = ind0 + 1\n\t\t\tn0 = nodal_nu[j]\n\t\t\tn1 = nodal_nu[j+1]\n\t\t\t# Get lambda and dL/de2\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 4]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 4]\n\t\t\t# Convert nodal points to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot arc\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine point to 
use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get nu\n\t\t\t\tn_here = n0 + e[k]*(n1-n0)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, n_here, nodal_phi[i], focus)\n\t\t\t\t# Append the vectors for plotting\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot the segment\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t\t\t\t\n\treturn(ax)",
"def draw_surfaces(self, x_src, y_src, x_grd, y_grd, energy, xmax):\n import matplotlib.pyplot as plt \n fig = plt.figure(figsize=(12, 6))\n nom1 = fig.add_subplot(121)\n self.draw_nominal_surface(x_src, y_src, x_grd, y_grd, energy,\n xmax, nom1, bins=30, range=0.5 * u.deg)\n nom1.plot(x_src, y_src, \"wo\")\n\n tilt1 = fig.add_subplot(122)\n self.draw_tilted_surface(x_src, y_src, x_grd, y_grd, energy,\n xmax, tilt1, bins=30, range=100 * u.m)\n tilt1.plot(x_grd, y_grd, \"wo\")\n\n plt.show()\n\n return",
"def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()",
"def surface():\n \"\"\"\n Get surface for plotting.\n\n :return fsaverage: surface locations as in nilearn\n :return surf: surface for plotting\n \"\"\"\n\n fsaverage = fetch_surf_fsaverage('fsaverage')\n surf = {}\n\n for key in [t + '_' + h for t in ['pial', 'infl'] for h in ['left', 'right']]:\n\n surf = load_surf_mesh(fsaverage[key])\n x, y, z = np.asarray(surf[0].T, dtype='<f4')\n i, j, k = np.asarray(surf[1].T, dtype='<i4')\n\n surf[key] = dict(x=x, y=y, z=z, i=i, j=j, k=k)\n\n return fsaverage, surf",
"def aspheresurface(self):\n\t\tR = self.coefficients[0]\n\t\ttheta = np.linspace(0, 2*np.pi, 100)\n\t\trho = np.linspace(0, R, 100)\n\t\t[u,r] = np.meshgrid(theta,rho)\n\t\tX = r*cos(u)\n\t\tY = r*sin(u)\n\t\tZ = aspherepolar(self.coefficients,r)\n\t\tfig = plt.figure(figsize=(12, 8), dpi=80)\n\t\tax = fig.gca(projection='3d')\n\t\tsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.RdYlGn,\n\t linewidth=0, antialiased=False, alpha = 0.6)\n\t\tplt.show()\n\t\treturn 0",
"def surface_tension(name, temp):\n if name == 'CO2':\n # Data for 217-302 K. Extrapolated for higher temperatures.\n if temp < 302.136780079:\n par = [\n 1.05646453e+02,\n - 5.60265519e-01,\n 6.97039246e-04\n ]\n else:\n par = [\n 0, 0, 0\n ]\n else:\n # Data for 283-313 K. Extrapolated for higher temperatures.\n if temp < 408.482827604:\n par = [\n 2.21308934e+01,\n 1.45524444e-01,\n - 4.88888889e-04\n ]\n else:\n par = [\n 0, 0, 0\n ]\n return 1e-3 * (par[0] + par[1] * temp + par[2] * temp**2)",
"def plot_surface(self, varname):\n\n if self.is_vr:\n self._plot_vr_surface(varname)\n else:\n self._plot_sr_surface(varname)",
"def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf",
"def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,\n yrange=None, figsize=(12, 12)):\n fig = pyplot.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n nsamp, nsen = Z.shape\n\n sen_index = df.columns.names.index('sensor')\n senlist = df.columns.levels[sen_index]\n pyplot.yticks(y_data, senlist)\n ax.plot_surface(\n np.repeat(x_data,\n nsen, axis=1),\n np.repeat(np.matrix(y_data), nsamp, axis=0),\n df.values,\n cmap=cm.coolwarm)\n pyplot.xlabel(xlabel)\n pyplot.ylabel('Sensor name')\n ax.set_zlabel(ylabel)\n ax.view_init(elev=45., azim=-130)\n ax.tick_params(axis='y', which='major', labelsize=4)\n pyplot.show()",
"def plot_surface(\n condition: bool,\n function: typing.Callable,\n x: typing.List[float],\n t: typing.List[float],\n p: typing.List[float],\n t_min: float,\n t_max: float,\n x_v: numpy.array,\n):\n # TODO: docstring\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n\n if condition:\n ax.scatter(x, t, p, marker=\"o\")\n\n t_v = numpy.linspace((t_min - 10), (t_max + 10), num=50)\n x_fit, t_fit = numpy.meshgrid(x_v, t_v)\n p_fit = numpy.array([function(x_fit[i], t_fit[i]) for i in range(len(x_fit))])\n ax.plot_surface(x_fit, t_fit, p_fit, alpha=0.2)\n\n ax.set_xlabel(\"First component fraction\")\n ax.set_ylabel(\"Temperature K\")\n ax.set_zlabel(\"Permeance\")\n fig.suptitle(\"Fit Illustration\", fontsize=10)\n plt.show()",
"def plot_2D_edp(self, xmin=-100, xmax=100, zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n #Y = rho_xz[:,1]\n #Z = rho_xz[:,2]\n X.shape = (N, N)\n Y.shape = (N, N)\n Z.shape = (N, N)\n plt.figure()\n plt.contourf(X, Y, Z)",
"def plotSurface(surfaceFile, comp=2, points=False, tris=False,\n profile=False, ax=None, annotate=True, norm=None,xscale=1, yscale=1):\n verts,data,tris = load_h5(surfaceFile)\n\n if comp==3: #radial displacements\n z = np.hypot(data[:,:,0], data[:,:,1]).flatten()\n else:\n z = data[:,:,comp].flatten()\n #z = data[:,:,comp].flatten()\n x = verts[:,0] / xscale\n y = verts[:,1] / yscale\n\n #NOTE: need to change grid for linear spacing to work properly\n xi = np.linspace(x.min(), x.max(), x.size)\n yi = np.linspace(y.min(), y.max(), y.size)\n zi = griddata(x,y,z, xi,yi, interp='nn') #'nn'\n\n #NOTE: getting error message here...\n # linear interpolation requires exactly the same limits\n #xi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #yi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #zi = griddata(x,y,z, xi,yi, interp='linear') #'nn'\n #ValueError: output grid must have constant spacing when using interp='linear'\n\n if ax==None:\n plt.figure()\n else:\n ax = plt.axes(ax)\n\n #plt.pcolor(xi, yi, zi, cmap=plt.cm.jet) #Very slow...\n x1, x2, y1, y2 = [x.min(), x.max(), y.min(), y.max()]\n im = plt.imshow(zi, cmap=plt.cm.jet, norm=norm, extent=[x1, x2, y1, y2])\n\n if annotate:\n compdict = {0:'Ux',1:'Uy',2:'Uz',3:'Ur'}\n plt.title('{} Displacement'.format(compdict[comp]))\n plt.xlabel('Distance [m]')\n plt.ylabel('Distance [m]')\n cb = plt.colorbar()\n cb.set_label('[m]')\n\n if points:\n plt.plot(x,y,'k.')\n\n if type(tris) is np.ndarray:\n plt.triplot(x, y, tris, 'k-')\n\n # EW profile line through the x-axis\n if profile:\n plt.axhline(linewidth=2, color='r')\n Zi = zi[x.size/2,:]\n plt.figure()\n plt.plot(xi, Zi, 'b.-')\n plt.title('Profile')\n plt.xlabel('Distance [m]')\n plt.ylabel('{} Displacement [m]'.format(compdict[comp]))\n\n return im",
"def plot_surface(self, a, masked_values=None, **kwargs):\n return self.__cls.plot_surface(a=a, masked_values=masked_values,\n **kwargs)",
"def test_surf():\n def f(x, y):\n sin, cos = numpy.sin, numpy.cos\n return sin(x + y) + sin(2 * x - y) + cos(3 * x + 4 * y)\n\n x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]\n s = surf(x, y, f)\n mlab.show()\n #cs = contour_surf(x, y, f, contour_z=0)\n return",
"def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()",
"def cloudy_grid_surface(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n model_number_matrix,grid_table = cloudy_library._restore_grid_table(grid_ext=p.grid_ext)\n\n fig = plt.figure(figsize=(10,7))\n ax = plt.axes(projection='3d')\n\n key1, key2 = list(p.cloudy_param.keys())[0],list(p.cloudy_param.keys())[1]\n value1, value2 = list(p.cloudy_param.values())[0],list(p.cloudy_param.values())[1]\n\n # Decide on what goes on x and y axis\n cloudy_parameters = np.array(['NH','FUV','hden','Z'])\n x_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][0]\n y_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][1]\n\n # Cut in grid table\n grid_table_cut = grid_table.iloc[np.where((grid_table[key1].values == value1) & \\\n (grid_table[key2].values == value2))[0]]\n x, y = grid_table_cut[x_index].values, grid_table_cut[y_index].values\n X, Y = np.meshgrid(np.unique(grid_table_cut[x_index].values), np.unique(grid_table_cut[y_index].values))\n\n # Plot line ratio?\n if '_' in p.line:\n L1 = grid_table_cut[p.line.split('_')[0]].values\n L2 = grid_table_cut[p.line.split('_')[1]].values\n L2[L2 == 0] = 1e9\n line_lum = (L1/L2).astype(float)\n vmin = np.min(np.log10(line_lum[L2 < 1e9]))\n\n else:\n line_lum = grid_table_cut[p.line].values.astype(float)\n vmin = np.min(np.log10(line_lum[line_lum > 0]))\n\n lum = np.log10(line_lum)\n lum = lum.reshape([len(np.unique(x)), len(np.unique(y))]).T\n\n # ########## Patching the grid !!\n # line_lum[np.isnan(line_lum)] = -1 # what are these?\n # # 0 values: not sure if we have any?\n # line_lum[line_lum == 0] = np.min(line_lum[line_lum > 0])\n # # Negative numbers: missing grid point\n # i_missing = np.where(line_lum < 0)[0]\n # while len(i_missing) > 0:\n # lum = np.log10(line_lum)\n # for i in i_missing:\n # # print(lum[i-1],lum[i+1])\n # try: \n # lum[i] = (lum[i-1] + lum[i+1])/ 2\n # except:\n # pass\n # # print('he',np.isnan(lum[i]))\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i-1] \n # except:\n # pass\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i+1] \n # except:\n # pass \n # line_lum[i] = 10.**lum[i]\n # # print(i,lum[i])\n # i_missing = np.where(line_lum < 0)[0]\n # ########## End of patching\n\n\n # pdb.set_trace()\n ax.plot_surface(X, Y, lum, cmap=\"autumn_r\", vmin=vmin, lw=0, rstride=1, cstride=1,alpha=0.8)\n\n ax.set_xlabel('\\n\\n' + getlabel('l'+x_index))\n ax.set_ylabel('\\n\\n' + getlabel('l'+y_index))\n\n try:\n ax.set_zlabel('\\n\\n' + getlabel('l%s' % p.line))\n except:\n ax.set_zlabel('\\n\\n log ' + p.line.replace('_','/'))\n\n\n ax.scatter(x[line_lum > 10.**vmin],y[line_lum > 10.**vmin],np.log10(line_lum[line_lum > 10.**vmin]),\\\n 'o',c=np.log10(line_lum[line_lum > 10.**vmin]),cmap='autumn_r',s=50)\n\n # print(x)\n # print(line_lum)\n ax.view_init(30, p.angle)\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_grid_%s.%s' % (p.line, p.format), format=p.format, dpi=300) \n # pdb.set_trace()",
"def plot_cube(ax: Axes, x: ArrayLike, y: ArrayLike, f_low: callable, f_upp: callable, **kwargs):\n # lower\n xm, ym = np.meshgrid(x, y)\n zm = f_low(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # upper\n zm = f_upp(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # north\n xm, ym = np.array([x, x]), y[0]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[0]), f_upp(x, y[0])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # south\n xm, ym = np.array([x, x]), y[-1]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[-1]), f_upp(x, y[-1])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # east\n xm, ym = x[0]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[0], y), f_upp(x[0], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # west\n xm, ym = x[-1]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[-1], y), f_upp(x[-1], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)",
"def plot_temp():\r\n work_book = xlrd.open_workbook(\"Temp.xls\")\r\n sheet1 = work_book.sheet_by_name(\"Temperature\")\r\n time_x = sheet1.col_values(1)\r\n temp_y = sheet1.col_values(0)\r\n plt.title(\"Time\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Temperature\")\r\n plt.plot(time_x, temp_y)\r\n plt.show()",
"def _run():\n\n temperatures_kelvins = _create_temperature_grid()\n first_derivs_kelvins_pt01 = numpy.gradient(temperatures_kelvins)\n second_derivs_kelvins_pt01 = numpy.gradient(\n numpy.absolute(first_derivs_kelvins_pt01)\n )\n\n this_ratio = (\n numpy.max(temperatures_kelvins) /\n numpy.max(first_derivs_kelvins_pt01)\n )\n\n first_derivs_unitless = first_derivs_kelvins_pt01 * this_ratio\n\n this_ratio = (\n numpy.max(temperatures_kelvins) /\n numpy.max(second_derivs_kelvins_pt01)\n )\n\n second_derivs_unitless = second_derivs_kelvins_pt01 * this_ratio\n\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n temperature_handle = axes_object.plot(\n temperatures_kelvins, color=TEMPERATURE_COLOUR, linestyle='solid',\n linewidth=SOLID_LINE_WIDTH\n )[0]\n\n second_deriv_handle = axes_object.plot(\n second_derivs_unitless, color=SECOND_DERIV_COLOUR, linestyle='solid',\n linewidth=SOLID_LINE_WIDTH\n )[0]\n\n first_deriv_handle = axes_object.plot(\n first_derivs_unitless, color=FIRST_DERIV_COLOUR, linestyle='dashed',\n linewidth=DASHED_LINE_WIDTH\n )[0]\n\n this_min_index = numpy.argmin(second_derivs_unitless)\n second_derivs_unitless[\n (this_min_index - 10):(this_min_index + 10)\n ] = second_derivs_unitless[this_min_index]\n\n tfp_handle = axes_object.plot(\n -1 * second_derivs_unitless, color=TFP_COLOUR, linestyle='dashed',\n linewidth=DASHED_LINE_WIDTH\n )[0]\n\n axes_object.set_yticks([0])\n axes_object.set_xticks([], [])\n\n x_label_string = r'$x$-coordinate (increasing to the right)'\n axes_object.set_xlabel(x_label_string)\n\n legend_handles = [\n temperature_handle, first_deriv_handle, second_deriv_handle,\n tfp_handle\n ]\n\n legend_strings = [\n TEMPERATURE_LEGEND_STRING, FIRST_DERIV_LEGEND_STRING,\n SECOND_DERIV_LEGEND_STRING, TFP_LEGEND_STRING\n ]\n\n axes_object.legend(legend_handles, legend_strings, loc='lower right')\n\n print 'Saving figure to file: \"{0:s}\"...'.format(OUTPUT_FILE_NAME)\n pyplot.savefig(OUTPUT_FILE_NAME, dpi=FIGURE_RESOLUTION_DPI)\n pyplot.close()",
"def surface_plot(name: str = 'start_date_analysis1.pkl'):\n df = pd.read_pickle(name)\n\n # set up a figure twice as wide as it is tall\n fig = plt.figure(figsize=plt.figaspect(0.5))\n # ===============\n # First subplot\n # ===============\n # set up the axes for the first plot\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.set_title('Modifications per File')\n ax.set_xlabel('Date (Months)')\n ax.set_ylabel('Threshold Individual')\n for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):\n data = df[df['threshold_pairs'] == row]\n label = 'Threshold pairs ' + str(row)\n # Plot the surface.\n surf = ax.plot_trisurf(data['date'], data['threshold'], data['mpf'], alpha=0.7,\n linewidth=0, antialiased=False, label=label)\n surf._facecolors2d = surf._facecolors3d\n surf._edgecolors2d = surf._edgecolors3d\n # ===============\n # Second subplot\n # ===============\n # set up the axes for the second plot\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n ax.set_title('Transitions per Test')\n ax.set_xlabel('Date (Months)')\n ax.set_ylabel('Threshold Individual')\n for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):\n data = df[df['threshold_pairs'] == row]\n label = 'Threshold pairs ' + str(row)\n # Plot the surface.\n\n surf = ax.plot_trisurf(data['date'], data['threshold'], data['tpt'], alpha=0.7,\n linewidth=0, antialiased=False, label=label)\n\n surf._facecolors2d = surf._facecolors3d\n surf._edgecolors2d = surf._edgecolors3d\n\n # cbar = fig.colorbar(surf)\n # cbar.locator = LinearLocator(numticks=10)\n # cbar.update_ticks()\n\n plt.suptitle('Threshold Start Date Analysis 3D', fontsize=14)\n plt.legend()\n plt.show()",
"def plot_surface(\n trj: TrajaDataFrame,\n bins: Optional[Union[int, tuple]] = None,\n cmap: str = \"viridis\",\n **surfaceplot_kws: dict,\n) -> Figure:\n\n after_plot_args, surfaceplot_kws = _get_after_plot_args(**surfaceplot_kws)\n\n X, Y, U, V = coords_to_flow(trj, bins)\n Z = np.sqrt(U * U + V * V)\n\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n ax.plot_surface(\n X, Y, Z, cmap= cmap, linewidth=0, **surfaceplot_kws\n )\n\n ax = _label_axes(trj, ax)\n try:\n ax.set_aspect(\"equal\")\n except NotImplementedError:\n # 3D\n pass\n\n _process_after_plot_args(**after_plot_args)\n return ax"
] |
[
"0.7008557",
"0.6671473",
"0.657766",
"0.65188175",
"0.64394766",
"0.62508905",
"0.6203389",
"0.6193158",
"0.6164005",
"0.61388636",
"0.61340743",
"0.6104772",
"0.60910434",
"0.60061425",
"0.6003217",
"0.5999266",
"0.5967324",
"0.59593225",
"0.5940153",
"0.59139824",
"0.5909729",
"0.5867711",
"0.58313215",
"0.5829921",
"0.5808028",
"0.5804889",
"0.58003527",
"0.57961464",
"0.57918334",
"0.57830197"
] |
0.68719196
|
1
|
Reads API private key stored at keyPath.
|
def readKey(self, keyPath):
    try:
        # Assumed minimal completion (the original body is truncated at this point):
        # read the API private key text from keyPath and return it.
        with open(keyPath, 'r') as keyFile:
            return keyFile.read().strip()
    except IOError:
        return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_private_key():\n if not os.path.exists(_private_key_path):\n return None\n\n try:\n with open(_private_key_path) as secret_file:\n return secret_file.read()\n\n except Exception as exc:\n log.error(f'Could not read private key.\\n{exc}')\n traceback.print_exc(file=sys.stderr)",
"def read_api_key(path):\n path = os.path.abspath(path)\n if not os.path.exists(path):\n raise ValueError(\"no key found at given path: \" + path)\n with open(path) as f:\n return f.readline().strip()",
"def get_private_key(self):\n# _log.debug(\"get_private_key: node_name={}\".format(self.node_name))\n with open(os.path.join(self.runtime_dir, \"private\", \"private.key\"), 'rb') as f:\n return f.read()",
"def GetKeyByPath(self, key_path):",
"async def read(self, key: str) -> ResponseOrKey:",
"def get_apiKey(kpath):\n with open(kpath, 'r') as f:\n apiKey = f.readline().strip()\n return (apiKey)",
"def _load_key(self, path):\n with open(path, 'r') as f:\n self._key = f.readline().strip()\n self._secret = f.readline().strip()",
"def load_key():\n return open(\"Secret.key\",\"rb\").read()",
"def load_key(self):\n\t return open(\"key.key\", \"rb\").read()",
"async def retrieve_private_key(self) -> Tuple[str, str]:\n\n filename, file_path = random.choice(self._private_keys)\n async with aiofiles.open(file_path, mode='r') as file:\n private_key = await file.read()\n return private_key, self._create_public_key_identifier(filename)",
"def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()",
"def load_key():\n return open(\"secret.key\", \"rb\").read()",
"def get(self, key_name: str, password: str = None) -> PrivateKey:\n pass",
"def load_key():\n return open(\"pass.key\", \"rb\").read()",
"def read_key():\n path = os.path.join(os.path.dirname(__file__), 'data')\n f = open(os.path.join(path, 'credential.txt'), 'r')\n key = f.read()\n f.close()\n return key",
"def get_private_key_pem( pkey_path ):\n \n # get the OpenCloud private key \n observer_pkey = syndicate_storage.read_private_key( pkey_path )\n if observer_pkey is None:\n logger.error(\"Failed to load Observer private key\")\n return None\n \n observer_pkey_pem = observer_pkey.exportKey()\n \n return observer_pkey_pem",
"def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))",
"def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key",
"def load_key():\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n try:\r\n return open(key_dir, \"rb\").read()\r\n except:\r\n return None",
"def load_key(key_name):\n if not p.exists(key_name):\n write_key(key_name)\n\n return open(key_name, \"rb\").read()",
"def get_private_key(self):\n return self._private_key",
"def get_key_from_keyring(self):\n private_key = keyring.get_password(self.keyring_service_name, \"private_key\")\n\n if private_key is not None:\n return base64.b64decode(private_key)\n else:\n return None",
"def read_key(stub, key):\n try:\n response = stub.Read(keyval_pb2.ReadRequest(key=key))\n print(\"Read result:\")\n print_response(response)\n except grpc.RpcError as exception:\n print_response(exception)",
"def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {'privateExponent': util.Decode(rsa['privateExponent']),\n 'primeP': util.Decode(rsa['primeP']),\n 'primeQ': util.Decode(rsa['primeQ']),\n 'primeExponentP': util.Decode(rsa['primeExponentP']),\n 'primeExponentQ': util.Decode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Decode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])",
"def GetSubkeyByPath(self, key_path):",
"def get_private_key(self) -> str:\n\t\treturn self._privateKey",
"def private_key(self):\n if self._private_key is not None:\n return self._private_key[0]\n\n spk = self.serialized_private_key\n passphrase = self.passphrase\n\n try:\n self._private_key = [\n serialization.load_pem_private_key(\n self.serialized_private_key,\n backend=default_backend(),\n password=self.passphrase)]\n\n return self._private_key[0]\n\n except:\n raise\n self._private_key = [None]\n return self._private_key[0]",
"def private_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"private_key\")",
"def private_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"private_key\")",
"def load_key(fn, psw=None):\n if not fn:\n die(\"Need private key\")\n if psw:\n psw = as_bytes(psw)\n data = load_gpg_file(fn)\n key = load_pem_private_key(data, password=psw, backend=get_backend())\n return key"
] |
[
"0.72051257",
"0.70617217",
"0.69600236",
"0.6915302",
"0.668007",
"0.66768414",
"0.6642561",
"0.6624825",
"0.6615483",
"0.6496666",
"0.64925957",
"0.6490158",
"0.6469417",
"0.6446457",
"0.6441216",
"0.64364284",
"0.64151865",
"0.6385343",
"0.6289841",
"0.62765306",
"0.62684745",
"0.62085587",
"0.62057316",
"0.61941826",
"0.618101",
"0.61789614",
"0.6166437",
"0.61623573",
"0.61623573",
"0.61616683"
] |
0.7792327
|
0
|
Robust way to access `obj.__orig_class__`. Compared to a direct access this has the
|
def get_orig_class(obj, default_to__class__=False):
try:
# See https://github.com/Stewori/pytypes/pull/53:
# Returns `obj.__orig_class__` protecting from infinite recursion in `__getattr[ibute]__`
# wrapped in a `checker_tp`.
# (See `checker_tp` in `typechecker._typeinspect_func for context)
# Necessary if:
# - we're wrapping a method (`obj` is `self`/`cls`) and either
# - the object's class defines __getattribute__
# or
# - the object doesn't have an `__orig_class__` attribute
# and the object's class defines __getattr__.
# In such a situation, `parent_class = obj.__orig_class__`
# would call `__getattr[ibute]__`. But that method is wrapped in a `checker_tp` too,
# so then we'd go into the wrapped `__getattr[ibute]__` and do
# `parent_class = obj.__orig_class__`, which would call `__getattr[ibute]__`
# again, and so on. So to bypass `__getattr[ibute]__` we do this:
return object.__getattribute__(obj, '__orig_class__')
except AttributeError:
if sys.version_info.major >= 3:
cls = object.__getattribute__(obj, '__class__')
else:
# Python 2 may return instance objects from object.__getattribute__.
cls = obj.__class__
if is_Generic(cls):
# Workaround for https://github.com/python/typing/issues/658
stck = stack()
# Searching from index 2 is sufficient: At 0 is get_orig_class, at 1 is the caller.
# We assume the caller is not typing._GenericAlias.__call__ which we are after.
for line in stck[2:]:
try:
res = line[0].f_locals['self']
if res.__origin__ is cls:
return res
except (KeyError, AttributeError):
pass
if default_to__class__:
return cls # Fallback
raise
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_orig_class(obj, default_to__class__=False):\n try:\n # See https://github.com/Stewori/pytypes/pull/53:\n # Returns `obj.__orig_class__` protecting from infinite recursion in `__getattr[ibute]__`\n # wrapped in a `checker_tp`.\n # (See `checker_tp` in `typechecker._typeinspect_func for context)\n # Necessary if:\n # - we're wrapping a method (`obj` is `self`/`cls`) and either\n # - the object's class defines __getattribute__\n # or\n # - the object doesn't have an `__orig_class__` attribute\n # and the object's class defines __getattr__.\n # In such a situation, `parent_class = obj.__orig_class__`\n # would call `__getattr[ibute]__`. But that method is wrapped in a `checker_tp` too,\n # so then we'd go into the wrapped `__getattr[ibute]__` and do\n # `parent_class = obj.__orig_class__`, which would call `__getattr[ibute]__`\n # again, and so on. So to bypass `__getattr[ibute]__` we do this:\n return object.__getattribute__(obj, '__orig_class__')\n except AttributeError:\n if sys.version_info.major >= 3:\n cls = object.__getattribute__(obj, '__class__')\n else:\n # Python 2 may return instance objects from object.__getattribute__.\n cls = obj.__class__\n if _typing_3_7 and is_Generic(cls):\n # Workaround for https://github.com/python/typing/issues/658\n # Searching from index 2 is sufficient: At 0 is get_orig_class, at 1 is the caller.\n # We assume the caller is not typing._GenericAlias.__call__ which we are after.\n frame = currentframe().f_back.f_back\n try:\n while frame:\n try:\n res = frame.f_locals['self']\n if res.__origin__ is cls:\n return res\n except (KeyError, AttributeError):\n frame = frame.f_back\n finally:\n del frame\n\n if default_to__class__:\n return cls # Fallback\n raise",
"def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)",
"def orig_obj(self):\n return self._orig_obj",
"def orig_obj(self):\n return self._orig_obj",
"def orig_obj(self):\n return self._orig_obj",
"def _class(self):\n return self.__class",
"def getclass(instance_or_cls):\n return instance_or_cls if inspect.isclass(instance_or_cls) \\\n else instance_or_cls.__class__",
"def get_class_name(obj) -> str:\n return obj.__class__.__name__",
"def inspect_oldclass_instance():\n obj = OldClass(11)\n print(dir(obj)) # ['__doc__', '__init__', '__module__', 'number']\n # Why isn't __class__ seen by dir()? I don't know but it exists\n print(obj.__class__) # __main__.OldClass\n print(obj.__dict__) # {'number': 11}\n #print(obj.__getattribute__) # AttributeError\n #print(obj.__hash__) # AttributeError\n #print(obj.__reduce__) # AttributeError\n #print(obj.__setattr__) # AttributeError\n #print(obj.__delattr__) # AttributeError\n #print(obj.__new__) # AttributeError\n #print(obj.__subclasshook__) # AttributeError\n #print(obj.__weakref__) # AttributeError",
"def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])",
"def get_class(cls):\n class Foo(object):\n def __init__(self):\n pass\n x = Foo()\n x.__class__ = cls\n return x.__class__",
"def __class__(self, ???):",
"def change_real_class(self):\r\n if not self.type:\r\n #objects was just created, set type and return\r\n self.type = self.__class__.__name__\r\n return self\r\n if self.type == self.__class__.__name__:\r\n return self\r\n #type is set, we can do actual change of the class\r\n #TODO it could be cached during creation of relevant subclasses\r\n for cls in self.subclasses():\r\n if cls.__name__ == self.type:\r\n self.__class__ = cls\r\n return self\r\n raise RuntimeError(\"Subclass not found: %s %s\", self.type, self.__class__.__name__)",
"def wrap(cls, orig):\n # hack to give the timestamp this class' specialized methods\n orig.__class__ = cls\n return orig",
"def class_casting(obj: object, cls: type):\n orig_cls = obj.__class__\n obj.__class__ = cls\n yield\n obj.__class__ = orig_cls",
"def _declaring_class(obj):\n name = _qualname(obj)\n return name[:name.rfind('.')]",
"def get_object(cls):\n values = cls._METHOD(get_object(TOP_OBJECT))\n return StratisdConstants.get_class(cls._CLASSNAME, values)",
"def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass",
"def get_deserialization_instance(cls):\n if cls.__orig__ is None:\n return cls()\n else:\n return cls.__orig__()",
"def objects(self, cls):\n for name, info in direct_fields(self.__class__).items():\n if issubclass(cls, info.sub_fields[0].type_):\n return getattr(self, name)\n raise TypeError(cls)",
"def __getattribute__(self, name):\n return object.__getattribute__(object.__getattribute__(self, 'orig'),\n name)",
"def identify_class(self, cls):",
"def __getattribute__(self, attr):\n try:\n return super(ObjectProxy, self).__getattribute__(attr)\n except AttributeError:\n info = sys.exc_info()\n try:\n return getattr(self._proxied, attr)\n except AttributeError:\n six.reraise(info[0], info[1], info[2].tb_next)",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class",
"def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class",
"def instance_class(self):\n return self._instance_class",
"def inherits_from(obj, a_class):\n\n return isinstance(obj, a_class) and type(obj) is not a_class",
"def get_class_from_frame(fr):\n args, _, _, value_dict = inspect.getargvalues(fr)\n # Check the first parameter of the frame function is named `self` - if it is,\n # `self` will be referenced in `value_dict`.\n if len(args) != 0 and args[0] == 'self':\n instance = value_dict.get('self', None)\n if instance:\n return getattr(instance, '__class__', None)\n return None"
] |
[
"0.76980686",
"0.6622713",
"0.662021",
"0.662021",
"0.662021",
"0.6512145",
"0.63538855",
"0.6307841",
"0.62945986",
"0.6189339",
"0.6158842",
"0.6116041",
"0.60950243",
"0.60413843",
"0.60330874",
"0.6005985",
"0.60026515",
"0.5988169",
"0.59284616",
"0.5886291",
"0.5862983",
"0.58621544",
"0.5861976",
"0.5852304",
"0.5852304",
"0.58387065",
"0.58387065",
"0.5823234",
"0.57945544",
"0.57906234"
] |
0.7655947
|
1
|
Returns the requested campaign bid modifier in full detail.
|
def GetCampaignBidModifier(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getBid(self):\r\n\t\treturn self.data['bid']",
"def _bidding_function(self):\n return self._bid_value",
"def getModifier(self, *args):\n return _libsbml.Reaction_getModifier(self, *args)",
"def get_adcreative(self, creative_id, fields, batch=False):\n path = '%s' % creative_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)",
"def get_announcement(self, cid):\n cid = cid.upper()\n query = \"SELECT * from announcement where cid = %s\"\n inputs = (cid, )\n result = self.database_manager.execute_query(query, inputs)\n if result:\n announcement = \"Announcement for {} ({}): {}\".format(result[0][0], result[0][3], result[0][2])\n else:\n announcement = \"No announcement for this {}\".format(cid)\n return announcement",
"def getBudget(movieInfo):\n if \"budget\" in movieInfo:\n return int(movieInfo[\"budget\"])\n else:\n raise AttributeError(\"%s instance has no attribute budget\" % movieInfo)",
"def detail(request, bid):\n result = {\n 'status': '', # 'success' or 'failure'\n 'msg': '', # msg of the book\n 'error_msg': '', # notes of failure\n }\n # the error method\n if not is_get(request, result):\n return HttpResponse(json.dumps(result))\n\n # filter the list of book\n # actually, the number of book is 0 or 1\n books = models.BookInfo.objects.filter(id=bid).first()\n\n # if the number of book is 0\n if books is None:\n result['status'] = 'failure'\n result['error_msg'] = 'invalid book id'\n return HttpResponse(json.dumps(result))\n\n book_dict = dict()\n # transfer db obj to dict\n book = process_book_obj(books)\n book_dict[str(books.id)] = json.dumps(book)\n\n result['status'] = \"success\"\n result['msg'] = json.dumps(book_dict)\n\n return HttpResponse(json.dumps(result))",
"def get_gift(self):\r\n return self.gift",
"def get_bribe(self):\r\n return self.bribe",
"def getBandwidthDetail(self, identifier):\n _mask = \"\"\"activeDetails[allocation],projectedPublicBandwidthUsage, billingCyclePublicBandwidthUsage,\n hardware[outboundBandwidthUsage,bandwidthAllotmentDetail[allocation]],inboundPublicBandwidthUsage,\n virtualGuests[outboundPublicBandwidthUsage,bandwidthAllotmentDetail[allocation]],\n bareMetalInstances[outboundBandwidthUsage,bandwidthAllotmentDetail[allocation]]\"\"\"\n return self.client['SoftLayer_Network_Bandwidth_Version1_Allotment'].getObject(id=identifier, mask=_mask)",
"def getB(self):\n\t\treturn self.b",
"def get_auction(request):\n db = request.registry.db\n tender_id = request.matchdict['tender_id']\n tender = TenderDocument.load(db, tender_id)\n if not tender:\n request.errors.add('url', 'tender_id', 'Not Found')\n request.errors.status = 404\n return\n auction_info = tender.serialize(\"auction\")\n return {'data': auction_info}",
"def getDetail(self):\n\t\t\n\t\treturn (super().setParameters(0,self.getDefense(),0))\n\t\t\n\t\t#return \"\\n#########################################################\\n\"+\"\\nItem of Defense, Name of item:\"+self.getName()+\"\\nCapacity of defense:\"+str(self.getDefense())+\"\\nCapacity of attack:0 \\n Capacity of heal:0 \\n\"+\"#########################################################\\n\"",
"def returnMorbid(self, morbid):\n m = re.search(', (\\d+) \\(',morbid)\n if m:\n return re.sub(m.group(1),'<a href=\"http://www.omim.org/entry/%s\" target=\"_blank\">%s</a>' % (m.group(1),m.group(1)),morbid)\n else:\n return morbid",
"def get(self, request, pk, bid, format=None):\n benchmarkmodel = self.get_object(pk, bid)\n serializer = ModelApprovalSerializer(benchmarkmodel)\n return Response(serializer.data)",
"def modifier(self):\n return self._modifier",
"def get_card(self):\n return self.card",
"def breedte(self):\n return self._breedte.get_waarde()",
"def getBanter(self):\r\n return self.__banterAssembler.assembleString()",
"def getBid(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['ask'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. / data['ask']",
"def get_transfer_bid(self):\n return self._connect.post(constants.GET_TRANSFER_BID)",
"def _get_reward_bidding(self, pos):\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2**(self._env.bid_count-1) / 8\n else:\n return -1.0 * 2**(self._env.bid_count-1) / 8",
"def is_bid(self):\n return self.type == 'B'",
"def get_mentor(self, mentor_id):\n\t\t\n\t\t\n\t\tmentor = self.matcher.get_mentor(mentor_id)\n\t\tif mentor is not None:\n\t\t\treturn mentor.to_dict()",
"def get_team_bid(self, team_id):\n\t\tplayer_ids = [(self.reference_id + team_id) % 4, (self.reference_id + team_id + 2) % 4]\n\t\tres = []\n\t\tsummable = True\n\t\tfor player_id in player_ids:\n\t\t\tif player_id in self.bids:\n\t\t\t\tif self.bids[player_id] == \"N\" or self.bids[player_id] == \"B\":\n\t\t\t\t\tsummable = False\n\t\t\t\tres.append(self.bids[player_id])\n\t\t\telse:\n\t\t\t\tsummable = False\n\t\t\t\tres.append('?')\n\t\tif summable:\n\t\t\tres = [res[0] + res[1]]\n\t\treturn res",
"def value_bids(self):\n return OrderBookUtils.book_value(self.bid)",
"def info_reactions_complex_biochemical_get():\n reactions = _reaction_by_group(662) # 662 == Complex Biochemical Reactions\n return reactions, 200",
"def getBeta(self):\n\t\treturn self.relativistic_beta",
"def highest_bid(self):\n (price_eur, volume, _) = self._order_book['bids'][0]\n price_usd = Decimal(price_eur) * self._eurusd_rate\n return {\n 'price': Decimal(price_usd),\n 'volume': Decimal(volume),\n }",
"def get_pbc_info(self):\n return"
] |
[
"0.64405745",
"0.5491782",
"0.5352912",
"0.53522617",
"0.53101903",
"0.5145897",
"0.5034038",
"0.5004115",
"0.5001304",
"0.49945304",
"0.4993839",
"0.4987359",
"0.49709523",
"0.49398753",
"0.4900452",
"0.488487",
"0.4844014",
"0.48204824",
"0.4778102",
"0.47757906",
"0.47701168",
"0.476054",
"0.47485307",
"0.47356093",
"0.47305736",
"0.4730003",
"0.47157845",
"0.47146153",
"0.47063822",
"0.46960753"
] |
0.6848136
|
0
|
Creates, updates, or removes campaign bid modifiers. Operation statuses are returned.
|
def MutateCampaignBidModifiers(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GetCampaignBidModifier(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def MutateCampaignBudgets(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def bid(command):\n env_banner()\n\n game = Game.read()\n if game is None:\n game = Game.init_game()\n\n command = command.lower().strip()\n\n if command not in ['start', 'pause', 'resume']:\n click.echo('Valid options are start, pause or resume.')\n return\n\n if game.user_count == 0 or game.player_count == 0:\n click.echo('Init the game first by uploading player data.')\n return\n\n if game.player_to_bid == 0:\n click.echo('Bidding is complete')\n return\n\n if command == 'start':\n if game.bid_in_progress:\n click.echo('Bid is already in progress.')\n return\n if game.player_to_bid != game.player_count:\n click.echo('Bidding has already started. Use resume option.')\n return\n invite_bid()\n click.echo('Bidding has been started.')\n return\n\n if command == 'pause':\n if not game.bid_in_progress:\n click.echo('Bid is NOT in progress.')\n return\n game.bid_in_progress = False\n game.update()\n click.echo('Bidding has been paused.')\n return\n\n if command == 'resume':\n if game.bid_in_progress:\n click.echo('Bid is already in progress.')\n return\n if game.player_to_bid == game.player_count:\n click.echo('Bidding has not yet started. Use start option.')\n return\n game.bid_in_progress = True\n game.update()\n click.echo('Bidding has been resumed.')",
"def place_bid():\n if not request.get_json():\n abort(400)\n data = request.get_json(force=True)\n\n if not data.get('userID'):\n abort(400)\n if not data.get('amount'):\n abort(400)\n if not data.get('petID'):\n abort(400)\n\n #new_uuid = str(uuid.uuid4())\n mod.place_a_bid(data['petID'], data['amount'], data['userID'])\n # HTTP 200 Created\n # return jsonify({\"id\": new_uuid}), 200\n resp = {\"status\": \"OK\"}\n return jsonify(resp)",
"def add_bid(self, bid, player_id):\n\t\tglobal_id = self.globalize_id(player_id)\n\t\tassert len(self.bids) < self.data_size and global_id not in self.bids\n\t\tif bid == 0:\n\t\t\tbid = \"N\"\n\t\tself.bids[global_id] = bid",
"async def bid(self, guid, mask, verdicts, confidences, metadatas, min_allowed_bid, chain):\n min_bid = max(min_allowed_bid * self.min_bid_multiplier, min_allowed_bid)\n max_bid = max(min_allowed_bid * self.max_bid_multiplier, min_allowed_bid)\n\n asserted_confidences = [c for b, c in zip(mask, confidences) if b]\n avg_confidence = sum(asserted_confidences) / len(asserted_confidences) if asserted_confidences else 0\n bid = int(min_bid + ((max_bid - min_bid) * avg_confidence))\n\n # Clamp bid between min_bid and max_bid\n return max(min_bid, min(bid, max_bid))",
"def update_adset(self, campaign_id, name=None, campaign_status=None,\n daily_budget=None, lifetime_budget=None,\n start_time=None, end_time=None,\n bid_type=None, bid_info=None, promoted_object=None, targeting=None, batch=False):\n path = '%s' % campaign_id\n args = {}\n if name:\n args['name'] = name\n if campaign_status:\n args['campaign_status'] = campaign_status\n if daily_budget:\n args['daily_budget'] = daily_budget\n if lifetime_budget:\n args['lifetime_budget'] = lifetime_budget\n if start_time:\n args['start_time'] = start_time\n if end_time is not None:\n args['end_time'] = end_time\n if bid_type:\n args['bid_type'] = bid_type\n if bid_info:\n args['bid_info'] = bid_info\n if promoted_object:\n args['promoted_object'] = json.dumps(promoted_object)\n if targeting:\n args['targeting'] = json.dumps(targeting)\n\n return self.make_request(path, 'POST', args, batch=batch)",
"def create_adset(self, account_id, campaign_group_id, name,\n campaign_status, daily_budget=None, lifetime_budget=None,\n start_time=None, end_time=None,\n bid_type=None, bid_info=None, promoted_object=None, targeting=None, batch=False):\n if daily_budget is None and lifetime_budget is None:\n raise AdsAPIError(\"Either a lifetime_budget or a daily_budget \\\n must be set when creating a campaign\")\n if lifetime_budget is not None and end_time is None:\n raise AdsAPIError(\"end_time is required when lifetime_budget \\\n is specified\")\n\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'campaign_group_id': campaign_group_id,\n 'name': name,\n 'campaign_status': campaign_status,\n }\n if daily_budget:\n args['daily_budget'] = daily_budget\n if lifetime_budget:\n args['lifetime_budget'] = lifetime_budget\n if start_time:\n args['start_time'] = start_time\n if end_time:\n args['end_time'] = end_time\n if bid_type:\n args['bid_type'] = bid_type\n if bid_info:\n args['bid_info'] = bid_info\n if promoted_object:\n args['promoted_object'] = json.dumps(promoted_object)\n if targeting:\n args['targeting'] = json.dumps(targeting)\n\n return self.make_request(path, 'POST', args, batch=batch)",
"def step_bid(self, action_bid):\n if self.done_bidding:\n raise Exception(\"No more actions can be taken\")\n\n # action_bid must be in [0; AUCTION_SPACE_SIZE - 1]\n if action_bid < 0 or action_bid > AUCTION_SPACE_SIZE - 1:\n raise Exception(\"illegal action\")\n\n # what happens when we get a pass\n if action_bid == PASS_IDX:\n\n # we are not allowed to make a double for now\n self.elim_sig_bid[DOUBLE_IDX] = 0\n\n self.history_bid[action_bid] = 1\n\n if self.max_bid == -1:\n self.auction_history[self.n_pass] = 1\n elif self.n_pass < 2:\n self.auction_history[\n 3 + 8*self.max_bid + 3*(self.n_double + self.n_redouble) + self.n_pass + 1] = 1\n\n # incrementing the current number of passes\n self.n_pass += 1\n\n # what happens when we get a contract bid\n elif action_bid < PASS_IDX:\n\n if action_bid <= self.max_bid:\n raise Exception(\"illegal bidding.\")\n\n # resetting n_pass, n_double and n_redouble\n self.n_pass = 0\n self.n_double = 0\n self.n_redouble = 0\n self.max_bid = action_bid\n\n self.history_bid[action_bid] = 1\n self.history_bid[-1] = 0\n self.auction_history[3 + 8*self.max_bid] = 1\n\n # this action and all the actions below can no longer be performed\n self.elim_sig_bid[:(1 + self.max_bid)] = 0\n\n # doubles are now permitted, redoubles are not permitted\n self.elim_sig_bid[DOUBLE_IDX] = 1\n self.elim_sig_bid[REDOUBLE_IDX] = 0\n\n strain = convert_action2strain(action_bid)\n group = Seat2Group[self.turn_bid]\n if self.strain_declarer[group].get(strain, '') == '':\n self.strain_declarer[group][strain] = self.turn_bid # which one\n self.group_declarer = group # which group\n\n # what happens when we get a double\n elif action_bid == DOUBLE_IDX:\n # doubles are not permitted when\n # no contract bids have been made OR\n # a double bid has already been made OR\n # a redouble bid has been made\n if (self.max_bid == -1) or (self.n_double == 1) or (self.n_redouble == 1):\n raise Exception(\"double is not currently allowed\")\n\n self.n_double = 1\n self.elim_sig_bid[DOUBLE_IDX] = 0\n self.elim_sig_bid[REDOUBLE_IDX] = 1\n self.auction_history[3 + 8*self.max_bid + 3] = 1\n\n # what happens when we get a redouble\n elif action_bid == REDOUBLE_IDX:\n # redoubles are not permitted when\n # no contract bids have been made OR\n # a double bid has not been made OR\n # a redouble bid has already been made\n if (self.max_bid == -1) or (self.n_double == 0) or (self.n_redouble == 1):\n raise Exception(\"redouble is not currently allowed\")\n\n self.n_redouble = 1\n self.elim_sig_bid[DOUBLE_IDX] = 0\n self.elim_sig_bid[REDOUBLE_IDX] = 0\n self.auction_history[3 + 8*self.max_bid + 6] = 1\n\n # updating the index of the next bidding player\n self.turn_bid = (self.turn_bid + 1) % len(Seat)\n\n # move to the participant\n # NB: this code is only useful if not all players are bidding (i.e. 
self.bidding_seats\n # does not contain everybody)\n while True:\n if self.turn_bid not in self.bidding_seats:\n self.turn_bid = (self.turn_bid + 1) % len(Seat)\n self.n_pass += 1\n else:\n break\n\n hand = self.one_hot_deal[self.turn_bid]\n reward = 0\n\n # state is the next bidding player's state\n if (self.n_pass >= 3 and self.max_bid < 0) or self.max_bid == 34:\n\n if self.max_bid < 0:\n raise Exception(\"illegal bidding\")\n # extract the declarer, strain , level\n strain = convert_action2strain(self.max_bid)\n level = convert_action2level(self.max_bid)\n # single thread\n # reward = np.mean(Deal.score_st(dealer=self.deal, level=level, strain=strain, declarer=declarer, tries=self.nmc, mode=self.score_mode))\n # parallel threads\n\n # np.mean is moved to score\n declarer = self.strain_declarer[self.group_declarer][strain] # thise group's first declarer\n\n # TODO[ス: game rewards / scores will no longer be calculated during bidding - the next\n # bit of code needs to be removed\n reward = Deal.score(dealer=self.deal,\n level=level,\n strain=strain,\n declarer=declarer,\n tries=self.nmc,\n mode=self.score_mode)\n self.done_bidding = True\n\n # storing the contract\n self.contract.from_bid(bid=self.max_bid,\n double=(self.n_double > 0),\n redouble=(self.n_redouble > 0))\n\n # setting the index of the first player\n self.turn_play = (self.turn_bid + 1) % len(Seat)\n\n # since bidding is now done, we need to set the initial value of self.score_play\n self._update_score()\n\n # TODO[ス: remove the next lines - this method should no longer return anything\n state = (hand, self.history_bid)\n info = {\"turn\": Seat[self.turn_bid], \"max_bid\": self.max_bid}\n if self.debug:\n log_state(state, reward, self.done_bidding, info)\n\n return state, reward, self.done_bidding, info",
"def CreateBiddingStrategy(client):\n # Initialize appropriate service.\n bidding_strategy_service = client.GetService(\n 'BiddingStrategyService', version='v201809')\n\n # Create a shared bidding strategy.\n shared_bidding_strategy = {\n 'name': 'Maximize Clicks %s' % uuid.uuid4(),\n 'biddingScheme': {\n 'xsi_type': 'TargetCpaBiddingScheme',\n # Optionally set additional bidding scheme parameters.\n \n 'targetCpa': {\n 'microAmount': '2000000'\n }\n }\n }\n\n # Create operation.\n operation = {\n 'operator': 'ADD',\n 'operand': shared_bidding_strategy\n }\n\n response = bidding_strategy_service.mutate([operation])\n new_bidding_strategy = response['value'][0]\n\n print ('Shared bidding strategy with name \"%s\" and ID \"%s\" of type \"%s\"'\n 'was created.' %\n (new_bidding_strategy['name'], new_bidding_strategy['id'],\n new_bidding_strategy['biddingScheme']['BiddingScheme.Type']))\n\n return new_bidding_strategy",
"def create_adgroup(self, account_id, name, campaign_id,\n creative_id, bid_type=None, bid_info=None, max_bid=None,\n tracking_specs=None, view_tags=None, objective=None,\n adgroup_status=None, targeting=None, conversion_specs=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {\n 'name': name,\n 'campaign_id': campaign_id,\n 'creative': json.dumps({'creative_id': creative_id}),\n }\n if bid_type:\n args['bid_type'] = bid_type\n if max_bid:\n # can only use max_bid with CPM bidding\n args['max_bid'] = max_bid\n elif bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)",
"def bid(self):\n # log.debug(\"{0} is bidding...\".format(self.label))\n for bid in range(5):\n if self.is_legal_bid(bid):\n self.send_bid(bid)\n return",
"def gen_updateBCCustomer(args):\n bal, ytd, pay, data, wid, did, cid = tuple(args)\n bal, ytd, pay = tuple(map(float, [bal, ytd, pay]))\n wid, did, cid = tuple(map(int, [wid, did, cid]))\n\n SET = [\n (\"C_BALANCE\", bal), \n (\"C_YTD_PAYMENT\", ytd), \n (\"C_PAYMENT_CNT\", pay),\n (\"C_DATA\", data)\n ]\n WHERE = [\n (\"C_W_ID\", \"=\", wid),\n (\"C_D_ID\", \"=\", did),\n (\"C_ID\", \"=\", cid)\n ]\n return {\n \"type\": \"UPDATE\",\n \"set\": SET, \n \"where\": WHERE\n }",
"def add_modifier( modifier_id, reaction, model, compartment = \"default\", arguments = DEFAULT_ARGUMENTS):\n if modifier_id is None and len( reaction.getListOfModifiers()) == 1:\n return reaction.getListOfModifiers()[0].getId()\n else:\n if not modifier_id:\n reaction_name = reaction.getName();\n reaction_id = reaction.getId();\n modifier_id = reaction_id + \"_modifier_nr\" + str(len( reaction.getListOfModifiers()));\n modifier_name = reaction_name[0:3].lower() + \"modifier\";\n add_species( None, model, id = modifier_id, name = modifier_name, compartment = compartment, arguments = arguments);\n \n modifier_ref = reaction.createModifier()\n check( modifier_ref, 'create modifier reference', arguments = arguments);\n check( modifier_ref.setSpecies( modifier_id), 'assign modifier species', arguments = arguments);\n check( modifier_ref.setMetaId( \"metaid_0000\" + modifier_id), 'set meta ID', arguments = arguments);\n check( modifier_ref.addCVTerm(add_cvterm(GENERIC_REACTION_SBO_MAPPING[\"modifier\"])), 'set controlled vocab SBO term for cause', arguments = arguments);\n return modifier_ref",
"def update_instruction(bet_id, new_persistence_type):\n args = locals()\n return {\n to_camel_case(k): v for k, v in args.items() if v is not None\n }",
"def bid_dataset_0():\n\n bm = BidManager()\n bm.add_bid(1, 3, 0, True, 0)\n bm.add_bid(2, 4, 1, True, 0)\n bm.add_bid(5, 1, 2, True, 0)\n\n bm.add_bid(4, 2, 3, False, 0)\n bm.add_bid(1, 1, 4, False, 0)\n bm.add_bid(5, 6, 5, False, 0)\n \n return bm.get_df()",
"def post(self):\n json_data = request.get_json()\n json_data[\"sender_id\"] = current_user.id\n try:\n new_campaign = self.schema.load(json_data)\n except ValidationError as err:\n return {\"message\": err.messages}, HTTPStatus.BAD_REQUEST\n if Campaign.query.filter_by(mailchimp_id=new_campaign.mailchimp_id).first() is not None:\n return {\"message\": \"Campaign already exists.\"}, HTTPStatus.CONFLICT\n db.session.add(new_campaign)\n db.session.commit()\n return self.schema.dump(new_campaign), HTTPStatus.CREATED",
"def __init__(__self__, *,\n account_id: Optional[pulumi.Input[str]] = None,\n action_id: Optional[pulumi.Input[str]] = None,\n action_threshold: Optional[pulumi.Input['BudgetActionActionThresholdArgs']] = None,\n action_type: Optional[pulumi.Input[str]] = None,\n approval_model: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n budget_name: Optional[pulumi.Input[str]] = None,\n definition: Optional[pulumi.Input['BudgetActionDefinitionArgs']] = None,\n execution_role_arn: Optional[pulumi.Input[str]] = None,\n notification_type: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n subscribers: Optional[pulumi.Input[Sequence[pulumi.Input['BudgetActionSubscriberArgs']]]] = None):\n if account_id is not None:\n pulumi.set(__self__, \"account_id\", account_id)\n if action_id is not None:\n pulumi.set(__self__, \"action_id\", action_id)\n if action_threshold is not None:\n pulumi.set(__self__, \"action_threshold\", action_threshold)\n if action_type is not None:\n pulumi.set(__self__, \"action_type\", action_type)\n if approval_model is not None:\n pulumi.set(__self__, \"approval_model\", approval_model)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if budget_name is not None:\n pulumi.set(__self__, \"budget_name\", budget_name)\n if definition is not None:\n pulumi.set(__self__, \"definition\", definition)\n if execution_role_arn is not None:\n pulumi.set(__self__, \"execution_role_arn\", execution_role_arn)\n if notification_type is not None:\n pulumi.set(__self__, \"notification_type\", notification_type)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if subscribers is not None:\n pulumi.set(__self__, \"subscribers\", subscribers)",
"def game_bid():\n game = current_user.get_active_game()\n bid = request.form.get('bid', '')\n\n # Validate bid\n if not isinstance(bid, str):\n # Invalid input for bid, but no need to alert user\n return redirect(url_for('game_home'))\n bid = bid.strip()\n if not BID_REGEX.match(bid):\n flash('Your bid must be an integer bid from zero (0) to thirteen (13).')\n return redirect(url_for('game_home'))\n\n if game is None:\n flash('If you want to join a game, click the Join button.')\n return redirect(url_for('home'))\n else:\n hand = game.get_latest_hand()\n # Attempt to place the bid\n try:\n hand.place_bid(current_user.user_id, int(bid), game)\n except UserCanNotBidError:\n flash('Bidding is not available at this time for you.')\n return redirect(url_for('game_home'))\n except BadGameStateError:\n flash('An error occurred while trying to pace your bid. Please try again.')\n return redirect(url_for('game_home'))\n else:\n flash(f'Your bid of {bid} has been placed.')\n return redirect(url_for('game_home'))",
"def patch_auction(request):\n db = request.registry.db\n tender_id = request.matchdict['tender_id']\n tender = TenderDocument.load(db, tender_id)\n if not tender:\n request.errors.add('url', 'tender_id', 'Not Found')\n request.errors.status = 404\n return\n src = tender.serialize(\"plain\")\n auction_data = filter_data(request.json_body['data'])\n if auction_data:\n auction_data['tenderID'] = tender.tenderID\n tender.import_data(auction_data)\n patch = make_patch(tender.serialize(\"plain\"), src).patch\n if patch:\n tender.revisions.append(revision({'changes': patch}))\n try:\n tender.store(db)\n except Exception, e:\n return request.errors.add('body', 'data', str(e))\n return {'data': tender.serialize(\"auction\")}",
"def do_bay_update(cs, args):\n if args.rollback and args.magnum_api_version and \\\n args.magnum_api_version in ('1.0', '1.1', '1.2'):\n raise exceptions.CommandError(\n \"Rollback is not supported in API v%s. \"\n \"Please use API v1.3+.\" % args.magnum_api_version)\n patch = magnum_utils.args_array_to_patch(args.op, args.attributes[0])\n bay = cs.bays.update(args.bay, patch, args.rollback)\n if args.magnum_api_version and args.magnum_api_version == '1.1':\n _show_bay(bay)\n else:\n print(\"Request to update bay %s has been accepted.\" % args.bay)",
"def edit_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)",
"def bid_dataset_1():\n\n bm = BidManager()\n\n bm.add_bid(1, 6.7, 0, True, 0)\n bm.add_bid(1, 6.6, 1, True, 0)\n bm.add_bid(1, 6.5, 2, True, 0)\n bm.add_bid(1, 6.4, 3, True, 0)\n bm.add_bid(1, 6.3, 4, True, 0)\n bm.add_bid(1, 6, 5, True, 0)\n\n bm.add_bid(1, 1, 6, False, 0)\n bm.add_bid(1, 2, 7, False, 0)\n bm.add_bid(2, 3, 8, False, 0)\n bm.add_bid(2, 4, 9, False, 0)\n bm.add_bid(1, 6.1, 10, False, 0)\n \n return bm.get_df()",
"def createModifier(self):\n return _libsbml.Reaction_createModifier(self)",
"def apply_mods(obj):\n bpy.context.view_layer.objects.active = obj\n for modifier in obj.modifiers:\n bpy.ops.object.modifier_apply(modifier = modifier.name)",
"def cleanAllDecimateModifiers(self, id, decimateRatio= 0.1):\r\n \r\n self.cleanratio = decimateRatio \r\n \r\n filepath_in = self.name + self.extention \r\n a = self.name +\"_Decimate_%d\" %(id) \r\n self.name = a\r\n filepath_out = self.name + self.extention\r\n \r\n if self.extention == \".dae\": \r\n\r\n bpy.ops.wm.collada_import(filepath=filepath_in) \r\n \r\n bpy.ops.object.modifier_add(type=\"DECIMATE\")\r\n bpy.context.object.modifiers[\"Decimate\"].decimate_type = \"COLLAPSE\"\r\n bpy.context.object.modifiers[\"Decimate\"].ratio = self.cleanratio\r\n #bpy.context.object.modifiers[\"Decimate\"].iterations = self.cleanterations\r\n #bpy.context.object.modifiers[\"Decimate\"].use_collapse_triangulate=True\r\n #bpy.context.object.modifiers[\"Decimate\"].delimit = {\"NORMAL\"} \r\n \r\n bpy.ops.wm.collada_export(filepath=filepath_out, check_existing=True, filter_blender=True, filter_image=True, filter_movie=False, filter_python=False, filter_font=False, filter_sound=False, filter_text=False, filter_btx=False, filter_collada=True, filter_folder=True, filemode=8, apply_modifiers=True, export_mesh_type=0, export_mesh_type_selection='view', selected=True, include_children=False, include_armatures=False, deform_bones_only=False, active_uv_only=False, include_shapekeys=False, use_texture_copies=False, use_object_instantiation=True, sort_by_name=False)\r\n\r\n else:\r\n \r\n bpy.ops.import_scene.obj(filepath=filepath_in)\r\n bpy.ops.object.modifier_add(type=\"DECIMATE\")\r\n bpy.context.object.modifiers[\"Decimate\"].decimate_type = \"COLLAPSE\"\r\n bpy.context.object.modifiers[\"Decimate\"].ratio = self.cleanratio\r\n\r\n bpy.ops.wm.collada_export(filepath=filepath_out, check_existing=True, filter_blender=True, filter_image=True, filter_movie=False, filter_python=False, filter_font=False, filter_sound=False, filter_text=False, filter_btx=False, filter_collada=True, filter_folder=True, filemode=8, apply_modifiers=True, export_mesh_type=0, export_mesh_type_selection='view', selected=True, include_children=False, include_armatures=False, deform_bones_only=False, active_uv_only=False, include_shapekeys=False, use_texture_copies=True, use_object_instantiation=True, sort_by_name=False)",
"def append_armature_modifier(self, b_obj, b_armature):\n armature_name = b_armature.name\n b_mod = b_obj.modifiers.new(armature_name,'ARMATURE')\n b_mod.object = b_armature\n b_mod.use_bone_envelopes = False\n b_mod.use_vertex_groups = True",
"async def augment(self, ctx, *, augment: str):\n try:\n augment = self.get_entry('Augment', augment.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n type = augment['Type']\n price = augment['Sell Price']\n miranium = augment.get('Required Miranium')\n mat_1 = augment.get('Material 1')\n mat_2 = augment.get('Material 2')\n mat_3 = augment.get('Material 3')\n drop = augment.get('Drop')\n resource = augment.get('Precious Resource')\n\n total_tickets = 0\n\n embed = discord.Embed(title=augment['Name'], color=self.colors[augment[\"Rarity\"]])\n embed.add_field(name='Effect', value=augment['Effect'], inline=False)\n\n if type != 'Augment': # Remove when augment json fully updated\n embed.add_field(name='Type', value=type)\n\n if price != 0: # Remove when augment json fully updated\n embed.add_field(name='Sell Price', value=price)\n\n if miranium:\n embed.add_field(name='Required Miranium', value=miranium)\n\n if mat_1:\n name = mat_1[\"Name\"]\n amount = mat_1[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 1', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if mat_2:\n name = mat_2[\"Name\"]\n amount = mat_2[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 2', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if mat_3:\n name = mat_3[\"Name\"]\n amount = mat_3[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 3', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if drop:\n embed.add_field(name='Drop', value=drop)\n if resource:\n embed.add_field(name='Precious Resource', value=f'{resource[\"Amount\"]} {resource[\"Name\"]}', inline=False)\n\n if total_tickets != 0:\n embed.add_field(name='Total Tickets', value=total_tickets)\n\n await ctx.send(embed=embed)",
"def apply_enhancements(ability: dict, target: Player, self: Player) -> None:\n self.status_effects.append([\"enhancement_sickness\", 1])\n\n for enhancement in ability[\"enhancements\"]:\n if enhancement[\"target\"] == \"target\":\n getattr(combat_effects, \"inflict_\" + enhancement[\"effect\"])(\n value=enhancement[\"value\"], player=target\n )\n elif enhancement[\"target\"] == \"self\":\n getattr(combat_effects, \"inflict_\" + enhancement[\"effect\"])(\n value=enhancement[\"value\"], player=self\n )",
"def associate_asset_with_campaigns(\n client, customer_id, promotion_asset_resource_name, campaign_ids\n):\n if len(campaign_ids) == 0:\n print(f\"Asset was not associated with any campaigns.\")\n return\n\n campaign_service = client.get_service(\"CampaignService\")\n campaign_asset_service = client.get_service(\"CampaignAssetService\")\n\n operations = []\n\n for campaign_id in campaign_ids:\n operation = client.get_type(\"CampaignAssetOperation\")\n campaign_asset = operation.create\n campaign_asset.asset = promotion_asset_resource_name\n campaign_asset.field_type = client.enums.AssetFieldTypeEnum.PROMOTION\n campaign_asset.campaign = campaign_service.campaign_path(\n customer_id, campaign_id\n )\n operations.append(operation)\n\n response = campaign_asset_service.mutate_campaign_assets(\n customer_id=customer_id, operations=operations\n )\n\n for result in response.results:\n print(\n \"Created campaign asset with resource name: \"\n f\"'{result.resource_name}'\"\n )"
] |
[
"0.6355806",
"0.54915804",
"0.5289272",
"0.50759953",
"0.4918394",
"0.49026936",
"0.48864666",
"0.4789297",
"0.47348252",
"0.46732575",
"0.45410955",
"0.45076388",
"0.4457251",
"0.44238174",
"0.44031692",
"0.43514192",
"0.43435964",
"0.43363217",
"0.432393",
"0.43129963",
"0.430963",
"0.42873853",
"0.42776242",
"0.4277313",
"0.4240546",
"0.42127135",
"0.41918692",
"0.4174795",
"0.41659844",
"0.41604367"
] |
0.6816361
|
0
|
Finds all the unique integers that correspond to a change in class.
|
def __find_integers_with_class_change(self, sorted_data):
potential_integer_splits = []
for row in range(sorted_data.shape[0] - 1):
if sorted_data[row, 1] != sorted_data[row + 1, 1]:
potential_integer_splits.append(sorted_data[row + 1, 0])
# return only the unique integers that are found
unique_ints = list(dict.fromkeys(potential_integer_splits))
return unique_ints
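As a quick illustration of what this row encodes, the stand-alone sketch below reproduces the same scan on a toy array: feature/class pairs are sorted by feature value, and every value at which the class label flips becomes a candidate split. This is an illustrative snippet, not part of the original class.

import numpy as np

# Toy data: column 0 = sorted integer feature values, column 1 = class labels.
sorted_data = np.array([
    [1, 0],
    [2, 0],
    [3, 1],   # class changes between 2 and 3 -> candidate split value 3
    [5, 1],
    [7, 0],   # class changes between 5 and 7 -> candidate split value 7
])

potential_integer_splits = []
for row in range(sorted_data.shape[0] - 1):
    if sorted_data[row, 1] != sorted_data[row + 1, 1]:
        potential_integer_splits.append(sorted_data[row + 1, 0])

unique_ints = list(dict.fromkeys(potential_integer_splits))
print([int(v) for v in unique_ints])  # [3, 7]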
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def findIndices(g):\r\n change = [0]\r\n seen = [g[0]]\r\n for i in range(1, len(g)):\r\n if not g[i] in seen:\r\n change.append(i)\r\n seen.append(g[i])\r\n return change",
"def remove_duplicates(numbers: List[int]) -> List[int]:\n#[SOLUTION]\n import collections\n c = collections.Counter(numbers)\n return [n for n in numbers if c[n] <= 1]",
"def to_class_id(y):\n ret_val = []\n for y_id in range(len(y)):\n if y[y_id] > 3: ret_val.append(2)\n if y[y_id] < 3: ret_val.append(0)\n if y[y_id] == 3: ret_val.append(1)\n return ret_val",
"def breakpoints_in_class_membership(self):\n return [index for index in range(len(self.klass_values) - 1) if not self.klass_values[index] == self.klass_values[index + 1]]",
"def compute_equivalent_class(record):\n equivalent_class = {}\n class_members=[]\n max_class_number = -1\n for pair in record:\n if (pair[0] in equivalent_class) and (not (pair[1] in equivalent_class)):\n equivalent_class[pair[1]] = equivalent_class[pair[0]]\n if (not(pair[0] in equivalent_class)) and (not (pair[1] in equivalent_class)):\n max_class_number+=1\n equivalent_class[pair[0]] = max_class_number\n equivalent_class[pair[1]] = max_class_number\n for c in range(max_class_number+1):\n class_members.append([index for index,val in equivalent_class.items() if val==c])\n return class_members",
"def get_valid_sets(self, inverse):\n class_names = [0] * self._counter\n for element in inverse:\n class_names[self._class_names[element]] += 1\n return [i for i, value in enumerate(class_names)\n if value != 0 and value != len(self.part[i])]",
"def __init__(self):\n self.ids_seen = set()",
"def class_nodes(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += 1\n return rval",
"def getChange(number):",
"def class_nodes(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += 1\r\n return rval",
"def non_mcnugget():\n nugget = [0, 6, 9, 20]\n mcnugget = set([6, 9, 20])\n\n while True:\n mcnugget = set([m+n for m in mcnugget for n in nugget])\n\n for m in mcnugget:\n found = all([m+j in mcnugget for j in range(6)])\n if found:\n return [k for k in range(1, m) if k not in mcnugget]",
"def __hash__(self) -> int:",
"def unique_instance(un_data):\n test_dict = dict()\n indexed = list()\n count = 0\n for i,item in enumerate(un_data):\n if not test_dict.has_key( hash(item) ):\n test_dict[ hash(item) ] = 0\n else:\n count = count + 1\n indexed.append(i)\n return count, indexed",
"def classes(self):\n if not hasattr(self, '_unique_classes'):\n # build when we don't have\n self._unique_classes = self.data['label'].unique()\n self._unique_classes.sort()\n\n ret = self._unique_classes\n return ret",
"def _ids(self):\n prev_values = set()\n while True:\n next_value = self._time_ns()\n while True:\n if next_value not in prev_values:\n break\n next_value += 1000\n prev_values.add(next_value)\n yield next_value",
"def __uniqueCounts(rows):\n results = {} #Initialize a dictionary to store the results\n for row in rows: #Iterate over all rows of data\n #The result is the last column\n r = row[-1]\n if r not in results: results[r] = 0 #Start the count for each class at zero\n results[r] += 1 #Increment the count for this row's class by 1\n return results",
"def ids(self):\n return frozenset([seq.id for seq in self])",
"def scan(self) -> list[int]:",
"def unique_digits(n):\n #return len(set(str(n)))\n f = sorted(list(str(n)))\n return len([x for i,x in enumerate(f) if x not in f[i+1:]])\n #print(len(set(str(n))), r)",
"def returns_distinct_classes(self):\n assert simple_class() is not simple_class()",
"def _get_unique_improper_types(structure, epsilon_conversion_factor):\n unique_improper_set = set()\n for improper in structure.impropers:\n unique_improper_set.add(\n _get_improper_type_key(improper, epsilon_conversion_factor)\n )\n\n improper_key_dict = {\n improper_key: i + 1\n for i, improper_key in enumerate(unique_improper_set)\n }\n\n return improper_key_dict",
"def find_uniques_consecutive(array):\n val = array[0]\n indices = []\n for idx, element in enumerate(array):\n if element != val:\n indices.append(idx)\n val = element\n return indices",
"def change_class_labels(classes):\n u,indices=np.unique(classes,return_inverse=True)\n return u,indices",
"def task7_unique_number(lst):\n unique = []\n for elem in lst:\n check_list = lst.copy()\n lst.remove(elem)\n if elem not in lst:\n unique.append(elem)\n lst = check_list\n return unique",
"def get_replay_classes_helper(self):\n\n if self.replay_source is None or self.test_type == 'JOINT':\n return []\n\n class_from = self.get_encoder_number() + 1\n\n class_to = sum(self.test_structure) - self.test_structure[-1]\n\n if class_from > class_to:\n return []\n\n return [class_from + i for i in range((class_to - class_from) + 1)]",
"def __hash__(self) -> int:\n result = []\n for i in self._all_types:\n try:\n hash(i)\n result.append(i)\n except TypeError:\n pass\n return hash(tuple(result))",
"def getChangeIdsLessThanIdNow(self, new_changeid):\n change_obj = rpc.RpcProxy('software_dev.commit')\n cids = change_obj.search([('id', '<', new_changeid)])\n t = Token()\n changes = self.runInteractionNow(self._get_change_num, cids)\n changes.sort(key=lambda c: c.number)\n return changes",
"def CountsByDiffStatus(self):\n ret = [0, 0, 0, 0]\n for sym in self:\n ret[sym.diff_status] += 1\n return tuple(ret)",
"def scan(self) -> List[int]:",
"def scan(self) -> List[int]:"
] |
[
"0.5589745",
"0.5522386",
"0.55177635",
"0.54917526",
"0.5476852",
"0.53103954",
"0.5294811",
"0.52648157",
"0.52156276",
"0.519498",
"0.5180705",
"0.5176299",
"0.5175594",
"0.51660055",
"0.51583177",
"0.5150952",
"0.51428884",
"0.5121026",
"0.51114494",
"0.51038694",
"0.5096585",
"0.5086813",
"0.50756234",
"0.50481415",
"0.5040784",
"0.50395894",
"0.5028399",
"0.5005997",
"0.5000308",
"0.5000308"
] |
0.76339954
|
0
|
Calculates each of the information gains for a list of integer splits with the given sorted feature and class data.
|
def __info_gain_from_splits(self, potential_integer_splits, sorted_data):
info_gains = []
for split in map(int, potential_integer_splits):
left_child = sorted_data[sorted_data[:, 0].astype(int) < split, :]
right_child = sorted_data[sorted_data[:, 0].astype(int) >= split, :]
info_gains.append(self.__calc_info_gain(sorted_data, left_child,
right_child))
return info_gains
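The method above delegates the actual gain computation to __calc_info_gain, which is not shown in this row. The stand-alone sketch below assumes an entropy-based definition of information gain purely for illustration and applies the same <-split / >=-split partitioning to the toy array from the previous sketch.

import numpy as np

def entropy(labels):
    # Shannon entropy of an array of class labels.
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

def info_gain(parent, left, right):
    # Weighted entropy reduction; stands in for the unshown __calc_info_gain.
    n = parent.shape[0]
    return entropy(parent[:, 1]) - (
        left.shape[0] / n * entropy(left[:, 1])
        + right.shape[0] / n * entropy(right[:, 1])
    )

sorted_data = np.array([[1, 0], [2, 0], [3, 1], [5, 1], [7, 0]])
for split in [3, 7]:
    left = sorted_data[sorted_data[:, 0].astype(int) < split, :]
    right = sorted_data[sorted_data[:, 0].astype(int) >= split, :]
    print(split, round(info_gain(sorted_data, left, right), 3))
# 3 0.42
# 7 0.171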
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right",
"def __find_best_split_in_feature(self, feature_and_class):\n\n # sort the feature and class and use changes in the class to reduce\n # number of potential split info gain calculations\n sorted_data = feature_and_class[\n feature_and_class[:, 0].astype(np.int).argsort()]\n potential_splits = self.__find_integers_with_class_change(sorted_data)\n info_gains = self.__info_gain_from_splits(potential_splits,\n sorted_data)\n\n # returning nothing in no information gains are found\n if len(info_gains) == 0:\n return None, None\n\n index = info_gains.index(max(info_gains))\n return info_gains[index], potential_splits[index]",
"def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost",
"def test_split(self,X,y,splits):\n n_data = len(y) #Number of data points\n splits=(X[splits]+X[splits+1])/2\n\n idx_greater = (X>splits[:,None]) #index for greater split\n idx_lower = (X<splits[:,None]) #index for lower split\n\n imp_greater =[self.impurity(y[idx]) for idx in idx_greater] #impurity for greater\n imp_lower = [self.impurity(y[idx]) for idx in idx_lower] #impurity lower\n\n impur = [sum(idx_great)/n_data*imp_great+sum(idx_low)/n_data*imp_low for idx_great,imp_great,idx_low,imp_low in zip(idx_greater,imp_greater,idx_lower,imp_lower)] #Weighted impurity\n return (impur,splits)",
"def fit(self, data, targets):\n # update these three\n self.idx = 0\n self.val = None\n self.left = None\n self.right = None\n ### YOUR CODE HERE\n # i have added a slow and a fast version\n \n num_points, num_features = data.shape\n # print('num points, num_features', num_points, num_features)\n \n def feat_score(feat_idx):\n feat = data[:, feat_idx].copy()\n perm = np.argsort(feat)\n s_feat = feat[perm]\n s_targets = targets[perm]\n target_var = ((s_targets - s_targets.mean())**2).sum()\n s_left, s_right = sum_squares(s_targets)\n def score(idx, _vals):\n ## slow version\n #left = _vals[0:idx]\n #right = _vals[idx:]\n #assert len(left) + len(right) == len(_vals), (len(left), len(right), len(_vals))\n #left_mean = np.mean(left)\n #right_mean = np.mean(right)\n #left_error = np.sum((left-left_mean)**2)\n #assert np.allclose(left_error, s_left[idx]) \n #right_error = np.sum((right-right_mean)**2)\n #assert np.allclose(right_error, s_right[idx])\n # return left_error+right_error\n # fast version\n return s_left[idx] + s_right[idx]\n # score for every split\n scores = np.array([score(x, s_targets) for x in range(0, num_points)])\n assert scores.min() <= target_var, target_var\n best_score_idx = np.argmin(scores)\n best_score = scores[best_score_idx]\n val = s_feat[best_score_idx]\n # print('best score', feat_idx, best_score, best_score_idx, val, s_feat[best_score_idx+1])\n \n return best_score, {'val': val, \n 'left': np.mean(s_targets[:best_score_idx]), \n 'right': np.mean(s_targets[best_score_idx:])\n } \n\n split_scores = []\n for f in range(0, num_features):\n total_score, _params = feat_score(f)\n split_scores.append(total_score)\n # print('score of {0} - {1}'.format(feat_names[f], total_score))\n # print('feature scores:', np.array(split_scores))\n best_feat = np.argmin(split_scores)\n best_score = split_scores[best_feat]\n # print('Best Feature idx: {0} - Best Cost: {1}'.format(best_feat, best_score))\n score_again, params = feat_score(best_feat)\n # print('double check score', score_again, best_score)\n self.idx = best_feat\n self.val = params['val']\n self.left = params['left']\n self.right = params['right']\n print(\"idx={}, val={}, left={}, right={}\".format(self.idx, self.val, self.left, self.right))\n assert not np.isnan(self.left)\n assert not np.isnan(self.right)\n ### END CODE",
"def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()",
"def index_data(self, data, class_):\r\n # !!! Write code to compute and store the parameters in self.conditional_prob[class_].\r\n for feat_index in range(len(data[0])):\r\n \r\n values = [i[feat_index] for i in data]\r\n \r\n deviation = np.std(values)\r\n mean = np.mean(values)\r\n\r\n self.conditional_prob[class_][feat_index] = [mean, deviation]\r\n\r\n # !!! Write code to compute prior.\r\n # Seems like it's been done for us?\r\n self.class_prob[class_] = float(len(data))/self.data_size",
"def classify(self, data):\n\n \"*** YOUR CODE HERE ***\"\n # should compute (validationData[i] - trainingData[j])^2\n result = np.zeros(data.shape[0])\n for i in range(data.shape[0]):\n distances = np.linalg.norm(self.trainingData - data[i], axis=1)\n nearest = np.argsort(distances)[:self.num_neighbors]\n nearest_tags = [self.trainingLabels[j] for j in nearest]\n result[i] = max(nearest_tags, key=lambda x: nearest_tags.count(x))\n return result",
"def choose_split_value(attrs, classes):\n indices = np.argsort(attrs)\n classes = classes[indices]\n attrs = attrs[indices]\n max_gain = 0.0\n max_gain_value = None\n for i in range(len(attrs) - 1):\n if classes[i] != classes[i+1]:\n mean = (attrs[i] + attrs[i+1]) / 2.0\n gain = inform_gain(attrs, classes, mean)\n if gain > max_gain:\n max_gain = gain\n max_gain_value = mean\n return max_gain_value, max_gain",
"def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value",
"def evaluate_all(\n data: List[Tuple[int, str, List[Instance]]], pred_data: List[List[Instance]],\n) -> Tuple[Score, Score, Tuple[List[List[str]], List[List[str]]]]:\n inexact_counts = defaultdict(lambda: np.zeros(3))\n exact_counts = defaultdict(lambda: np.zeros(3))\n\n iobs = []\n for (_, txt, gold), pred in zip(data, pred_data):\n\n for k, v in evaluate(gold, pred, exact=False).items():\n inexact_counts[k] += v\n for k, v in evaluate(gold, pred, exact=True).items():\n exact_counts[k] += v\n iobs.append(iob_conversion(txt, gold, pred))\n\n gold_iob, pred_iob = zip(*iobs)\n\n return (counts_to_scores(inexact_counts), counts_to_scores(exact_counts), (gold_iob, pred_iob))",
"def choose_split(data,treshold):\n n_features = len(data[0]) - 1 # number of columns\n quest_gain = [] # keep track of the gains and questions\n\n for col in range(1,n_features): # for each feature\n values = set([row[col] for row in data]) # unique values in the column\n for val in values: # for each value\n question = Question(col, val)\n \n # try splitting the dataset\n true_rows, false_rows = partition(data, question)\n\n # Skip this split if it doesn't divide the dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(data, true_rows, false_rows)\n quest_gain.append(Question_gain(gain,question))\n\n possible_question = [] # possible questions to ask\n n_quest_gain = len(quest_gain)\n\n if n_quest_gain == 0:\n return float('Inf'), float('NaN') #\n\n for x in range(n_quest_gain):\n if (quest_gain[x].gain >= treshold):\n possible_question.append(Question_gain(quest_gain[x].gain,quest_gain[x].question))\n \n n_possible_question = len(possible_question)\n if n_possible_question == 0:\n return float('Inf'), float('NaN')\n\n if n_possible_question>=2:\n [i, j] = random.sample(range(0, n_possible_question), 2)\n else:\n i = j = random.randint(0,n_possible_question-1)\n\n if possible_question[i].gain>=possible_question[j].gain:\n return possible_question[i].gain, possible_question[i].question\n else:\n return possible_question[j].gain, possible_question[j].question",
"def classify(self, data ):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses",
"def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses",
"def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses",
"def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses",
"def prepareSplitClassifier(df, models, choice):\n\n\n def classificationOutput(clf, X, Y):\n \"\"\"\n Fit the model and print the classification results\n - confusion_matrix\n - avg scores etc\n \"\"\"\n n_samples = 36\n\n print \"\\n\\nClassifier: \\n %s\" % (clf)\n print \"#\" * 79\n # classifier_gnb = naive_bayes.GaussianNB() # initiating the classifier\n\n clf.fit(X[:n_samples], Y[:n_samples]) # train on first n_samples and test on last 10\n\n expected = Y[n_samples:]\n predicted = clf.predict(X[n_samples:])\n print(\"Classification report:\\n%s\\n\" % (metrics.classification_report(expected, predicted)))\n print(\"\\nConfusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n\n\n def splitclassify(cDf):\n \"\"\"\n Given the dataframe combined with equal fair and unfair apps,\n classify them\n \"\"\"\n cDf = cDf.reindex(np.random.permutation(cDf.index)) # shuffle the dataframe\n featCols = set(cDf.columns)\n featCols.remove('appLabel')\n\n features = cDf[list(featCols)].astype('float')\n\n ## Scale the features to a common range\n min_max_scaler = preprocessing.MinMaxScaler()\n X = min_max_scaler.fit_transform(features.values)\n\n Y = cDf['appLabel'].values\n\n\n if choice == 'all':\n for key in models:\n classifier = models[key]\n classificationOutput(classifier, X, Y)\n else:\n if choice in models:\n classifier = models[choice]\n classificationOutput(classifier, X, Y)\n else:\n print \"Incorrect Choice\"\n\n\n\n fairDf = df[df['appLabel'] == False]\n unfairDf = df[df['appLabel'] == True]\n\n\n # calculate total possible splits of fair data frame relatie to\n # size of unfair dataframe\n splits = len(fairDf) // len(unfairDf)\n\n for i in range(splits):\n clDf = fairDf[i : i+len(unfairDf)].append(unfairDf)\n\n # print fairDf.values, unfairDf.values\n print \"Classifying %d th split of fair apps with unfair app\" % (i)\n print \"-\" * 79\n splitclassify(clDf)\n print \"\\n\\n\"",
"def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum + self.bias[l]\n guesses.append(vectors.argMax())\n return guesses",
"def splitmetric(self, dataset, attr, target_attr):\n freq = {}\n splitinfo = 0.0\n \n #Call information gain\n gain = ID3.splitmetric(self, dataset, attr, target_attr);\n samplenumbers = len(dataset)\n # Calculate the frequency of each of the values in the split attribute\n for record in dataset:\n if (record[attr] in freq):\n freq[record[attr]] += 1.0\n else:\n freq[record[attr]] = 1.0\n \n #Calculate split info, entropy of splitter\n for val in list(freq.values()):\n splitinfo += (- val / samplenumbers) * math.log(val / samplenumbers, 2)\n \n #Split info equals 0 when there only one class in data set\n if splitinfo == 0:\n splitinfo = 0.00000001\n \n return gain / splitinfo",
"def best_split1(self,X,attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = information_gain1(valc,X[attribute],X[\"Output\"],self.type)\n if (cur_if>global_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val\n else:\n global_if = float('inf') # the lowest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = gini_gain1(X[\"Output\"],X[attribute], valc)\n if (global_if>cur_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val",
"def score(self, data):\n\n score_mappings = {\n \"0\": np.log(self.class_zero_doc_count / self.total_docs),\n \"1\": np.log(self.class_one_doc_count / self.total_docs)\n }\n\n features = self.featurize(data)\n\n for f in features:\n\n if(f[0] in self.class_zero):\n cond_prob_zero = np.log((self.class_zero[f[0]] + 1) / (self.class_zero_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_zero = np.log(1 / (self.class_zero_feature_count + len(self.vocab)))\n else:\n cond_prob_zero = 0\n\n if(f[0] in self.class_one):\n cond_prob_one = np.log((self.class_one[f[0]] + 1) / (self.class_one_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_one = np.log(1 / (self.class_one_feature_count + len(self.vocab)))\n else:\n cond_prob_one = 0\n\n score_mappings[\"0\"] += cond_prob_zero\n score_mappings[\"1\"] += cond_prob_one\n\n score_mappings[\"0\"] = np.exp(score_mappings[\"0\"])\n score_mappings[\"1\"] = np.exp(score_mappings[\"1\"])\n\n return score_mappings",
"def postprocess2(scores, classes, bboxes, iou_threshold=0.2, score_threshold=0.5):\n n = len(scores)\n \n count_per_class = {cls:0 for cls in classes}\n bbox_per_class = {cls:[] for cls in classes}\n score_per_class = {cls:[] for cls in classes}\n\n for i in range(n):\n count_per_class[classes[i]] += 1\n bbox_per_class[classes[i]] += [bboxes[i]]\n score_per_class[classes[i]] += [scores[i]]\n \n det_num = 0\n det_classes = [] \n det_scores = []\n det_bboxes = []\n\n for cls in count_per_class:\n current_count = count_per_class[cls]\n current_scores = np.array(score_per_class[cls], np.float32)\n current_bboxes = np.array(bbox_per_class[cls], np.int32)\n\n idx = np.argsort(current_scores)[::-1]\n sorted_scores = current_scores[idx]\n sorted_bboxes = current_bboxes[idx]\n\n top_k_ids = []\n size = 0\n i = 0\n\n while i < current_count:\n if sorted_scores[i] < score_threshold:\n break\n top_k_ids.append(i)\n det_num += 1\n det_classes.append(cls)\n det_scores.append(sorted_scores[i])\n det_bboxes.append(sorted_bboxes[i])\n size += 1\n i += 1\n\n while i < current_count:\n tiled_bbox_i = np.tile(sorted_bboxes[i], (size, 1))\n ious, iofs, ioss = iou_bbox(tiled_bbox_i, sorted_bboxes[top_k_ids])\n max_iou = np.max(ious)\n # max_iof = np.max(iofs)\n # max_ios = np.max(ioss)\n # temp = np.max((max_iof, max_ios))\n if max_iou > iou_threshold:\n i += 1\n else:\n break\n\n return det_num, np.array(det_scores, np.float32), np.array(det_classes, np.int32), np.array(det_bboxes, np.int32)",
"def evaluate_SURF(x,y,NN,feature,inst,data,multiclass_map,maxInst):\r\n diff = 0\r\n if not data.discretePhenotype: #if continuous phenotype\r\n same_class_bound=data.phenSD #boundary to determine similarity between classes for continuous attributes\r\n \r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n #determining boundaries for continuous attributes\r\n min_bound=data.attributeInfo[feature][1][0]\r\n max_bound=data.attributeInfo[feature][1][1]\r\n \r\n diff_hit=0 #initializing the score to 0\r\n diff_miss=0\r\n \r\n count_hit=0\r\n count_miss=0\r\n \r\n if data.discretePhenotype:\r\n if len(data.phenotypeList) > 2: #multiclass endpoint\r\n class_Store = {}\r\n missClassPSum = 0\r\n for each in multiclass_map:\r\n if each != y[inst]: #Store all miss classes\r\n class_Store[each] = [0,0] #stores cout_miss and diff_miss\r\n missClassPSum += multiclass_map[each]\r\n \r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n for missClass in class_Store:\r\n if y[NN[i]] == missClass:\r\n class_Store[missClass][0] += 1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n class_Store[missClass][1]+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n class_Store[missClass][1]+=1\r\n \r\n #Corrects for both multiple classes, as well as missing data.\r\n missSum = 0 \r\n for each in class_Store:\r\n missSum += class_Store[each][0]\r\n missAverage = missSum/float(len(class_Store))\r\n \r\n hit_proportion=count_hit/float(len(NN)) #Correcting for Missing Data.\r\n for each in class_Store:\r\n diff_miss += (multiclass_map[each]/float(missClassPSum))*class_Store[each][1]\r\n \r\n diff = diff_miss*hit_proportion\r\n miss_proportion=missAverage/float(len(NN))\r\n diff += diff_hit*miss_proportion\r\n \r\n else: #Binary Class Problem\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if y[inst]==y[NN[i]]: #HIT\r\n count_hit+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1 \r\n\r\n #Take hit/miss inbalance into account (coming from missing data)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n else: #continuous endpoint\r\n for i in range(len(NN)): #for all nearest neighbors\r\n if x[inst][feature]!=data.labelMissingData and x[NN[i]][feature]!=data.labelMissingData: # add appropriate normalization.\r\n \r\n if abs(y[inst]-y[NN[i]])<same_class_bound: #HIT\r\n count_hit+=1 \r\n if 
x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_hit-=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_hit-=1\r\n else: #MISS\r\n count_miss+=1\r\n if x[inst][feature]!=x[NN[i]][feature]:\r\n if data.attributeInfo[feature][0]: #Continuous Attribute\r\n diff_miss+=abs(x[inst][feature]-x[NN[i]][feature])/(max_bound-min_bound)\r\n else:#Discrete\r\n diff_miss+=1\r\n\r\n #Take hit/miss inbalance into account (coming from missing data, or inability to find enough continuous neighbors)\r\n hit_proportion=count_hit/float(len(NN))\r\n miss_proportion=count_miss/float(len(NN))\r\n \r\n diff=diff_hit*miss_proportion + diff_miss*hit_proportion #applying weighting scheme to balance the scores \r\n \r\n return diff",
"def learn(self):\n metrics_hist = dict()\n max_runs = 3\n for run in range(max_runs):\n all_indices, initial_indices = self._init_al_dataset()\n\n metrics_hist[str(run)] = dict()\n\n current_indices = list(initial_indices)\n \n for split in self.data_splits_frac:\n print(f'\\nRUN {run} - SPLIT - {split*100:0.0f}%')\n\n # Initialise models\n self._init_models(mode='svaal')\n\n # Do some label stuff\n unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)\n unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)\n unlabelled_dataloader = data.DataLoader(self.datasets['train'],\n sampler=unlabelled_sampler,\n batch_size=64,\n drop_last=False)\n\n print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')\n\n # TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. Breaking out of loop for now when data hits 100% labelled.\n if len(unlabelled_indices) == 0:\n break\n\n metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,\n dataloader_u=unlabelled_dataloader,\n dataloader_v=self.val_dataloader,\n dataloader_t=self.test_dataloader,\n mode='svaal') \n print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%') \n \n # Record performance at each split\n metrics_hist[str(run)][str(split)] = metrics\n\n \n sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg\n current_indices = list(current_indices) + list(sampled_indices)\n sampler = data.sampler.SubsetRandomSampler(current_indices)\n self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)\n \n # write results to disk\n with open('results.json', 'w') as fj:\n json.dump(metrics_hist, fj, indent=4)",
"def evaluate_hmdb51_fusion():\n vlen = 0\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_hmdb51_pooled_python/'\n fv_root = '/home/syq/research_final/data/dense-traj/fv_hmdb51_python/'\n hmdb_splits = 'testTrainMulti_7030_splits/'\n categories = os.listdir(fv_root)\n weight = 1.0\n weights = [i / 20.0 for i in range(21)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(3)\n for splitnum in range(1,4):\n ts = time.time()\n trainfiles, testfiles = hmdb51_splits.loadsplit(categories,\n hmdb_splits,\n splitnum)\n print 'Have %d train files' % len(trainfiles)\n print 'Have %d test files' % len(testfiles)\n\n if not vlen:\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(trainfiles[0][0][:-4],\n ob_suffix)),\"rb\")\n vlen_ob = len(np.load(fp))\n fp.close()\n print \"OB vector length is %d\" % vlen_ob\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(trainfiles[0][0][:-4],\n fv_suffix)),\"rb\")\n vlen_fv = len(np.load(fp))\n fp.close()\n print \"IDTFV vector length is %d\" % vlen_fv\n\n Dtrain_ob = np.zeros( (len(trainfiles),vlen_ob), np.float32 )\n Dtrain_fv = np.zeros( (len(trainfiles),vlen_fv), np.float32 )\n\n Ytrain = np.ones ( (len(trainfiles) )) * -1000\n\n for fi,f in enumerate(trainfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtrain_ob[fi][:] = np.load(fp)\n fp.close()\n Ytrain[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtrain_fv[fi][:] = np.load(fp)\n fp.close()\n\n Dtest_ob = np.zeros( (len(testfiles),vlen_ob), np.float32 )\n Dtest_fv = np.zeros( (len(testfiles),vlen_fv), np.float32 )\n\n Ytest = np.ones ( (len(testfiles) )) * -1000\n\n for fi,f in enumerate(testfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtest_ob[fi][:] = np.load(fp)\n fp.close()\n Ytest[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtest_fv[fi][:] = np.load(fp)\n fp.close()\n\n \"\"\"\n Early fusion\n Dtrain = np.hstack((Dtrain_ob, Dtrain_fv))\n Dtest = np.hstack((Dtest_ob, Dtest_fv))\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n acc = clf.fit(Dtrain, Ytrain).score(Dtest, Ytest)\n \"\"\"\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # Late fusion\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', splitnum, 'late fusion acc', acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[splitnum-1] = acc\n acc_to_weights[weight] = accs\n\n print \"Mean accuracy: %.3f\" % 
accs.mean()\n with open(\"hmdb51_weight_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights\n\n \"\"\"\n with open('fv_hmdb51_accs.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))\n \"\"\"",
"def encode_splits(data):\n lookup = {'train': 0, 'val': 1, 'test': 2}\n return [lookup[datum['split']] for datum in data]",
"def compute_statistics(self):",
"def featureLikelihood():\r\n\r\n\t# Lists\r\n\twords = []\r\n\tfinalWords = []\r\n\tposWords = []\r\n\tnegWords = []\r\n\tfeatureListPos = []\r\n\tfeatureListNeg = []\r\n\r\n\t# Counters\r\n\tposCount = 0.0\r\n\tnegCount = 0.0\r\n\r\n\t# Temporary Lists for formating\r\n\tfeatureListPosFormat = []\r\n\tfeatureListNegFormat = []\r\n\r\n\t# Strings\r\n\ts = \" \"\r\n\tposString = \"\"\r\n\tnegString = \"\"\r\n\r\n\tseen = set()\r\n\r\n\t# Add all words to words list and count positive & negative occurences\r\n\tfor item in trainingData:\r\n\t\tfor word in item[2]:\r\n\t\t\twords.append(word)\r\n\t\tif item[1] == '0':\r\n\t\t\tfor word in item[2]:\r\n\t\t\t\tposWords.append(word)\r\n\t\t\t\tposCount += 1\r\n\t\tif item[1] == '1':\r\n\t\t\tfor word in item[2]:\r\n\t\t\t\tnegWords.append(word)\r\n\t\t\t\tnegCount +=1\r\n\r\n\t# Adds all values into finalWords, skipping duplicates\r\n\tfor values in words:\r\n\t\tif values not in seen:\r\n\t\t\tfinalWords.append(values)\r\n\t\t\tseen.add(values)\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t# Add positive and negative counts to feature list and dictionaries\r\n\tfor word in finalWords:\r\n\t\ts += '{:12s}'.format(word)\r\n\t\t\r\n\t\tpCount = 0\r\n\t\tnCount = 0\r\n\t\t\r\n\t\tfor row in trainingData:\r\n\t\t\tif row[1] == '0':\r\n\t\t\t\tif word in row[2]: pCount += 1\r\n\t\t\tif row[1] == '1':\r\n\t\t\t\tif word in row[2]: nCount += 1\r\n\t\t\t\t\r\n\t\tfeatureListPos.append((pCount + 1) / (posCount + 9))\r\n\t\tclass0Dict[word] = ((pCount + 1) / (posCount + 9))\r\n\t\t\r\n\t\tfeatureListNeg.append((nCount + 1) / (negCount + 9))\r\n\t\tclass1Dict[word] = ((nCount + 1) / (negCount + 9))\r\n\r\n\t\t\r\n\t\t\r\n\t# Formatting for the positive feature list\r\n\tfor item in featureListPos:\r\n\t\tfeatureListPosFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListPosFormat:\r\n\t\tposString += '{:12s}'.format(item)\r\n\r\n\t# Formatting for the negative feature list\r\n\tfor item in featureListNeg:\r\n\t\tfeatureListNegFormat.append('{0:.5f}'.format(item))\r\n\t\t\r\n\tfor item in featureListNegFormat:\r\n\t\tnegString += '{:12s}'.format(item)\r\n\r\n\r\n\t\t\r\n\treturn(s, posString, negString)",
"def split_by(data_set, question, entropy_before):\r\n yea_set = []\r\n nay_set = []\r\n abstain_set = []\r\n\r\n for data_point in data_set:\r\n vote = data_point.dat_votes[ord(question)-97] #the way this representative voted is the data_point's votes indexed by question\r\n party = data_point.dat_party\r\n\r\n if vote == \"+\":\r\n yea_set.append(data_point)\r\n if vote == \"-\":\r\n nay_set.append(data_point)\r\n if vote == \".\":\r\n abstain_set.append(data_point)\r\n\r\n yea_entropy = abs(calc_entropy(yea_set) * (len(yea_set))/len(data_set))\r\n nay_entropy = abs(calc_entropy(nay_set) * (len(nay_set))/len(data_set))\r\n abstain_entropy = abs(calc_entropy(abstain_set) * (len(abstain_set))/len(data_set))\r\n calc_gain = entropy_before - (yea_entropy + nay_entropy + abstain_entropy)\r\n return (calc_gain,(yea_set, nay_set, abstain_set))",
"def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr"
] |
[
"0.66188765",
"0.62608063",
"0.6256927",
"0.62233657",
"0.6083229",
"0.59658355",
"0.5856173",
"0.58538896",
"0.5840882",
"0.57807654",
"0.5772018",
"0.5771111",
"0.5770281",
"0.5768238",
"0.5764604",
"0.5764604",
"0.57252175",
"0.56902254",
"0.5675291",
"0.5666471",
"0.56187814",
"0.5603054",
"0.5599793",
"0.55944383",
"0.55775213",
"0.5564469",
"0.5517871",
"0.5517496",
"0.5516587",
"0.55131304"
] |
0.69695956
|
0
|
Calculates the information gain given a parent and its two children (after a specific split).
|
def __calc_info_gain(self, parent, left_child, right_child):
parent_entropy = self.__entropy(parent[:, -1])
num_rows_left = left_child.shape[0]
num_rows_right = right_child.shape[0]
num_rows_total = num_rows_left + num_rows_right
    # no information gain can be computed if either child is empty
if num_rows_left == 0 or num_rows_right == 0:
return 0
# calculate entropy of the children data
left_child_entropy = self.__entropy(left_child[:, -1])
right_child_entropy = self.__entropy(right_child[:, -1])
left_child_contribution = (num_rows_left/num_rows_total)*left_child_entropy
right_child_contribution = (num_rows_right/num_rows_total)*right_child_entropy
new_entropy = left_child_contribution + right_child_contribution
info_gain = parent_entropy - new_entropy
return info_gain
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calc_information_gain(data, split_name, target_name):\n # Calculate the original entropy\n original_entropy = calc_entropy(data[target_name])\n \n # Find the median of the column we're splitting\n column = data[split_name]\n median = column.median()\n \n # Make two subsets of the data, based on the median\n left_split = data[column <= median]\n right_split = data[column > median]\n \n # Loop through the splits and calculate the subset entropies\n to_subtract = 0\n for subset in [left_split, right_split]:\n prob = (subset.shape[0] / data.shape[0]) \n to_subtract += prob * calc_entropy(subset[target_name])\n \n # Return information gain\n return original_entropy - to_subtract",
"def calc_information_gain(data, split_name, target_name):\r\n # Calculate the original entropy\r\n original_entropy = calc_entropy(data[target_name])\r\n \r\n # Find the median of the column we're splitting\r\n column = data[split_name]\r\n median = column.median()\r\n \r\n # Make two subsets of the data, based on the median\r\n left_split = data[column <= median]\r\n right_split = data[column > median]\r\n \r\n # Loop through the splits and calculate the subset entropies\r\n to_subtract = 0\r\n for subset in [left_split, right_split]:\r\n prob = (subset.shape[0] / data.shape[0]) \r\n to_subtract += prob * calc_entropy(subset[target_name])\r\n \r\n # Return information gain\r\n return original_entropy - to_subtract",
"def _information_gain(self, y, X_column, split_thersh):\n # parent E\n parent_entropy = entropy(y)\n # generate split\n left_idxs, right_idxs = self._split(X_column, split_thersh)\n\n if len(left_idxs) == 0 or len(right_idxs) == 0:\n return 0\n # weighted avg child E\n n = len(y)\n n_left_samples, n_right_samples = len(left_idxs), len(right_idxs)\n entropy_left, entropy_right = entropy(y[left_idxs]), entropy(y[right_idxs])\n child_entropy = (n_left_samples/n) * entropy_left + (n_right_samples/n) * entropy_right\n\n # return IG\n ig = parent_entropy - child_entropy\n return ig",
"def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertAlmostEqual(self.decision_tree.get_root_node().node_split.criterion_value,\n 2. * -0.3 * math.log2(0.3) - 0.4 * math.log2(0.4))",
"def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)",
"def __info_gain_from_splits(self, potential_integer_splits, sorted_data):\n info_gains = []\n for split in map(int, potential_integer_splits):\n left_child = sorted_data[sorted_data[:, 0].astype(int) < split, :]\n right_child = sorted_data[sorted_data[:, 0].astype(int) >= split, :]\n info_gains.append(self.__calc_info_gain(sorted_data, left_child,\n right_child))\n return info_gains",
"def test_gain_ratio(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)",
"def calcInfoGainBySplitValue(self, data, structure, colName, splitVal):\n result = self.calcDataEntropy(data, structure) - self.calcEntropyBySplitValue(data, structure, colName, splitVal)\n result = 0 if result < 0 else result\n return round(result, 3)",
"def test_gain_ratio(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)",
"def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini",
"def _information_gain(self, y, subsets):\n n = y.shape[0]\n child_entropy = 0\n\n for y_i in subsets:\n child_entropy += self._entropy(y_i) * y_i.shape[0] / float(n)\n\n return self._entropy(y) - child_entropy",
"def gini_gain(previous_classes, current_classes):\n I_parent = gini_impurity(previous_classes)\n I_child = 0\n for elem in current_classes:\n I_child += len(elem)/float(len(previous_classes))*gini_impurity(elem)\n return I_parent - I_child",
"def splitmetric(self, dataset, attr, target_attr):\n freq = {}\n splitinfo = 0.0\n \n #Call information gain\n gain = ID3.splitmetric(self, dataset, attr, target_attr);\n samplenumbers = len(dataset)\n # Calculate the frequency of each of the values in the split attribute\n for record in dataset:\n if (record[attr] in freq):\n freq[record[attr]] += 1.0\n else:\n freq[record[attr]] = 1.0\n \n #Calculate split info, entropy of splitter\n for val in list(freq.values()):\n splitinfo += (- val / samplenumbers) * math.log(val / samplenumbers, 2)\n \n #Split info equals 0 when there only one class in data set\n if splitinfo == 0:\n splitinfo = 0.00000001\n \n return gain / splitinfo",
"def calcGainRatioSplitByColumn(self, data, structure, colIName):\n splitInfo, colIndex = 0, structure[colIName]['index']\n for value in structure[colIName]['values']:\n newData = list(filter(lambda x: x[colIndex] == value, data))\n p = len(newData) / len(data) if len(newData) != 0 else 1\n splitInfo += (-1) * p * log2(p)\n splitInfo = 1 if splitInfo == 0 else splitInfo\n return round(self.calcInfoGainByColumnSplit(data, structure, colIName) / splitInfo, 3)",
"def propagate_path_improvements(parent):\n for child in parent.children:\n if parent.g + 1 < child.g:\n child.set_parent(parent)\n child.g = parent.g + child.get_arc_cost()\n child.f = child.g + child.h\n # Recursive call to propagate possible path improvements to all children of the children\n propagate_path_improvements(child)",
"def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node",
"def _estimate_edges_to_children_per_parent(\n schema_info, query_metadata, parameters, parent_location, child_location\n):\n edge_counts = _query_statistics_for_vertex_edge_vertex_count(\n schema_info.statistics, query_metadata, parent_location, child_location\n )\n\n if edge_counts is None:\n edge_counts = _estimate_vertex_edge_vertex_count_using_class_count(\n schema_info, query_metadata, parent_location, child_location\n )\n\n parent_name_from_location = query_metadata.get_location_info(parent_location).type.name\n # Count the number of parents, over which we assume the edges are uniformly distributed.\n parent_location_counts = schema_info.statistics.get_class_count(parent_name_from_location)\n\n # Anticipate division by zero\n if parent_location_counts == 0:\n # This implies that edge_counts is also 0. However, asserting that edge_counts is 0 is\n # too aggressive because we can't expect all statistics to be collected at the same time.\n return 0.0\n\n # False-positive bug in pylint: https://github.com/PyCQA/pylint/issues/3039\n # pylint: disable=old-division\n #\n # TODO(evan): edges are not necessarily uniformly distributed, so record more statistics\n child_counts_per_parent = float(edge_counts) / parent_location_counts\n # pylint: enable=old-division\n\n # TODO(evan): If edge is recursed over, we need a more detailed statistic\n # Recursion always starts with depth = 0, so we should treat the parent result set itself as a\n # child result set to be expanded (so add 1 to child_counts).\n is_recursive = _is_subexpansion_recursive(query_metadata, parent_location, child_location)\n if is_recursive:\n child_counts_per_parent += 1\n\n # Adjust the counts for filters at child_location.\n child_name_from_location = query_metadata.get_location_info(child_location).type.name\n child_filters = query_metadata.get_filter_infos(child_location)\n child_counts_per_parent = adjust_counts_for_filters(\n schema_info, child_filters, parameters, child_name_from_location, child_counts_per_parent\n )\n\n return child_counts_per_parent",
"def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr",
"def do_calculate(self, node_id, **kwargs):\n try:\n _hazard_rates = kwargs['hazard_rates']\n except KeyError:\n _hazard_rates = None\n _return = False\n\n _parent = self.do_select(node_id)\n _children = self.do_select_children(node_id)\n _parent.n_sub_systems = 0\n for _child in _children:\n if _child.data.included:\n _parent.n_sub_systems += 1\n\n # Calculate the parent goals.\n if not _parent.calculate_goals():\n # Allocate the goal to the children.\n _idx = 1\n _parent.weight_factor = sum([\n _child.data.int_factor * _child.data.soa_factor *\n _child.data.op_time_factor * _child.data.env_factor\n for _child in _children\n ])\n for _child in _children:\n if _parent.method_id == 1:\n _return = (_return or _child.data.equal_apportionment(\n _parent.n_sub_systems, _parent.reliability_goal))\n elif _parent.method_id == 2:\n _return = (_return or _child.data.agree_apportionment(\n _parent.n_sub_systems, _parent.reliability_goal))\n elif _parent.method_id == 3:\n _return = (_return or _child.data.arinc_apportionment(\n _hazard_rates[0], _parent.hazard_rate_goal,\n _hazard_rates[_idx]))\n _idx += 1\n elif _parent.method_id == 4:\n _return = (_return or _child.data.foo_apportionment(\n _parent.weight_factor, _parent.hazard_rate_goal))\n else:\n _return = True\n\n return _return",
"def gain(self):\r\n \r\n for node in self.G.nodes():\r\n # Get number of nodes connected on same and other partition\r\n movForce, retForce = self.nodeForces(node)\r\n nodeGain = movForce-retForce\r\n\r\n #Fill list of Nodes with gains\r\n self.gainOrder.append((nodeGain,node))\r\n \r\n self.gainOrder.sort(key=lambda r: r[0])\r\n self.keys = [r[1] for r in self.gainOrder]",
"def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)",
"def Trees__LCA_LowestCommonDenominator():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:# URL:https://www.hackerrank.com/challenges/binary-search-tree-lowest-common-ancestor/problem\n '''\n class Node:\n def __init__(self,info): \n self.info = info \n self.left = None \n self.right = None \n // this is a node of the tree , which contains info as data, left , right\n '''\n def lca(root, v1, v2):\n # Find a and b. Link child nodes to parent to be able to backtrack.\n # (1) Note, we add 'parent' attribute to node dynamically via node.parent = ...\n root.parent = None\n node_stack = []\n node_stack.append(root)\n v1_node, v2_node = None, None\n while node_stack:\n node = node_stack.pop()\n if not v1_node and node.info == v1:\n v1_node = node\n if not v2_node and node.info == v2:\n v2_node = node\n for child_node in [node.left, node.right]:\n if child_node:\n child_node.parent = node # (1)\n node_stack.append(child_node)\n\n # Generate path from A to root.\n curr = v1_node\n a_to_root = set()\n while curr:\n a_to_root.add(curr.info)\n curr = curr.parent\n\n # traverse up b until you come across an element in a's path to parent.\n curr = v2_node\n while curr:\n if curr.info in a_to_root:\n return curr\n else:\n curr = curr.parent\n\n print(\"Shouldn't be here, Something went wrong\")\n\n # # Recursive. (Iterative is better, but did recursive for practice.) ~15 min.\n # # Main idea is that we count the number of v1/v2's found of the subnodes.\n # # If a node has sum of 2, we know it's the lca.\n # def lca(root, v1, v2):\n # def lca_helper(node):\n # ret_node = None\n # if not node:\n # return 0, None\n # v_match_counter = 0\n # if node.info in [v1, v2]:\n # v_match_counter += 1\n # left_count, left_node_ret = lca_helper(node.left)\n # right_count, right_node_ret = lca_helper(node.right)\n # v_match_counter += left_count + right_count\n # if v_match_counter == 2:\n # ret_node = node\n # if left_node_ret:\n # ret_node = left_node_ret\n # if right_node_ret:\n # ret_node = right_node_ret\n # return v_match_counter, ret_node\n\n # _, node = lca_helper(root)\n # return node",
"def best_split1(self,X,attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = information_gain1(valc,X[attribute],X[\"Output\"],self.type)\n if (cur_if>global_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val\n else:\n global_if = float('inf') # the lowest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = gini_gain1(X[\"Output\"],X[attribute], valc)\n if (global_if>cur_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val",
"def get_parent_index(self, child):\n return (child-1)//2",
"def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)",
"def impurity_reduction(self, xj, S):\r\n # Determine number of rows in left and right children and calculate respective impurities for parent, \r\n # left, and right \r\n if len(self.path) == 0:\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" <= \" + str(S) + \";\")\r\n n_left = self.cur.fetchone()[0]\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" > \" + str(S) + \";\")\r\n n_right = self.cur.fetchone()[0]\r\n\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \";\")\r\n I_A = float(self.cur.fetchone()[0])\r\n\r\n if n_left == 0 or n_right == 0:\r\n return 0\r\n else: \r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + xj + \" <= \" + str(S) + \";\")\r\n I_L = float(self.cur.fetchone()[0])\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + xj + \" > \" + str(S) + \";\")\r\n I_R = float(self.cur.fetchone()[0])\r\n\r\n\r\n else:\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" <= \" + str(S) + \" AND \" + \" AND \".join(self.path) + \";\")\r\n n_left = self.cur.fetchone()[0]\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" > \" + str(S) + \" AND \" + \" AND \".join(self.path) + \";\")\r\n n_right = self.cur.fetchone()[0]\r\n \r\n if n_left == 0 or n_right == 0:\r\n return 0\r\n \r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \";\")\r\n I_A = float(self.cur.fetchone()[0])\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \" AND \" + xj + \" <= \" + str(S) + \";\")\r\n I_L = float(self.cur.fetchone()[0])\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \" AND \" + xj + \" > \" + str(S) + \";\")\r\n I_R = float(self.cur.fetchone()[0])\r\n\r\n \r\n # Calculate change in impurity\r\n frac_left = n_left / (n_left + n_right)\r\n frac_right = n_right / (n_left + n_right)\r\n\r\n change_impurity = I_A - frac_left*I_L - frac_right*I_R\r\n \r\n return change_impurity",
"def _calculate_information_gain(self, cur_state, next_state):\n\n n = len(cur_state)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n prob_cur = self.classifier.get_class1_prob(obs=cur_state)\n prob_next = self.classifier.get_class1_prob(obs=next_state)\n information_gain_true = (prob_next - prob_cur).reshape(-1, 1)\n\n next_state_null = np.copy(next_state)\n next_state_null[:, -self.action_dim:] = self.classifier.missing_value\n prob_next_null = self.classifier.get_class1_prob(next_state_null)\n\n for i in range(self.action_dim):\n next_state_i = np.copy(next_state)\n next_state_i[:, -self.action_dim:] = self.classifier.missing_value\n next_state_i[:, -i - 1] = next_state[:, -i - 1]\n\n prob_next_i = self.classifier.get_class1_prob(obs=next_state_i)\n information_gain_per_action[:, -i - 1] = prob_next_i - prob_next_null\n\n information_gain_sum = np.sum(information_gain_per_action, axis=1, keepdims=True)\n ratio = information_gain_true / information_gain_sum\n ratio[information_gain_sum == 0] = 0\n information_gain_per_action = information_gain_per_action * ratio\n return information_gain_per_action",
"def best_split(self):\n sub_group = []\n\n current_entropy = self.entropy(self._Passengers)\n best_gain = 0 # holds the best entropy difference so far\n best_split = self._Attr[0].get_name()\n relative_entropy = 0 # entropy while taking account for the size of the population\n\n for Attribute in self._Attr:\n relative_entropy = 0\n print(\"Attr considered: \" + Attribute.get_name())\n for Attr_option in Attribute.get_options():\n sub_group = []\n for Passenger in self._Passengers:\n if self.passenger_attr_option_check(Passenger,\n Attribute.get_name(),\n Attr_option): # if P.A = V\n sub_group.append(Passenger)\n if len(sub_group) > 0 and len(self._Passengers) > 0:\n relative_entropy += self.entropy(sub_group) * (len(sub_group)/len(self._Passengers))\n\n if current_entropy - relative_entropy > best_gain:\n best_gain = current_entropy - relative_entropy\n best_split = Attribute.get_name()\n\n print(f\"best split:{best_split} \\n with entropy gain of:\\n {best_gain}\")\n\n return best_split",
"def _calculate_parent_sum(curr_level, curr_level_weights, edge_betweenness):\n parent_edge_sum = {}\n for curr_node in curr_level_weights:\n for parent, weight in curr_level[curr_node][1]:\n edge_weight = curr_level_weights[curr_node] * weight / curr_level[curr_node][0]\n parent_edge_sum[parent] = parent_edge_sum.get(parent, 0) + edge_weight\n\n edge = tuple(sorted([curr_node, parent]))\n edge_betweenness[edge] += edge_weight / 2.0\n\n return parent_edge_sum",
"def _calculate_information_gain(self, obs, label):\n n = len(obs)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n obs_null = np.copy(obs)\n obs_null[:, -self.action_dim:] = self.classifier.missing_value\n prob_null = self.classifier.get_class1_prob(obs=obs_null)\n\n for i in range(self.action_dim):\n obs_i = np.copy(obs)\n for j in range(self.action_dim):\n if i != j:\n obs_i[:, - j - 1] = self.classifier.missing_value\n prob_i = self.classifier.get_class1_prob(obs=obs_i)\n class_1_gain = (prob_i - prob_null) * label[:, 0]\n class_0_gain = (prob_i - prob_null) * (1 - label)[:, 0]\n\n if self.positive_only:\n class_1_gain[class_1_gain < 0] = 0\n class_0_gain[class_0_gain < 0] = 0\n else:\n class_0_gain = - class_0_gain\n\n information_gain_per_action[:, - i - 1] = (class_1_gain + class_0_gain)\n\n return information_gain_per_action"
] |
[
"0.6516687",
"0.65080124",
"0.63141376",
"0.60892826",
"0.58556247",
"0.568292",
"0.56074995",
"0.55897933",
"0.55755717",
"0.5476209",
"0.53574795",
"0.5254822",
"0.520783",
"0.51747257",
"0.5096569",
"0.5088244",
"0.50874364",
"0.5062651",
"0.5010227",
"0.500158",
"0.498423",
"0.49748433",
"0.4960213",
"0.49509594",
"0.49502993",
"0.49477568",
"0.4945168",
"0.49205995",
"0.49144801",
"0.49050817"
] |
0.74386626
|
0
|
Prunes a node if doing so increases the validation accuracy.
|
def __prune_node(self, accuracy, node, validation_data):
    # tentatively convert the node into a leaf predicting its majority class
    classes = np.unique(node.labels)
    class_counts = np.unique(node.labels, return_counts=True)[1]
    node.predicted_class = classes[np.argmax(class_counts)]
    node.is_leaf = True
    # re-evaluate the whole tree on the validation data with the node pruned
    x_validation = validation_data.features
    y_validation = validation_data.labels
    predictions = self.predict(x_validation)
    evaluator = Evaluator()
    confusion = evaluator.confusion_matrix(predictions, y_validation)
    accuracy_1 = evaluator.accuracy(confusion)
    # keep the prune only if validation accuracy improved, otherwise revert
    if accuracy_1 > accuracy:
        return accuracy_1
    node.is_leaf = False
    return accuracy
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __prune_tree(self, accuracy, node, validation_data, depth):\n\n if node.is_leaf:\n return accuracy\n\n if node.depth == depth:\n if node.left_child.is_leaf and node.right_child.is_leaf:\n accuracy = self.__prune_node(accuracy, node, validation_data)\n return accuracy\n\n accuracy = self.__prune_tree(accuracy, node.left_child, validation_data, depth)\n accuracy = self.__prune_tree(accuracy, node.right_child, validation_data, depth)\n\n return accuracy",
"def prune_node (self, tree: BaseTree, node: TreeSplits):\n # Prune node, get if change\n change_made = self.tag_node_from_pruning (\n tree = tree, node = node, feature_matrix = self.X_validation, target_array = self.y_validation\n )\n\n # If change not made and it's not a leaf\n if not change_made and not node.isNodeLeaf():\n # Prune children nodes\n for node_idx, node in node.nodes.items():\n change_made_iter = self.prune_node ( tree = tree, node = node )\n change_made = change_made or change_made_iter # Track changes\n return change_made\n\n return change_made\n # End prune_node()",
"def prune_node(self, tree: BaseTree, node: TreeSplits):\n # Prune node, get if change\n change_made = self.tag_node_from_pruning(\n tree=tree, node=node, X=self.X_validation, y=self.y_validation\n )\n\n # If change not made and it's not a leaf...\n if not change_made and not node.is_leaf():\n # Prune children nodes\n for node_idx, node in node.nodes.items():\n change_made_iter = self.prune_node(tree=tree, node=node)\n change_made = change_made or change_made_iter # Track changes\n return change_made\n\n return change_made",
"def prune(self, accuracy, validation_data):\n for depth in range(self.maximum_depth, 0, -1):\n accuracy = self.__prune_tree(accuracy, self.root, validation_data, depth)\n\n return accuracy",
"def prune(self, x_val, y_val):\n\n # make sure that the classifier has been trained before predicting\n if not self.is_trained:\n raise Exception(\"DecisionTreeClassifier has not yet been trained.\")\n\n # get the maximum depth\n deepest_depth = get_max_depth(self.root)\n\n # explore the depth starting from (max_depth - 1) to half of the max_depth\n half_of_max_depth = deepest_depth // 2\n for depth in range(deepest_depth - 1, half_of_max_depth, -1):\n explore_nodes_to_prune(self, self.root, x_val, y_val, depth)\n\n print(\"Pruning completed\")",
"def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)",
"def on_prune(self, function_graph, node, reason):",
"def prune():\n with tf.Graph().as_default() as g:\n # Input evaluation data\n images, labels = rn.inputs(eval_data=True)\n\n # inference model.\n logits = rn.inference(images, 15)\n\n # Calculate predictions.\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n\n # Create a saver\n saver = tf.train.Saver()\n\n # Create session to restore, and restore data\n sess = tf.InteractiveSession()\n\n # Queue runner\n tf.train.start_queue_runners()\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step_num = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n else:\n print('No checkpoint file found')\n return\n\n precision = eval_once(sess, top_k_op)\n \n \"\"\"\n # Get all variables\n lst_variables = tf.global_variables()\n lst_values = sess.run(tf.global_variables())\n\n # Get the pruning information\n r = np.arange(0,0.2,0.01)\n p = []\n for reduce_factor in r:\n kernel_index, channel_to_delete_pack, pruning_number_pack = \\\n pru_cal(lst_variables, lst_values, reduce_factor=reduce_factor)\n print('reduce factor is %.3f' % reduce_factor)\n\n # Delete these variables\n counter = 0\n for i in kernel_index:\n for j in range(pruning_number_pack[counter]):\n sess.run(tf.assign(lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]],\n tf.zeros(\n tf.shape(lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]])),\n name=lst_variables[i][:, :, :, channel_to_delete_pack[counter][j]].name))\n counter = counter + 1\n\n # Real evaluation, after pruning\n p.append(eval_once(sess, top_k_op))\n\n return r, p\n \"\"\"",
"def tag_node_from_pruning(self, tree, node, X, y):\n # If is a leaf, return False\n if node.nodes is None or len(node.nodes) == 0:\n return False\n\n # Score predictions from whole tree\n predictions = tree.predict(X)\n whole_tree_score = self.eval_func(y, predictions)\n\n # Get the children from the node\n children = BaseTree.collect_children(node)\n # Save original nodes\n original_nodes = node.nodes\n # Update node to be a leaf\n node.update(\n nodes={},\n children=children,\n feature_col=node.feature_col,\n feature_value=node.feature_value,\n node_type=node.node_type,\n )\n\n # Score predictions from leaf\n predictions = tree.predict(X)\n pruned_tree_score = self.eval_func(y, predictions)\n\n # If leaf is better, don't swap it back and return True for change\n if whole_tree_score < pruned_tree_score:\n return True\n\n # Otherwise, change the node back to the original node.\n node.update(\n children=[],\n nodes=original_nodes,\n feature_col=node.feature_col,\n feature_value=node.feature_value,\n node_type=node.node_type,\n )\n # Return False (for no change)\n return False",
"def prune(tree, minGain, evaluationFunction=entropy, notify=False):\n # recursive call for each branch\n if tree.trueBranch.results == None: prune(tree.trueBranch, minGain, evaluationFunction, notify)\n if tree.falseBranch.results == None: prune(tree.falseBranch, minGain, evaluationFunction, notify)\n\n # merge leaves (potentionally)\n if tree.trueBranch.results != None and tree.falseBranch.results != None:\n tb, fb = [], []\n\n for v, c in tree.trueBranch.results.items(): tb += [[v]] * c\n for v, c in tree.falseBranch.results.items(): fb += [[v]] * c\n\n p = float(len(tb)) / len(tb + fb)\n delta = evaluationFunction(tb+fb) - p*evaluationFunction(tb) - (1-p)*evaluationFunction(fb)\n if delta < minGain:\n if notify: print('A branch was pruned: gain = %f' % delta)\n tree.trueBranch, tree.falseBranch = None, None\n tree.results = uniqueCounts(tb + fb)",
"def fine_prune(badnet_filename, badnet_weights, valid_set):\n K.clear_session()\n\n # load BadNet model and weights\n badnet = keras.models.load_model(badnet_filename)\n badnet.load_weights(badnet_weights)\n\n # load clean valid dataset\n x_valid, y_valid = data_loader(valid_set)\n x_valid = data_preprocess(x_valid)\n\n loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n badnet.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])\n\n # exercise the BadNet with clean valid inputs\n # call keract to fetch the activations of the model\n activations = get_activations(badnet, x_valid, layer_names=\"conv_3\", auto_compile=True)\n\n # print the activations shapes.\n [print(k, '->', v.shape, '- Numpy array') for (k, v) in activations.items()]\n\n conv3_activation = activations['conv_3']\n avg_activation = np.mean(conv3_activation, axis=(0,1,2))\n\n threshold = 94\n bias_penalty = -99999\n\n sorted_avg_activation = np.sort(avg_activation)\n\n # iteratively pruning\n for i in range(conv3_output_channel):\n prune_idx = np.where(sorted_avg_activation[i] == avg_activation)[0][0]\n print(\"iteration: {} pruning channel: {}\".format(i, prune_idx))\n\n # set bias of certain channel to a big negative value\n # so that the relu activation will be 0, which means such channel of neurons are \"pruned\"\n conv3_weights = badnet.get_layer(\"conv_3\").get_weights()\n # conv3_bias = conv3_weights[1]\n conv3_weights[1][prune_idx] = bias_penalty\n badnet.get_layer(\"conv_3\").set_weights(conv3_weights)\n # modelfile_name = \"badnets/badnet1/sunglasses_bd_net_pruned_{}.h5\".format(i)\n # badnet.save(modelfile_name)\n accuracy = model_evaluate(clean_valid_set_filename, badnet, is_clean_data=True)\n if accuracy < threshold:\n print('At iteration {}, the accuracy on the validation set drops below threshold'.format(i))\n break\n \n # save the pruned model\n badnet.save(pruned_badnet_filename)\n badnet.save_weights(pruned_badnet_weights)\n\n fine_pruned_model = fine_tune(valid_set, pruned_badnet_filename, pruned_badnet_weights, epochs=5)\n return fine_pruned_model",
"def prune(self, root, X, Y):\n # calculate the gini index of this subtree if the children of root is trimmed\n pruned_gini = len(X) * gini(Counter(Y).values())\n pruned_loss = pruned_gini\n # if root is a leaf node, return loss directly\n if root.col is None:\n return pruned_loss, 1\n\n # cur_loss record the loss function when root is not trimmed\n cur_loss = 0.\n # size record the size of this subtree\n size = 1\n\n selected_ind = X[:, root.col] == root.val\n other_ind = X[:, root.col] != root.val\n selected_X = X[selected_ind]\n other_X = X[other_ind]\n selected_Y = Y[selected_ind]\n other_Y = Y[other_ind]\n\n # trim the left node recursively\n child_loss, child_size = self.prune(root.left, selected_X, selected_Y)\n cur_loss += child_loss\n size += child_size\n\n # trim the right node recursively\n child_loss, child_size = self.prune(root.right, other_X, other_Y)\n cur_loss += child_loss\n size += child_size\n\n # alpha means that\n # if the weight of size of tree in the loss function is larger than alpha,\n # this node will be trimmed\n alpha = (pruned_loss - cur_loss) / (size - 1)\n root.alpha = alpha\n # FIXME: why its length is always 1?\n self.possible_alpha.add(alpha)\n return cur_loss, size",
"def _fold_node(self, node, graph):\n if node.type not in [\"Transpose\", \"Reshape\", \"Unsqueeze\"]:\n return False\n dequant_node = node.inputs[0]\n if dequant_node.type != \"DequantizeLinear\":\n return False\n if len(graph.find_output_consumers(dequant_node.output[0])) > 1:\n return False\n if not self._all_inputs_are_const(node.inputs[1:]) or self._is_graph_output(node, graph):\n return False\n if not self._all_inputs_are_const(dequant_node.inputs):\n return False\n graph.replace_input(node, node.input[0], dequant_node.input[0], 0)\n const_outputs = ConstFoldOptimizer.compute_const_folding(node, graph)\n graph.replace_all_inputs(node.output[0], dequant_node.output[0])\n graph.remove_node(node.name)\n dequant_const = dequant_node.inputs[0]\n if len(graph.find_output_consumers(dequant_const.output[0])) > 1:\n dequant_const = graph.copy_const(dequant_const)\n graph.replace_input(dequant_node, dequant_node.input[0], dequant_const.output[0], 0)\n dequant_const.set_tensor_value(const_outputs[0])\n return True",
"def cross_validate(self, curr_dataset, num_folds, max_depth, min_samples_per_node,\n is_stratified=True, print_tree=False, seed=None, print_samples=False,\n use_stop_conditions=False, max_p_value_chi_sq=0.1):\n classifications = [0] * curr_dataset.num_samples\n num_correct_classifications = 0\n num_correct_classifications_wo_unkown = 0\n total_cost = 0.0\n total_cost_wo_unkown = 0.0\n classified_with_unkown_value_array = [False] * curr_dataset.num_samples\n num_unkown = 0\n unkown_value_attrib_index_array = [0] * curr_dataset.num_samples\n max_depth_per_fold = []\n num_nodes_per_fold = []\n num_valid_nominal_attributes_in_root_per_fold = []\n num_values_root_attribute_list = []\n num_trivial_splits = 0\n time_taken_pruning_per_fold = []\n num_nodes_pruned_per_fold = []\n num_correct_trivial_classifications = 0\n\n fold_count = 0\n\n sample_indices_and_classes = list(enumerate(curr_dataset.sample_class))\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n random.shuffle(sample_indices_and_classes)\n shuffled_sample_indices, shuffled_sample_classes = zip(*sample_indices_and_classes)\n\n if is_stratified:\n for (training_randomized_indices,\n validation_randomized_indices) in StratifiedKFold(n_splits=num_folds).split(\n shuffled_sample_indices,\n shuffled_sample_classes):\n\n training_samples_indices = [shuffled_sample_indices[index]\n for index in training_randomized_indices]\n validation_sample_indices = [shuffled_sample_indices[index]\n for index in validation_randomized_indices]\n\n if print_samples:\n print('Samples used for validation in this fold:')\n print(validation_sample_indices)\n print()\n\n ((curr_classifications,\n curr_num_correct_classifications,\n curr_num_correct_classifications_wo_unkown,\n curr_total_cost,\n curr_total_cost_wo_unkown,\n curr_classified_with_unkown_value_array,\n curr_num_unkown,\n curr_unkown_value_attrib_index_array),\n curr_max_depth,\n curr_time_taken_pruning,\n curr_num_nodes_pruned) = self.train_and_test(curr_dataset,\n training_samples_indices,\n validation_sample_indices,\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n max_depth_per_fold.append(curr_max_depth)\n num_nodes_per_fold.append(self.get_root_node().get_num_nodes())\n num_valid_nominal_attributes_in_root_per_fold.append(\n sum(self._root_node.valid_nominal_attribute))\n try:\n root_node_split_attrib = self.get_root_node().node_split.separation_attrib_index\n if curr_dataset.valid_nominal_attribute[root_node_split_attrib]:\n num_values_root_attribute_list.append(sum(\n num_samples > 0\n for num_samples in self.get_root_node().contingency_tables[\n root_node_split_attrib].values_num_samples))\n except AttributeError:\n num_trivial_splits += 1\n for curr_index, validation_sample_index in enumerate(validation_sample_indices):\n classifications[validation_sample_index] = curr_classifications[curr_index]\n classified_with_unkown_value_array[validation_sample_index] = (\n curr_classified_with_unkown_value_array[curr_index])\n unkown_value_attrib_index_array[validation_sample_index] = (\n curr_unkown_value_attrib_index_array[curr_index])\n num_correct_classifications += curr_num_correct_classifications\n num_correct_classifications_wo_unkown += curr_num_correct_classifications_wo_unkown\n total_cost += curr_total_cost\n total_cost_wo_unkown += curr_total_cost_wo_unkown\n num_unkown += curr_num_unkown\n num_correct_trivial_classifications += round(\n len(validation_sample_indices) *\n (self.get_trivial_accuracy(validation_sample_indices) / 
100.0))\n\n fold_count += 1\n time_taken_pruning_per_fold.append(curr_time_taken_pruning)\n num_nodes_pruned_per_fold.append(curr_num_nodes_pruned)\n\n if print_tree:\n print()\n print('-' * 50)\n print('Fold:', fold_count)\n self.save_tree()\n else:\n for (training_samples_indices,\n validation_sample_indices) in KFold(n_splits=num_folds).split(\n shuffled_sample_indices):\n\n ((curr_classifications,\n curr_num_correct_classifications,\n curr_num_correct_classifications_wo_unkown,\n curr_total_cost,\n curr_total_cost_wo_unkown,\n curr_classified_with_unkown_value_array,\n curr_num_unkown,\n curr_unkown_value_attrib_index_array),\n curr_max_depth,\n curr_time_taken_pruning,\n curr_num_nodes_pruned) = self.train_and_test(curr_dataset,\n training_samples_indices,\n validation_sample_indices,\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n max_depth_per_fold.append(curr_max_depth)\n num_nodes_per_fold.append(self.get_root_node().get_num_nodes())\n num_valid_nominal_attributes_in_root_per_fold.append(\n sum(self._root_node.valid_nominal_attribute))\n try:\n root_node_split_attrib = self.get_root_node().node_split.separation_attrib_index\n if curr_dataset.valid_nominal_attribute[root_node_split_attrib]:\n num_values_root_attribute_list.append(sum(\n num_samples > 0\n for num_samples in self.get_root_node().contingency_tables[\n root_node_split_attrib].values_num_samples))\n except AttributeError:\n num_trivial_splits += 1\n for curr_index, validation_sample_index in enumerate(validation_sample_indices):\n classifications[validation_sample_index] = curr_classifications[curr_index]\n classified_with_unkown_value_array[validation_sample_index] = (\n curr_classified_with_unkown_value_array[curr_index])\n unkown_value_attrib_index_array[validation_sample_index] = (\n curr_unkown_value_attrib_index_array[curr_index])\n num_correct_classifications += curr_num_correct_classifications\n num_correct_classifications_wo_unkown += curr_num_correct_classifications_wo_unkown\n total_cost += curr_total_cost\n total_cost_wo_unkown += curr_total_cost_wo_unkown\n num_unkown += curr_num_unkown\n num_correct_trivial_classifications += round(\n len(validation_sample_indices) *\n (self.get_trivial_accuracy(validation_sample_indices) / 100.0))\n\n fold_count += 1\n time_taken_pruning_per_fold.append(curr_time_taken_pruning)\n num_nodes_pruned_per_fold.append(curr_num_nodes_pruned)\n\n if print_tree:\n print()\n print('-' * 50)\n print('Fold:', fold_count)\n self.save_tree()\n\n return (classifications,\n num_correct_classifications,\n num_correct_classifications_wo_unkown,\n total_cost,\n total_cost_wo_unkown,\n classified_with_unkown_value_array,\n num_unkown,\n unkown_value_attrib_index_array,\n time_taken_pruning_per_fold,\n num_nodes_pruned_per_fold,\n max_depth_per_fold,\n num_nodes_per_fold,\n num_valid_nominal_attributes_in_root_per_fold,\n num_values_root_attribute_list,\n num_trivial_splits,\n 100.0 * num_correct_trivial_classifications / curr_dataset.num_samples)",
"def prune(self):\n if len(self.cluster_status) >= 10000000:\n self.logger.debug(\"pruning memory\")\n for ip in [status.ip for status in self.cluster_status]:\n states = self.cluster_status[ip]\n previous_status = None\n prune_list = []\n for index, status in enumerate(sorted(states, key=lambda x: x.time)):\n if previous_status and previous_status.load == status.load:\n prune_list.append(index)\n else:\n previous_status = status\n for index in sorted(prune_list, reverse=True):\n self.logger.debug(\"pruning memory: index {0}\".format(index))\n del (self.cluster_status[index])",
"def _prune( tree, impurity_crit, dataSet, treeSeq ):\n\n\t\tsaved = {}\n\n\t\ttotal_leaf_impurity, num_leaves = DecisionTree._fetch(tree, impurity_crit, dataSet, saved)\n\n\t\tnodes, sets, G = saved['node'], saved['set'], saved['G']\n\n\t\t# choose TreeNode such that g is minimum to prune\n\t\tmin_g_ind = np.argmin(G)\n\t\tnode2Prune = nodes[min_g_ind]\n\t\tnode2Prune.value = DecisionTree._make_leaf(sets[min_g_ind], impurity_crit)\n\t\tnode2Prune.cut_off = None\n\n\t\t# get a new tree pruned\n\t\ttreeSeq['alpha'].append(G[min_g_ind])\n\t\ttreeSeq['tree'].append(tree)\n\t\ttreeSeq['num_leaves'].append(num_leaves-node2Prune.leaves()+1)\n\n\t\tif not (tree.left.cut_off is None and tree.right.cut_off is None):\n\n\t\t\tDecisionTree._prune(deepcopy(tree), impurity_crit, dataSet, treeSeq )\n\t\telse:\n\t\t\treturn",
"def prune(self, x_val, y_val):\n\n # make sure that the classifier has been trained before predicting\n if not self.is_trained:\n raise Exception(\"DecisionTreeClassifier has not yet been trained.\")\n\n #######################################################################\n # ** TASK 4.1: COMPLETE THIS METHOD **\n #######################################################################\n\n self.prune_tree(self.decision_tree, x_val, y_val)\n\n return self.decision_tree",
"def check_prune_methods(self) -> None:\n # get criterion class lists\n pruner_names = get_class_names_in_files(\n \"src\" + os.path.sep + \"runners\" + os.path.sep + \"pruner.py\"\n )\n # Remove abstract class name\n pruner_names.remove(\"Pruner\")\n pruner_names.remove(\"ChannelwisePruning\")\n\n # Check pruner method in config exists\n assert self.config[\"PRUNE_METHOD\"] in pruner_names\n\n # Common config\n assert \"PRUNE_AMOUNT\" in self.config[\"PRUNE_PARAMS\"]\n assert 0.0 < self.config[\"PRUNE_PARAMS\"][\"PRUNE_AMOUNT\"] < 1.0\n assert isinstance(self.config[\"PRUNE_PARAMS\"][\"PRUNE_AMOUNT\"], float)\n\n assert \"STORE_PARAM_BEFORE\" in self.config[\"PRUNE_PARAMS\"]\n assert (\n 0\n <= self.config[\"PRUNE_PARAMS\"][\"STORE_PARAM_BEFORE\"]\n <= self.config[\"TRAIN_CONFIG_AT_PRUNE\"][\"EPOCHS\"]\n )\n assert isinstance(self.config[\"PRUNE_PARAMS\"][\"STORE_PARAM_BEFORE\"], int)\n\n assert \"TRAIN_START_FROM\" in self.config[\"PRUNE_PARAMS\"]\n assert (\n 0\n <= self.config[\"PRUNE_PARAMS\"][\"TRAIN_START_FROM\"]\n <= self.config[\"TRAIN_CONFIG_AT_PRUNE\"][\"EPOCHS\"]\n )\n assert isinstance(self.config[\"PRUNE_PARAMS\"][\"TRAIN_START_FROM\"], int)\n\n assert \"PRUNE_AT_BEST\" in self.config[\"PRUNE_PARAMS\"]\n assert isinstance(self.config[\"PRUNE_PARAMS\"][\"PRUNE_AT_BEST\"], bool)\n\n # Config for methods\n if (\n self.config[\"PRUNE_METHOD\"] == \"Magnitude\"\n or self.config[\"PRUNE_METHOD\"] == \"SlimMagnitude\"\n ):\n assert \"NORM\" in self.config[\"PRUNE_PARAMS\"]\n # https://pytorch.org/docs/master/generated/torch.norm.html\n assert isinstance(self.config[\"PRUNE_PARAMS\"][\"NORM\"], int) or self.config[\n \"PRUNE_PARAMS\"\n ][\"NORM\"] in (\"fro\", \"nuc\")",
"def mask_propagation(\n cls, node: NNCFNode, graph: NNCFGraph, tensor_processor: Type[NNCFPruningBaseTensorProcessor]\n ) -> None:\n raise NotImplementedError",
"def main():\n\n # path of model that should be pruned\n model_path = ('saved_models/PATH_TO_MODEL/model.h5')\n\n # weights below this threshold will be set to zero\n # thresholds can be defined per layer\n thresholds = [0.03, 0.01, 0.01]\n\n # specify training epochs for retraining\n epochs = [1, 1, 1]\n # define the layer index that should be pruned\n # only feedforward layers can be pruned!!!\n layers = [3, 4, 5]\n\n # TrainingData section\n # specify input dimension of the sliding window using 'slice_len'\n slice_len = 30\n\n # output delay for AREUS data\n delay = 6\n\n td1 = TrainingData()\n training_data = td1.window_dim_1_sized_td(slice_len, delay)\n\n # Pruning runs for each layer\n p_run = PruningRun(model_path, training_data)\n for i, layer in enumerate(layers):\n p_run.prune_layer(layer, thresholds[i], epochs[i])\n\n # when no retraining is needed\n #p_run.prune_layer_no_retraining(layer, thresholds[i])",
"def remove_node(self, node):\n affected_nodes = [v for u, v in self.edges() if u == node]\n\n for affected_node in affected_nodes:\n node_cpd = self.get_cpds(node=affected_node)\n if node_cpd:\n node_cpd.marginalize([node], inplace=True)\n\n if self.get_cpds(node=node):\n self.remove_cpds(node)\n super(BayesianModel, self).remove_node(node)",
"def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right",
"def train(self, curr_dataset, training_samples_indices, max_depth, min_samples_per_node,\n use_stop_conditions=False, max_p_value_chi_sq=0.1):\n self._curr_dataset = curr_dataset\n print('Starting tree training...')\n self._root_node = TreeNode(\n curr_dataset,\n training_samples_indices,\n curr_dataset.valid_nominal_attribute[:],\n curr_dataset.valid_numeric_attribute[:],\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n self._root_node.create_subtree(self._criterion)\n print('Starting pruning trivial subtrees...')\n start_time = timeit.default_timer()\n num_nodes_pruned = self._root_node.prune_trivial_subtrees()\n time_taken_pruning = timeit.default_timer() - start_time\n print('Done!')\n return time_taken_pruning, num_nodes_pruned",
"def prune_skincluster(skin_node, prune_value = 0.01):\n pm.skinPercent(skin_node, pruneWeights = prune_value)",
"def prune(self, n_leaves):\n self.tree_ = prune(self.tree_, n_leaves)\n return self",
"def tag_node_from_pruning ( self, tree, node, feature_matrix, target_array ):\n # If is a leaf, return False\n if node.nodes is None or len ( node.nodes ) == 0:\n return False\n\n # Score predictions from whole tree\n predictions = tree.predict ( feature_matrix )\n whole_tree_score = self.evaluate_function ( target_array, predictions )\n\n # Get the children from the node\n children = BaseTree.collect_children ( node )\n # Save original nodes\n original_nodes = node.nodes\n # Update node to be a leaf\n node.updateTreeValues (\n nodes = {},\n children = children,\n feature_column = node.feature_column,\n feature_value = node.feature_value,\n node_type = node.node_type,\n )\n\n # Score predictions from leaf\n predictions = tree.predict ( feature_matrix )\n pruned_tree_score = self.evaluate_function ( target_array, predictions )\n\n # If leaf is better, don't swap it back and return True for change\n if whole_tree_score < pruned_tree_score:\n return True\n\n # Otherwise, change the node back to the original node.\n node.updateTreeValues (\n children = [],\n nodes = original_nodes,\n feature_column = node.feature_column,\n feature_value = node.feature_value,\n node_type = node.node_type,\n )\n # Return False (for no change)\n return False\n #End tag_node_from_pruning()",
"def test_with_data(data):\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n unpruned = induce_node_tree(training_set, original_issues, \"D\", -1)\r\n pruned = prune_tree(unpruned, tuning_set)\r\n\r\n return pruned",
"def prune(self, node, exclude=None):\n for child in node.children:\n if exclude and exclude.id != child.id:\n self.prune(child, exclude)\n\n self.nodes[node.id] = None\n del self.nodes[node.id]",
"def pruning_order(self, max_to_prune=None):\n\n def _get_terminal_nodes(children):\n \"\"\"Lists the nodes that only have leaves as children\"\"\"\n leaves = np.where(children[:,0]==_tree.TREE_LEAF)[0]\n child_is_leaf = np.in1d(children, leaves).reshape(children.shape)\n return np.where(np.all(child_is_leaf, axis=1))[0]\n\n def _next_to_prune(tree, children=None):\n \"\"\"Weakest link pruning for the subtree defined by children\"\"\"\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]\n\n if max_to_prune is None:\n max_to_prune = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n\n children = np.array([self.children_left.copy(), self.children_right.copy()]).T\n nodes = list()\n\n while True:\n node = _next_to_prune(self, children)\n nodes.append(node)\n\n if (len(nodes) == max_to_prune) or (node == 0):\n return np.array(nodes)\n\n #Remove the subtree from the children array\n children[children[node], :] = _tree.TREE_UNDEFINED\n children[node, :] = _tree.TREE_LEAF",
"def prune_(self):\n idx = self.factor_lams() > 0\n self.factors = [f[:, idx] for f in self.factors]\n self.rank = np.sum(idx)"
] |
[
"0.679676",
"0.66037804",
"0.6426782",
"0.6208207",
"0.6121172",
"0.59960705",
"0.5981017",
"0.59379864",
"0.57365125",
"0.563774",
"0.5563799",
"0.55594593",
"0.5546524",
"0.5513652",
"0.5510996",
"0.5478574",
"0.543666",
"0.54011655",
"0.5294919",
"0.5273336",
"0.52240556",
"0.52043444",
"0.5169197",
"0.5157975",
"0.5124125",
"0.5123524",
"0.50842994",
"0.5064763",
"0.5026543",
"0.502411"
] |
0.7665027
|
0
|
Recursively goes down a tree until the node depth matches the input depth. If the node's children are leaves, it checks whether it's optimal to prune the node.
|
def __prune_tree(self, accuracy, node, validation_data, depth):
if node.is_leaf:
return accuracy
if node.depth == depth:
if node.left_child.is_leaf and node.right_child.is_leaf:
accuracy = self.__prune_node(accuracy, node, validation_data)
return accuracy
accuracy = self.__prune_tree(accuracy, node.left_child, validation_data, depth)
accuracy = self.__prune_tree(accuracy, node.right_child, validation_data, depth)
return accuracy
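
For context, a hedged, self-contained sketch of the depth-targeted pruning pass the snippet above performs. The Node class and the always-prune policy in prune_node are illustrative assumptions, not part of the dataset entry:

class Node:
    def __init__(self, depth, left=None, right=None, label=None):
        self.depth = depth
        self.left_child = left
        self.right_child = right
        self.label = label  # majority class carried by a leaf

    @property
    def is_leaf(self):
        return self.left_child is None and self.right_child is None

def prune_node(accuracy, node, validation_data):
    # Toy policy: always collapse the node; a real pruner would re-evaluate
    # validation accuracy and keep the change only if accuracy does not drop.
    node.left_child = node.right_child = None
    return accuracy

def prune_tree(accuracy, node, validation_data, depth):
    # Same control flow as the snippet above, with the private helpers inlined.
    if node.is_leaf:
        return accuracy
    if node.depth == depth:
        if node.left_child.is_leaf and node.right_child.is_leaf:
            accuracy = prune_node(accuracy, node, validation_data)
        return accuracy
    accuracy = prune_tree(accuracy, node.left_child, validation_data, depth)
    accuracy = prune_tree(accuracy, node.right_child, validation_data, depth)
    return accuracy

inner = Node(1, Node(2, label=0), Node(2, label=1))
root = Node(0, inner, Node(1, label=1))
prune_tree(0.9, root, validation_data=None, depth=1)
print(inner.is_leaf)  # True: the depth-1 node with two leaf children was collapsed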
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def prune(self, rng, get_nodes, max_depth=1):\n if not self.children:\n return\n for i_c, child in enumerate(self.children):\n if child.min_depth >= max_depth:\n self.children[i_c] = Node(\n rng.choice(get_nodes(arity=0)),\n self.tree_type)\n self.children[i_c].parent = self\n elif max_depth > 1:\n child.prune(rng, get_nodes, max_depth - 1)",
"def pruning_order(self, max_to_prune=None):\n\n def _get_terminal_nodes(children):\n \"\"\"Lists the nodes that only have leaves as children\"\"\"\n leaves = np.where(children[:,0]==_tree.TREE_LEAF)[0]\n child_is_leaf = np.in1d(children, leaves).reshape(children.shape)\n return np.where(np.all(child_is_leaf, axis=1))[0]\n\n def _next_to_prune(tree, children=None):\n \"\"\"Weakest link pruning for the subtree defined by children\"\"\"\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]\n\n if max_to_prune is None:\n max_to_prune = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n\n children = np.array([self.children_left.copy(), self.children_right.copy()]).T\n nodes = list()\n\n while True:\n node = _next_to_prune(self, children)\n nodes.append(node)\n\n if (len(nodes) == max_to_prune) or (node == 0):\n return np.array(nodes)\n\n #Remove the subtree from the children array\n children[children[node], :] = _tree.TREE_UNDEFINED\n children[node, :] = _tree.TREE_LEAF",
"def depth_limited_search(problem, limit):\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path",
"def cutoff_test(self, state, depth):\n return self.depth_limit > 0 and depth > self.depth_limit",
"def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list",
"def DFS():\n\n\tglobal nonleaves\n\tdict_cp=copy.deepcopy(dict)\n\tnonleaves=[\"Root\"]\n\twaitlist=[\"Root\"]\n\twhile waitlist:\n\t\tpeek=waitlist[-1]\n\t\tif len(dict_cp[peek])>=2:\n\t\t\tsub_node=dict_cp[peek].pop()\n\t\t\tif dict[sub_node]:\n\t\t\t\tnonleaves.append(sub_node)\n\t\t\t\twaitlist.append(sub_node)\n\t\telse:\n\t\t\twaitlist.pop()",
"def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()",
"def _prune( tree, impurity_crit, dataSet, treeSeq ):\n\n\t\tsaved = {}\n\n\t\ttotal_leaf_impurity, num_leaves = DecisionTree._fetch(tree, impurity_crit, dataSet, saved)\n\n\t\tnodes, sets, G = saved['node'], saved['set'], saved['G']\n\n\t\t# choose TreeNode such that g is minimum to prune\n\t\tmin_g_ind = np.argmin(G)\n\t\tnode2Prune = nodes[min_g_ind]\n\t\tnode2Prune.value = DecisionTree._make_leaf(sets[min_g_ind], impurity_crit)\n\t\tnode2Prune.cut_off = None\n\n\t\t# get a new tree pruned\n\t\ttreeSeq['alpha'].append(G[min_g_ind])\n\t\ttreeSeq['tree'].append(tree)\n\t\ttreeSeq['num_leaves'].append(num_leaves-node2Prune.leaves()+1)\n\n\t\tif not (tree.left.cut_off is None and tree.right.cut_off is None):\n\n\t\t\tDecisionTree._prune(deepcopy(tree), impurity_crit, dataSet, treeSeq )\n\t\telse:\n\t\t\treturn",
"def search(board:Board, max_depth=3) -> DiGraph:\n\n n = 0 # node label which also serves as a node counter\n depth = 0\n \n G = nx.DiGraph()\n G.add_node(0, winner=None, player=0, board=board.state, board_p = board.display)\n \n # First branch in look ahead\n newleavelist=[]\n parent_node = n\n parent_board = Board(G.nodes[n]['board'][0], G.nodes[n]['board'][1])\n\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=0)\n if move not in moves_available:\n continue\n \n # Do move\n new_board = parent_board.update_board(Move(player=0, move=move))\n \n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1, board=new_board.state, board_p = new_board.display)\n G.add_edge(parent_node, n, move=move)\n if new_board.is_winner:\n continue\n newleavelist.append(n)\n \n depth=1\n # subsequent branches\n while depth < max_depth:\n leavelist = newleavelist[:]\n newleavelist = []\n for leave in leavelist: \n # Get parent board\n parent_board = Board(G.nodes[leave]['board'][0], G.nodes[leave]['board'][1])\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=depth%2)\n if move not in moves_available:\n continue\n # Do move\n new_board = parent_board.update_board(Move(player=depth%2, move=move))\n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1-depth%2, \n board=new_board.state, board_p=new_board.display)\n G.add_edge(leave, n, move=move)\n if new_board.is_winner:\n continue\n \n newleavelist.append(n)\n depth=depth+1\n return G",
"def treePolicy(node):\n while not node.getState().checkTerminal():\n if node.checkFullyExpanded():\n node = findBestChild(node, True)\n else:\n return expandNode(node)\n return node",
"def prune_trivial_subtrees(self):\n num_pruned = 0\n if not self.is_leaf:\n children_classes = set()\n num_trivial_children = 0\n for child_node in self.nodes:\n num_pruned += child_node.prune_trivial_subtrees()\n if child_node.is_leaf:\n num_trivial_children += 1\n children_classes.add(child_node.most_common_int_class)\n if num_trivial_children == len(self.nodes) and len(children_classes) == 1:\n self.is_leaf = True\n num_pruned += num_trivial_children\n self.nodes = []\n return num_pruned",
"def calc_node_depth(depth, node):\n if not isinstance(node, nodes.section):\n return depth - 1\n return calc_node_depth(depth + 1, node.parent)",
"def prune( self ):\n if self.children is None:\n return\n \n # recursively prune from bottom up\n for space in self.children:\n space.prune()\n\n # if all child nodes are empty remove them all\n for space in self.children:\n if not space.is_empty():\n return\n\n self.children = None",
"def traverse_depth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extendleft(reversed(node.children))",
"def prune(self, n_leaves):\n true_node_count = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n leaves = np.where(self.children_left == _tree.TREE_LEAF)[0]\n to_remove_count = true_node_count - 2*n_leaves + 1\n\n nodes_to_remove = pruning_order(self, max_to_prune = to_remove_count/2)\n\n # self._copy is gone, but this does the same thing\n out_tree = _tree.Tree(*self.__reduce__()[1])\n out_tree.__setstate__(self.__getstate__().copy())\n\n for node in nodes_to_remove:\n #TODO: Add a Tree method to remove a branch of a tree\n out_tree.children_left[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[node] = _tree.TREE_LEAF\n out_tree.children_right[node] = _tree.TREE_LEAF\n\n # FIXME: currently should not change node_count, after deletion\n # this is not number of nodes in the tree\n #out_tree.node_count -= 2*len(nodes_to_remove)\n\n return out_tree",
"def at_depth_limit(self):\n return self.depth > self.depth_limit",
"def general_search(fringe, visited, limiting_depth):\n node_to_be_explored = fringe[0]\n node_state = node_to_be_explored['state']\n visited[node_state] = node_to_be_explored\n if goal_test(node_to_be_explored['state']):\n return generate_path(node_to_be_explored, visited)\n current_depth = node_to_be_explored['depth']\n if current_depth == limiting_depth:\n return False\n children = [\n {\n 'state': child_state,\n 'parent': node_state,\n 'depth': current_depth + 1,\n }\n for child_state in operator(node_state)]\n for child in children:\n if child['state'] in visited:\n continue\n fringe_copy = [child] + fringe[1:]\n visited_copy = visited.copy()\n solution = general_search(fringe_copy, visited_copy, limiting_depth)\n if solution:\n return solution\n else:\n continue\n return False",
"def prune_tree(self):\n tree = copy.deepcopy(self.tree)\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node(tree, tree.root)\n return tree",
"def prune(self, x_val, y_val):\n\n # make sure that the classifier has been trained before predicting\n if not self.is_trained:\n raise Exception(\"DecisionTreeClassifier has not yet been trained.\")\n\n # get the maximum depth\n deepest_depth = get_max_depth(self.root)\n\n # explore the depth starting from (max_depth - 1) to half of the max_depth\n half_of_max_depth = deepest_depth // 2\n for depth in range(deepest_depth - 1, half_of_max_depth, -1):\n explore_nodes_to_prune(self, self.root, x_val, y_val, depth)\n\n print(\"Pruning completed\")",
"def alpha_beta_prune(board,player,depth):\r\n \r\n global max_depth\r\n global depth_limit\r\n global nodes_generated\r\n global max_prune\r\n global min_prune\r\n global start_time\r\n \r\n moves_and_val = {}\r\n start_time = float(time.time())\r\n \r\n nodes_generated += 1\r\n \r\n flag = None \r\n max_util = -1000\r\n min_util = 1000\r\n \r\n all_moves = legal_moves(board,player)\r\n flag = result(board, all_moves, max_util, min_util, depth,flag,player,moves_and_val)\r\n \r\n if player == -1:\r\n optimal = min(moves_and_val)\r\n if player == 1:\r\n optimal = max(moves_and_val)\r\n \r\n finish_time = float(time.time())\r\n if (finish_time - start_time) < 5 and max_depth == depth_limit:\r\n depth_limit += 1\r\n print(\"Now performing Depth-Limited Search with incremented depth_limit\")\r\n return alpha_beta_prune(board,player,0)\r\n else:\r\n if flag == None:\r\n return all_moves[optimal]\r\n else:\r\n return flag",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited_nodes = []\n start_node = problem.getStartState()\n visited_nodes.append(start_node)\n curr_node = start_node\n q = util.Queue()\n directions = util.Queue()\n q.push(curr_node)\n goal_found = problem.isGoalState(curr_node)\n\n while not goal_found:\n nxt_node_list = problem.getSuccessors(curr_node)\n nxt_node_found = False\n\n # Check if a child can be found which has not been visited\n for node in nxt_node_list:\n nxt_node = node[0]\n move = node[1]\n if nxt_node not in visited_nodes:\n nxt_node_found = True # mark that a child node has been found\n q.push(nxt_node) # add the node in the tree\n directions.push(move) # add the direction\n visited_nodes.append(nxt_node) # mark the node as visited\n break\n\n # If child not found, go to parent\n if not nxt_node_found:\n q.list.pop(0)\n directions.list.pop(0)\n\n if q.isEmpty(): break\n\n curr_node = q.list[0]\n goal_found = problem.isGoalState(curr_node)\n\n final_moves = []\n while not directions.isEmpty():\n final_moves.append(directions.pop())\n \n return final_moves\n #util.raiseNotDefined()",
"def depth_check(self, depth):\r\n if depth >= self.ply:\r\n return True\r\n return False",
"def prune(self, n_leaves):\n self.tree_ = prune(self.tree_, n_leaves)\n return self",
"def find_depth_tree(root):\n if root is not None:\n max_depth = 0\n if root.branches is None:\n return 1\n else:\n for value in root.branches.values():\n max_depth = max(max_depth, DecisionTree.find_depth_tree(value))\n return 1 + max_depth\n else:\n return 1",
"def minimax(self, board, depth, self_color, alpha, beta):\r\n\r\n # Reached terminal node, evaluate and pass up the tree.\r\n # Terminal nodes are either those at max depth, or the last ones we have time for.\r\n if depth == self.minimax_max_depth or (time.time() - self.start_time > self.time_limit and depth != 0):\r\n return self.evaluate(board)\r\n\r\n # Reached transient node, keep searching.\r\n else:\r\n\r\n possible_moves = self.find_possible_moves(board, self_color)\r\n\r\n if possible_moves:\r\n\r\n # Self makes a move\r\n if depth % 2 == 0:\r\n\r\n children_nodes = {}\r\n value = -10000000\r\n\r\n for move in possible_moves:\r\n\r\n updated_board = self.update_board(board, self_color, move)\r\n children_nodes[move] = self.minimax(updated_board, depth + 1, self.get_opponent_color(self_color), alpha, beta)\r\n\r\n temp_value = max(children_nodes[move], alpha)\r\n\r\n # Alpha-beta pruning\r\n if temp_value > value:\r\n value = temp_value\r\n if temp_value >= beta:\r\n break\r\n if temp_value > alpha:\r\n alpha = temp_value\r\n\r\n if depth == 0:\r\n # Tree has been searched, return all possible moves with their respective worth\r\n return children_nodes\r\n\r\n # Else, just pass current node's worth up the tree\r\n return value\r\n\r\n # Opponent makes a move\r\n else:\r\n\r\n children_nodes = {}\r\n value = 10000000\r\n\r\n for move in possible_moves:\r\n\r\n updated_board = self.update_board(board, self_color, move)\r\n children_nodes[move] = self.minimax(updated_board, depth + 1, self.get_opponent_color(self_color), alpha, beta)\r\n\r\n temp_value = min(children_nodes[move], beta)\r\n\r\n # Alpha-beta pruning\r\n if temp_value < value:\r\n value = temp_value\r\n if temp_value <= alpha:\r\n break\r\n if temp_value < beta:\r\n beta = temp_value\r\n\r\n # Else, just pass current node's worth up the tree\r\n return value\r\n\r\n # Return something even if all hell freezes over.\r\n return 0",
"def bfs_w_depth(tree):\n visited = []\n frontier = [(0, tree)]\n while frontier:\n depth, tree = frontier.pop(0)\n if tree is not None:\n visited.append((depth, tree[0]))\n frontier.append((depth + 1, tree[1]))\n frontier.append((depth + 1, tree[2]))\n return visited",
"def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state #state of the game\n self.parent = parent #parent of the node\n self.action = action #action that led to that node\n self.pathCost = pathCost #total cost of tha path until that node\n\n def solution(self): #return the path to the goal node\n path = [] #path is a list of actions\n tempNode = self #temp node is the goal node\n while tempNode.state != problem.getStartState(): #until we get to the initial node\n path.insert(0, tempNode.action) #insert at the start of the list\n tempNode = tempNode.parent #go to the parent of the node\n return path #return list of actions\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost #total cost is the total cost of the parent + the cost of the last action\n child = Node(successor, parent, action, pathCost) #create new child node\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0) #create initial node with start state and no parent\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Stack() #dfs uses a stack\n frontier.push(initialNode) #insert initial node to the stack\n explored = set() #explored nodes are added to a set\n\n while not frontier.isEmpty(): #while stack is not empty\n nextNode = frontier.pop() #extract the last node entered\n explored.add(nextNode.state) #add the state of the node to the explored set\n for successor, action, stepCost in problem.getSuccessors(nextNode.state): #for every successor create a new child\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list: #if child is not already explored or is not in the stack\n if problem.isGoalState(child.state): # if node is goal node we return the path of actions\n return child.solution()\n frontier.push(child) #insert it into the stack\n\n return [] #if stack is empty\n util.raiseNotDefined()",
"def test_MaxDepth_SimpleTree(self):\n\n root = TreeNode(0)\n root.addLeft(1)\n root.addRight(5)\n root.left.addLeft(2)\n root.left.addRight(3)\n root.left.right.addRight(4)\n root.right.addRight(6)\n\n self.assertEqual(findMaxDepthDFS(root),3)",
"def depth_limit_search(node, limit, explored=None):\n if node.goal_test():\n return {'cutoff': False, 'is_solution': True, 'solution_node': node}\n elif limit == 0:\n return {'cutoff': True, 'is_solution': False, 'solution_node': None}\n else:\n if not explored:\n explored = []\n cutoff_occurred = False\n explored.append(node)\n for successor in node.generate_successors():\n if not successor:\n continue\n if successor.is_in(explored):\n continue\n result = depth_limit_search(successor, limit-1, explored)\n if result['cutoff']:\n cutoff_occurred = True\n elif result['is_solution']:\n return result\n if cutoff_occurred:\n return {'cutoff': True, 'is_solution': False, 'solution_node': None}\n else:\n return {'cutoff': False, 'is_solution': False, 'solution_node': None}"
] |
[
"0.6360459",
"0.604336",
"0.5938862",
"0.58943194",
"0.5823375",
"0.5788803",
"0.57416785",
"0.5709899",
"0.5696002",
"0.5655448",
"0.56483674",
"0.56330293",
"0.5631921",
"0.563008",
"0.5626417",
"0.56150573",
"0.56009525",
"0.55969006",
"0.55513614",
"0.5547976",
"0.55402696",
"0.55225724",
"0.5522015",
"0.5492552",
"0.54712963",
"0.5468739",
"0.54686236",
"0.5462049",
"0.54605037",
"0.5448133"
] |
0.64476705
|
0
|
Add an expression for the given binary address.
|
def add_expression(binary_addr, s):
assert not isinstance(s, labelmanager.Label) # TODO!?
# TODO: Warn/assert if addr already in expressions? Allow overriding this via an optional bool argument?
if binary_addr not in expressions:
expressions[binary_addr] = s
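
A hedged usage sketch of the first-writer-wins pattern above, with the module-level expressions dictionary reduced to a plain dict and the labelmanager assert omitted; the address and names are made up for illustration:

expressions = {}  # binary address -> formatting expression (illustrative stand-in)

def add_expression(binary_addr, s):
    # The first expression recorded for an address wins; later calls are ignored.
    if binary_addr not in expressions:
        expressions[binary_addr] = s

add_expression(0x2000, "num_lives")
add_expression(0x2000, "ignored")  # silently skipped, 0x2000 already has an expression
print(expressions[0x2000])  # num_lives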
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_op(self, expr):\n from cascada.bitvector import operation\n assert isinstance(expr, operation.Operation)\n assert not self.contain_op(expr)\n name = \"{}{}\".format(self.id_prefix, self.counter)\n self.counter += 1\n identifier = core.Variable(name, expr.width)\n self.table[identifier] = expr\n\n return identifier",
"def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left",
"def add(self, addr):\n\n val = self.mem.read(addr)\n result = self.alu.add(self.reg.accum, val)\n self.reg.accum = result",
"def addx(self, addr):\n\n val = self.mem_if.read(addr, index=self.reg.idx)\n result = self.alu.add(self.reg.accum, val)\n self.reg.accum = result",
"def add_expr_to_comp(self, comp, expr):\n if not isinstance(comp, cellml_component):\n comp = self.model.get_component_by_name(comp)\n if not hasattr(comp, u'math'):\n # Create the math element\n math = comp.xml_create_element(u'math', NSS[u'm'])\n comp.xml_append(math)\n # Append this expression\n comp.math.xml_append(expr)",
"def get_expression(binary_addr, expected_value):\n\n expression = expressions[binary_addr]\n utils.check_expr(expression, expected_value)\n return expression",
"def add_to_reg(self):\n register = (self.opcode & 0xF00) >> 8\n value = self.opcode & 0xFF\n sum = self.registers[register] + value\n if sum > 0xFF:\n sum = bit_utils.wrap_around(sum, 0xFF + 1)\n self.registers[register] = sum\n logger.info(\"Added {} to register V{}\".format(value, register))",
"def expression(self, expr):\n self.set(expression=expr)",
"def ADD():\n global pointer, memory, registers\n registers[memory[pointer + 0x02]] += memory[pointer + 0x01]\n pointer += 0x03",
"def add(self, name, expression, level):\n assert isinstance(level, EnvironmentLevel)\n index = len(level.expressions)\n level.bindings[name] = index\n level.expressions.append(expression)",
"def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)",
"def append(self,instr):\n self.instructions.append(instr)",
"def addaddr( addr ):\n\t\tif cmds:\n\t\t\tcmds.last().addrs.append( addr )\n\t\telse:\n\t\t\tlog.err( \"A command must preceed the first address\" )",
"def expr(runtime_addr, s):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert memorymanager.is_data_loaded_at_binary_addr(binary_addr)\n\n if isinstance(s, dict):\n # Dictionary supplied.\n # Look up value in binary, and use that as key in dictionary\n val = get_u8_binary(binary_addr)\n classification.add_expression(binary_addr, s[val])\n else:\n classification.add_expression(binary_addr, s)",
"def add_operation(self):\n n1 = self.memory[self.memory[self._cursor + 1]]\n n2 = self.memory[self.memory[self._cursor + 2]]\n position = self.memory[self._cursor + 3]\n self.memory[position] = n1 + n2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1} + {n2} = {n1 + n2}')\n return",
"def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1 + n2}')\n self._cursor += 4\n return",
"def instruction_call(self, address):\n next_instruction_offset = self.exec_ptr + 2 # the value of the next instruction's memory\n\n self.stack_push(next_instruction_offset)\n\n if Vm.is_register(address):\n address = self.get_register(address)\n\n self.exec_ptr = Vm.filter_mem_address(address)",
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def __call__(self, expression):\n self.set_expression(expression)",
"def define_expression(DomainName=None, Expression=None):\n pass",
"def link_expr(self, expr):\n if expr.kind == PTN.VAR_EXP:\n self.link_to_dec(expr)\n self.print_debug(expr.line_number, self.link_message(expr))\n elif expr.kind == PTN.ARR_EXP:\n self.link_to_dec(expr)\n self.link_expr(expr.index)\n self.print_debug(expr.line_number, self.link_message(expr))\n elif expr.kind == PTN.FUN_CALL_EXP:\n self.link_to_dec(expr, function=True)\n self.print_debug(expr.line_number, self.link_message(expr))\n if expr.params is not None:\n map(self.link_expr, expr.params)\n elif expr.kind in (PTN.ADDR_EXP,\n PTN.DEREF_EXP,\n PTN.NEG_EXP):\n self.link_expr(expr.exp)\n elif isinstance(expr, OpExpNode):\n self.link_expr(expr.l_exp)\n self.link_expr(expr.r_exp)",
"def add(self, node, **offset):\n return self.dtype.add(self, node, **offset)",
"def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)",
"def extractAddress(address, operandStruct):\n # Handles addressing mode for register or register offset\n if address[0] == \"[\" and address[-1] == \"]\":\n registerOffset = address[1:-1].split(\",\")\n if len(registerOffset) == 0 or len(registerOffset) > 2: raise Exception(\"Invalid arguments for instruction\")\n # Sets the Rin component\n Rin = extractRegister(registerOffset[0])\n if Rin == None: raise Exception(\"Invalid arguments for instruction\")\n operandStruct[\"Rin\"] = Rin\n # Sets the addressing mode\n operandStruct[\"addressingMode\"] = \"Register\"\n if len(registerOffset) == 2:\n if registerOffset[1].isnumeric(): \n operandStruct[\"offset\"] = int(registerOffset[1])\n else:\n # Otherwise the offset is DATA and so a symbol is stored for the instruciton compiler\n operandStruct[\"awaiting\"].append({\"DATA\": {\"symbol\": registerOffset[1], \"output\":\"offset\"}})\n # Sets the addressing mode to registeroffset\n operandStruct[\"addressingMode\"] = \"RegisterOffset\"\n return\n # Handles indirect addressing mode\n if address[0] == \"@\":\n if len(address) == 1: raise Exception(\"Invalid arguments for instruction\")\n indrectAddress = address[1:]\n if not indrectAddress.isnumeric(): raise Exception(\"Invalid arguments for instruction\")\n operandStruct[\"indirectAddress\"] = int(indrectAddress)\n # Sets the addressing mode to indirect\n operandStruct[\"addressingMode\"] = \"Indirect\"\n return\n # Handles absolute addressing mode\n if address.isnumeric():\n operandStruct[\"address\"] = int(address)\n operandStruct[\"addressingMode\"] = \"Absolute\"\n return\n # Otherwise the address is a LABEL and so a symbol is stored for the instruciton compiler\n operandStruct[\"awaiting\"].append({\"LABEL\": {\"symbol\": address, \"output\":\"address\"}})\n operandStruct[\"addressingMode\"] = \"Absolute\"",
"def add(self, addr):\n\n if len(addr) == 4:\n return ipset.ipset_ipv4_add(self.set, addr)\n\n elif len(addr) == 16:\n return ipset.ipset_ipv6_add(self.set, addr)\n\n else:\n raise ValueError(\"Invalid address\")",
"def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)",
"def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)",
"def add(self, params):\n if len(params) < 2:\n return\n x = self.reg_dct[params[0]]\n y = self.reg_dct[params[1]]\n self.reg_dct[params[0]] = (x + y) % (2** 32)",
"def __init__(self, expr1, expr2, coeff=1.0, name='add'):\n super(SumExpression, self).__init__(e1=expr1, e2=expr2, name=name)\n self._coeff = coeff\n self.domain = self.e1.domain",
"def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)"
] |
[
"0.64002496",
"0.62857234",
"0.62688",
"0.605564",
"0.5965186",
"0.59127086",
"0.5830413",
"0.57543206",
"0.56582236",
"0.55917954",
"0.5561406",
"0.54797566",
"0.54700917",
"0.54415226",
"0.5429673",
"0.54226315",
"0.5420622",
"0.54032975",
"0.53921527",
"0.5365806",
"0.5355792",
"0.53522855",
"0.5344711",
"0.5331377",
"0.5301559",
"0.53009504",
"0.5273859",
"0.5270971",
"0.52679664",
"0.526034"
] |
0.81047773
|
0
|
Get the previously supplied expression for the given address.
|
def get_expression(binary_addr, expected_value):
expression = expressions[binary_addr]
utils.check_expr(expression, expected_value)
return expression
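
A hedged sketch of how the lookup is typically exercised; check_expr here is a toy stand-in for utils.check_expr that just compares a known symbol value against the byte actually found in the binary, and the data is made up:

expressions = {0x2000: "num_lives"}  # illustrative data
constants = {"num_lives": 3}

def check_expr(expression, expected_value):
    # Toy stand-in: a real checker would evaluate the expression properly.
    assert constants.get(expression, expected_value) == expected_value

def get_expression(binary_addr, expected_value):
    expression = expressions[binary_addr]
    check_expr(expression, expected_value)
    return expression

print(get_expression(0x2000, 3))  # num_lives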
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def eval_take_address(self, expr):\n if isinstance(expr, expressions.VariableAccess):\n declaration = expr.variable.declaration\n if isinstance(\n declaration,\n (\n declarations.VariableDeclaration,\n declarations.ParameterDeclaration,\n declarations.ConstantDeclaration,\n declarations.FunctionDeclaration,\n ),\n ):\n value = self.codegenerator.ir_var_map[declaration]\n cval = (ir.ptr, value.name)\n else: # pragma: no cover\n raise NotImplementedError()\n elif isinstance(expr, expressions.CompoundLiteral):\n cval = self.eval_compound_literal(expr)\n else: # pragma: no cover\n raise NotImplementedError()\n return cval",
"def get_address(self, address=None):\n return self.__get_addr_grp('address', address)",
"def part(expr,address):\n for num in address:\n expr = expr.args[num]\n return expr",
"def get_address8(binary_addr):\n\n operand = memory_binary[binary_addr]\n if binary_addr not in expressions:\n return disassembly.get_label(operand, binary_addr)\n return get_expression(binary_addr, operand)",
"def extractAddress(address, operandStruct):\n # Handles addressing mode for register or register offset\n if address[0] == \"[\" and address[-1] == \"]\":\n registerOffset = address[1:-1].split(\",\")\n if len(registerOffset) == 0 or len(registerOffset) > 2: raise Exception(\"Invalid arguments for instruction\")\n # Sets the Rin component\n Rin = extractRegister(registerOffset[0])\n if Rin == None: raise Exception(\"Invalid arguments for instruction\")\n operandStruct[\"Rin\"] = Rin\n # Sets the addressing mode\n operandStruct[\"addressingMode\"] = \"Register\"\n if len(registerOffset) == 2:\n if registerOffset[1].isnumeric(): \n operandStruct[\"offset\"] = int(registerOffset[1])\n else:\n # Otherwise the offset is DATA and so a symbol is stored for the instruciton compiler\n operandStruct[\"awaiting\"].append({\"DATA\": {\"symbol\": registerOffset[1], \"output\":\"offset\"}})\n # Sets the addressing mode to registeroffset\n operandStruct[\"addressingMode\"] = \"RegisterOffset\"\n return\n # Handles indirect addressing mode\n if address[0] == \"@\":\n if len(address) == 1: raise Exception(\"Invalid arguments for instruction\")\n indrectAddress = address[1:]\n if not indrectAddress.isnumeric(): raise Exception(\"Invalid arguments for instruction\")\n operandStruct[\"indirectAddress\"] = int(indrectAddress)\n # Sets the addressing mode to indirect\n operandStruct[\"addressingMode\"] = \"Indirect\"\n return\n # Handles absolute addressing mode\n if address.isnumeric():\n operandStruct[\"address\"] = int(address)\n operandStruct[\"addressingMode\"] = \"Absolute\"\n return\n # Otherwise the address is a LABEL and so a symbol is stored for the instruciton compiler\n operandStruct[\"awaiting\"].append({\"LABEL\": {\"symbol\": address, \"output\":\"address\"}})\n operandStruct[\"addressingMode\"] = \"Absolute\"",
"def getSymbolAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.symbol.Symbol:\n ...",
"def get(self, expression: str, default=None):\n expression = expression.strip().lstrip('{').rstrip('}').strip()\n environment = Environment()\n expression = environment.compile_expression(\n expression,\n undefined_to_none=False\n )\n value = expression(**self._context)\n\n if isinstance(value, Undefined):\n return default\n else:\n return value",
"def getAbsoluteAddress(program: ghidra.program.model.listing.Program, address: ghidra.program.model.address.Address) -> ghidra.program.model.address.Address:\n ...",
"def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))",
"def getInstructionAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def getSymbolAfter(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.symbol.Symbol:\n ...",
"def Eval(expression):\n # pylint: disable=eval-used\n return eval(expression)",
"def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression",
"def get(cls, address):\r\n def lookup():\r\n return cls._targets_by_address.get(address, None)\r\n\r\n target = lookup()\r\n if target:\r\n return target\r\n else:\r\n ParseContext(address.buildfile).parse()\r\n return lookup()",
"def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")",
"def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")",
"def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")",
"def getSymbolAt(self, address: ghidra.program.model.address.Address, name: unicode) -> ghidra.program.model.symbol.Symbol:\n ...",
"def getSymbolBefore(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.symbol.Symbol:\n ...",
"def getInstructionContaining(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def get_address(address, registers):\n \n try:\n address, offset = address.split('+')\n offset = int(offset)\n except ValueError:\n try:\n address, offset = address.split('-')\n offset = -int(offset)\n except ValueError:\n offset = 0\n\n if address.isdigit():\n return int(address)\n\n return int(registers[address]) + offset",
"def resolve(self, address):\n address_map = self._address_map_from_spec_path(address.spec_path)\n if address not in address_map:\n self._raise_incorrect_address_error(address.spec_path, address.target_name, address_map)\n else:\n return address_map[address]",
"def lookup_address(self, address, **extra):\n if '@' in address:\n domain = address.rsplit('@', 1)[1]\n return self.lookup(address=address, domain=domain, **extra)\n return self.lookup(address=address, **extra)",
"def ev(expr):\n return eval(expr,user_ns())",
"def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")",
"def getReferencedAddress(program: ghidra.program.model.listing.Program, address: ghidra.program.model.address.Address) -> ghidra.program.model.address.Address:\n ...",
"def getFunctionContaining(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Function:\n ...",
"def get_address(self, ):\n return self.get_parameter('address')",
"def get_address(address=None, vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"address/entry[@name='{}']\".format(vsys, address)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)",
"def getInstructionAfter(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ..."
] |
[
"0.6682179",
"0.6228155",
"0.62082",
"0.61590976",
"0.5866463",
"0.58355653",
"0.5672677",
"0.5643076",
"0.56316537",
"0.5614684",
"0.5610915",
"0.56038564",
"0.55660117",
"0.5565124",
"0.5557098",
"0.5557098",
"0.5557098",
"0.5524335",
"0.55141246",
"0.5494056",
"0.54875547",
"0.54761344",
"0.54658705",
"0.54627496",
"0.5458864",
"0.54267466",
"0.5402167",
"0.53869843",
"0.5331018",
"0.53219175"
] |
0.7468352
|
0
|
Get a string representing the 8-bit constant at binary_addr. This could return the name of a constant, an expression or, failing that, just a constant hex value. Used by CPU Opcodes to format output, e.g. for converting 'LDA 3' into 'LDA num_lives'.
|
def get_constant8(binary_addr):
if binary_addr in expressions:
return get_expression(binary_addr, memory_binary[binary_addr])
return mainformatter.constant8(binary_addr)
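
A hedged, runnable sketch of the fallback behaviour: if an expression was registered for the operand byte it is used, otherwise a plain hex constant is emitted. memory_binary, constant8 and the direct dict lookup are simplified stand-ins for the real module objects:

expressions = {0x1001: "num_lives"}
memory_binary = {0x1001: 0x03, 0x1003: 0x05}  # operand bytes of two immediate loads

def constant8(binary_addr):
    # Stand-in for mainformatter.constant8: plain hex fallback.
    return "${:02X}".format(memory_binary[binary_addr])

def get_constant8(binary_addr):
    if binary_addr in expressions:
        return expressions[binary_addr]
    return constant8(binary_addr)

print("LDA #" + get_constant8(0x1001))  # LDA #num_lives (expression registered)
print("LDA #" + get_constant8(0x1003))  # LDA #$05       (hex fallback)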
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_address8(binary_addr):\n\n operand = memory_binary[binary_addr]\n if binary_addr not in expressions:\n return disassembly.get_label(operand, binary_addr)\n return get_expression(binary_addr, operand)",
"def get_constant16(binary_addr):\n\n if binary_addr in expressions:\n return get_expression(binary_addr, memorymanager.get_u16_binary(binary_addr))\n return mainformatter.constant16(binary_addr)",
"def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])",
"def address(self) -> str:\n q1 = self.address_int >> 24\n q2 = (self.address_int & (255 << 16)) >> 16\n q3 = (self.address_int & (255 << 8)) >> 8\n q4 = self.address_int & 255\n\n return f\"{q1}.{q2}.{q3}.{q4}\"",
"def _get_address_binary(address):\n\n if is_valid_ipv4_address(address):\n return ''.join([_get_binary(int(octet), 8) for octet in address.split('.')])\n elif is_valid_ipv6_address(address):\n address = expand_ipv6_address(address)\n return ''.join([_get_binary(int(grouping, 16), 16) for grouping in address.split(':')])\n else:\n raise ValueError(\"'%s' is neither an IPv4 or IPv6 address\" % address)",
"def get_binary_op_str(bin_op_node):\n\n if isinstance(bin_op_node, ast.Add):\n return \"+\"\n\n elif isinstance(bin_op_node, ast.Sub):\n return \"-\"\n\n elif isinstance(bin_op_node, ast.Mult):\n return \"*\"\n\n elif isinstance(bin_op_node, ast.Div):\n return \"/\"\n\n elif isinstance(bin_op_node, ast.Mod):\n return \"%\"\n\n elif isinstance(bin_op_node, ast.Pow):\n return \"**\"\n\n elif isinstance(bin_op_node, ast.LShift):\n return \"<<\"\n\n elif isinstance(bin_op_node, ast.RShift):\n return \">>\"\n\n else:\n raise ValueError(\"No string defined for binary operator node %s\" % \\\n bin_op_node.__class__.__name__)",
"def stringn(runtime_addr):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n disassembly.add_classification(binary_addr, Byte(1))\n length = memory_binary[binary_addr]\n add_expression(binary_addr, utils.LazyString(\"%s - %s\", disassembly.get_label(runtime_addr + 1 + length, binary_addr), disassembly.get_label(runtime_addr + 1, binary_addr)))\n return string(runtime_addr + 1, length)",
"def _get_bit_string(value):\n\n return \"{0:b}\".format(value).zfill(8)",
"def get_string(binary):\r\n new_string = \"\"\r\n\r\n # Sets range as length of binary string and returns an int\r\n for x in range((len(binary) // 8)):\r\n # Grabs 8 characters at a time, converts back to an integer\r\n n = int(binary[(x * 8) : ((x * 8) + 8)], 2)\r\n # Special logic to handle null values\r\n if n == 0:\r\n new_string += \"\\\\x00\"\r\n # Otherwise, change those bits back to a character\r\n else:\r\n new_string += n.to_bytes((n.bit_length() + 7) // 8, \"big\").decode()\r\n\r\n return new_string",
"def bt_addr_to_str(bt_addr):\n return \":\".join([b.encode(\"hex\") for b in bt_addr])",
"def to_binary_string(x):\n return \"{0:b}\".format(x)",
"def get_expression(binary_addr, expected_value):\n\n expression = expressions[binary_addr]\n utils.check_expr(expression, expected_value)\n return expression",
"def literal_to_string(self, literal):\n s = '!' if is_negated(literal) else ''\n return s + self.variables[literal >> 1]",
"def staticAddress(self, arg):\n label = self.ns + '.' + arg\n return label",
"def __get_binary(self, program_type: str) -> str:\n\n program_names = {\n \"affine\": \"reg_aladin\",\n \"freeform\": \"reg_f3d\",\n \"segmentation\": \"reg_resample\",\n \"transform\": \"reg_transform\",\n }\n program_name = program_names[program_type]\n return str(get_binary(program_name))",
"def get_address16(binary_addr):\n\n operand = memorymanager.get_u16_binary(binary_addr)\n if binary_addr not in expressions:\n return disassembly.get_label(operand, binary_addr)\n\n assert isinstance(disassembly.get_classification(binary_addr), Word) or (isinstance(disassembly.get_classification(binary_addr - 1), trace.cpu.Opcode) and disassembly.get_classification(binary_addr - 1).length() == 3)\n return get_expression(binary_addr, operand)",
"def __str__(self):\n return f\"{self.opcode:02x}_{self.address_hex_str}_{reverse_bytes(self.data).hex()}\"",
"def expr_label(runtime_addr, s):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n # TODO: If this continues to just forward to label() perhaps make that behavuour\n # official and just provide both names for backwards compatibility/documenting the\n # difference for users who want to??\n return label(runtime_addr, s)",
"def hex_str (self):\n return \"#%02X%02X%02X\"%(self._intern[0],self._intern[1],self._intern[2])",
"def get_byte_string(self):\n return \"\".join(['%02X' % i for i in self._data]).decode('hex')",
"def string(runtime_addr, n=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n if n is None:\n assert not disassembly.is_classified(binary_addr)\n n = 0\n while not disassembly.is_classified(binary_addr + n) and utils.isprint(memory_binary[binary_addr + n]):\n n += 1\n if n > 0:\n disassembly.add_classification(binary_addr, String(n))\n return movemanager.b2r(binary_addr + n)",
"def get_mac_string():\n mac_int = getnode()\n mac_str = ':'.join((\"%012x\" % mac_int)[i:i + 2] for i in range(0, 12, 2))\n return mac_str",
"def stringhi(runtime_addr, include_terminator_fn=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert not disassembly.is_classified(binary_addr, 1)\n initial_addr = binary_addr\n while True:\n if disassembly.is_classified(binary_addr, 1):\n break\n if memory_binary[binary_addr] & 0x80 != 0:\n if include_terminator_fn is not None and include_terminator_fn(memory_binary[binary_addr]):\n c = memory_binary[binary_addr] & 0x7f\n if utils.isprint(c) and c != ord('\"') and c != ord('\\''):\n add_expression(binary_addr, \"%s+'%s'\" % (assembler().hex2(0x80), chr(c)))\n else:\n add_expression(binary_addr, \"%s+%s\" % (assembler().hex2(0x80), assembler().hex2(c)))\n binary_addr += 1\n break\n binary_addr += 1\n if binary_addr > initial_addr:\n disassembly.add_classification(initial_addr, String(binary_addr - initial_addr))\n return movemanager.b2r(binary_addr)",
"def address_to_bin(address):\n address_list = map(lambda x: int(x), address.split('.'))\n bin_list = map(_to_bin, address_list)\n bin = '.'.join(bin_list)\n return bin",
"def get_definitions(self):\n return \"const unsigned int %s = 0x%xu;\\n\" % (self.name, self.address)",
"def gen_string_literal(self, expr):\n data = expr.to_bytes()\n value = self.emit(ir.LiteralData(data, \"cstr\"))\n value = self.emit(ir.AddressOf(value, \"dptr\"))\n return value",
"def _getReg(address):\n return struct.unpack(\"<L\", mem[address:address+4])[0]",
"def decode_addr(self, addr):\n self._check_pid_wrap()\n # Find the binary that contains the specified address.\n # For .so files, look at the relative address; for the main\n # executable, look at the absolute address.\n for binary, (start, end) in self.code_ranges.items():\n if addr >= start and addr <= end:\n offset = addr - start \\\n if binary.endswith(\".so\") else addr\n return \"%s [%s]\" % (self._decode_sym(binary, offset),\n binary)\n return \"%x\" % addr",
"def gen_global_string_constant(self, expr):\n value_data = expr.to_bytes()\n amount = len(value_data)\n alignment = 1\n name = \"__txt_const_{}\".format(self.static_counter)\n self.static_counter += 1\n text_var = ir.Variable(\n name, ir.Binding.LOCAL, amount, alignment, value=value_data\n )\n self.builder.module.add_variable(text_var)\n return text_var",
"def gen_char_literal(self, expr):\n return self.emit_const(expr.value, expr.typ)"
] |
[
"0.67110384",
"0.63497406",
"0.6013744",
"0.59776306",
"0.5975686",
"0.5903683",
"0.57989484",
"0.5765043",
"0.5677991",
"0.559972",
"0.5513182",
"0.5490668",
"0.5480578",
"0.54773694",
"0.54433036",
"0.54310673",
"0.5414964",
"0.53971416",
"0.53705996",
"0.5370063",
"0.53668475",
"0.5337934",
"0.53373235",
"0.5315265",
"0.53060305",
"0.53010255",
"0.52964324",
"0.52868396",
"0.5282086",
"0.52405846"
] |
0.7693498
|
0
|
Get a string representing the 16 bit constant at binary_addr. This could return the name of a constant, an expression or, failing that, just a constant hex value. Used by CPU Opcodes to format output, e.g. for converting 'LXI BC,$1234' into 'LXI BC,my_special_constant'.
|
def get_constant16(binary_addr):
    if binary_addr in expressions:
        return get_expression(binary_addr, memorymanager.get_u16_binary(binary_addr))
    return mainformatter.constant16(binary_addr)
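For illustration only, a minimal self-contained sketch of the same lookup-then-fallback behaviour; the names below (expressions, memory, read_u16, constant16_str) are stand-ins invented for this example, not the tool's real API:

expressions = {0x1000: "my_special_constant"}      # hypothetical per-address override table
memory = {0x1000: 0x34, 0x1001: 0x12,              # little-endian $1234 at $1000
          0x2000: 0xEE, 0x2001: 0x7F}              # $7FEE at $2000

def read_u16(addr):
    # read a 16 bit little-endian value from the fake memory map
    return memory[addr] | (memory[addr + 1] << 8)

def constant16_str(addr):
    if addr in expressions:
        return expressions[addr]        # a registered name/expression wins
    return "$%04X" % read_u16(addr)     # otherwise fall back to a plain hex constant

print(constant16_str(0x1000))  # my_special_constant
print(constant16_str(0x2000))  # $7FEE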
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_address16(binary_addr):\n\n operand = memorymanager.get_u16_binary(binary_addr)\n if binary_addr not in expressions:\n return disassembly.get_label(operand, binary_addr)\n\n assert isinstance(disassembly.get_classification(binary_addr), Word) or (isinstance(disassembly.get_classification(binary_addr - 1), trace.cpu.Opcode) and disassembly.get_classification(binary_addr - 1).length() == 3)\n return get_expression(binary_addr, operand)",
"def get_constant8(binary_addr):\n\n if binary_addr in expressions:\n return get_expression(binary_addr, memory_binary[binary_addr])\n return mainformatter.constant8(binary_addr)",
"def bt_addr_to_str(bt_addr):\n return \":\".join([b.encode(\"hex\") for b in bt_addr])",
"def hex_str (self):\n return \"#%02X%02X%02X\"%(self._intern[0],self._intern[1],self._intern[2])",
"def address(self) -> str:\n q1 = self.address_int >> 24\n q2 = (self.address_int & (255 << 16)) >> 16\n q3 = (self.address_int & (255 << 8)) >> 8\n q4 = self.address_int & 255\n\n return f\"{q1}.{q2}.{q3}.{q4}\"",
"def _get_const_str(self):\n const_components = []\n for k, v in self.constargs.items():\n v_str = f\"'{v}'\" if type(v) is str else str(v)\n const_components.append(f\"{k}={v_str}\")\n return \",\".join(const_components)",
"def _sym_constant(self, table: Mapping[int, str]) -> str:\n try:\n return table[self.sym]\n except KeyError:\n return str(self.sym)",
"def gen_get_const_expr(cls, const_name, const_p):\n sh, mask = cls.pos[const_name]\n s = \"(({t}{sh}) & {mask})\".format(\n t = const_p, mask = mask,\n sh = \" >> \" + str(sh) if sh else \"\"\n )\n return s",
"def c_hex(x):\n #print(\"c_hex\", x, type(x))\n h = hex(x & ((1 << INT_BITS) - 1))\n while h[-1:] == \"L\": h = h[:-1] # for python 2\n return h + UINT_SUFFIX",
"def get_definitions(self):\n return \"const unsigned int %s = 0x%xu;\\n\" % (self.name, self.address)",
"def _get_bit_string(value):\n\n return \"{0:b}\".format(value).zfill(8)",
"def hex_str (self):\n return \"#%02X%02X%02X\"%(self.r, self.g, self.b)",
"def stringn(runtime_addr):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n disassembly.add_classification(binary_addr, Byte(1))\n length = memory_binary[binary_addr]\n add_expression(binary_addr, utils.LazyString(\"%s - %s\", disassembly.get_label(runtime_addr + 1 + length, binary_addr), disassembly.get_label(runtime_addr + 1, binary_addr)))\n return string(runtime_addr + 1, length)",
"def _hexbyte(b):\n return _hexchar(b[0]) * 16 + _hexchar(b[1])",
"def _dtype_to_str(dtype):\n if dtype == core.VarDesc.VarType.BF16:\n return 'bf16'\n else:\n return 'fp32'",
"def get_str ( self ):\n value = self.value\n def gen_words():\n if value == self.OV_NONE:\n yield \"none\"\n else:\n if value & self.OV_SYM_EXIST:\n if value & self.OV_SYM_DEAD:\n yield \"symlinks\"\n else:\n yield \"symlinks to existing files\"\n elif value & self.OV_SYM_DEAD:\n yield \"broken symlinks\"\n\n if value & self.OV_FILE:\n yield \"files\"\n # --- end of gen_words (...) ---\n\n return ', '.join ( gen_words() ) + \" (0x{:x})\".format ( value )",
"def int2hex(n: int) -> str:",
"def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])",
"def resolve(address):\n symbol = gdb.execute(\"info symbol 0x%08X\" % int(address.cast(size_t)), False, True).split(\" \",1)[0]\n if symbol == \"No\": # FIXME \"No symbol matches\"\n return \"0x%08X\" % int(address.cast(size_t))\n else:\n return \"%s\" % symbol",
"def func(self):\n return 'AAA{0[iface]}BBB{0[port]}'.format(self.opts)",
"def _scancode_constant(self, table: Mapping[int, str]) -> str:\n try:\n return table[self.scancode]\n except KeyError:\n return str(self.scancode)",
"def _get_address_binary(address):\n\n if is_valid_ipv4_address(address):\n return ''.join([_get_binary(int(octet), 8) for octet in address.split('.')])\n elif is_valid_ipv6_address(address):\n address = expand_ipv6_address(address)\n return ''.join([_get_binary(int(grouping, 16), 16) for grouping in address.split(':')])\n else:\n raise ValueError(\"'%s' is neither an IPv4 or IPv6 address\" % address)",
"def get_binary_op_str(bin_op_node):\n\n if isinstance(bin_op_node, ast.Add):\n return \"+\"\n\n elif isinstance(bin_op_node, ast.Sub):\n return \"-\"\n\n elif isinstance(bin_op_node, ast.Mult):\n return \"*\"\n\n elif isinstance(bin_op_node, ast.Div):\n return \"/\"\n\n elif isinstance(bin_op_node, ast.Mod):\n return \"%\"\n\n elif isinstance(bin_op_node, ast.Pow):\n return \"**\"\n\n elif isinstance(bin_op_node, ast.LShift):\n return \"<<\"\n\n elif isinstance(bin_op_node, ast.RShift):\n return \">>\"\n\n else:\n raise ValueError(\"No string defined for binary operator node %s\" % \\\n bin_op_node.__class__.__name__)",
"def get_calculable_constant_names_latex():\n return r\"t_0\", r\"S_{rr}\", r\"S_{r\\theta}\", r\"S_{rz}\", r\"S_{zz}\" \\\n r\"\\alpha\", r\"\\beta\", r\"\\gamma\", r\"C_{13}\", r\"C_{33}\", \\\n r\"\\hat{E}\", r\"g_1\"",
"def literal_to_string(self, literal):\n s = '!' if is_negated(literal) else ''\n return s + self.variables[literal >> 1]",
"def dereference_const_uint16(self, *args):\n return _ida_hexrays.fnumber_t_dereference_const_uint16(self, *args)",
"def gen_global_string_constant(self, expr):\n value_data = expr.to_bytes()\n amount = len(value_data)\n alignment = 1\n name = \"__txt_const_{}\".format(self.static_counter)\n self.static_counter += 1\n text_var = ir.Variable(\n name, ir.Binding.LOCAL, amount, alignment, value=value_data\n )\n self.builder.module.add_variable(text_var)\n return text_var",
"def const_col(dims: Iterable[int]) -> str:\n dims = sorted(dims)\n dims_str = [str(d) for d in dims]\n return f\"const_{'_'.join(dims_str)}\"",
"def gen_char_literal(self, expr):\n return self.emit_const(expr.value, expr.typ)",
"def Chimera_attribute_color_string(hex_map,value_array):\n attr_color_array=[]\n colorbar_array=[]\n max_index=len(value_array)-1\n for i in range(len(value_array)):\n attr_color_array.append(str(value_array[i]))\n attr_color_array.append(hex_map[i])\n if i == max_index or i == 0 or value_array[i] == 0:\n colorbar_array.append(str(value_array[max_index-i]))\n else:\n colorbar_array.append(\"-\")\n colorbar_array.append(hex_map[max_index-i])\n attr_color_string = \" \".join(iter(attr_color_array))\n colorbar_string = \" \".join(iter(colorbar_array))\n return attr_color_string,colorbar_string"
] |
[
"0.62325543",
"0.6061215",
"0.5533641",
"0.5410458",
"0.5308353",
"0.51860064",
"0.5170984",
"0.51191694",
"0.510812",
"0.51072353",
"0.49929827",
"0.4976014",
"0.49712458",
"0.49693966",
"0.49671468",
"0.49650222",
"0.4961762",
"0.49451163",
"0.4941017",
"0.49307668",
"0.49283564",
"0.49274954",
"0.49101436",
"0.48985088",
"0.48982495",
"0.4893589",
"0.48790097",
"0.48788646",
"0.48754677",
"0.48666507"
] |
0.7581733
|
0
|
Get a string representing the 8 bit address at binary_addr. This could return a label name, an expression or, failing that, just a constant hex address. Used by CPU Opcodes to format output, e.g. for converting 'LDA $12' into 'LDA num_lives'.
|
def get_address8(binary_addr):
    operand = memory_binary[binary_addr]
    if binary_addr not in expressions:
        return disassembly.get_label(operand, binary_addr)
    return get_expression(binary_addr, operand)
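As a rough standalone illustration (the labels dict and helper below are invented for this sketch, not the tool's real label table), the effect is a label lookup on the zero-page operand with a hex fallback:

labels = {0x12: "num_lives"}    # hypothetical runtime-address -> label map

def address8_str(operand):
    if operand in labels:
        return labels[operand]          # known label, e.g. 'LDA num_lives'
    return "$%02X" % operand            # unknown: keep the raw zero-page address

print("LDA " + address8_str(0x12))   # LDA num_lives
print("LDA " + address8_str(0x70))   # LDA $70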
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_address_binary(address):\n\n if is_valid_ipv4_address(address):\n return ''.join([_get_binary(int(octet), 8) for octet in address.split('.')])\n elif is_valid_ipv6_address(address):\n address = expand_ipv6_address(address)\n return ''.join([_get_binary(int(grouping, 16), 16) for grouping in address.split(':')])\n else:\n raise ValueError(\"'%s' is neither an IPv4 or IPv6 address\" % address)",
"def get_constant8(binary_addr):\n\n if binary_addr in expressions:\n return get_expression(binary_addr, memory_binary[binary_addr])\n return mainformatter.constant8(binary_addr)",
"def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])",
"def addr(label_name):\n\n if not utils.is_string_type(label_name):\n return None\n\n return labelmanager.addr(label_name)",
"def bt_addr_to_str(bt_addr):\n return \":\".join([b.encode(\"hex\") for b in bt_addr])",
"def decode_addr(self, addr):\n self._check_pid_wrap()\n # Find the binary that contains the specified address.\n # For .so files, look at the relative address; for the main\n # executable, look at the absolute address.\n for binary, (start, end) in self.code_ranges.items():\n if addr >= start and addr <= end:\n offset = addr - start \\\n if binary.endswith(\".so\") else addr\n return \"%s [%s]\" % (self._decode_sym(binary, offset),\n binary)\n return \"%x\" % addr",
"def address(self) -> str:\n q1 = self.address_int >> 24\n q2 = (self.address_int & (255 << 16)) >> 16\n q3 = (self.address_int & (255 << 8)) >> 8\n q4 = self.address_int & 255\n\n return f\"{q1}.{q2}.{q3}.{q4}\"",
"def get_address16(binary_addr):\n\n operand = memorymanager.get_u16_binary(binary_addr)\n if binary_addr not in expressions:\n return disassembly.get_label(operand, binary_addr)\n\n assert isinstance(disassembly.get_classification(binary_addr), Word) or (isinstance(disassembly.get_classification(binary_addr - 1), trace.cpu.Opcode) and disassembly.get_classification(binary_addr - 1).length() == 3)\n return get_expression(binary_addr, operand)",
"def sprint_addr(addr: bytes) -> str:\n\n if not len(addr) or not addr:\n return \"\"\n\n return str(ipaddress.ip_address(addr))",
"def render_address(self, addr, names={}):\n if addr in self.symbols:\n return self.symbols[addr].name\n if self.plt[addr]:\n return next(iter(self.plt[addr])).data\n if addr in self.sections:\n return self.sections[addr].name\n if addr in self.memory:\n return self.memory[addr]\n if addr in names:\n return names[addr]\n # return '0x%x' % addr\n return None",
"def address_to_bin(address):\n address_list = map(lambda x: int(x), address.split('.'))\n bin_list = map(_to_bin, address_list)\n bin = '.'.join(bin_list)\n return bin",
"def get_host_string(addr: AddressTupleVXType) -> str:\n if len(addr) >= 3:\n addr = cast(AddressTupleV6Type, addr)\n if addr[3]:\n return \"{}%{}\".format(addr[0], addr[3])\n return addr[0]",
"def get_str_address(address):\n return \\\n get_ob_value_primitive(address, 'AddrLine1', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine2', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine3', exception_return_value='') + ', ' + \\\n get_ob_value_primitive(address, 'City', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'County', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'StateProvince', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'ZipPostalCode', exception_return_value='')",
"def addr_to_bin(address):\n bin_address = ''\n if re.findall('\\d', address)[0].isdigit():\n octet = address.split('.')\n for oct in octet:\n bin_address+=to_bit(int(oct))\n return bin_address",
"def ip_to_str(address):\n return socket.inet_ntop(socket.AF_INET, address)",
"def format_address(addr: int, arch: Optional[cemu.arch.Architecture] = None) -> str:\n if arch is None:\n arch = cemu.core.context.architecture\n\n if arch.ptrsize == 2:\n return f\"{addr:#04x}\"\n elif arch.ptrsize == 4:\n return f\"{addr:#08x}\"\n elif arch.ptrsize == 8:\n return f\"{addr:#016x}\"\n else:\n raise ValueError(f\"Invalid value for '{arch.ptrsize=}'\")",
"def mac_addr(address):\n\tprint(':'.join('%02x' % compat_ord(b) for b in address))\n\treturn ':'.join('%s' % format(compat_ord(b), '0>8b') for b in address)",
"def get_string(binary):\r\n new_string = \"\"\r\n\r\n # Sets range as length of binary string and returns an int\r\n for x in range((len(binary) // 8)):\r\n # Grabs 8 characters at a time, converts back to an integer\r\n n = int(binary[(x * 8) : ((x * 8) + 8)], 2)\r\n # Special logic to handle null values\r\n if n == 0:\r\n new_string += \"\\\\x00\"\r\n # Otherwise, change those bits back to a character\r\n else:\r\n new_string += n.to_bytes((n.bit_length() + 7) // 8, \"big\").decode()\r\n\r\n return new_string",
"def addr_to_decimal(bin_address):\n if len(bin_address) == 32:\n if re.match('[0-1]+', bin_address):\n return (str(to_decimal(bin_address[0:8])) + '.'\n + str(to_decimal(bin_address[8:16])) + '.'\n + str(to_decimal(bin_address[16:24])) + '.'\n + str(to_decimal(bin_address[24:32])))\n return '-1'",
"def stringn(runtime_addr):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n disassembly.add_classification(binary_addr, Byte(1))\n length = memory_binary[binary_addr]\n add_expression(binary_addr, utils.LazyString(\"%s - %s\", disassembly.get_label(runtime_addr + 1 + length, binary_addr), disassembly.get_label(runtime_addr + 1, binary_addr)))\n return string(runtime_addr + 1, length)",
"def staticAddress(self, arg):\n label = self.ns + '.' + arg\n return label",
"def address(self):\n return str(self._address)",
"def get_binary_op_str(bin_op_node):\n\n if isinstance(bin_op_node, ast.Add):\n return \"+\"\n\n elif isinstance(bin_op_node, ast.Sub):\n return \"-\"\n\n elif isinstance(bin_op_node, ast.Mult):\n return \"*\"\n\n elif isinstance(bin_op_node, ast.Div):\n return \"/\"\n\n elif isinstance(bin_op_node, ast.Mod):\n return \"%\"\n\n elif isinstance(bin_op_node, ast.Pow):\n return \"**\"\n\n elif isinstance(bin_op_node, ast.LShift):\n return \"<<\"\n\n elif isinstance(bin_op_node, ast.RShift):\n return \">>\"\n\n else:\n raise ValueError(\"No string defined for binary operator node %s\" % \\\n bin_op_node.__class__.__name__)",
"def AioNodeToIpAddressString(node):\n ip = aio_node_to_ip_address.AioNodeToIpAddress(node)\n return '%d.%d.%d.%d' % (ip.a, ip.b, ip.c, ip.d)",
"def toAddr(self, addressString: unicode) -> ghidra.program.model.address.Address:\n ...",
"def address_str(self):\n return self._plrevgeoloc.addressString",
"def binary_to_ip(self, binary_num):\n ip_parts = []\n for i in range (0, 4):\n first = 8*i\n second = first + 8\n ip_parts.append(str(int(binary_num[first:second], 2)))\n\n final_ip = '.'.join(ip_parts)\n return final_ip",
"def address_str(self) -> str | None:\n pass",
"def label(self):\n return self.address.label",
"def get_address(self):\n return logic.address(self.get_program())"
] |
[
"0.6732723",
"0.6528365",
"0.6506534",
"0.6489246",
"0.6433877",
"0.6411833",
"0.63180417",
"0.62149584",
"0.618123",
"0.6051444",
"0.6042936",
"0.59640974",
"0.5957376",
"0.59408915",
"0.5895213",
"0.5891723",
"0.58413285",
"0.5822231",
"0.5796226",
"0.57706934",
"0.57389814",
"0.5732638",
"0.57316256",
"0.56947166",
"0.5692099",
"0.568628",
"0.56781566",
"0.5653658",
"0.56439745",
"0.56416637"
] |
0.7582911
|
0
|
Get a string representing the 16 bit address at binary_addr. This could return a label name, an expression or, failing that, just a constant hex address. Used by CPU Opcodes to format output, e.g. for converting 'JSR $1234' into 'JSR my_label'.
|
def get_address16(binary_addr):
    operand = memorymanager.get_u16_binary(binary_addr)
    if binary_addr not in expressions:
        return disassembly.get_label(operand, binary_addr)
    assert isinstance(disassembly.get_classification(binary_addr), Word) or (isinstance(disassembly.get_classification(binary_addr - 1), trace.cpu.Opcode) and disassembly.get_classification(binary_addr - 1).length() == 3)
    return get_expression(binary_addr, operand)
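A standalone sketch of the precedence (all names here are made up for illustration): an expression attached to the operand's binary address overrides the label lookup on the operand value:

labels = {0x1234: "my_label"}                 # hypothetical operand-value -> label map
expressions = {0x0801: "start_of_code + 1"}   # hypothetical per-address overrides

def address16_str(binary_addr, operand):
    if binary_addr in expressions:
        return expressions[binary_addr]               # explicit expression wins
    return labels.get(operand, "$%04X" % operand)     # else label, else plain hex

print("JSR " + address16_str(0x0900, 0x1234))   # JSR my_label
print("JSR " + address16_str(0x0801, 0x2000))   # JSR start_of_code + 1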
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_constant16(binary_addr):\n\n if binary_addr in expressions:\n return get_expression(binary_addr, memorymanager.get_u16_binary(binary_addr))\n return mainformatter.constant16(binary_addr)",
"def bt_addr_to_str(bt_addr):\n return \":\".join([b.encode(\"hex\") for b in bt_addr])",
"def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])",
"def address(self) -> str:\n q1 = self.address_int >> 24\n q2 = (self.address_int & (255 << 16)) >> 16\n q3 = (self.address_int & (255 << 8)) >> 8\n q4 = self.address_int & 255\n\n return f\"{q1}.{q2}.{q3}.{q4}\"",
"def addr(label_name):\n\n if not utils.is_string_type(label_name):\n return None\n\n return labelmanager.addr(label_name)",
"def format_address(addr: int, arch: Optional[cemu.arch.Architecture] = None) -> str:\n if arch is None:\n arch = cemu.core.context.architecture\n\n if arch.ptrsize == 2:\n return f\"{addr:#04x}\"\n elif arch.ptrsize == 4:\n return f\"{addr:#08x}\"\n elif arch.ptrsize == 8:\n return f\"{addr:#016x}\"\n else:\n raise ValueError(f\"Invalid value for '{arch.ptrsize=}'\")",
"def decode_addr(self, addr):\n self._check_pid_wrap()\n # Find the binary that contains the specified address.\n # For .so files, look at the relative address; for the main\n # executable, look at the absolute address.\n for binary, (start, end) in self.code_ranges.items():\n if addr >= start and addr <= end:\n offset = addr - start \\\n if binary.endswith(\".so\") else addr\n return \"%s [%s]\" % (self._decode_sym(binary, offset),\n binary)\n return \"%x\" % addr",
"def get_address8(binary_addr):\n\n operand = memory_binary[binary_addr]\n if binary_addr not in expressions:\n return disassembly.get_label(operand, binary_addr)\n return get_expression(binary_addr, operand)",
"def _get_address_binary(address):\n\n if is_valid_ipv4_address(address):\n return ''.join([_get_binary(int(octet), 8) for octet in address.split('.')])\n elif is_valid_ipv6_address(address):\n address = expand_ipv6_address(address)\n return ''.join([_get_binary(int(grouping, 16), 16) for grouping in address.split(':')])\n else:\n raise ValueError(\"'%s' is neither an IPv4 or IPv6 address\" % address)",
"def render_address(self, addr, names={}):\n if addr in self.symbols:\n return self.symbols[addr].name\n if self.plt[addr]:\n return next(iter(self.plt[addr])).data\n if addr in self.sections:\n return self.sections[addr].name\n if addr in self.memory:\n return self.memory[addr]\n if addr in names:\n return names[addr]\n # return '0x%x' % addr\n return None",
"def get_host_string(addr: AddressTupleVXType) -> str:\n if len(addr) >= 3:\n addr = cast(AddressTupleV6Type, addr)\n if addr[3]:\n return \"{}%{}\".format(addr[0], addr[3])\n return addr[0]",
"def addr_to_decimal(bin_address):\n if len(bin_address) == 32:\n if re.match('[0-1]+', bin_address):\n return (str(to_decimal(bin_address[0:8])) + '.'\n + str(to_decimal(bin_address[8:16])) + '.'\n + str(to_decimal(bin_address[16:24])) + '.'\n + str(to_decimal(bin_address[24:32])))\n return '-1'",
"def mac_addr(address):\n\tprint(':'.join('%02x' % compat_ord(b) for b in address))\n\treturn ':'.join('%s' % format(compat_ord(b), '0>8b') for b in address)",
"def sprint_addr(addr: bytes) -> str:\n\n if not len(addr) or not addr:\n return \"\"\n\n return str(ipaddress.ip_address(addr))",
"def _mac_addr(address):\n return ':'.join('%02x' % ord(b) for b in address)",
"def format_address(value):\n if type(value) in (tuple, list):\n return ', '.join([format_address(v) for v in value])\n name, addr = parseaddr(value)\n return formataddr((encode_header(name), addr.encode('ascii')))",
"def get_str_address(address):\n return \\\n get_ob_value_primitive(address, 'AddrLine1', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine2', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine3', exception_return_value='') + ', ' + \\\n get_ob_value_primitive(address, 'City', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'County', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'StateProvince', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'ZipPostalCode', exception_return_value='')",
"def resolve(address):\n symbol = gdb.execute(\"info symbol 0x%08X\" % int(address.cast(size_t)), False, True).split(\" \",1)[0]\n if symbol == \"No\": # FIXME \"No symbol matches\"\n return \"0x%08X\" % int(address.cast(size_t))\n else:\n return \"%s\" % symbol",
"def macaddr(index):\n hname = name.encode(\"utf-8\") if not isinstance(name, bytes) else name\n mac_ext = hashlib.md5(hname).hexdigest() # pylint: disable=E1101\n return \"52:54:00:{0}:{1}:{2:02x}\".format(mac_ext[0:2], mac_ext[2:4], int(mac_ext[4:6], 16) ^ index)",
"def stringn(runtime_addr):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n disassembly.add_classification(binary_addr, Byte(1))\n length = memory_binary[binary_addr]\n add_expression(binary_addr, utils.LazyString(\"%s - %s\", disassembly.get_label(runtime_addr + 1 + length, binary_addr), disassembly.get_label(runtime_addr + 1, binary_addr)))\n return string(runtime_addr + 1, length)",
"def address_to_bin(address):\n address_list = map(lambda x: int(x), address.split('.'))\n bin_list = map(_to_bin, address_list)\n bin = '.'.join(bin_list)\n return bin",
"def staticAddress(self, arg):\n label = self.ns + '.' + arg\n return label",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def toAddr(self, addressString: unicode) -> ghidra.program.model.address.Address:\n ...",
"def ip_to_str(address):\n return socket.inet_ntop(socket.AF_INET, address)",
"def get_constant8(binary_addr):\n\n if binary_addr in expressions:\n return get_expression(binary_addr, memory_binary[binary_addr])\n return mainformatter.constant8(binary_addr)",
"def get_address(address, registers):\n \n try:\n address, offset = address.split('+')\n offset = int(offset)\n except ValueError:\n try:\n address, offset = address.split('-')\n offset = -int(offset)\n except ValueError:\n offset = 0\n\n if address.isdigit():\n return int(address)\n\n return int(registers[address]) + offset"
] |
[
"0.67523515",
"0.65508014",
"0.611563",
"0.61117715",
"0.6098377",
"0.60977286",
"0.6076913",
"0.60072947",
"0.5978159",
"0.58664674",
"0.58170366",
"0.57972336",
"0.57046545",
"0.56946206",
"0.56206506",
"0.5608305",
"0.5533078",
"0.55327",
"0.5509495",
"0.5507779",
"0.55056995",
"0.55003345",
"0.5488601",
"0.5488601",
"0.5488601",
"0.5488601",
"0.54638225",
"0.5451986",
"0.5450308",
"0.53863597"
] |
0.7478014
|
0
|
Classifies part of the binary as a string followed by a given terminator byte. Returns the next available memory address after the string.
|
def stringterm(runtime_addr, terminator, exclude_terminator=False):
    runtime_addr = memorymanager.RuntimeAddr(runtime_addr)
    binary_addr, _ = movemanager.r2b_checked(runtime_addr)
    initial_addr = binary_addr
    while memory_binary[binary_addr] != terminator:
        binary_addr += 1
    string_length = (binary_addr + 1) - initial_addr
    if exclude_terminator:
        string_length -= 1
    if string_length > 0:
        disassembly.add_classification(initial_addr, String(string_length))
    return movemanager.b2r(binary_addr + 1)
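A self-contained sketch of the scan (the buffer and helper below are invented for illustration): walk forward to the terminator byte, classify that many bytes as a string, and hand back the address just past the terminator:

data = bytes([0x48, 0x45, 0x4C, 0x4C, 0x4F, 0x0D, 0xEA])   # "HELLO", CR, filler byte

def classify_stringterm(buf, start, terminator, exclude_terminator=False):
    end = start
    while buf[end] != terminator:
        end += 1
    length = (end + 1) - start       # bytes to classify as String
    if exclude_terminator:
        length -= 1                  # leave the terminator byte unclassified
    return length, end + 1           # next free address is always past the terminator

print(classify_stringterm(data, 0, 0x0D))         # (6, 6): CR included in the string
print(classify_stringterm(data, 0, 0x0D, True))   # (5, 6): CR excluded, but still skipped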
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def stringhiz(runtime_addr, include_terminator_fn=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert not disassembly.is_classified(binary_addr, 1)\n initial_addr = binary_addr\n while True:\n if disassembly.is_classified(binary_addr, 1):\n break\n if memory_binary[binary_addr] == 0 or (memory_binary[binary_addr] & 0x80) != 0:\n if include_terminator_fn is not None and include_terminator_fn(memory_binary[binary_addr]):\n binary_addr += 1\n break\n binary_addr += 1\n if binary_addr > initial_addr:\n disassembly.add_classification(initial_addr, String(binary_addr - initial_addr))\n return movemanager.b2r(binary_addr)",
"def findBytes(self, start: ghidra.program.model.address.Address, byteString: unicode) -> ghidra.program.model.address.Address:\n ...",
"def stringhi(runtime_addr, include_terminator_fn=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert not disassembly.is_classified(binary_addr, 1)\n initial_addr = binary_addr\n while True:\n if disassembly.is_classified(binary_addr, 1):\n break\n if memory_binary[binary_addr] & 0x80 != 0:\n if include_terminator_fn is not None and include_terminator_fn(memory_binary[binary_addr]):\n c = memory_binary[binary_addr] & 0x7f\n if utils.isprint(c) and c != ord('\"') and c != ord('\\''):\n add_expression(binary_addr, \"%s+'%s'\" % (assembler().hex2(0x80), chr(c)))\n else:\n add_expression(binary_addr, \"%s+%s\" % (assembler().hex2(0x80), assembler().hex2(c)))\n binary_addr += 1\n break\n binary_addr += 1\n if binary_addr > initial_addr:\n disassembly.add_classification(initial_addr, String(binary_addr - initial_addr))\n return movemanager.b2r(binary_addr)",
"def autostring(min_length=3):\n\n assert min_length >= 2\n addr = memorymanager.BinaryAddr(0)\n while addr < len(memory_binary):\n i = 0\n while (addr + i) < len(memory_binary) and memory_binary[addr + i] is not None and not disassembly.is_classified(addr + i, 1) and utils.isprint(memory_binary[addr + i]):\n i += 1\n if movemanager.b2r(addr + i) in labelmanager.labels:\n break\n if i >= min_length:\n # TODO: I suspect the next two line fragment should be wrapped up if I keep it, probably repeated a bit (probably something like \"with movemanager.b2r(binary_addr) as runtime_addr:...\", although I probably can't reuse the b2r function, but maybe think about it)\n runtime_addr = movemanager.b2r(addr)\n with movemanager.move_id_for_binary_addr[addr]:\n string(runtime_addr, i)\n addr += max(1, i)",
"def __read_to(self, terminator: bytes) -> bytes:\r\n try:\r\n # noinspection PyTypeChecker\r\n i = self.data.index(terminator, self.idx)\r\n b = self.data[self.idx:i]\r\n self.idx = i + 1\r\n return b\r\n except ValueError:\r\n raise DecodingError(\r\n 'Unable to locate terminator character \"{0}\" after index {1}.'.format(str(terminator), str(self.idx)))",
"def stringcr(runtime_addr, exclude_terminator=False):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n return stringterm(runtime_addr, 13, exclude_terminator)",
"def get_string_binary(string):\r\n string_binary_array = []\r\n\r\n # Create array of binaries from the string\r\n for character in string:\r\n string_binary_array.append(get_binary(character))\r\n\r\n # Combine those binaries into one long binary\r\n string_binary = \"\".join(string_binary_array)\r\n\r\n return string_binary",
"def stringn(runtime_addr):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n disassembly.add_classification(binary_addr, Byte(1))\n length = memory_binary[binary_addr]\n add_expression(binary_addr, utils.LazyString(\"%s - %s\", disassembly.get_label(runtime_addr + 1 + length, binary_addr), disassembly.get_label(runtime_addr + 1, binary_addr)))\n return string(runtime_addr + 1, length)",
"def _binary_string_to_str(binary_string: str, end=None) -> str:\n string = \"\"\n\n binary_list = re.findall(\".\" * 8, binary_string)\n for byte in binary_list:\n string += chr(int(byte, 2))\n if end and string.endswith(end):\n return string[: -len(end)]\n\n return string",
"def get_string(binary):\r\n new_string = \"\"\r\n\r\n # Sets range as length of binary string and returns an int\r\n for x in range((len(binary) // 8)):\r\n # Grabs 8 characters at a time, converts back to an integer\r\n n = int(binary[(x * 8) : ((x * 8) + 8)], 2)\r\n # Special logic to handle null values\r\n if n == 0:\r\n new_string += \"\\\\x00\"\r\n # Otherwise, change those bits back to a character\r\n else:\r\n new_string += n.to_bytes((n.bit_length() + 7) // 8, \"big\").decode()\r\n\r\n return new_string",
"def stringz(runtime_addr, exclude_terminator=False):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n return stringterm(runtime_addr, 0, exclude_terminator)",
"def decode(self, byteString):\n decoded = ''\n portion_left = byteString\n while len(portion_left) > 0:\n substr_len = 1\n symbol = None\n while (symbol == None) and (substr_len <= len(portion_left)):\n symbol = self.decode_symbol(portion_left[:substr_len])\n substr_len += 1\n\n if symbol == None:\n print \"decode failed:\"\n print \"decoded: \" + decoded\n print \"left: \" + portion_left\n return None\n\n decoded += symbol\n #print \"decoded: _\" + symbol + \"_\"\n portion_left = portion_left[substr_len-1:]\n\n return decoded",
"def extract_literal_string(memory,address,ztext):\n zchar_start_address = address\n text, next_address = ztext.to_ascii(memory,zchar_start_address,0) \n return next_address,text",
"def beautifulBinaryString(binary_string) -> int:\n sub_str = \"010\"\n count = 0\n start_index = 0\n\n while start_index <= len(binary_string):\n end_index = start_index + 3\n slice = binary_string[start_index:end_index]\n\n if sub_str == slice:\n count += 1\n start_index = end_index\n else:\n start_index += 1\n\n return count",
"def found_terminator(self):\n self.l.debug('found_terminator()')\n self.process_data()",
"def _decode_bytes(data: BencodedString) -> bytes:\n # Get byte string length\n delimiter_index = data.bytes.find(COLON)\n\n if delimiter_index > 0:\n length_prefix = data.get_prefix(delimiter_index)\n string_length = int(length_prefix.decode(\"ascii\"))\n data.del_prefix(delimiter_index + 1)\n else:\n raise ValueError(\n \"Cannot decode a byte string, it doesn't contain a delimiter. \"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n # Get byte string data\n if len(data.bytes) >= string_length:\n result_bytes = data.get_prefix(string_length)\n data.del_prefix(string_length)\n else:\n raise ValueError(\n f\"Cannot decode a byte string (prefix length \"\n f\"- {string_length}, real_length - {len(data.bytes)}. \"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n return result_bytes",
"def _decode_octet_string(bytes_data): # type: (bytes) -> bytes\n return bytes_data",
"def read_label(self):\r\n # label = str(self.parse_binary())#!!BAD\r\n label = ''\r\n while True:\r\n c = self.eat_char()\r\n if c=='n':\r\n #terminal char\r\n break\r\n else:\r\n label += c\r\n\r\n self.log += \"'\" + label + \"'\"\r\n return label",
"def string(runtime_addr, n=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n if n is None:\n assert not disassembly.is_classified(binary_addr)\n n = 0\n while not disassembly.is_classified(binary_addr + n) and utils.isprint(memory_binary[binary_addr + n]):\n n += 1\n if n > 0:\n disassembly.add_classification(binary_addr, String(n))\n return movemanager.b2r(binary_addr + n)",
"def bytes_packet(_bytes, termination_string=']'):\n\n return '{}{}'.format(len(_bytes), termination_string)",
"def unpack_string(self, offset, length):\n return struct.unpack_from(str(\"<%ds\") % (length), self._buf, self._offset + offset)[0]",
"def prepareMealFromString(string=\"\"):\n binstring = \"\"\n for char in string:\n binstring += bin(ord(char))\n \n binstring = binstring.replace(\"b\",\"10\")\n \n stringSuffix = \"10\"*128 # filler string of length 256.\n # Adds enough filler string to be multiple of 256:\n binstring += stringSuffix[:((len(stringSuffix)-len(binstring))%len(stringSuffix))]\n \n return binstring",
"def decodeName(self, last=-1):\n label = []\n done = False\n while not done:\n (length,) = self.unpack(\"!B\")\n if getBits(length, 6, 2) == 3:\n # Pointer\n self.offset -= 1\n pointer = getBits(self.unpack(\"!H\")[0], 0, 14)\n save = self.offset\n if last == save:\n raise BufferError(\n \"Recursive pointer [offset=%d,pointer=%d,length=%d]\" %\n (self.offset, pointer, len(self.data))\n )\n if pointer < self.offset:\n self.offset = pointer\n else:\n # Pointer can't point forwards\n raise BufferError(\n \"Invalid pointer [offset=%d,pointer=%d,length=%d]\" %\n (self.offset, pointer, len(self.data))\n )\n label.extend(self.decodeName(save).label)\n self.offset = save\n done = True\n else:\n if length > 0:\n l = self.get(length)\n try:\n l.decode()\n except UnicodeDecodeError:\n raise BufferError(\"Invalid label <%s>\" % l)\n label.append(l)\n else:\n done = True\n return \".\".join(str(label))",
"def get_firmware_id_string(self):\n unpacked_string = False\n # Find the address of the string in memory\n id_string_addr = self._get_slt_entry(3)\n if id_string_addr is None:\n # maybe it's not packed in this slt\n id_string_addr = self._get_slt_entry(2)\n if id_string_addr is None:\n # Can't find it in the slt return None\n return None\n unpacked_string = True\n # parse the null terminated string\n last = build_string = \"\"\n # There is no reason for the build string to contain\n # any non ASCII character but do it like this to avoid\n # breaking support for Python 2.7\n try:\n char = unichr\n except NameError:\n char = chr\n while last != \"\\0\":\n word = self.get_data(id_string_addr)\n if unpacked_string:\n if Arch.addr_per_word == 4:\n # Two char per word\n build_string += char(word & 0x00FFFF)\n if build_string[-1] == \"\\0\":\n break\n last = char((word & 0xFFFF0000) >> 16)\n build_string += last\n else:\n # Only one char per word\n last = char(word)\n build_string += last\n else:\n # Four chars per word\n if Arch.addr_per_word == 4:\n string = cu.get_string_from_word(Arch.addr_per_word, word)\n stop_decoding = False\n for char in string:\n if char != '\\0':\n build_string += char\n else:\n stop_decoding = True\n break\n last = string[3:]\n\n if stop_decoding:\n break\n else:\n # Two chars per word\n build_string += char((word & 0xFF00) >> 8)\n if build_string[-1] == \"\\0\":\n break\n last = char(word & 0x00FF)\n build_string += last\n # Move to the next word in the string\n id_string_addr += Arch.addr_per_word\n\n # remove the \\0 we don't want the terminator\n if build_string[-1] == \"\\0\":\n build_string = build_string[:-1]\n\n return build_string.strip()",
"def bstring(string):\n return BTEXT + string + NTEXT",
"def get_binary(string):\r\n # Use special logic for NULL_STRING to avoid errors\r\n if string == NULL_STRING:\r\n return \"00000000\"\r\n # Otherwise, gives the binary representation of UTF-8 characters\r\n return \"\".join(\"{:08b}\".format(d) for d in bytearray(string, \"utf-8\"))",
"def _read_string(bs):\n result = bs.readto('0x00', bytealigned=True).bytes.decode(\"utf-8\")[:-1]\n return result if result else None",
"def findBytes(self, start: ghidra.program.model.address.Address, byteString: unicode, matchLimit: int, alignment: int) -> List[ghidra.program.model.address.Address]:\n ...",
"def _increment_last_byte(byte_string):\n s = bytearray(byte_string)\n s[-1] = s[-1] + 1\n return bytes(s)",
"def findBytes(self, start: ghidra.program.model.address.Address, byteString: unicode, matchLimit: int) -> List[ghidra.program.model.address.Address]:\n ..."
] |
[
"0.5908737",
"0.5769606",
"0.5666709",
"0.566427",
"0.5659553",
"0.5444973",
"0.53724027",
"0.5353021",
"0.5340873",
"0.53317374",
"0.5320064",
"0.53055394",
"0.5225632",
"0.5193464",
"0.5171954",
"0.5166538",
"0.5161003",
"0.5111299",
"0.5103323",
"0.50885046",
"0.503426",
"0.5020252",
"0.50195",
"0.5001564",
"0.49931893",
"0.49556586",
"0.49145168",
"0.4912248",
"0.49112478",
"0.49019936"
] |
0.693786
|
0
|
Classifies part of the binary as a string followed by ASCII 13 (carriage return). Returns the next available memory address after the string.
|
def stringcr(runtime_addr, exclude_terminator=False):
    runtime_addr = memorymanager.RuntimeAddr(runtime_addr)
    return stringterm(runtime_addr, 13, exclude_terminator)
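A tiny illustrative check (made-up data) that ASCII 13 is the carriage return this wrapper scans for:

data = b"PRESS SPACE\r"
cr_index = data.index(13)                              # position of the CR terminator
print(data[:cr_index].decode("ascii"), cr_index + 1)   # the text and the next offset past it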
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def autostring(min_length=3):\n\n assert min_length >= 2\n addr = memorymanager.BinaryAddr(0)\n while addr < len(memory_binary):\n i = 0\n while (addr + i) < len(memory_binary) and memory_binary[addr + i] is not None and not disassembly.is_classified(addr + i, 1) and utils.isprint(memory_binary[addr + i]):\n i += 1\n if movemanager.b2r(addr + i) in labelmanager.labels:\n break\n if i >= min_length:\n # TODO: I suspect the next two line fragment should be wrapped up if I keep it, probably repeated a bit (probably something like \"with movemanager.b2r(binary_addr) as runtime_addr:...\", although I probably can't reuse the b2r function, but maybe think about it)\n runtime_addr = movemanager.b2r(addr)\n with movemanager.move_id_for_binary_addr[addr]:\n string(runtime_addr, i)\n addr += max(1, i)",
"def string(runtime_addr, n=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n if n is None:\n assert not disassembly.is_classified(binary_addr)\n n = 0\n while not disassembly.is_classified(binary_addr + n) and utils.isprint(memory_binary[binary_addr + n]):\n n += 1\n if n > 0:\n disassembly.add_classification(binary_addr, String(n))\n return movemanager.b2r(binary_addr + n)",
"def extract_literal_string(memory,address,ztext):\n zchar_start_address = address\n text, next_address = ztext.to_ascii(memory,zchar_start_address,0) \n return next_address,text",
"def stringhi(runtime_addr, include_terminator_fn=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert not disassembly.is_classified(binary_addr, 1)\n initial_addr = binary_addr\n while True:\n if disassembly.is_classified(binary_addr, 1):\n break\n if memory_binary[binary_addr] & 0x80 != 0:\n if include_terminator_fn is not None and include_terminator_fn(memory_binary[binary_addr]):\n c = memory_binary[binary_addr] & 0x7f\n if utils.isprint(c) and c != ord('\"') and c != ord('\\''):\n add_expression(binary_addr, \"%s+'%s'\" % (assembler().hex2(0x80), chr(c)))\n else:\n add_expression(binary_addr, \"%s+%s\" % (assembler().hex2(0x80), assembler().hex2(c)))\n binary_addr += 1\n break\n binary_addr += 1\n if binary_addr > initial_addr:\n disassembly.add_classification(initial_addr, String(binary_addr - initial_addr))\n return movemanager.b2r(binary_addr)",
"def stringhiz(runtime_addr, include_terminator_fn=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert not disassembly.is_classified(binary_addr, 1)\n initial_addr = binary_addr\n while True:\n if disassembly.is_classified(binary_addr, 1):\n break\n if memory_binary[binary_addr] == 0 or (memory_binary[binary_addr] & 0x80) != 0:\n if include_terminator_fn is not None and include_terminator_fn(memory_binary[binary_addr]):\n binary_addr += 1\n break\n binary_addr += 1\n if binary_addr > initial_addr:\n disassembly.add_classification(initial_addr, String(binary_addr - initial_addr))\n return movemanager.b2r(binary_addr)",
"def stringterm(runtime_addr, terminator, exclude_terminator=False):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n initial_addr = binary_addr\n while memory_binary[binary_addr] != terminator:\n binary_addr += 1\n string_length = (binary_addr + 1) - initial_addr\n if exclude_terminator:\n string_length -= 1\n if string_length > 0:\n disassembly.add_classification(initial_addr, String(string_length))\n return movemanager.b2r(binary_addr + 1)",
"def getApplicationProcessId(binaryString, startPos=0):\n if (len(binaryString) - startPos) < PRIMARY_HEADER_BYTE_SIZE:\n raise Error(\"packet header is too small\")\n return (((binaryString[startPos + 0] * 256) + binaryString[startPos + 1]) & 0x07FF)",
"def get_string(binary):\r\n new_string = \"\"\r\n\r\n # Sets range as length of binary string and returns an int\r\n for x in range((len(binary) // 8)):\r\n # Grabs 8 characters at a time, converts back to an integer\r\n n = int(binary[(x * 8) : ((x * 8) + 8)], 2)\r\n # Special logic to handle null values\r\n if n == 0:\r\n new_string += \"\\\\x00\"\r\n # Otherwise, change those bits back to a character\r\n else:\r\n new_string += n.to_bytes((n.bit_length() + 7) // 8, \"big\").decode()\r\n\r\n return new_string",
"def getVersionNumber(binaryString, startPos=0):\n if (len(binaryString) - startPos) < PRIMARY_HEADER_BYTE_SIZE:\n raise Error(\"packet header is too small\")\n return ((binaryString[startPos + 0] & 0xE0) >> 5)",
"def stringn(runtime_addr):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n disassembly.add_classification(binary_addr, Byte(1))\n length = memory_binary[binary_addr]\n add_expression(binary_addr, utils.LazyString(\"%s - %s\", disassembly.get_label(runtime_addr + 1 + length, binary_addr), disassembly.get_label(runtime_addr + 1, binary_addr)))\n return string(runtime_addr + 1, length)",
"def findBytes(self, start: ghidra.program.model.address.Address, byteString: unicode) -> ghidra.program.model.address.Address:\n ...",
"def _magic_r(self, s):\n if idapy._d is None:\n print \"Please select a dump first. Example:\"\n print \"sel t2i\"\n return\n a = addr_from_magic_string(s, rounded_32bit = False)\n idapy._d.refs(a)",
"def get_binary_start_address(target_binary):\n obj_dump = subprocess.Popen([\"objdump\", \"-f\", target_binary],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n results = obj_dump.stdout.read().decode()\n start_address = results.strip()[-10:]\n return start_address",
"def readMemory(memory): # static method\n\t\t# Read value from shared memory\n\t\tmemoryValue = memory.read()\n\t\t# Find the 'end' of the string and strip\n\t\ti = memoryValue.find(ord('\\0'))\n\t\tif i != -1:\n\t\t\tmemoryValue = memoryValue[:i]\n\t\telse:\n\t\t\terrorMessage = \"i: \" + str(i) + \" should be -1 to have read \\0 in memory location\"\n\t\t\traise ValueError(errorMessage)\n\t\treturn str(memoryValue.decode('ascii'))",
"def get_firmware_id_string(self):\n unpacked_string = False\n # Find the address of the string in memory\n id_string_addr = self._get_slt_entry(3)\n if id_string_addr is None:\n # maybe it's not packed in this slt\n id_string_addr = self._get_slt_entry(2)\n if id_string_addr is None:\n # Can't find it in the slt return None\n return None\n unpacked_string = True\n # parse the null terminated string\n last = build_string = \"\"\n # There is no reason for the build string to contain\n # any non ASCII character but do it like this to avoid\n # breaking support for Python 2.7\n try:\n char = unichr\n except NameError:\n char = chr\n while last != \"\\0\":\n word = self.get_data(id_string_addr)\n if unpacked_string:\n if Arch.addr_per_word == 4:\n # Two char per word\n build_string += char(word & 0x00FFFF)\n if build_string[-1] == \"\\0\":\n break\n last = char((word & 0xFFFF0000) >> 16)\n build_string += last\n else:\n # Only one char per word\n last = char(word)\n build_string += last\n else:\n # Four chars per word\n if Arch.addr_per_word == 4:\n string = cu.get_string_from_word(Arch.addr_per_word, word)\n stop_decoding = False\n for char in string:\n if char != '\\0':\n build_string += char\n else:\n stop_decoding = True\n break\n last = string[3:]\n\n if stop_decoding:\n break\n else:\n # Two chars per word\n build_string += char((word & 0xFF00) >> 8)\n if build_string[-1] == \"\\0\":\n break\n last = char(word & 0x00FF)\n build_string += last\n # Move to the next word in the string\n id_string_addr += Arch.addr_per_word\n\n # remove the \\0 we don't want the terminator\n if build_string[-1] == \"\\0\":\n build_string = build_string[:-1]\n\n return build_string.strip()",
"def createAsciiString(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...",
"def decode_addr(self, addr):\n self._check_pid_wrap()\n # Find the binary that contains the specified address.\n # For .so files, look at the relative address; for the main\n # executable, look at the absolute address.\n for binary, (start, end) in self.code_ranges.items():\n if addr >= start and addr <= end:\n offset = addr - start \\\n if binary.endswith(\".so\") else addr\n return \"%s [%s]\" % (self._decode_sym(binary, offset),\n binary)\n return \"%x\" % addr",
"def createAsciiString(self, address: ghidra.program.model.address.Address, length: int) -> ghidra.program.model.listing.Data:\n ...",
"def _magic_g(self, s):\n s = s.strip()\n if idapy._d is None:\n print \"Please select a dump first. Example:\"\n print \"sel t2i\"\n return\n a = addr_from_magic_string(s)\n show_disasm(idapy._d, a, a+80)",
"def get_binary_string(self) -> str:\n\n # Find the structure of the first section\n # This is determined by the first digit\n if self.HAS_STRUCTURE:\n # We find the structure of the first section using the first digit\n structure = EANCoding.STRUCTURE[self.code[0]]\n\n # The first digit is removed\n code = self.code[1:]\n else:\n # If there is no structure then all digits should be in `L` coding\n structure = \"L\" * (self.FIRST_SECTION[1])\n\n # In EAN8 barcodes the first digit is accounted for\n code = self.code\n\n # Convert the barcode to a binary string with the CodeNumbers class\n # Add the left guard\n binary_string = EANCoding.LEFT_GUARD\n\n # Add the 6 digits after the left guard\n for i in range(*self.FIRST_SECTION):\n digit = int(code[i])\n coding = structure[i]\n binary_string += EANCoding.CODES[coding][digit]\n\n # Add the center guard\n binary_string += EANCoding.CENTER_GUARD\n\n # Add the 6 digits after the center guard\n for i in range(*self.SECOND_SECTION):\n digit = int(code[i])\n binary_string += EANCoding.CODES[\"R\"][digit]\n\n binary_string += EANCoding.RIGHT_GUARD\n\n return binary_string",
"def data() -> str:\n return \"1721\\n979\\n366\\n299\\n675\\n1456\"",
"def get_string_binary(string):\r\n string_binary_array = []\r\n\r\n # Create array of binaries from the string\r\n for character in string:\r\n string_binary_array.append(get_binary(character))\r\n\r\n # Combine those binaries into one long binary\r\n string_binary = \"\".join(string_binary_array)\r\n\r\n return string_binary",
"def FancyReader( address, hexdump ):\n\ttry:\n\t\tdata = ReadMemAString(address)\n\t\tprefix=\"ASCII: \"\n\texcept Exception:\n\t\tdata=\"\"\n\t\tprefix=\"\"\n\t\n\tif len(data) < int(ExtensionSettings.getValue('min_string_length')):\n\t\ttry:\n\t\t\tdata = ReadMemUString(address)\n\t\t\tprefix=\"U: \"\n\t\texcept Exception:\n\t\t\tdata=\"\"\n\t\t\tprefix=\"\"\n\t\n\tif len(data) < int(ExtensionSettings.getValue('min_string_length')):\n\t\ttry:\n\t\t\tif hexdump:\n\t\t\t\tdata = ReadMemHexDump(address, 1, False, columnwidth=int(ExtensionSettings.getValue('hexdump_column_size')))\n\t\t\t\tdata=data.replace('\\n', '')\n\t\t\telse:\n\t\t\t\tdata = ReadMemHex(address, 16)\n\t\t\tprefix=\"Hex: \"\n\t\texcept Exception:\n\t\t\tdata=\"\"\n\t\t\tprefix=\"\"\n\t\n\treturn prefix+data",
"def x64exe_example():\n \n text_store = \"01001101010110101001000000000000000000110000000000000000000000000000010000000000000000000000000011111111111111110000000000000000101110000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010101000000000000000000000000000000011100001111110111010000011100000000010110100000010011100110100100001101110000000000101001100110011010010000101010100011010000110100101110011001000000111000001110010011011110110011101110010011000010110110100100000011000110110000101101110011011100110111101110100001000000110001001100101001000000111001001110101011011100010000001101001011011100010000001000100010011110101001100100000011011010110111101100100011001010010111000001101000011010000101000100100000000000000000000000000000000000000000000000000000000001000111101101110001110101101100111001011000011110101010010001010110010110000111101010100100010101100101100001111010101001000101011001011000011110101010010001010110010100000111101010100100010100001111001100010010101101000101111001010000011110101010010001010010100100110100101100011011010001100101100001111010101001000101001010000010001010000000000000000011001001000011000000010000000000000011111011110100011110101110100000000000000000000000000000000000000000000000000000000000000001111000000000000001000100000000000001011000000100000111000011000000000000000001000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000010000000000000000000000000000000000000000000001000000000000010000000000000000000000000000000000010000000000000000000000000000000000100000000000000000000001100000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000100000000000000000000000000000000000000000000000000000001100000000011000001000000100000000000000000001000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000001110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101110011101000110010101111000011101000000000000000000
000000000000010100000000000000000000000000000000000100000000000000000000000000000000001000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000001100000001011100111001001100100011000010111010001100001000000000000000001011000000000000000000000000000000000000010000000000000000000000000000000000010000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000010000001011100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111011110100011110101110100000000000000000000000000000000000011010000000000000000000000000011110000000000000000000000000000011100001000000000000000000000000111000000010000000000000000000000000000000000000000000000000000000000000100000000000000000000000001010000000000000000000000000010111001110100011001010111100001110100000000000000000000000000000000000010000000000000000000000001110000000000000000000000000000101110011100100110010001100001011101000110000100000000000000000001110000100000000000000000000000111100000000000000000000000000001011100111001001100100011000010111010001100001001001000111101001111010011110100110010001100010011001110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"In theory you could save this code as an .exe file and run it on a 64bit Intel chip machine, however we would strongly advise you not to :\\n1) it won't appear to do anything (see the code below) - there is no output; \\n2) there are no guarentees as to what will happen if you edit this code - you could damage your machine;\\n3) there are no guarentees that even as the code stands it will not damage your machine - it may run differently on different machines;\\n4) you should not run arbitrary binary code downloaded from someone you do not have reason to trust - while the code for this editor is open source, and therefore checkable, checking binary code is much harder. This software is licenced under a restricted licence that excludes adjustment of the source code (see ReadMe) but that doesn't mean someone hasn't if you didn't track the code back to the original site. Ultimately, there's really no way of trusting the code without trusting the site you downloaded it from.\\n\\nNevertheless, for information, this code was compiled from the following assembly:\\n\\n-----------------------------------\\nbits 64\\n\\ndefault rel\\n\\nsegment .text\\nglobal main\\n\\n mov rax, 0\\n-----------------------------------\\n\\nThe following command lines were used to compile it:\\n\\nnasm -f win64 -o file.obj file.asm\\n\\nlink file.obj /subsystem:console /entry:main /out:file.exe\\n\\nThe file ran from a command prompt.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()",
"def read_instruction(memory,address,version,ztext):\n # If top two bits are 11, variable form. If 10, short. \n # If opcode is BE, form is extended. Otherwise long.\n start_address=address\n\n branch_offset = None # Offset, in bytes, to move PC\n store_to = None # Variable # to store the resulting value to\n zchars = [] # If this instruction works with zcodes, store them here\n literal_string = None # Ascii version of zchars, if any\n\n address,instruction_form, instruction_type, opcode_number,operands = extract_opcode(memory,address)\n \n # Find opcode handler\n handler = OPCODE_HANDLERS.get((instruction_type, opcode_number))\n if not handler:\n raise InstructionException('Unknown opcode %s, %s' % (instruction_type, opcode_number)) \n\n address, operands = process_operands(operands, handler,memory,address,version)\n\n if handler.get('literal_string'):\n address,literal_string = extract_literal_string(memory,address,ztext)\n \n # 4.6\n if handler.get('store'):\n store_to = memory[address] \n address+=1\n\n # 4.7\n branch_if_true=False\n if handler.get('branch'):\n address, branch_offset,branch_if_true = extract_branch_offset(memory,address)\n next_address = address\n\n # Create the handler function for this instruction\n handler_f = lambda interpreter: handler['handler'](interpreter, operands, next_address,store_to,branch_offset,branch_if_true, literal_string)\n\n # Setup text version for debuggging\n description = format_description(instruction_type, handler, operands, store_to, branch_offset, branch_if_true, literal_string)\n \n return handler_f,description,next_address",
"def get_address(self):\n self.rs485.clear_buffers()\n self.rs485.write_command('#00?0')\n response = self.rs485.read_response()\n pattern = '\\$.*? (.*?) \\r\\n'\n hexval = re.findall(pattern,response).pop()\n address = int(hexval,16)\n return address",
"def CODE(string):\n return ord(string[0])",
"def test_hexlify():\n result = uflash.hexlify(TEST_SCRIPT)\n lines = result.split()\n # The first line should be the extended linear address, ox0003\n assert lines[0] == ':020000040003F7'\n # There should be the expected number of lines.\n assert len(lines) == 5",
"def get_sequence(address):\n ref = ''\n with open(address,'rU') as ref_file:\n next(ref_file) # skip header lines\n for line in ref_file:\n ref = ref + line.strip()\n return ref",
"def isbn13_convert(isbn13):\r\n if not is_isbn_13(isbn13): return None\r\n return isbn13[3:-1] + isbn_10_check_digit(isbn13[3:-1])"
] |
[
"0.60261166",
"0.5575427",
"0.55525887",
"0.5451665",
"0.5430152",
"0.5402106",
"0.537459",
"0.53555804",
"0.53287554",
"0.53241026",
"0.5264728",
"0.5187579",
"0.51730996",
"0.51520854",
"0.5141504",
"0.5126803",
"0.5119388",
"0.50915444",
"0.50793564",
"0.50766116",
"0.5050902",
"0.50153196",
"0.50112224",
"0.49907792",
"0.49893078",
"0.4967153",
"0.49657953",
"0.49622238",
"0.49516183",
"0.49471214"
] |
0.5639703
|
1
|
Classifies a part of the binary as a string with the first byte giving the length. Returns the next available memory address after the string.
|
def stringn(runtime_addr):
runtime_addr = memorymanager.RuntimeAddr(runtime_addr)
binary_addr, _ = movemanager.r2b_checked(runtime_addr)
disassembly.add_classification(binary_addr, Byte(1))
length = memory_binary[binary_addr]
add_expression(binary_addr, utils.LazyString("%s - %s", disassembly.get_label(runtime_addr + 1 + length, binary_addr), disassembly.get_label(runtime_addr + 1, binary_addr)))
return string(runtime_addr + 1, length)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def autostring(min_length=3):\n\n assert min_length >= 2\n addr = memorymanager.BinaryAddr(0)\n while addr < len(memory_binary):\n i = 0\n while (addr + i) < len(memory_binary) and memory_binary[addr + i] is not None and not disassembly.is_classified(addr + i, 1) and utils.isprint(memory_binary[addr + i]):\n i += 1\n if movemanager.b2r(addr + i) in labelmanager.labels:\n break\n if i >= min_length:\n # TODO: I suspect the next two line fragment should be wrapped up if I keep it, probably repeated a bit (probably something like \"with movemanager.b2r(binary_addr) as runtime_addr:...\", although I probably can't reuse the b2r function, but maybe think about it)\n runtime_addr = movemanager.b2r(addr)\n with movemanager.move_id_for_binary_addr[addr]:\n string(runtime_addr, i)\n addr += max(1, i)",
"def stringterm(runtime_addr, terminator, exclude_terminator=False):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n initial_addr = binary_addr\n while memory_binary[binary_addr] != terminator:\n binary_addr += 1\n string_length = (binary_addr + 1) - initial_addr\n if exclude_terminator:\n string_length -= 1\n if string_length > 0:\n disassembly.add_classification(initial_addr, String(string_length))\n return movemanager.b2r(binary_addr + 1)",
"def unpack_string(self, offset, length):\n return struct.unpack_from(str(\"<%ds\") % (length), self._buf, self._offset + offset)[0]",
"def get_string(binary):\r\n new_string = \"\"\r\n\r\n # Sets range as length of binary string and returns an int\r\n for x in range((len(binary) // 8)):\r\n # Grabs 8 characters at a time, converts back to an integer\r\n n = int(binary[(x * 8) : ((x * 8) + 8)], 2)\r\n # Special logic to handle null values\r\n if n == 0:\r\n new_string += \"\\\\x00\"\r\n # Otherwise, change those bits back to a character\r\n else:\r\n new_string += n.to_bytes((n.bit_length() + 7) // 8, \"big\").decode()\r\n\r\n return new_string",
"def string(runtime_addr, n=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n if n is None:\n assert not disassembly.is_classified(binary_addr)\n n = 0\n while not disassembly.is_classified(binary_addr + n) and utils.isprint(memory_binary[binary_addr + n]):\n n += 1\n if n > 0:\n disassembly.add_classification(binary_addr, String(n))\n return movemanager.b2r(binary_addr + n)",
"def stringhiz(runtime_addr, include_terminator_fn=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert not disassembly.is_classified(binary_addr, 1)\n initial_addr = binary_addr\n while True:\n if disassembly.is_classified(binary_addr, 1):\n break\n if memory_binary[binary_addr] == 0 or (memory_binary[binary_addr] & 0x80) != 0:\n if include_terminator_fn is not None and include_terminator_fn(memory_binary[binary_addr]):\n binary_addr += 1\n break\n binary_addr += 1\n if binary_addr > initial_addr:\n disassembly.add_classification(initial_addr, String(binary_addr - initial_addr))\n return movemanager.b2r(binary_addr)",
"def get_string(addr, size):\n\t\n\toutput = ''\n\tfor offset in range(size):\n\t\toutput += chr(getByte(addr))\n\t\taddr = addr.add(1)\n\n\treturn output",
"def slice_string(s, _len):\n return long_to_bytes(int(bin(bytes_to_long(s))[2:2+_len], 2))",
"def get_firmware_id_string(self):\n unpacked_string = False\n # Find the address of the string in memory\n id_string_addr = self._get_slt_entry(3)\n if id_string_addr is None:\n # maybe it's not packed in this slt\n id_string_addr = self._get_slt_entry(2)\n if id_string_addr is None:\n # Can't find it in the slt return None\n return None\n unpacked_string = True\n # parse the null terminated string\n last = build_string = \"\"\n # There is no reason for the build string to contain\n # any non ASCII character but do it like this to avoid\n # breaking support for Python 2.7\n try:\n char = unichr\n except NameError:\n char = chr\n while last != \"\\0\":\n word = self.get_data(id_string_addr)\n if unpacked_string:\n if Arch.addr_per_word == 4:\n # Two char per word\n build_string += char(word & 0x00FFFF)\n if build_string[-1] == \"\\0\":\n break\n last = char((word & 0xFFFF0000) >> 16)\n build_string += last\n else:\n # Only one char per word\n last = char(word)\n build_string += last\n else:\n # Four chars per word\n if Arch.addr_per_word == 4:\n string = cu.get_string_from_word(Arch.addr_per_word, word)\n stop_decoding = False\n for char in string:\n if char != '\\0':\n build_string += char\n else:\n stop_decoding = True\n break\n last = string[3:]\n\n if stop_decoding:\n break\n else:\n # Two chars per word\n build_string += char((word & 0xFF00) >> 8)\n if build_string[-1] == \"\\0\":\n break\n last = char(word & 0x00FF)\n build_string += last\n # Move to the next word in the string\n id_string_addr += Arch.addr_per_word\n\n # remove the \\0 we don't want the terminator\n if build_string[-1] == \"\\0\":\n build_string = build_string[:-1]\n\n return build_string.strip()",
"def stringhi(runtime_addr, include_terminator_fn=None):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n assert not disassembly.is_classified(binary_addr, 1)\n initial_addr = binary_addr\n while True:\n if disassembly.is_classified(binary_addr, 1):\n break\n if memory_binary[binary_addr] & 0x80 != 0:\n if include_terminator_fn is not None and include_terminator_fn(memory_binary[binary_addr]):\n c = memory_binary[binary_addr] & 0x7f\n if utils.isprint(c) and c != ord('\"') and c != ord('\\''):\n add_expression(binary_addr, \"%s+'%s'\" % (assembler().hex2(0x80), chr(c)))\n else:\n add_expression(binary_addr, \"%s+%s\" % (assembler().hex2(0x80), assembler().hex2(c)))\n binary_addr += 1\n break\n binary_addr += 1\n if binary_addr > initial_addr:\n disassembly.add_classification(initial_addr, String(binary_addr - initial_addr))\n return movemanager.b2r(binary_addr)",
"def createAsciiString(self, address: ghidra.program.model.address.Address, length: int) -> ghidra.program.model.listing.Data:\n ...",
"def ReadFixedString(self, length):\n return self.ReadBytes(length).rstrip(b'\\x00')",
"def _getStr(self, length):\n element, n = self._buf.pop(length)\n \n if n != length:\n raise SerializationError('There is not enough data left.')\n \n return element",
"def _binary_string_to_str(binary_string: str, end=None) -> str:\n string = \"\"\n\n binary_list = re.findall(\".\" * 8, binary_string)\n for byte in binary_list:\n string += chr(int(byte, 2))\n if end and string.endswith(end):\n return string[: -len(end)]\n\n return string",
"def get_binary_string(self) -> str:\n\n # Find the structure of the first section\n # This is determined by the first digit\n if self.HAS_STRUCTURE:\n # We find the structure of the first section using the first digit\n structure = EANCoding.STRUCTURE[self.code[0]]\n\n # The first digit is removed\n code = self.code[1:]\n else:\n # If there is no structure then all digits should be in `L` coding\n structure = \"L\" * (self.FIRST_SECTION[1])\n\n # In EAN8 barcodes the first digit is accounted for\n code = self.code\n\n # Convert the barcode to a binary string with the CodeNumbers class\n # Add the left guard\n binary_string = EANCoding.LEFT_GUARD\n\n # Add the 6 digits after the left guard\n for i in range(*self.FIRST_SECTION):\n digit = int(code[i])\n coding = structure[i]\n binary_string += EANCoding.CODES[coding][digit]\n\n # Add the center guard\n binary_string += EANCoding.CENTER_GUARD\n\n # Add the 6 digits after the center guard\n for i in range(*self.SECOND_SECTION):\n digit = int(code[i])\n binary_string += EANCoding.CODES[\"R\"][digit]\n\n binary_string += EANCoding.RIGHT_GUARD\n\n return binary_string",
"def findBytes(self, start: ghidra.program.model.address.Address, byteString: unicode) -> ghidra.program.model.address.Address:\n ...",
"def _make_data(self, approximate_length):\n fragments = []\n so_far = 0\n while so_far < approximate_length:\n fragment = ('%d:' % so_far).encode('utf-8')\n so_far += len(fragment)\n fragments.append(fragment)\n return six.b('').join(fragments)",
"def readString(self) -> str:\n length = self._unpack('i', 4)\n\n return self._unpack('{:d}s'.format(length), length)",
"def get_string_binary(string):\r\n string_binary_array = []\r\n\r\n # Create array of binaries from the string\r\n for character in string:\r\n string_binary_array.append(get_binary(character))\r\n\r\n # Combine those binaries into one long binary\r\n string_binary = \"\".join(string_binary_array)\r\n\r\n return string_binary",
"def read_string(self, register, length):\n return bytearray(self.device.readregistermulti(register, length)).decode()",
"def extract_literal_string(memory,address,ztext):\n zchar_start_address = address\n text, next_address = ztext.to_ascii(memory,zchar_start_address,0) \n return next_address,text",
"def ReadString(self):\n length = self.ReadUInt8()\n return self.unpack(str(length) + 's', length)",
"def decode_network_string(msgtype, plen, buf):\n return buf[header.size:plen - 1]",
"def beautifulBinaryString(binary_string) -> int:\n sub_str = \"010\"\n count = 0\n start_index = 0\n\n while start_index <= len(binary_string):\n end_index = start_index + 3\n slice = binary_string[start_index:end_index]\n\n if sub_str == slice:\n count += 1\n start_index = end_index\n else:\n start_index += 1\n\n return count",
"def decodeName(self, last=-1):\n label = []\n done = False\n while not done:\n (length,) = self.unpack(\"!B\")\n if getBits(length, 6, 2) == 3:\n # Pointer\n self.offset -= 1\n pointer = getBits(self.unpack(\"!H\")[0], 0, 14)\n save = self.offset\n if last == save:\n raise BufferError(\n \"Recursive pointer [offset=%d,pointer=%d,length=%d]\" %\n (self.offset, pointer, len(self.data))\n )\n if pointer < self.offset:\n self.offset = pointer\n else:\n # Pointer can't point forwards\n raise BufferError(\n \"Invalid pointer [offset=%d,pointer=%d,length=%d]\" %\n (self.offset, pointer, len(self.data))\n )\n label.extend(self.decodeName(save).label)\n self.offset = save\n done = True\n else:\n if length > 0:\n l = self.get(length)\n try:\n l.decode()\n except UnicodeDecodeError:\n raise BufferError(\"Invalid label <%s>\" % l)\n label.append(l)\n else:\n done = True\n return \".\".join(str(label))",
"def get_barcode(curr_seq, barcode_len):\r\n raw_barcode = curr_seq[0:barcode_len]\r\n raw_seq = curr_seq[barcode_len:]\r\n return raw_barcode, raw_seq",
"def read_string(self):\n return self.bits.read('bytes:{0}'.format(self.read_int())).decode(\"utf-8\", 'replace')",
"def OffsetToStringData(self) -> int:",
"def _get_string_from_packing(self, string_to_unpack):\n return string_to_unpack[4:]",
"def hexlify(self: str, verbose=False):\n nbytes = len(_chunk_bs(self))\n buf = b''\n strlen = ''\n for b in to_bytes(_chunk_bs(self)):\n buf+=b\n# for s in _from_list(_chunk_bs(self)):\n# strlen+=f'{ _bit_length(s): 02d}'\n if verbose:\n for n in range(nbytes):\n strlen += f'{_bit_length(_from_list(_chunk_bs(self))[n])} @[{n}] '\n print(strlen)\n return buf"
] |
[
"0.71907574",
"0.60568094",
"0.60319275",
"0.60304165",
"0.5983436",
"0.5930473",
"0.59154713",
"0.56741375",
"0.5656219",
"0.56132877",
"0.5602844",
"0.5600993",
"0.5566914",
"0.5478548",
"0.5426606",
"0.5397106",
"0.53721356",
"0.53622264",
"0.5342458",
"0.53340405",
"0.52786773",
"0.52722543",
"0.5267942",
"0.5257803",
"0.5224116",
"0.52221596",
"0.5222051",
"0.5218181",
"0.5213098",
"0.5176105"
] |
0.62302107
|
1
|
Do a simple SECINFO_NO_NAME: send PUTROOTFH+SECINFO_NO_NAME and check that the result is legal
|
def testSupported(t, env):
c = env.c1.new_client(env.testname(t))
sess = c.create_session()
# Do a simple SECINFO_NO_NAME
res = sess.compound([op.putrootfh(), op.secinfo_no_name(0)])
check(res)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_cleans_peer_name(self):\n ps1_status = self.pybird.get_peer_status(\"PS1{\\\"'}\")\n self.assertFalse(ps1_status['up'])",
"def testSupported2(t, env):\n c = env.c1.new_client(env.testname(t))\n sess = c.create_session()\n\n # GETFH after do a SECINFO_NO_NAME should get error NFS4ERR_NOFILEHANDLE\n res = sess.compound([op.putrootfh(), op.secinfo_no_name(0), op.getfh()])\n print res\n check(res, NFS4ERR_NOFILEHANDLE)",
"def test_ap_hs20_username_unknown2(dev, apdev):\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n params['hessid'] = bssid\n del params['domain_name']\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n dev[0].hs20_enable()\n id = dev[0].add_cred_values({ 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\",\n 'domain': \"example.com\" })\n interworking_select(dev[0], bssid, \"unknown\", freq=\"2412\")\n interworking_connect(dev[0], bssid, \"TTLS\")\n check_sp_type(dev[0], \"unknown\")",
"def test_ap_hs20_username_unknown(dev, apdev):\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n params['hessid'] = bssid\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n dev[0].hs20_enable()\n id = dev[0].add_cred_values({ 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\" })\n interworking_select(dev[0], bssid, \"unknown\", freq=\"2412\")\n interworking_connect(dev[0], bssid, \"TTLS\")\n check_sp_type(dev[0], \"unknown\")",
"def test_alt_name_request(self):\n oim = OIM()\n hostname = 'test.' + DOMAIN\n san = 'test-san.' + DOMAIN\n san2 = 'test-san2.' + DOMAIN\n rc, _, _, msg = oim.request('--hostname', hostname,\n '--altname', san,\n '--altname', san2)\n self.assertEqual(rc, 0, \"Failed to request certificate\\n%s\" % msg)\n self.assert_(oim.reqid != '', msg)",
"def test_can_info_does_not_exist(self):\n fake_user = User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))",
"def check_name(name, allow_services=False):",
"def _check_name(self):\n\t\tpass",
"def checkMTSinfoCompliance(info):\n if len(info) != 3:\n print(\"MTS INFO DOES NOT ADHERE TO MY STANDARD: processingUnit_machine_order\")\n exit(5)",
"def test_001_create_empty(self):\n ret = svcmgr.main(argv=[\"create\", \"-s\", SVCNAME])\n assert ret == 0",
"def cmd_noc(self, data, client, cmd=None):\n \n if data:\n \n input = self._adminPlugin.parseUserCmd(data)\n \n else:\n \n client.message('!noc <name>')\n return\n \n sclient = self._adminPlugin.findClientPrompt(input[0], client)\n \n if sclient:\n \n if sclient.maskedGroup:\n \n cgroup = sclient.maskedGroup.name\n \n else:\n \n cgroup = self.gnamelevel0 \n \n client.message('%s^7 connected ^2%s ^7times : ^2%s^7 [^2%s^7] '%(sclient.exactName, sclient.connections, cgroup, sclient.maxLevel)) \n \n else:\n return False",
"def test_noname(self):\n sid = h5s.create_simple((10,10))\n g = h5g.open(self.fid, '/')\n g._close()\n self.assertIsNone(h5i.get_name(sid))\n self.assertIsNone(h5i.get_name(g))",
"def test_instance_naming_with_illegal_chars(self):\n NEUTRON.list_security_groups = mock.MagicMock(\n return_value=iter([{\"security_groups\": []}]))\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n conn = MagicMock()\n config_bad = copy.deepcopy(CONFIG)\n config_bad['cluster-name'] = \"illegal:)chars\"\n\n conn.network.networks.return_value = {\"name\": \"ext01\"}\n\n info = OSClusterInfo(NOVA, NEUTRON, CINDER, config_bad, conn)\n with self.assertRaises(SystemExit):\n # assert this raises system exit\n info.nodes_names",
"async def test_noise_incorrect_name():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"wrongname\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(BadNameAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(BadNameAPIError):\n await helper.perform_handshake(30)",
"def test_info_whitespace():\n pytest.raises(SaltInvocationError, mac_group.info, \"white space\")",
"def isAddName(name):\t\n if lib.essentials.isAlphanumeric(name) != 0:\n\tprint \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name)\n #output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \" '%s' is not valid name. \\n Vadapter-name should be an alphanumeric.\" % (name))) \n return -1\n \n if lib.essentials.isStartNumeric(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)\n\t#output.completeOutputError(lib.errorhandler.InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not start with an digit\"% (name)))\n return -1\n\n if lib.essentials.isContainSpecial(name) != 0:\n\tprint \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)\n\t#output.completeOutputError(InvalidArgumentCount(descape = \"'%s' is not valid name. \\n Vadapter name should not contain special characher\" % (name)))\n return -1\n\n# if lib.db.db.ifExistsInDatabase(name) == 0:\n#\tprint NameError(\"'%s' is not valid name. \\n Already Exists\" % (name))\n#\treturn -1\n \n return 0",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def extract_sni(packet) -> str:\n try:\n tls_layer = packet[TLSClientHello] # type: ignore\n except IndexError:\n return ''\n\n for attr in ['ext', 'extensions']:\n extensions = getattr(tls_layer, attr, [])\n if extensions:\n for extension in extensions:\n try:\n if extension.type == 0:\n return extension.servernames[0].servername.decode()\n except Exception:\n pass\n\n return ''",
"def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000",
"async def test_noise_incorrect_name():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"wrongname\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(BadNameAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(BadNameAPIError):\n await helper.perform_handshake()",
"def test_print_title_negativie(capsys, title, result):\n GC.print_title(title)\n out, err = capsys.readouterr()\n print(err)\n assert out != result",
"def test_break_security_group_usual_case_specify_sg():",
"def test_get_cipher_name_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_name() is None",
"def testInfoEmptyDefaultNodeComponent(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\">\n <identity category=\"component\" type=\"generic\" />\n <feature var=\"http://jabber.org/protocol/disco#info\" />\n </query>\n </iq>\n \"\"\")",
"def has_name(self):\n return self.unpack_word(0x2) != 0",
"def test_bogus():\n\n # Find a bogus op_type by trial and error\n #\n bogus_op_type = -1\n for bogus_op_type in range(0, 1000):\n if not bogus_op_type in SentmanRequest.MSG_TYPES:\n break\n\n req1 = SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)\n req1.op_type = bogus_op_type\n req1_buf = req1.pack()\n try:\n (_msgs, _buf) = SentmanRequest.recv(req1_buf)\n except SentmanRequestUnpackError, _exc:\n print \"Bogus op_type detected\"\n else:\n print \"Didn't catch bogus op_type\"\n\n req1 = SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)\n req1.version = SentmanRequest.PROTOCOL_VERSION + 20\n req1_buf = req1.pack()\n try:\n (_msgs, _buf) = SentmanRequest.recv(req1_buf)\n except SentmanRequestUnpackError, _exc:\n print \"Bogus version detected\"\n else:\n print \"Didn't catch bogus version\"\n\n req1 = SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)\n req1.msg_len = SentmanRequest.MESSAGE_LEN + 33\n req1_buf = req1.pack()\n try:\n (_msgs, _buf) = SentmanRequest.recv(req1_buf)\n except SentmanRequestUnpackError, _exc:\n print \"Bogus msg_len detected\"\n else:\n print \"Didn't catch bogus msg_len\"",
"def get_sni(data):\n\n # First we parse the TLS Record Protocol\n content_type, tls_major, tls_minor, record_length = struct.unpack('>BBBH', data[:5])\n\n assert content_type == 22, 'Record should be of type handshake (22)'\n\n # Parsing TLS Hanshaking Protocol\n handshake_data = data[5:]\n assert len(handshake_data) == record_length, \"Length of handshake record should be the remaining\"\n\n handshake_type, = struct.unpack('>B', handshake_data[:1])\n assert handshake_type == 1, 'Handshaking message type should be ClientHello'\n handshake_length, = struct.unpack('>I', b'\\x00' + handshake_data[1:4])\n\n client_hello_data = handshake_data[4:]\n assert len(client_hello_data) == handshake_length\n\n # Parsing Client Hello message\n # Skipping irrelevant information of static length 34 in the ClientHello message\n n = 34\n\n # Parsing length of more irrelevant data\n session_id_len, = struct.unpack('>B', client_hello_data[n:n+1])\n n += 1 + session_id_len\n cipher_suites_len, = struct.unpack('>H', client_hello_data[n:n+2])\n n += 2 + cipher_suites_len\n compression_methods_len, = struct.unpack('>B', client_hello_data[n:n+1])\n n += 1 + compression_methods_len\n\n # Finally getting to the extensions\n extensions_len, = struct.unpack('>H', client_hello_data[n:n+2])\n n += 2\n assert handshake_length == n + extensions_len\n\n # Searching for server name in the list of TLS-extensions the client supplies\n while n < handshake_length:\n extension_type, data_len = struct.unpack('>HH', client_hello_data[n:n+4])\n n += 4\n\n if extension_type == 0:\n # server_name_list_len, = struct.unpack('>H', client_hello_data[n:n+2])\n\n name_type = client_hello_data[n+2]\n # name type could in principle be something other than a hostname\n # but the standard has currently only hostname as a choice\n # https://tools.ietf.org/html/rfc6066#section-3\n assert name_type == 0, \"name type should be a hostname.\"\n\n host_name_len, = struct.unpack('>H', client_hello_data[n+3:n+5])\n assert data_len == host_name_len + 5\n return client_hello_data[n+5:n+5+host_name_len]\n n += data_len\n raise ValueError(\"No Server Name Indication could be found\")",
"def test_validate_party_info_name_is_none(self):\n self.party_test_data[\"name\"] = None\n response = validate_party_info(self.party_test_data)\n self.assertDictEqual(\n response, {\"message\": \"name is required\", \"code\": 400})",
"def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)",
"def test_change_name_of_the_devicefalse():"
] |
[
"0.5770228",
"0.57652646",
"0.56357694",
"0.5508631",
"0.5416629",
"0.5194066",
"0.5136464",
"0.50815845",
"0.5057677",
"0.49920005",
"0.49905288",
"0.4957912",
"0.4951877",
"0.49414647",
"0.4929611",
"0.4901294",
"0.4896876",
"0.4892041",
"0.48907965",
"0.48583683",
"0.4836342",
"0.48174",
"0.47781911",
"0.4774271",
"0.47730073",
"0.4772022",
"0.4750474",
"0.47443897",
"0.4741738",
"0.4736824"
] |
0.65952736
|
0
|
GETFH after a SECINFO_NO_NAME or SECINFO results in a NOFILEHANDLE error; see RFC 5661 section 2.6.3.1.1.8
|
def testSupported2(t, env):
c = env.c1.new_client(env.testname(t))
sess = c.create_session()
# GETFH after do a SECINFO_NO_NAME should get error NFS4ERR_NOFILEHANDLE
res = sess.compound([op.putrootfh(), op.secinfo_no_name(0), op.getfh()])
print res
check(res, NFS4ERR_NOFILEHANDLE)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GetHFSFileEntry(self):\n return self._fshfs_file_entry",
"def get_fh(filename, mode):\n fh = None\n try:\n if mode == 'r':\n fh = open(filename,'r')\n elif mode == 'w':\n fh = open(filename,'w')\n else:\n raise ValueError('Command should be r or w')\n except IOError as e:\n print(e)\n except ValueError as e:\n print(e)\n return fh",
"def test_load_tmp_dh_missing_file(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n context.load_tmp_dh(b\"hello\")",
"def getFileDescriptors(self):\n fds = {}\n extraFDs = []\n if self.metaSocket is not None:\n extraFDs.append(self.metaSocket.childSocket().fileno())\n if self.ampSQLDispenser is not None:\n self.ampDBSocket = self.ampSQLDispenser.dispense()\n extraFDs.append(self.ampDBSocket.fileno())\n for fd in self.inheritSSLFDs + self.inheritFDs + extraFDs:\n fds[fd] = fd\n return fds",
"def efile_handle(self):\n if not self.status == \"finished\":\n raise NameError(\"redhawk: unfinished efile check\")\n\n tries = 0\n while not self.efile_exists() and ties < self.file_limit:\n time.sleep(self.file_delay)\n tries = tries+1\n \n if os.path.isfile(self.efile_name()):\n return open(self.efile_name(), \"r\")\n\n raise NameError(\"redhawk: unfinished efile check\")",
"def recv_open_response(self, recv_payload):\n\n\tunpacked_payload = struct.unpack(\"!?Q2I\", recv_payload)\n # Read status field. If set to False, ignore remaining fields and \n\t# generate error msg (file not found) before exiting. \n\t# Each unpacked value is a tuple, so [0] accesses the value that we want\n\tstatus = unpacked_payload[0:1][0]\n\tif status == False:\n\t print \"Error: File not found.\"\n\t sys.exit()\n\t\n\t#If set to True, read remaining fields.\n\telif status == True:\n\t print(\"File found.\")\n\t self.file_length = unpacked_payload[1:2][0]\n\t self.epoch_no = unpacked_payload[2:3][0]\n\t self.handle_no = unpacked_payload[3:][0]\t \t \n\treturn",
"def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed",
"def fileno(self):\n\n _global_lock.acquire()\n try:\n fd = _DNSServiceRefSockFD(self)\n finally:\n _global_lock.release()\n\n return fd",
"def hid_read(hid_handle):\n output_buffer = None\n timeout = 2000.0\n rlist, wlist, xlist = select([hid_handle], [], [hid_handle], timeout)\n\n if xlist:\n if xlist == [hid_handle]:\n raise IOError(errno.EIO, \"exception on file descriptor %d\" % hid_handle)\n\n if rlist:\n if rlist == [hid_handle]:\n output_buffer = os.read(hid_handle, BUFFER_LENGTH)\n if output_buffer is None:\n return b\"\"\n return output_buffer",
"def GetNTFSFileEntry(self):\n return self._fsntfs_file_entry",
"def _filehandle(self):\n if not self._fh or self._is_closed():\n filename = self._rotated_logfile or self.filename\n if filename.endswith('.gz'):\n self._fh = gzip.open(filename, 'r')\n else:\n self._fh = open(filename, \"r\", 1)\n self._fh.seek(self._offset)\n\n return self._fh",
"def safe_open(fname, mode, buffering=-1):\n # file descriptors\n try:\n return open(fname, mode, buffering=buffering)\n except PermissionError as ex:\n raise xt.XonshError(f\"xonsh: {fname}: permission denied\") from ex\n except FileNotFoundError as ex:\n raise xt.XonshError(f\"xonsh: {fname}: no such file or directory\") from ex\n except Exception as ex:\n raise xt.XonshError(f\"xonsh: {fname}: unable to open file\") from ex",
"def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now",
"def ofile_handle(self):\n if not self.status == \"finished\":\n raise NameError(\"redhawk: unfinished ofile check\")\n tries = 0\n while not self.ofile_exists() and tries < self.file_limit:\n time.sleep(self.file_delay)\n tries = tries+1\n \n if os.path.isfile(self.ofile_name()):\n return open(self.ofile_name(), \"r\")\n\n raise NameError(\"redhawk: unfound ofile\")",
"def comdlg32_GetOpenFileName(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpofn\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def read_handle(self, handle, filename, events, error):\n self.filehandle.seek(self.end_of_file)\n tailportion = self.filehandle.read()\n sys.stdout.write(tailportion)\n self.end_of_file = os.stat(self.filename).st_size",
"def get_open_files(self, pid):\n try:\n cmd = ['/usr/sbin/lsof', '-p', str(pid)]\n fd_out = subprocess.check_output(cmd)\n fd_out = str(fd_out).split(\"\\\\n\")\n return fd_out\n except Exception:\n pass",
"def check_handle(handle):\n return os.path.isfile(get_path_filename(handle))",
"def getHostFsInfo(hostfs):\n pattern = re.compile('^([^\\.]+)\\.([^\\.]+)\\.([^\\.]+)-(([0-9]+\\.)+([0-9]+))\\.([^\\.]+)$')\n result = pattern.match(hostfs)\n if result is None:\n return None\n else:\n version = result.group(4)\n platform = result.group(1)\n cpu = result.group(2)\n endian = result.group(3)\n ext = result.group(7)\n return {\n 'name': hostfs,\n 'file': hostfs,\n 'filepath': hostfs,\n 'version': version,\n 'platform': platform,\n 'cpu': cpu,\n 'endian': endian,\n 'type': ext\n }",
"def createHandle(self, handle, spec, encoding=None):\n \n spec = re.sub(r\"[\\\\/]\", re.escape(os.path.sep), spec) # clean up path separator\n cmd = \"\"\"FILE HANDLE %(handle)s /NAME=\"%(spec)s\" \"\"\" % locals()\n # Note the use of double quotes around the encoding name as there are some encodings that\n # contain a single quote in the name\n if encoding:\n cmd += ' /ENCODING=\"' + encoding + '\"'\n spss.Submit(cmd)\n self.fhdict[handle.lower()] = (spec, encoding)",
"def read_header(fid):\r\n\r\n # Check 'magic number' at beginning of file to make sure this is an Intan\r\n # Technologies RHD2000 data file.\r\n magic_number, = struct.unpack('<I', fid.read(4)) \r\n if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.')\r\n\r\n header = {}\r\n # Read version number.\r\n version = {}\r\n (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4)) \r\n header['version'] = version\r\n\r\n print('')\r\n print('Reading Intan Technologies RHD2000 Data File, Version {}.{}'.format(version['major'], version['minor']))\r\n print('')\r\n\r\n freq = {}\r\n\r\n # Read information of sampling rate and amplifier frequency settings.\r\n header['sample_rate'], = struct.unpack('<f', fid.read(4))\r\n (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'], \r\n freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26))\r\n\r\n\r\n # This tells us if a software 50/60 Hz notch filter was enabled during\r\n # the data acquisition.\r\n notch_filter_mode, = struct.unpack('<h', fid.read(2))\r\n header['notch_filter_frequency'] = 0\r\n if notch_filter_mode == 1:\r\n header['notch_filter_frequency'] = 50\r\n elif notch_filter_mode == 2:\r\n header['notch_filter_frequency'] = 60\r\n freq['notch_filter_frequency'] = header['notch_filter_frequency']\r\n\r\n (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8))\r\n\r\n note1 = read_qstring(fid)\r\n note2 = read_qstring(fid)\r\n note3 = read_qstring(fid)\r\n header['notes'] = { 'note1' : note1, 'note2' : note2, 'note3' : note3}\r\n\r\n # If data file is from GUI v1.1 or later, see if temperature sensor data was saved.\r\n header['num_temp_sensor_channels'] = 0\r\n if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1) :\r\n header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2))\r\n \r\n # If data file is from GUI v1.3 or later, load eval board mode.\r\n header['eval_board_mode'] = 0\r\n if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1) :\r\n header['eval_board_mode'], = struct.unpack('<h', fid.read(2))\r\n \r\n \r\n header['num_samples_per_data_block'] = 60\r\n # If data file is from v2.0 or later (Intan Recording Controller), load name of digital reference channel\r\n if (version['major'] > 1):\r\n header['reference_channel'] = read_qstring(fid)\r\n header['num_samples_per_data_block'] = 128\r\n\r\n # Place frequency-related information in data structure. 
(Note: much of this structure is set above)\r\n freq['amplifier_sample_rate'] = header['sample_rate']\r\n freq['aux_input_sample_rate'] = header['sample_rate'] / 4\r\n freq['supply_voltage_sample_rate'] = header['sample_rate'] / header['num_samples_per_data_block']\r\n freq['board_adc_sample_rate'] = header['sample_rate']\r\n freq['board_dig_in_sample_rate'] = header['sample_rate']\r\n\r\n header['frequency_parameters'] = freq\r\n\r\n # Create structure arrays for each type of data channel.\r\n header['spike_triggers'] = []\r\n header['amplifier_channels'] = []\r\n header['aux_input_channels'] = []\r\n header['supply_voltage_channels'] = []\r\n header['board_adc_channels'] = []\r\n header['board_dig_in_channels'] = []\r\n header['board_dig_out_channels'] = []\r\n\r\n # Read signal summary from data file header.\r\n\r\n number_of_signal_groups, = struct.unpack('<h', fid.read(2))\r\n print('n signal groups {}'.format(number_of_signal_groups))\r\n\r\n for signal_group in range(1, number_of_signal_groups + 1):\r\n signal_group_name = read_qstring(fid)\r\n signal_group_prefix = read_qstring(fid)\r\n (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6))\r\n\r\n if (signal_group_num_channels > 0) and (signal_group_enabled > 0):\r\n for signal_channel in range(0, signal_group_num_channels):\r\n new_channel = {'port_name' : signal_group_name, 'port_prefix' : signal_group_prefix, 'port_number' : signal_group}\r\n new_channel['native_channel_name'] = read_qstring(fid)\r\n new_channel['custom_channel_name'] = read_qstring(fid)\r\n (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12))\r\n new_trigger_channel = {}\r\n (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8))\r\n (new_channel['electrode_impedance_magnitude'], new_channel['electrode_impedance_phase']) = struct.unpack('<ff', fid.read(8))\r\n\r\n if channel_enabled:\r\n if signal_type == 0:\r\n header['amplifier_channels'].append(new_channel)\r\n header['spike_triggers'].append(new_trigger_channel)\r\n elif signal_type == 1:\r\n header['aux_input_channels'].append(new_channel)\r\n elif signal_type == 2:\r\n header['supply_voltage_channels'].append(new_channel)\r\n elif signal_type == 3:\r\n header['board_adc_channels'].append(new_channel)\r\n elif signal_type == 4:\r\n header['board_dig_in_channels'].append(new_channel)\r\n elif signal_type == 5:\r\n header['board_dig_out_channels'].append(new_channel)\r\n else:\r\n raise Exception('Unknown channel type.')\r\n \r\n # Summarize contents of data file.\r\n header['num_amplifier_channels'] = len(header['amplifier_channels'])\r\n header['num_aux_input_channels'] = len(header['aux_input_channels'])\r\n header['num_supply_voltage_channels'] = len(header['supply_voltage_channels'])\r\n header['num_board_adc_channels'] = len(header['board_adc_channels'])\r\n header['num_board_dig_in_channels'] = len(header['board_dig_in_channels'])\r\n header['num_board_dig_out_channels'] = len(header['board_dig_out_channels'])\r\n\r\n return header",
"def fileno(self) -> int:",
"def fileno (self):\n return -1\n #TODO: assign unique pseudo-filenos to mock sockets,\n # so apps don't get confused.",
"def mspatchc_ExtractPatchHeaderToFileByHandles(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"PatchFileHandle\", \"PatchHeaderFileHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def return_file_handle(input_file):\n if str(input_file).endswith(\".gz\"):\n gzipped_file_handle = gzip.open(input_file, \"rt\")\n return gzipped_file_handle\n else:\n normal_fh = open(input_file, \"r\")\n return normal_fh",
"def fileno(self):\n return None",
"def fileno(self):\n return None",
"def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname",
"def test_failToOpenLocalFile(self):\n fp = FilePath(self.mktemp()).child(\"child-with-no-existing-parent\")\n\n self.assertRaises(IOError, self.makeConnectedDccFileReceive, fp.path)",
"def fileno(self):\n raise io.UnsupportedOperation"
] |
[
"0.53072584",
"0.5148183",
"0.50249344",
"0.502488",
"0.4968095",
"0.49357927",
"0.48998624",
"0.48761383",
"0.48639977",
"0.48622304",
"0.48400453",
"0.48285824",
"0.47875142",
"0.47775492",
"0.4760703",
"0.4758203",
"0.47239888",
"0.47136837",
"0.47108918",
"0.46619362",
"0.46577656",
"0.46308205",
"0.4619355",
"0.46145073",
"0.45768696",
"0.45668814",
"0.45668814",
"0.45203966",
"0.4514033",
"0.45107713"
] |
0.6027806
|
0
|
Return the max sequence length of this tokenizer.
|
def max_sequence_length(self) -> int:
return self._max_request_length
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def max_length(self) -> int | None:\n return self._underlying.max_length",
"def max_length(self):\n\t\treturn self._max_length",
"def _max_length(self):\n return self.__max_length",
"def max_request_length(self) -> int:\n return self.max_sequence_length",
"def get_length_of_longest_sentence(self):\n sentence_lengths = [len(s.words) for s in self.blob.sentences]\n return max(sentence_lengths)",
"def max_seq_len() -> int:\n return 8",
"def sequence_length(self):\n return self.get_sequence_length()",
"def sequence_length(self):\n return self._sequence_length",
"def maxContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['largestContig']",
"def maxsize(self) -> int:\n return self._maxsize",
"def maxsize(self):\r\n return self._maxsize",
"def maxlen(self):\n \n return reduce(max, list(map(len, self.tags)))",
"def maxlength(config, tokenizer):\n\n # Unpack nested config, handles passing model directly\n if hasattr(config, \"config\"):\n config = config.config\n\n # Get non-defaulted fields\n keys = config.to_diff_dict()\n\n # Use config.max_length if not set to default value, else use tokenizer.model_max_length if available\n return config.max_length if \"max_length\" in keys or not hasattr(tokenizer, \"model_max_length\") else tokenizer.model_max_length",
"def max_length(self):\n return self._config.trace_max_length",
"def maximumORFLength(self):\n return max(len(orf) for orf in self.ORFs())",
"def get_sequence_length(self):\n if self.random_length is None:\n return self.bptt\n bptt = self.bptt\n if np.random.random() >= 0.95:\n bptt /= 2\n seq_len = max(5, int(np.random.normal(bptt, 5)))\n return seq_len",
"def length_of_sequences(self):\n return self._seq_length",
"def get_max(self):\n return self._max",
"def getLargestPatternLength(self):\n return self._patternLimit",
"def get_v_max(self) -> int:\n return len(self.vocabulary)",
"def token_max_ttl(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"token_max_ttl\")",
"def len_max(self):\n return 16 + 16 + 8 + 8 + Tools.bin_to_dec(self.get_data_size()) + Tools.bin_to_dec(self.get_verification_size())",
"def max(self):\n return self._max",
"def max(self):\n return self._max",
"def length(self):\n return len(self._sequence)",
"def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")",
"def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")",
"def token_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_max_ttl\")",
"def token_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_max_ttl\")",
"def max_steps(self) -> int:\n return pulumi.get(self, \"max_steps\")"
] |
[
"0.79248345",
"0.7814715",
"0.7710857",
"0.76122516",
"0.7540627",
"0.74956506",
"0.7342764",
"0.7237697",
"0.71165454",
"0.70737785",
"0.6990284",
"0.68691313",
"0.6851906",
"0.6830742",
"0.68259156",
"0.67945015",
"0.6794275",
"0.67336994",
"0.67235285",
"0.6708446",
"0.66975",
"0.6646394",
"0.6645346",
"0.6645346",
"0.6642967",
"0.66384226",
"0.66384226",
"0.6636867",
"0.6636867",
"0.6620781"
] |
0.7911943
|
1
|
Return the max request length of this tokenizer.
|
def max_request_length(self) -> int:
return self.max_sequence_length
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def max_sequence_length(self) -> int:\n return self._max_request_length",
"def max_length(self):\n\t\treturn self._max_length",
"def _max_length(self):\n return self.__max_length",
"def max_length(self) -> int | None:\n return self._underlying.max_length",
"def maxsize(self):\r\n return self._maxsize",
"def maxsize(self) -> int:\n return self._maxsize",
"def maxlength(config, tokenizer):\n\n # Unpack nested config, handles passing model directly\n if hasattr(config, \"config\"):\n config = config.config\n\n # Get non-defaulted fields\n keys = config.to_diff_dict()\n\n # Use config.max_length if not set to default value, else use tokenizer.model_max_length if available\n return config.max_length if \"max_length\" in keys or not hasattr(tokenizer, \"model_max_length\") else tokenizer.model_max_length",
"def max_length(self):\n return self._config.trace_max_length",
"def maximum_size(self):\n return self._maximum_size",
"def get_length_of_longest_sentence(self):\n sentence_lengths = [len(s.words) for s in self.blob.sentences]\n return max(sentence_lengths)",
"def maxlen(self):\n \n return reduce(max, list(map(len, self.tags)))",
"def token_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_max_ttl\")",
"def token_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_max_ttl\")",
"def maxsize(self):\n return len(self._data)",
"def maximum_GET_length(self, code):\n return config.maximum_GET_length",
"def get_max_cleverbot_requests(self):\n return int(self.bot_data_file[\"maxCleverbotRequests\"])",
"def len_max(self):\n return 16 + 16 + 8 + 8 + Tools.bin_to_dec(self.get_data_size()) + Tools.bin_to_dec(self.get_verification_size())",
"def max_packet_size(self):\n return max(self.fcip_doc['packet_lengths'])",
"def allocation_max_netmask_length(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allocation_max_netmask_length\")",
"def allocation_max_netmask_length(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allocation_max_netmask_length\")",
"def max_size(self):\n raise NotImplementedError()",
"def maxContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['largestContig']",
"def max_page_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_page_size\")",
"def max_page_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_page_size\")",
"def maxMessageSize(self, appdata=None):\r\n return self.user.maxMessageSize",
"def min_length(self) -> int:\n return pulumi.get(self, \"min_length\")",
"def token_max_ttl(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"token_max_ttl\")",
"def vocab_size(self) -> int:\n return len(self._tokenizer)",
"def vocab_size(self) -> int:\n return len(self._tokenizer)",
"def size_limit(self):\n\t\treturn self._size_limit"
] |
[
"0.8040181",
"0.80028194",
"0.7908217",
"0.78137726",
"0.74633443",
"0.7395531",
"0.73337406",
"0.7236661",
"0.70824194",
"0.6937923",
"0.6824151",
"0.6814213",
"0.6814213",
"0.6808442",
"0.6740837",
"0.67247903",
"0.67175454",
"0.6702094",
"0.66950095",
"0.66950095",
"0.6653016",
"0.66296935",
"0.66264474",
"0.66264474",
"0.65927863",
"0.6580864",
"0.6553571",
"0.65075725",
"0.65075725",
"0.64973533"
] |
0.84071106
|
0
|
The end of text token.
|
def end_of_text_token(self) -> str:
return self._end_of_text_token
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def isEnd(self):\n return _libsbml.XMLToken_isEnd(self)",
"def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF",
"def end(text=None):\n global _current_line\n if _current_line is not None:\n _current_line.end(text)\n _current_line = None",
"def end(self):\n return self.__end_line",
"def end_tag_or_none(self, token):\n if self.patterns['end_tag'].match(token):\n return token[2:-4].upper()",
"def unsetEnd(self):\n return _libsbml.XMLToken_unsetEnd(self)",
"def setEnd(self):\n return _libsbml.XMLToken_setEnd(self)",
"def end_paragraph(self):\n raise NotImplementedError",
"def test_end(self):\n return self._endTest('\\x05')",
"def last_token(self, text):\n if text is not None:\n text = text.strip()\n if len(text) > 0:\n word = self.safe_split(text)[-1]\n word = word.strip()\n return word\n return ''",
"def trata_EOF(self):\n pass",
"def end(self):\n return self.__end",
"def end(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"end\")",
"def rehydrate_paragraph_end(self, next_token):\n assert next_token\n top_stack_token = self.block_stack[-1]\n del self.block_stack[-1]\n return top_stack_token.final_whitespace + \"\\n\"",
"def end(self):\n while self.position < len(self.document.characters\n ) and self.document.characters[\n self.position].character != '\\n':\n self.position += 1",
"def setEOF(self):\n return _libsbml.XMLToken_setEOF(self)",
"def isEOF(self):\n return _libsbml.XMLToken_isEOF(self)",
"def end(self):\n return self._get('end')",
"def eat_EOL(self):\n # print(\"Start eating EOL\")\n self.eat(EOL)\n while self.current_token.type == EOL:\n self.eat(EOL)\n # print(\"Stop eating EOL\")",
"def end(self):\n return self._end",
"def end(self):\n return self._end",
"def end(self):\n return self._end",
"def isEndFor(self, *args):\n return _libsbml.XMLToken_isEndFor(self, *args)",
"def parse(self, tokens):\n self.tokens = tokens\n self.tokens.append(END())\n t = self.e()\n self.expect(END)\n return t",
"def end(self) -> pos.Pos:\n return self.__end",
"def end(self):\n\t\treturn self._end",
"def end(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"end\")",
"def end(self):\n return self.end_",
"def get_next_token(self):\n\t\t\n\t\tif self.pos > len(self.text)-1:\n\t\t\treturn Token(EOF, None)\n\t\t\t\n\t\tcurrent_char = self.text[self.pos]\n\t\t\n\t\tif current_char.isdigit() or current_char.isalpha():",
"def end(self):\n return self._t0 + self.length"
] |
[
"0.7161437",
"0.69371384",
"0.6831333",
"0.6623254",
"0.6569478",
"0.6566383",
"0.6510539",
"0.6465891",
"0.64181924",
"0.63650733",
"0.63393337",
"0.63297063",
"0.630279",
"0.63026834",
"0.62961084",
"0.6288676",
"0.6287439",
"0.62656957",
"0.6261917",
"0.6245131",
"0.6245131",
"0.6245131",
"0.62248707",
"0.61966515",
"0.61855966",
"0.6164462",
"0.61525583",
"0.6078044",
"0.6068443",
"0.6063622"
] |
0.87383765
|
0
|
Name of the tokenizer to use when sending a request.
|
def tokenizer_name(self) -> str:
return self._tokenizer_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_tokenizer_class(model_name):\n return OpenAIGPTTokenizer if model_name == 'openai-gpt' else GPT2Tokenizer",
"def token_type(self) -> str:\n return self._token_type",
"def token_type(self) -> str:\n return self._token_type",
"def token(self) -> str:",
"def token(self) -> str:\n raise NotImplementedError",
"def parse(self, tokenizer):\n pass",
"def set_token_name(self, name):\n output = self.copy()\n output.token_name = name\n return output",
"def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")",
"def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")",
"def token(self) -> Optional[str]:\n return self._builder._token",
"def token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token\")",
"def __str__(self):\n return 'Tokenizer({type}, {value})'.format(\n type=self.type,\n value=repr(self.value)\n )",
"def token_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"token_type\")",
"def __get_type(self):\r\n if self.__tokenizer.token_type() == TYPES_DIC[\"IDENTIFIER\"]:\r\n return self.__tokenizer.identifier()\r\n else:\r\n return self.__tokenizer.keyword()",
"def token(self):\n\n return self.__token",
"def token_name(self, other: SkupperSite) -> str:\n return other.name",
"def get_token(self):\n return self.__token",
"def get_token(self):\n return self.__token",
"def custom_tokenizer(self, nlp):\n # nlp.tokenizer = custom_tokenizer(nlp)\n return Tokenizer(\n nlp.vocab,\n prefix_search=regex.PREFIX_RE.search,\n suffix_search=regex.SUFFIX_RE.search,\n infix_finditer=regex.INFIX_RE.finditer,\n token_match=regex.SIMPLE_URL_RE.match,\n )",
"def custom_tokenizer(nlp, infix_reg):\n return Tokenizer(nlp.vocab, infix_finditer=infix_reg.finditer)",
"def create_tokenizer(dataset):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)\n lang_tokenizer.fit_on_texts([x['input'] for x in dataset])\n return lang_tokenizer",
"def get_tokenizer(tokenizer_type: str, tokenizer_vocab_file: str, pretrained_model_name_or_path: str):\n return get_tokenizer_from_registry(tokenizer_type)(\n vocab_file=tokenizer_vocab_file,\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n )",
"def token(self):\r\n return self._token",
"def token(self):\n return self._token",
"def token(self):\n return self._token",
"def token(self):\n return self._token",
"def sentence_tokenizer(text):\n return sent_tokenize(text)",
"def token_str(self) -> Optional[str]:\n return self._token_str",
"def tokenizer(path, **kwargs):\n\n return AutoTokenizer.from_pretrained(path, **kwargs) if isinstance(path, str) else path",
"def token(self):\n token = self.lex.token()\n if token is not None:\n print(token)\n return token"
] |
[
"0.6726041",
"0.6509525",
"0.6509525",
"0.63465",
"0.62254226",
"0.6080051",
"0.60489094",
"0.6039997",
"0.6039997",
"0.6030399",
"0.5906061",
"0.58846533",
"0.586634",
"0.5844633",
"0.58108366",
"0.5780667",
"0.57521707",
"0.57521707",
"0.57500947",
"0.57380617",
"0.57223713",
"0.5694347",
"0.56919205",
"0.56605643",
"0.56605643",
"0.56605643",
"0.56554216",
"0.5643707",
"0.5627245",
"0.5614364"
] |
0.8316068
|
0
|
Convert serialized json to Mobile component
|
def from_json(json_data):
direction = json_data.get('direction')
max_momentum = json_data.get('max_momentum')
current_momentum = json_data.get('current_momentum')
max_speed = json_data.get('max_speed')
current_speed = json_data.get('current_speed')
rowing = json_data.get('rowing')
return Mobile(direction=direction, max_momentum=max_momentum, current_momentum=current_momentum,
max_speed=max_speed, current_speed=current_speed, rowing=rowing)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, mobile_attck_json):\n self.mobile_attck = mobile_attck_json",
"def test_wire_parsing_from_json(self) -> None:\n grid_unit = StackupTestHelper.mfr_grid()\n metal_dict = StackupTestHelper.create_test_metal(3) # type: Dict[str, Any]\n direct_metal = Metal.from_setting(grid_unit, StackupTestHelper.create_test_metal(3)) # type: Metal\n json_string = json.dumps(metal_dict, cls=HammerJSONEncoder) # type: str\n json_metal = Metal.from_setting(grid_unit, json.loads(json_string)) # type: Metal\n self.assertTrue(direct_metal, json_metal)",
"def deserialize(self, data):",
"def json_to_model(cls, data):\n m = cls.to_model(data)\n m.raw = data\n cls._unlock_unmarshalling(m)\n cls.set_additional_fields(m, data)\n return m",
"def from_json(self, content):\r\n return simplejson.loads(content)",
"def _trait_from_json(x, self):\n return x",
"def deserialize(self, obj):\n raise NotImplementedError",
"def unmarshal(self):\n ...",
"def fromJson(json):\r\n raise NotImplementedError(\"Returns instance\")",
"def deserialize(self, value):\n raise NotImplementedError",
"def json(self) -> Any:\n return json.loads(self)",
"def fromJSON(cls, data):\n return SERIALISE_CLASS_LOOKUP[data['timeref_type']].fromJSON(data)",
"def _json_to_obj(cls, serialized_str):\n json_dict = json.loads(serialized_str)\n if 'metadata' in json_dict.keys():\n metadata_dict = json_dict['metadata']\n return Metadata(metadata_dict)",
"def to_representation(self, value):\n if isinstance(value, str):\n return json.loads(value)\n return value",
"def _json_to_obj(cls, serialized_str):\n\n ret = cls()\n json_response = json.loads(serialized_str)\n\n # Creating a deep copy just in case later we want the original resp\n json_dict = copy.deepcopy(json_response)\n\n # Replacing attribute response names if they are Python reserved words\n # with a trailing underscore, for ex. id for id_ or if they have a\n # special character within the name replacing it for an underscore too\n json_dict = cls._replace_dict_key(\n json_dict, 'id', 'id_', recursion=True)\n\n if cls.PORTS in json_dict:\n ports = json_dict.get(cls.PORTS)\n for port in ports:\n ret.append(Port(**port))\n return ret",
"def _deserialize_object(value):\n return value",
"def test_iot_msg_to_str_v1(self):\n m = IotMsg(\"test\",MsgType.CMD,msg_class=\"binary\",msg_subclass=\"switch\",uuid_=\"e48fbe58-3aaf-442d-b769-7a24aed8b716\")\n m.set_default(True)\n m.set_properties({\"p1\":165})\n mstr = IotMsgConverter.iot_msg_to_str(PayloadType.JSON_IOT_MSG_V1, m)\n self.assertIsInstance(mstr,basestring)\n jobj = json.loads(mstr)\n self.assertEqual(jobj[\"def\"][\"value\"],True)\n self.assertEqual(jobj[\"type\"],\"cmd\")\n self.assertEqual(jobj[\"cls\"],\"binary\")\n self.assertEqual(jobj[\"subcls\"],\"switch\")",
"def deserialize(self, jsonData):\n super(AnyPin, self).deserialize(jsonData)\n if \"currDataType\" in jsonData:\n self.setType(jsonData[\"currDataType\"])\n\n pinClass = findPinClassByType(self.activeDataType)\n try:\n self.setData(json.loads(\n jsonData['value'], cls=pinClass.jsonDecoderClass()))\n except:\n self.setData(self.defaultValue())\n\n self.updateError([])",
"def set_model_from_json(self, json):\n self.enable_auto_reply = get_value_from_json(json, \"enableAutoReply\")\n self.response_subject = get_value_from_json(json, \"responseSubject\")\n self.response_body_plain_text = json.get(\"responseBodyPlainText\")\n self.response_body_html = json.get(\"responseBodyHtml\")\n self.restrict_to_contacts = get_value_from_json(json, \"restrictToContacts\")\n self.restrict_to_domain = json.get(\"restrictToDomain\")\n self.start_time = get_value_from_json(json, \"startTime\")\n self.end_time = get_value_from_json(json, \"endTime\")\n return self",
"def _post_deserialize (self):\n pass",
"def set_values_from_json_map(self,\n test_data):\n self.reset_fields()\n '''\n Sonata message fields\n '''\n if \"mtype\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['mtype'] = test_data.pop('mtype')\n\n '''\n Attempts to find a way to form bin \n number of the size appropriate for Sonata\n messages\n '''\n #mtype_bin = bin(self.fields_data.get(\"mtype\"))\n #mtype_barr = BitArray(mtype_bin)\n\n #field_length = BitArray(bin='0b00')\n #field_length = BitArray()\n #field_length.append(mtype_barr)\n #self.mtype = BitArray(field_length) # тип сообщения: 00 - нав.данные, 01 - текст Ж\n #self.mtype = BitArray(bin=str(mtype_str))\n\n '''Form the proper string for mtype'''\n # mtype_str = format(self.fields_data.get(\"mtype\"),'b')\n mtype_str = '{0:02b}'.format(self.fields_data.get(\"mtype\"))\n self.mtype = BitArray(bin=mtype_str) # тип сообщения: 00 - нав.данные, 01 - текст Ж\n\n\n if \"sonata_id\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STRUCTURE AVAILABLE \n '''\n self.fields_data['sonata_id'] = test_data.pop('sonata_id')\n\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #sonata_id_bin = bin(self.fields_data.get('sonata_id'))\n #mtype_barr = BitArray(sonata_id_bin)\n\n #field_length = BitArray(bin='0b000000000000')\n #field_length = BitArray()\n #field_length.append(mtype_barr)\n #self.sonata_id = BitArray(field_length) # sonata_id\n\n '''Form the proper string for sonata_id'''\n #self.sonata_id = BitArray(bin=str(sonata_id_str))\n #sonata_id_str = format(self.fields_data.get('sonata_id'), 'b')\n sonata_id_str = '{0:012b}'.format(self.fields_data.get('sonata_id'), 'b')\n self.sonata_id = BitArray(bin=sonata_id_str)\n\n\n if \"lat\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['lat'] = test_data.pop('lat')\n '''\n TODO: Maybe 4 fields for lattitude are better. 
NOT just ONE\n DONE\n '''\n '''SET degrees'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lat_deg = BitArray(bin='0b0000000')\n #lat_deg = BitArray()\n #lat_deg_bin = bin(self.fields_data.get(\"lat\").get(\"deg\"))\n #deg_barr_int = self.fields_data.get(\"lat\").get(\"deg\")\n #lat_deg_barr_int = BitArray(int=deg_barr_int, length=8)\n #lat_deg_barr = BitArray(lat_deg_bin)\n #lat_deg.append(lat_deg_barr)\n\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # lat_deg_str = format(self.fields_data.get(\"lat\").get(\"deg\"), 'b')\n lat_deg_str = '{0:07b}'.format(self.fields_data.get(\"lat\").get(\"deg\"))\n self.lat_deg = BitArray(bin=lat_deg_str)\n\n\n\n '''SET minutes'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lat_min = BitArray(bin='0b0000000')\n #lat_min = BitArray()\n #lat_min_bin = bin(self.fields_data.get(\"lat\").get(\"min\"))\n\n #lat_min_barr_int = BitArray(int=int(self.fields_data.get(\"lat\").get(\"min\")), length=8)\n #lat_min_barr = BitArray(lat_min_bin)\n #lat_min.append(lat_min_barr)\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n #lat_min_str = format(self.fields_data.get(\"lat\").get(\"min\"), 'b')\n lat_min_str = '{0:07b}'.format(self.fields_data.get(\"lat\").get(\"min\"))\n self.lat_min = BitArray(bin=lat_min_str)\n\n '''Set seconds'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lat_sec = BitArray(bin='0b0000000')\n #lat_sec = BitArray()\n #lat_sec_bin = bin(self.fields_data.get(\"lat\").get(\"sec\"))\n #lat_sec_str = format(self.fields_data.get(\"lat\").get(\"sec\"), 'b')\n #lat_sec_barr = BitArray(lat_sec_bin)\n #lat_sec.append(lat_sec_barr)\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # lat_sec_str = format(self.fields_data.get(\"lat\").get(\"sec\"), 'b')\n lat_sec_str = '{0:07b}'.format(self.fields_data.get(\"lat\").get(\"sec\"))\n self.lat_sec = BitArray(bin=lat_sec_str)\n\n\n '''Set tens of seconds'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lat_tens_sec = BitArray(bin='0b0000')\n #lat_tens_sec = BitArray()\n #lat_tens_sec_bin = bin(self.fields_data.get(\"lat\").get(\"tens_sec\"))\n #lat_tens_sec_str = format(self.fields_data.get(\"lat\").get(\"tens_sec\"), 'b')\n #lat_tens_sec_barr = BitArray(lat_tens_sec_bin)\n #lat_tens_sec.append(lat_tens_sec_barr)\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # lat_tens_sec_str = format(self.fields_data.get(\"lat\").get(\"tens_sec\"), 'b')\n lat_tens_sec_str = '{0:04b}'.format(self.fields_data.get(\"lat\").get(\"tens_sec\"))\n self.lat_tens_sec = BitArray(bin=lat_tens_sec_str)\n\n '''Set full value'''\n #self.lat = BitArray()\n\n #self.lat.append(lat_deg)\n #self.lat.append(lat_min)\n #self.lat.append(lat_sec)\n #self.lat.append(lat_tens_sec)\n\n self.lat = self.lat_deg+self.lat_min+self.lat_sec+self.lat_tens_sec\n\n\n if \"lon\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['lon'] = test_data.pop('lon')\n\n '''\n TODO: Maybe 4 fields for lattitude are better. 
NOT just ONE\n DONE\n '''\n '''SET degrees'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lon_deg = BitArray(bin='0b0000000')\n #lon_deg = BitArray()\n #lon_deg_bin = bin(self.fields_data.get(\"lon\").get(\"deg\"))\n #lon_deg_str = format(self.fields_data.get(\"lon\").get(\"deg\"), 'b')\n #lon_deg_barr = BitArray(lon_deg_bin)\n #lon_deg.append(lon_deg_barr)\n\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # lon_deg_str = format(self.fields_data.get(\"lon\").get(\"deg\"), 'b')\n lon_deg_str = '{0:08b}'.format(self.fields_data.get(\"lon\").get(\"deg\"))\n self.lon_deg = BitArray(bin=lon_deg_str)\n\n '''Set minutes'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lon_min = BitArray(bin='0b0000000')\n #lon_min = BitArray()\n #lon_min_bin = bin(self.fields_data.get(\"lon\").get(\"min\"))\n #lon_min_str = format(self.fields_data.get(\"lon\").get(\"min\"), 'b')\n #lon_min_barr = BitArray(lon_min_bin)\n #lon_min.append(lon_min_barr)\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # lon_min_str = format(self.fields_data.get(\"lon\").get(\"min\"), 'b')\n lon_min_str = '{0:07b}'.format(self.fields_data.get(\"lon\").get(\"min\"))\n self.lon_min = BitArray(bin=lon_min_str)\n\n '''Set seconds'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lat_sec = BitArray(bin='0b0000000')\n #lon_sec = BitArray()\n #lon_sec_bin = bin(self.fields_data.get(\"lon\").get(\"sec\"))\n #lon_sec_str = format(self.fields_data.get(\"lon\").get(\"sec\"), 'b')\n #lon_sec_barr = BitArray(lon_sec_bin)\n #lon_sec.append(lon_sec_barr)\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # lon_sec_str = format(self.fields_data.get(\"lon\").get(\"sec\"), 'b')\n lon_sec_str = '{0:07b}'.format(self.fields_data.get(\"lon\").get(\"sec\"))\n self.lon_sec = BitArray(bin=lon_sec_str)\n\n\n '''Set tens of seconds'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #lat_tens_sec = BitArray(bin='0b0000')\n #lon_tens_sec = BitArray()\n #lon_tens_sec_bin = bin(self.fields_data.get(\"lon\").get(\"tens_sec\"))\n #lon_tens_sec_str = format(self.fields_data.get(\"lon\").get(\"tens_sec\"), 'b')\n #lon_tens_sec_barr = BitArray(lon_tens_sec_bin)\n #lon_tens_sec.append(lon_tens_sec_barr)\n\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # lon_tens_sec_str = format(self.fields_data.get(\"lon\").get(\"tens_sec\"), 'b')\n lon_tens_sec_str = '{0:04b}'.format(self.fields_data.get(\"lon\").get(\"tens_sec\"))\n self.lon_tens_sec = BitArray(bin=lon_tens_sec_str)\n\n\n '''Set full value'''\n #self.lon = BitArray()\n #self.lon.append(lat_deg)\n #self.lon.append(lat_min)\n #self.lon.append(lat_sec)\n #self.lon.append(lat_tens_sec)\n\n self.lon = self.lon_deg+self.lon_min+self.lon_sec+self.lon_tens_sec\n\n\n if \"vel\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['vel'] = test_data.pop('vel')\n\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #vel_hkm_h_bin = bin(self.fields_data.get('vel').get(\"hkm_h\"))\n #vel_hkm_h_str = format(self.fields_data.get('vel').get(\"hkm_h\"), 'b')\n #vel_hkm_h_barr = BitArray(vel_hkm_h_bin)\n\n #vel_km_h_bin = bin(self.fields_data.get('vel').get(\"km_h\"))\n #vel_km_h_str = format(self.fields_data.get('vel').get(\"km_h\"), 'b')\n #vel_km_h_barr = BitArray(vel_km_h_bin)\n\n '''SET 
hkm_h'''\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # vel_hkm_h_str = format(self.fields_data.get('vel').get(\"hkm_h\"), 'b')\n vel_hkm_h_str = '{0:03b}'.format(self.fields_data.get('vel').get(\"hkm_h\"))\n self.vel_hkm_h = BitArray(bin=vel_hkm_h_str)\n\n '''SET km_h'''\n '''THIS STRING FORMATION SHOULD WORK Fingers crossed'''\n # vel_km_h_str = format(self.fields_data.get('vel').get(\"km_h\"), 'b')\n vel_km_h_str = '{0:07b}'.format(self.fields_data.get('vel').get(\"km_h\"))\n self.vel_km_h = BitArray(bin=vel_km_h_str)\n\n # field_length = BitArray(bin='0b000000000000')\n #field_length = BitArray()\n #field_length.append(vel_barr)\n #self.vel = BitArray(field_length) # sonata_id\n\n '''Set full value'''\n self.vel = self.vel_hkm_h+self.vel_km_h\n\n\n\n if \"course\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['course'] = test_data.pop('course')\n '''\n TODO: Maybe 4 fields for lattitude are better. NOT just ONE\n DONE\n '''\n '''SET course'''\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #course_deg = BitArray(bin='0b0000000')\n #course_deg = BitArray()\n #course_deg_bin = bin(self.fields_data.get(\"course\").get(\"deg\"))\n #course_deg_str = format(self.fields_data.get('course').get(\"deg\"), 'b')\n #course_deg_barr = BitArray(course_deg_bin)\n #course_deg.append(course_deg_barr)\n\n #course_tens_deg = BitArray()\n #course_tens_deg_bin = bin(self.fields_data.get(\"course\").get(\"tens_deg\"))\n #course_tens_deg_str = format(self.fields_data.get('course').get(\"tens_deg\"), 'b')\n #course_tens_deg_barr = BitArray(course_tens_deg_bin)\n #course_tens_deg.append(course_tens_deg_barr)\n # self.course = BitArray()\n # self.course.append(course_deg)\n # self.course.append(course_tens_deg)\n\n '''SET course_deg'''\n # course_deg_str = format(self.fields_data.get('course').get(\"deg\"), 'b')\n course_deg_str = '{0:02b}'.format(self.fields_data.get('course').get(\"deg\"))\n self.course_deg = BitArray(bin=course_deg_str)\n\n '''SET course_tens_deg'''\n # course_tens_deg_str = format(self.fields_data.get('course').get(\"tens_deg\"), 'b')\n course_tens_deg_str = '{0:07b}'.format(self.fields_data.get('course').get(\"tens_deg\"))\n self.course_tens_deg = BitArray(bin=course_tens_deg_str)\n\n '''Set full value'''\n self.course = self.course_deg + self.course_tens_deg\n\n\n if \"state\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['state'] = test_data.pop('state')\n\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #state_str = format(self.fields_data.get(\"state\"),'b')\n #state_bin = bin(self.fields_data.get(\"state\"))\n #state_barr = BitArray(mtype_bin)\n #field_length = BitArray(bin='0b00')\n #field_length = BitArray()\n #field_length.append(state_barr)\n #self.state = BitArray(field_length) # тип сообщения: 00 - нав.данные, 01 - текст Ж\n\n\n '''SET state'''\n # state_str = format(self.fields_data.get(\"state\"),'b')\n state_str = '{0:03b}'.format(self.fields_data.get(\"state\"))\n\n\n '''Set full value'''\n self.state = BitArray(bin=state_str) # A(ctual), N(orth), E(ast)\n\n\n if \"tail\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['tail'] = test_data.pop('tail')\n\n tail_int = 
self.fields_data.get(\"tail\")\n self.tail = BitArray(tail_int) # Датчики и каналы управления игнорируются плагином. = bin(self.fields_data.get(\"state\"))\n\n\n if \"signal_lvl\" in test_data:\n '''\n SET the VALUES FOR FURTHER \n COMPARISON RIGHT HERE SINCE \n the \n WE HAVE TARGET STUCTURE AVAILABLE \n '''\n self.fields_data['signal_lvl'] = test_data.pop('signal_lvl')\n\n '''Attempts to find a way to form bin number of the size appropriate for Sonata'''\n #signal_lvl_str = format(self.fields_data.get(\"signal_lvl\"),'b')\n #signal_lvl_bin = bin(self.fields_data.get(\"signal_lvl\"))\n #signal_lvl_barr = BitArray(mtype_bin)\n\n #field_length = BitArray(bin='0b00')\n #field_length = BitArray()\n #field_length.append(signal_lvl_barr)\n #self.signal_lvl = BitArray(field_length) # тип сообщения: 00 - нав.данные, 01 - текст Ж\n #self.signal_lvl = BitArray(bin=signal_lvl_str)\n\n '''SET signal_lvl'''\n # signal_lvl_str = format(self.fields_data.get(\"signal_lvl\"),'b')\n signal_lvl_str = '{0:04b}'.format(self.fields_data.get(\"signal_lvl\"))\n\n\n '''Set full value'''\n self.signal_lvl = BitArray(bin=signal_lvl_str)\n\n\n\n '''|---------------------------------------------------------------------------------------------------------'''\n '''\n Individual fields combined into a whole SONATA message\n First: converted into HEX\n Second: checksummed and prepended with $ \n '''\n #sonata_data = self.mtype + self.sonata_id + self.lat + self.lon + self.vel + self.course + self.state + self.tail + self.signal_lvl\n #self.sonata_msg = sonata_data.hex.upper()\n #self.sonata_data_chsumed = '$' + self.add_checksum(self.sonata_msg)\n\n #self.logger.debug(\": \" + self.sonata_msg + \" <- Sonata message composed from stored values: \")\n #self.logger.debug(\": \" + self.sonata_data_chsumed + \" <- Sonata message composed from stored values: \")\n\n '''\n Individual fields combined into a whole SONATA message\n First: converted into HEX\n Second: checksummed and prepended with $ \n '''\n\n '''\n However there are exceptions\n designed for negative testing \n '''\n if \"fail\" in test_data and (test_data['fail'] == 'bad_chsum'):\n\n '''Store search pattern and test message type (positive/negative)'''\n self.fields_data['fail'] = test_data.pop('fail')\n self.fields_data['log_pttrn'] = test_data.pop('log_pttrn')\n\n self.logger.debug(\n \"============================================================================================\")\n\n self.sonata_data = self.mtype + self.sonata_id + self.lat + self.lon + self.vel + self.course + self.state + self.tail + self.signal_lvl\n self.sonata_msg = self.sonata_data.hex.upper()\n\n self.logger.debug(\"============================================================================================\")\n self.logger.debug(\": \" + self.sonata_msg + \" <- Sonata message composed from stored values: \")\n\n self.sonata_data_chsumed = '$' + self.add_checksum_wrong(self.sonata_msg)\n self.logger.debug(\": \" + self.sonata_data_chsumed + \" <- Sonata message WAS checksumed INCORRECTLY: \")\n self.sonata_data_chsumed = ''.join([self.sonata_data_chsumed, '\\n\\n'])\n self.logger.debug(\": \" + self.sonata_data_chsumed + \" <- Sonata message WAS NOT WAS checksumed INCORRECTLY but TAILED \")\n self.logger.debug(\"============================================================================================\")\n\n test_data_combined = (self.sonata_data_chsumed, copy.deepcopy(self.fields_data), \"fail\")\n return test_data_combined\n\n if \"fail\" in test_data and (test_data['fail'] == 
'no_$'):\n\n '''Store search pattern right away'''\n self.fields_data['fail'] = test_data.pop('fail')\n self.fields_data['log_pttrn'] = test_data.pop('log_pttrn')\n\n\n self.logger.debug(\n \"============================================================================================\")\n\n self.sonata_data = self.mtype + self.sonata_id + self.lat + self.lon + self.vel + self.course + self.state + self.tail + self.signal_lvl\n self.sonata_msg = self.sonata_data.hex.upper()\n self.logger.debug(\": \" + self.sonata_msg + \" <- Sonata message composed from stored values: \")\n self.sonata_data_chsumed = '#' + self.add_checksum(self.sonata_msg)\n self.logger.debug(\": \" + self.sonata_data_chsumed + \" <- Sonata message checksumed but no $: \")\n self.sonata_data_chsumed = ''.join([self.sonata_data_chsumed, '\\n\\n'])\n\n self.logger.debug(\n \"============================================================================================\")\n\n test_data_combined = (self.sonata_data_chsumed, copy.deepcopy(self.fields_data),\"fail\")\n return test_data_combined\n\n if \"fail\" in test_data and (test_data['fail'] == 'no_tail'):\n\n '''Store search pattern right away'''\n\n self.fields_data['fail'] = test_data.pop('fail')\n self.fields_data['log_pttrn'] = test_data.pop('log_pttrn')\n\n\n self.logger.debug(\"============================================================================================\")\n\n self.sonata_data = self.mtype + self.sonata_id + self.lat + self.lon + self.vel + self.course + self.state + self.tail + self.signal_lvl\n self.sonata_msg = self.sonata_data.hex.upper()\n self.logger.debug(\": \" + self.sonata_msg + \" <- Sonata message composed from stored values: \")\n self.sonata_data_chsumed = '$' + self.add_checksum(self.sonata_msg)\n self.logger.debug(\": \" + self.sonata_data_chsumed + \" <- Sonata message checksumed: but no TAIL \")\n\n self.logger.debug(\"============================================================================================\")\n\n test_data_combined = (self.sonata_data_chsumed, copy.deepcopy(self.fields_data),\"fail\")\n return test_data_combined\n\n else:\n '''Store search keys for values that ought to match'''\n self.fields_data['pass'] = test_data.pop('pass')\n\n\n self.logger.debug(\"============================================================================================\")\n\n self.sonata_data = self.mtype + self.sonata_id + self.lat + self.lon + self.vel + self.course + self.state + self.tail + self.signal_lvl\n self.sonata_msg = self.sonata_data.hex.upper()\n self.logger.debug(\": \" + self.sonata_msg + \" <- Sonata message composed from stored values: \")\n self.sonata_data_chsumed = '$' + self.add_checksum(self.sonata_msg)\n self.logger.debug(\": \" + self.sonata_data_chsumed + \" <- Sonata message checksumed: \")\n self.sonata_data_chsumed = ''.join([self.sonata_data_chsumed, '\\n\\n'])\n self.logger.debug(\": \" + self.sonata_data_chsumed + \" <- Sonata message checksumed and TAILED\")\n\n self.logger.debug(\"============================================================================================\")\n test_data_combined = (self.sonata_data_chsumed, copy.deepcopy(self.fields_data),\"pass\")\n return test_data_combined",
"def serialize(self, data):",
"def from_dict(cls, dikt) -> \"SapDataIm\":\n return util.deserialize_model(dikt, cls)",
"def __from_json__(self, properties: dict):\r\n # Look for units first so the temperatures are set correctly.\r\n value = properties.pop(\"temperature_scale\", None)\r\n if value is not None:\r\n eval(f\"self.set_temperature_scale('{value}')\")\r\n\r\n # Let superclass handle the rest\r\n super().__from_json__(properties)",
"def deserialize(self, payload: str) -> object:\n raise NotImplementedError()",
"def _to_jsonrpc_obj(self, jsonrpcstr):\n return jsonrpc.JsonRpcData.parse(jsonrpcstr)",
"def to_internal_value(self, data):\n if isinstance(data, str):\n try:\n return json.loads(data)\n except ValueError as e:\n # invalid JSON\n raise serializers.ValidationError(str(e)) from e\n return data",
"def to_model(self, payload):\n return payload",
"def from_json(cls, b):\n return cls.from_dict(json.loads(b))",
"def test_constructor_serialized_json(self):\n content = json.dumps({\n \"nb\": \"test-nb\",\n \"en\": \"test-en\",\n })\n structure = MultiLingualTextStructure(content, use_default_for_empty=True)\n self.assertEqual(structure[\"nb\"], \"test-nb\")\n self.assertEqual(structure[\"en\"], \"test-en\")"
] |
[
"0.6125935",
"0.60722554",
"0.59188205",
"0.5756421",
"0.55791175",
"0.5515973",
"0.53879184",
"0.5372095",
"0.5279038",
"0.5270271",
"0.52698904",
"0.5241729",
"0.52389807",
"0.52368647",
"0.52153134",
"0.519805",
"0.5191095",
"0.51542693",
"0.51514065",
"0.5138934",
"0.5133385",
"0.5128154",
"0.5125978",
"0.50976914",
"0.5075192",
"0.50685406",
"0.5065045",
"0.506434",
"0.5062594",
"0.5058504"
] |
0.62281847
|
0
|
Get the grant counts for real/synth benes. If application != None, the counts are for a specific application.
|
def get_grant_bene_counts(application=None):
# Init counts dict
counts_returned = {}
# Grant real/synth bene counts (includes granted to multiple apps)
start_time = datetime.utcnow().timestamp()
# Setup base queryset
grant_queryset = DataAccessGrant.objects
if application:
grant_queryset = grant_queryset.filter(application=application)
# Get total table count
counts_returned["total"] = grant_queryset.count()
real_grant_queryset = grant_queryset.filter(
~Q(beneficiary__crosswalk___fhir_id__startswith="-")
& ~Q(beneficiary__crosswalk___fhir_id="")
& Q(beneficiary__crosswalk___fhir_id__isnull=False)
).values("beneficiary")
synthetic_grant_queryset = grant_queryset.filter(
Q(beneficiary__crosswalk___fhir_id__startswith="-")
& ~Q(beneficiary__crosswalk___fhir_id="")
& Q(beneficiary__crosswalk___fhir_id__isnull=False)
).values("beneficiary")
counts_returned["real"] = real_grant_queryset.count()
counts_returned["synthetic"] = synthetic_grant_queryset.count()
counts_returned["elapsed"] = round(datetime.utcnow().timestamp() - start_time, 3)
# Grant real/synth bene distinct counts (excludes granted to multiple apps)
if application is None:
start_time = datetime.utcnow().timestamp()
counts_returned["real_deduped"] = real_grant_queryset.distinct().count()
counts_returned[
"synthetic_deduped"
] = synthetic_grant_queryset.distinct().count()
counts_returned["deduped_elapsed"] = round(
datetime.utcnow().timestamp() - start_time, 3
)
# Archived grant real/synth bene distinct counts (excludes granted to multiple apps and multiple archived records)
start_time = datetime.utcnow().timestamp()
# Setup base queryset
archived_queryset = ArchivedDataAccessGrant.objects
if application:
archived_queryset = archived_queryset.filter(application=application)
# Get total table count
counts_returned["archived_total"] = archived_queryset.count()
real_archived_queryset = archived_queryset.filter(
~Q(beneficiary__crosswalk___fhir_id__startswith="-")
& ~Q(beneficiary__crosswalk___fhir_id="")
& Q(beneficiary__crosswalk___fhir_id__isnull=False)
).values("beneficiary")
synthetic_archived_queryset = archived_queryset.filter(
Q(beneficiary__crosswalk___fhir_id__startswith="-")
& ~Q(beneficiary__crosswalk___fhir_id="")
& Q(beneficiary__crosswalk___fhir_id__isnull=False)
).values("beneficiary")
counts_returned["archived_real_deduped"] = real_archived_queryset.distinct().count()
counts_returned[
"archived_synthetic_deduped"
] = synthetic_archived_queryset.distinct().count()
counts_returned["archived_deduped_elapsed"] = round(
datetime.utcnow().timestamp() - start_time, 3
)
# Both Grant and Archived grant (UNION) real/synth bene distinct counts
start_time = datetime.utcnow().timestamp()
real_union_queryset = real_grant_queryset.union(real_archived_queryset)
synthetic_union_queryset = synthetic_grant_queryset.union(
synthetic_archived_queryset
)
# Django 3.2.13 upgrade: seems need to re-write the query to work around?
# django.db.utils.NotSupportedError: Calling QuerySet.distinct() after union() is not supported
# and below is the quote from Django doc:
#
# union()
# union(*other_qs, all=False)
# Uses SQL UNION operator to combine the results of two or more QuerySets. For example:
#
# >>> qs1.union(qs2, qs3)
# The UNION operator selects only distinct values by default. To allow duplicate values, use the all=True argument.
# counts_returned[
# "grant_and_archived_real_deduped"
# ] = real_union_queryset.distinct().count()
counts_returned[
"grant_and_archived_real_deduped"
] = real_union_queryset.count()
counts_returned[
"grant_and_archived_synthetic_deduped"
] = synthetic_union_queryset.count()
counts_returned["grant_and_archived_deduped_elapsed"] = round(
datetime.utcnow().timestamp() - start_time, 3
)
return counts_returned
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_beneficiary_grant_app_pair_counts():\n\n # Init counts dict\n counts_returned = {}\n\n # Grant real/synth bene counts (includes granted to multiple apps)\n start_time = datetime.utcnow().timestamp()\n\n # Setup base queryset\n grant_queryset = DataAccessGrant.objects.values(\"beneficiary\", \"application\")\n\n real_grant_queryset = grant_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n synthetic_grant_queryset = grant_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n counts_returned[\"grant_total\"] = grant_queryset.count()\n counts_returned[\"real_grant\"] = real_grant_queryset.count()\n counts_returned[\"synthetic_grant\"] = synthetic_grant_queryset.count()\n\n # Setup base queryset\n grant_archived_queryset = ArchivedDataAccessGrant.objects.values(\n \"beneficiary\", \"application\"\n )\n\n real_grant_archived_queryset = grant_archived_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n synthetic_grant_archived_queryset = grant_archived_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n # Get total table count\n counts_returned[\"grant_archived_total\"] = grant_archived_queryset.count()\n counts_returned[\"real_grant_archived\"] = real_grant_archived_queryset.count()\n counts_returned[\n \"synthetic_grant_archived\"\n ] = synthetic_grant_archived_queryset.count()\n\n \"\"\"\n Bene<->App pair differences\n \"\"\"\n # Pairs in Grant but not in ArchivedGrant.\n counts_returned[\"grant_vs_archived_difference_total\"] = grant_queryset.difference(\n grant_archived_queryset\n ).count()\n counts_returned[\n \"real_grant_vs_archived_difference_total\"\n ] = real_grant_queryset.difference(real_grant_archived_queryset).count()\n counts_returned[\n \"synthetic_grant_vs_archived_difference_total\"\n ] = synthetic_grant_queryset.difference(synthetic_grant_archived_queryset).count()\n\n # Pairs in ArchivedGrant but not in Grant.\n counts_returned[\n \"archived_vs_grant_difference_total\"\n ] = grant_archived_queryset.difference(grant_queryset).count()\n counts_returned[\n \"real_archived_vs_grant_difference_total\"\n ] = real_grant_archived_queryset.difference(real_grant_queryset).count()\n counts_returned[\n \"synthetic_archived_vs_grant_difference_total\"\n ] = synthetic_grant_archived_queryset.difference(synthetic_grant_queryset).count()\n\n counts_returned[\"elapsed\"] = round(datetime.utcnow().timestamp() - start_time, 3)\n\n return counts_returned",
"def get_beneficiary_counts():\n User = get_user_model()\n\n # Init counts dict\n counts_returned = {}\n\n start_time = datetime.utcnow().timestamp()\n\n queryset = (\n User.objects.select_related()\n .filter(userprofile__user_type=\"BEN\")\n .annotate(\n fhir_id=Min(\"crosswalk___fhir_id\"),\n grant_count=Count(\"dataaccessgrant__application\", distinct=True),\n grant_archived_count=Count(\n \"archiveddataaccessgrant__application\", distinct=True\n ),\n )\n .all()\n )\n\n # Count should be equal to Crosswalk\n counts_returned[\"total\"] = queryset.count()\n\n # Setup base Real queryset\n real_queryset = queryset.filter(~Q(fhir_id__startswith=\"-\") & ~Q(fhir_id=\"\"))\n\n # Setup base synthetic queryset\n synthetic_queryset = queryset.filter(Q(fhir_id__startswith=\"-\") & ~Q(fhir_id=\"\"))\n\n # Real/synth counts. This should match counts using the Crosswalk table directly.\n counts_returned[\"real\"] = real_queryset.count()\n counts_returned[\"synthetic\"] = synthetic_queryset.count()\n\n \"\"\"\n Grant related count section\n \"\"\"\n # Count only if in grant\n counts_returned[\"total_grant\"] = queryset.filter(Q(grant_count__gt=0)).count()\n counts_returned[\"real_grant\"] = real_queryset.filter(Q(grant_count__gt=0)).count()\n counts_returned[\"synthetic_grant\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0)\n ).count()\n\n # Count only if in grant archived\n counts_returned[\"total_grant_archived\"] = queryset.filter(\n Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_archived\"] = real_queryset.filter(\n Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_archived\"] = synthetic_queryset.filter(\n Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in grant OR archived\n counts_returned[\"total_grant_or_archived\"] = queryset.filter(\n Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_or_archived\"] = real_queryset.filter(\n Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_or_archived\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in grant AND archived\n counts_returned[\"total_grant_and_archived\"] = queryset.filter(\n Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_and_archived\"] = real_queryset.filter(\n Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_and_archived\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in grant NOT archived\n counts_returned[\"total_grant_not_archived\"] = queryset.filter(\n Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_not_archived\"] = real_queryset.filter(\n Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_not_archived\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in archived NOT grant\n counts_returned[\"total_archived_not_grant\"] = queryset.filter(\n ~Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_archived_not_grant\"] = real_queryset.filter(\n ~Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_archived_not_grant\"] = synthetic_queryset.filter(\n ~Q(grant_count__gt=0) & 
Q(grant_archived_count__gt=0)\n ).count()\n\n \"\"\"\n Bene grants to applications break down count section\n \"\"\"\n counts_returned[\"real_grant_to_apps_eq_1\"] = real_queryset.filter(\n Q(grant_count=1)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_1\"] = synthetic_queryset.filter(\n Q(grant_count=1)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_2\"] = real_queryset.filter(\n Q(grant_count=2)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_2\"] = synthetic_queryset.filter(\n Q(grant_count=2)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_3\"] = real_queryset.filter(\n Q(grant_count=3)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_3\"] = synthetic_queryset.filter(\n Q(grant_count=3)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_4thru5\"] = real_queryset.filter(\n Q(grant_count__gte=4) & Q(grant_count__lte=5)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_4thru5\"] = synthetic_queryset.filter(\n Q(grant_count__gte=4) & Q(grant_count__lte=5)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_6thru8\"] = real_queryset.filter(\n Q(grant_count__gte=6) & Q(grant_count__lte=8)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_6thru8\"] = synthetic_queryset.filter(\n Q(grant_count__gte=6) & Q(grant_count__lte=8)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_9thru13\"] = real_queryset.filter(\n Q(grant_count__gte=9) & Q(grant_count__lte=13)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_9thru13\"] = synthetic_queryset.filter(\n Q(grant_count__gte=9) & Q(grant_count__lte=13)\n ).count()\n\n counts_returned[\"real_grant_to_apps_gt_13\"] = real_queryset.filter(\n Q(grant_count__gt=13)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_gt_13\"] = synthetic_queryset.filter(\n Q(grant_count__gt=13)\n ).count()\n\n \"\"\"\n Bene archived grants to applications break down count section\n \"\"\"\n counts_returned[\"real_grant_archived_to_apps_eq_1\"] = real_queryset.filter(\n Q(grant_archived_count=1)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_1\"\n ] = synthetic_queryset.filter(Q(grant_archived_count=1)).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_2\"] = real_queryset.filter(\n Q(grant_archived_count=2)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_2\"\n ] = synthetic_queryset.filter(Q(grant_archived_count=2)).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_3\"] = real_queryset.filter(\n Q(grant_archived_count=3)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_3\"\n ] = synthetic_queryset.filter(Q(grant_archived_count=3)).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_4thru5\"] = real_queryset.filter(\n Q(grant_archived_count__gte=4) & Q(grant_archived_count__lte=5)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_4thru5\"\n ] = synthetic_queryset.filter(\n Q(grant_archived_count__gte=4) & Q(grant_archived_count__lte=5)\n ).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_6thru8\"] = real_queryset.filter(\n Q(grant_archived_count__gte=6) & Q(grant_archived_count__lte=8)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_6thru8\"\n ] = synthetic_queryset.filter(\n Q(grant_archived_count__gte=6) & Q(grant_archived_count__lte=8)\n ).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_9thru13\"] = real_queryset.filter(\n Q(grant_archived_count__gte=9) & Q(grant_archived_count__lte=13)\n ).count()\n 
counts_returned[\n \"synthetic_grant_archived_to_apps_eq_9thru13\"\n ] = synthetic_queryset.filter(\n Q(grant_archived_count__gte=9) & Q(grant_archived_count__lte=13)\n ).count()\n\n counts_returned[\"real_grant_archived_to_apps_gt_13\"] = real_queryset.filter(\n Q(grant_archived_count__gt=13)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_gt_13\"\n ] = synthetic_queryset.filter(Q(grant_archived_count__gt=13)).count()\n\n counts_returned[\"elapsed\"] = round(datetime.utcnow().timestamp() - start_time, 3)\n\n return counts_returned",
"def test_get_user_grant(self):\n grants = get_obj_grants()\n\n self.assertTrue(grants.count((\"linode\", Instance)) > 0)\n self.assertTrue(grants.count((\"domain\", Domain)) > 0)\n self.assertTrue(grants.count((\"stackscript\", StackScript)) > 0)\n self.assertTrue(grants.count((\"nodebalancer\", NodeBalancer)) > 0)\n self.assertTrue(grants.count((\"volume\", Volume)) > 0)\n self.assertTrue(grants.count((\"image\", Image)) > 0)\n self.assertTrue(grants.count((\"longview\", LongviewClient)) > 0)\n self.assertTrue(grants.count((\"database\", Database)) > 0)\n self.assertTrue(grants.count((\"firewall\", Firewall)) > 0)",
"def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1",
"def getAppCount(self):\n logger.debug('Getting the number of apps discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='totalAppCount']\"))",
"def hashes_count_by_algs(self):\n return self._db.fetch_pairs(\n \"SELECT hl.alg_id, COUNT(DISTINCT h.summ) FROM `hashes` h, hashlists hl \"\n \"WHERE h.hashlist_id = hl.id AND h.cracked = 0 AND hl.common_by_alg = 0 \"\n \"GROUP BY hl.alg_id\"\n )",
"def context_counts(self, context):\n return (\n self.counts[len(context) + 1][context] if context else self.counts.unigrams\n )",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = appfwlearningsettings()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def get_num_gratings(self):\r\n msg = struct.pack('>2B', 56, 13)\r\n response = self.query(msg)\r\n return response[1]",
"def num_applicants(driver):\n # use two selectors since LI has two methods of showing number\n # of applicants in the applicants-insights driver\n num_applicant_selectors = [\n \"span.applicant-rank-header-text\",\n \"table.other-applicants-table.comparison-table tr td\",\n \"p.number-of-applicants\"\n ]\n for selector in num_applicant_selectors:\n try:\n num_applicants = driver.find_element_by_css_selector(selector).text\n except Exception as e:\n pass\n else:\n return ''.join(list(filter(lambda c: c.isdigit(), num_applicants)))\n return ''",
"def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users",
"def tx_counts(self) -> Dict[str, Dict[str, int]]:\n agent_pbk_to_name = self.game.configuration.agent_pbk_to_name\n result = {agent_name: 0 for agent_name in agent_pbk_to_name.values()}\n results = {\"seller\": result.copy(), \"buyer\": result.copy()}\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n results[\"seller\"][agent_pbk_to_name[tx.seller_pbk]] += 1\n results[\"buyer\"][agent_pbk_to_name[tx.buyer_pbk]] += 1\n\n return results",
"def count(self, tokens):\n return self.counts[tokens]",
"def getCount(self):\n return self.base.get(\"count\", [])",
"def _do_get_provider_count_and_objs(self, **kw):\n if AlgunPermiso(tipo=\"Sistema\").is_met(request.environ):\n count, lista = super(RolTableFiller,\n self)._do_get_provider_count_and_objs(**kw)\n filtrados = []\n for rol in lista:\n if rol.tipo == u\"Sistema\":\n filtrados.append(rol)\n return len(filtrados), filtrados\n return 0, []",
"def count(self, tokens):\n return self._count[tuple(tokens)]",
"def _get_total_ngrams(n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]]) ->Dict[int, Tensor]:\n total_n_grams: Dict[int, Tensor] = defaultdict(lambda : tensor(0.0))\n for n in n_grams_counts:\n total_n_grams[n] = tensor(sum(n_grams_counts[n].values()))\n return total_n_grams",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lbprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e",
"def _get_hit_count(self, database, enquire):\n return self._get_enquire_mset(\n database, enquire, 0, database.get_doccount()\n ).size()",
"def get_programs():\n users = m.User.objects().only('program_name')\n programs_names = [user.short_program_name for user in users]\n\n program_frequencies = collections.Counter(programs_names)\n\n programs = []\n for program, count in program_frequencies.items():\n programs.append({\n 'name': program,\n 'count': count,\n })\n\n return api_util.jsonify({\n 'programs': programs\n })",
"def get_count(username):\n return get_contributor(username)[\"count\"]",
"def summarize_unused_offers(app_queue: Optional[MarathonQueueItem]) -> Dict[str, int]:\n unused_offers = get_app_queue_last_unused_offers(app_queue)\n reasons: Dict[str, int] = defaultdict(lambda: 0)\n for offer in unused_offers:\n for reason in offer[\"reason\"]:\n reasons[reason] += 1\n return reasons",
"def counts(self, only=Task):\n assert only in self._counts\n return dictproxy(self._counts[only])",
"def gatherReadCounts(samplesList, scriptsDir, threads, alignmentPath, outRoot, stype, mode):\n reads = 0\n ext = \".pruned.bam\"\n if mode == \"all_reads\":\n ext = \".bam\"\n for i in range(len(samplesList)):\n bam = os.path.join(alignmentPath, outRoot) + \".\" + stype + \".\" + str(i) + ext\n reads += int(subprocess.run([os.path.join(scriptsDir, \"get_readcount.sh\"), bam, str(threads)], capture_output=True, text=True).stdout.strip(\"\\n\"))\n return reads",
"def freq_count(self):\n #eg: fc = spammy.freq_count()\n count_dict = defaultdict(int)\n for entry in self._train_list:\n if entry in self._vocab_set:\n count_dict[entry] += 1\n return count_dict",
"def nmodes(self):\n if self.mode_selection is not None:\n return len(self.mode_selection)\n else:\n return len(self.mol.normal_modes.modes.freqs)",
"async def get_events(self, time_period) -> Optional[int]:\n\t\tdate_filter = 3600*24*365*20\n\t\tif time_period == TimePeriod.HOUR:\n\t\t\tdate_filter = 3600\n\t\t\n\t\tif time_period == TimePeriod.DAY:\n\t\t\tdate_filter = 3600*24\n\t\t\n\t\tif time_period == TimePeriod.WEEK:\n\t\t\tdate_filter = 3600*24*7\n\t\t\n\t\tif time_period == TimePeriod.MONTH:\n\t\t\tdate_filter = 3600*24*30\n\t\t\n\t\tcount_response = await self._client.get_state(\n\t\t\t'eventcounts.json?oid={0}&ot={1}&secs={2}'.format(self._oid, self._ot, date_filter)\n\t\t)\n\t\tif count_response is None:\n\t\t\treturn 0\n\n\t\treturn count_response['count']",
"def encoders_count(self):\r\n return self._get('encoders_count', {})",
"def CountGlobal():\r\n return _hiew.HiewGate_Names_CountGlobal()"
] |
[
"0.69182026",
"0.5773201",
"0.55573565",
"0.55211645",
"0.5338212",
"0.5242998",
"0.51704586",
"0.513185",
"0.51247793",
"0.5093041",
"0.50661576",
"0.5023353",
"0.49908602",
"0.49895573",
"0.49218634",
"0.4905657",
"0.49004886",
"0.48802036",
"0.48799422",
"0.4876613",
"0.48384765",
"0.4799379",
"0.4795267",
"0.47583207",
"0.475162",
"0.4743104",
"0.47395757",
"0.4735944",
"0.47278512",
"0.47268924"
] |
0.7806188
|
0
|
Get AccessToken, DataAccessGrant and ArchivedDataAccessGrant counts for beneficiary type users.
|
def get_beneficiary_counts():
User = get_user_model()
# Init counts dict
counts_returned = {}
start_time = datetime.utcnow().timestamp()
queryset = (
User.objects.select_related()
.filter(userprofile__user_type="BEN")
.annotate(
fhir_id=Min("crosswalk___fhir_id"),
grant_count=Count("dataaccessgrant__application", distinct=True),
grant_archived_count=Count(
"archiveddataaccessgrant__application", distinct=True
),
)
.all()
)
# Count should be equal to Crosswalk
counts_returned["total"] = queryset.count()
# Setup base Real queryset
real_queryset = queryset.filter(~Q(fhir_id__startswith="-") & ~Q(fhir_id=""))
# Setup base synthetic queryset
synthetic_queryset = queryset.filter(Q(fhir_id__startswith="-") & ~Q(fhir_id=""))
# Real/synth counts. This should match counts using the Crosswalk table directly.
counts_returned["real"] = real_queryset.count()
counts_returned["synthetic"] = synthetic_queryset.count()
"""
Grant related count section
"""
# Count only if in grant
counts_returned["total_grant"] = queryset.filter(Q(grant_count__gt=0)).count()
counts_returned["real_grant"] = real_queryset.filter(Q(grant_count__gt=0)).count()
counts_returned["synthetic_grant"] = synthetic_queryset.filter(
Q(grant_count__gt=0)
).count()
# Count only if in grant archived
counts_returned["total_grant_archived"] = queryset.filter(
Q(grant_archived_count__gt=0)
).count()
counts_returned["real_grant_archived"] = real_queryset.filter(
Q(grant_archived_count__gt=0)
).count()
counts_returned["synthetic_grant_archived"] = synthetic_queryset.filter(
Q(grant_archived_count__gt=0)
).count()
# Count only if in grant OR archived
counts_returned["total_grant_or_archived"] = queryset.filter(
Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)
).count()
counts_returned["real_grant_or_archived"] = real_queryset.filter(
Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)
).count()
counts_returned["synthetic_grant_or_archived"] = synthetic_queryset.filter(
Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)
).count()
# Count only if in grant AND archived
counts_returned["total_grant_and_archived"] = queryset.filter(
Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)
).count()
counts_returned["real_grant_and_archived"] = real_queryset.filter(
Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)
).count()
counts_returned["synthetic_grant_and_archived"] = synthetic_queryset.filter(
Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)
).count()
# Count only if in grant NOT archived
counts_returned["total_grant_not_archived"] = queryset.filter(
Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)
).count()
counts_returned["real_grant_not_archived"] = real_queryset.filter(
Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)
).count()
counts_returned["synthetic_grant_not_archived"] = synthetic_queryset.filter(
Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)
).count()
# Count only if in archived NOT grant
counts_returned["total_archived_not_grant"] = queryset.filter(
~Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)
).count()
counts_returned["real_archived_not_grant"] = real_queryset.filter(
~Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)
).count()
counts_returned["synthetic_archived_not_grant"] = synthetic_queryset.filter(
~Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)
).count()
"""
Bene grants to applications break down count section
"""
counts_returned["real_grant_to_apps_eq_1"] = real_queryset.filter(
Q(grant_count=1)
).count()
counts_returned["synthetic_grant_to_apps_eq_1"] = synthetic_queryset.filter(
Q(grant_count=1)
).count()
counts_returned["real_grant_to_apps_eq_2"] = real_queryset.filter(
Q(grant_count=2)
).count()
counts_returned["synthetic_grant_to_apps_eq_2"] = synthetic_queryset.filter(
Q(grant_count=2)
).count()
counts_returned["real_grant_to_apps_eq_3"] = real_queryset.filter(
Q(grant_count=3)
).count()
counts_returned["synthetic_grant_to_apps_eq_3"] = synthetic_queryset.filter(
Q(grant_count=3)
).count()
counts_returned["real_grant_to_apps_eq_4thru5"] = real_queryset.filter(
Q(grant_count__gte=4) & Q(grant_count__lte=5)
).count()
counts_returned["synthetic_grant_to_apps_eq_4thru5"] = synthetic_queryset.filter(
Q(grant_count__gte=4) & Q(grant_count__lte=5)
).count()
counts_returned["real_grant_to_apps_eq_6thru8"] = real_queryset.filter(
Q(grant_count__gte=6) & Q(grant_count__lte=8)
).count()
counts_returned["synthetic_grant_to_apps_eq_6thru8"] = synthetic_queryset.filter(
Q(grant_count__gte=6) & Q(grant_count__lte=8)
).count()
counts_returned["real_grant_to_apps_eq_9thru13"] = real_queryset.filter(
Q(grant_count__gte=9) & Q(grant_count__lte=13)
).count()
counts_returned["synthetic_grant_to_apps_eq_9thru13"] = synthetic_queryset.filter(
Q(grant_count__gte=9) & Q(grant_count__lte=13)
).count()
counts_returned["real_grant_to_apps_gt_13"] = real_queryset.filter(
Q(grant_count__gt=13)
).count()
counts_returned["synthetic_grant_to_apps_gt_13"] = synthetic_queryset.filter(
Q(grant_count__gt=13)
).count()
"""
Bene archived grants to applications break down count section
"""
counts_returned["real_grant_archived_to_apps_eq_1"] = real_queryset.filter(
Q(grant_archived_count=1)
).count()
counts_returned[
"synthetic_grant_archived_to_apps_eq_1"
] = synthetic_queryset.filter(Q(grant_archived_count=1)).count()
counts_returned["real_grant_archived_to_apps_eq_2"] = real_queryset.filter(
Q(grant_archived_count=2)
).count()
counts_returned[
"synthetic_grant_archived_to_apps_eq_2"
] = synthetic_queryset.filter(Q(grant_archived_count=2)).count()
counts_returned["real_grant_archived_to_apps_eq_3"] = real_queryset.filter(
Q(grant_archived_count=3)
).count()
counts_returned[
"synthetic_grant_archived_to_apps_eq_3"
] = synthetic_queryset.filter(Q(grant_archived_count=3)).count()
counts_returned["real_grant_archived_to_apps_eq_4thru5"] = real_queryset.filter(
Q(grant_archived_count__gte=4) & Q(grant_archived_count__lte=5)
).count()
counts_returned[
"synthetic_grant_archived_to_apps_eq_4thru5"
] = synthetic_queryset.filter(
Q(grant_archived_count__gte=4) & Q(grant_archived_count__lte=5)
).count()
counts_returned["real_grant_archived_to_apps_eq_6thru8"] = real_queryset.filter(
Q(grant_archived_count__gte=6) & Q(grant_archived_count__lte=8)
).count()
counts_returned[
"synthetic_grant_archived_to_apps_eq_6thru8"
] = synthetic_queryset.filter(
Q(grant_archived_count__gte=6) & Q(grant_archived_count__lte=8)
).count()
counts_returned["real_grant_archived_to_apps_eq_9thru13"] = real_queryset.filter(
Q(grant_archived_count__gte=9) & Q(grant_archived_count__lte=13)
).count()
counts_returned[
"synthetic_grant_archived_to_apps_eq_9thru13"
] = synthetic_queryset.filter(
Q(grant_archived_count__gte=9) & Q(grant_archived_count__lte=13)
).count()
counts_returned["real_grant_archived_to_apps_gt_13"] = real_queryset.filter(
Q(grant_archived_count__gt=13)
).count()
counts_returned[
"synthetic_grant_archived_to_apps_gt_13"
] = synthetic_queryset.filter(Q(grant_archived_count__gt=13)).count()
counts_returned["elapsed"] = round(datetime.utcnow().timestamp() - start_time, 3)
return counts_returned
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })",
"def get_beneficiary_grant_app_pair_counts():\n\n # Init counts dict\n counts_returned = {}\n\n # Grant real/synth bene counts (includes granted to multiple apps)\n start_time = datetime.utcnow().timestamp()\n\n # Setup base queryset\n grant_queryset = DataAccessGrant.objects.values(\"beneficiary\", \"application\")\n\n real_grant_queryset = grant_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n synthetic_grant_queryset = grant_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n counts_returned[\"grant_total\"] = grant_queryset.count()\n counts_returned[\"real_grant\"] = real_grant_queryset.count()\n counts_returned[\"synthetic_grant\"] = synthetic_grant_queryset.count()\n\n # Setup base queryset\n grant_archived_queryset = ArchivedDataAccessGrant.objects.values(\n \"beneficiary\", \"application\"\n )\n\n real_grant_archived_queryset = grant_archived_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n synthetic_grant_archived_queryset = grant_archived_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n # Get total table count\n counts_returned[\"grant_archived_total\"] = grant_archived_queryset.count()\n counts_returned[\"real_grant_archived\"] = real_grant_archived_queryset.count()\n counts_returned[\n \"synthetic_grant_archived\"\n ] = synthetic_grant_archived_queryset.count()\n\n \"\"\"\n Bene<->App pair differences\n \"\"\"\n # Pairs in Grant but not in ArchivedGrant.\n counts_returned[\"grant_vs_archived_difference_total\"] = grant_queryset.difference(\n grant_archived_queryset\n ).count()\n counts_returned[\n \"real_grant_vs_archived_difference_total\"\n ] = real_grant_queryset.difference(real_grant_archived_queryset).count()\n counts_returned[\n \"synthetic_grant_vs_archived_difference_total\"\n ] = synthetic_grant_queryset.difference(synthetic_grant_archived_queryset).count()\n\n # Pairs in ArchivedGrant but not in Grant.\n counts_returned[\n \"archived_vs_grant_difference_total\"\n ] = grant_archived_queryset.difference(grant_queryset).count()\n counts_returned[\n \"real_archived_vs_grant_difference_total\"\n ] = real_grant_archived_queryset.difference(real_grant_queryset).count()\n counts_returned[\n \"synthetic_archived_vs_grant_difference_total\"\n ] = synthetic_grant_archived_queryset.difference(synthetic_grant_queryset).count()\n\n counts_returned[\"elapsed\"] = round(datetime.utcnow().timestamp() - start_time, 3)\n\n return counts_returned",
"def test_get_user_grant(self):\n grants = get_obj_grants()\n\n self.assertTrue(grants.count((\"linode\", Instance)) > 0)\n self.assertTrue(grants.count((\"domain\", Domain)) > 0)\n self.assertTrue(grants.count((\"stackscript\", StackScript)) > 0)\n self.assertTrue(grants.count((\"nodebalancer\", NodeBalancer)) > 0)\n self.assertTrue(grants.count((\"volume\", Volume)) > 0)\n self.assertTrue(grants.count((\"image\", Image)) > 0)\n self.assertTrue(grants.count((\"longview\", LongviewClient)) > 0)\n self.assertTrue(grants.count((\"database\", Database)) > 0)\n self.assertTrue(grants.count((\"firewall\", Firewall)) > 0)",
"def get_stats(self):\n result = {\n 'datetime': dt.datetime.now().strftime('%d.%m.%Y %H:%M:%S'),\n 'total': db.session.query(User). \\\n count(),\n 'unverified': db.session.query(User). \\\n filter(db.not_(User.verified)). \\\n count(),\n 'male students': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Student). \\\n count(),\n 'male employees': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Employee). \\\n count(),\n 'male alumni': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Alumni). \\\n count(),\n 'female students': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Student). \\\n count(),\n 'female employees': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Employee). \\\n count(),\n 'female alumni': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Alumni). \\\n count()\n }\n\n return result",
"def get_grant_bene_counts(application=None):\n # Init counts dict\n counts_returned = {}\n\n # Grant real/synth bene counts (includes granted to multiple apps)\n start_time = datetime.utcnow().timestamp()\n\n # Setup base queryset\n grant_queryset = DataAccessGrant.objects\n\n if application:\n grant_queryset = grant_queryset.filter(application=application)\n\n # Get total table count\n counts_returned[\"total\"] = grant_queryset.count()\n\n real_grant_queryset = grant_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n synthetic_grant_queryset = grant_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n counts_returned[\"real\"] = real_grant_queryset.count()\n counts_returned[\"synthetic\"] = synthetic_grant_queryset.count()\n counts_returned[\"elapsed\"] = round(datetime.utcnow().timestamp() - start_time, 3)\n\n # Grant real/synth bene distinct counts (excludes granted to multiple apps)\n if application is None:\n start_time = datetime.utcnow().timestamp()\n\n counts_returned[\"real_deduped\"] = real_grant_queryset.distinct().count()\n counts_returned[\n \"synthetic_deduped\"\n ] = synthetic_grant_queryset.distinct().count()\n counts_returned[\"deduped_elapsed\"] = round(\n datetime.utcnow().timestamp() - start_time, 3\n )\n\n # Archived grant real/synth bene distinct counts (excludes granted to multiple apps and multiple archived records)\n start_time = datetime.utcnow().timestamp()\n\n # Setup base queryset\n archived_queryset = ArchivedDataAccessGrant.objects\n\n if application:\n archived_queryset = archived_queryset.filter(application=application)\n\n # Get total table count\n counts_returned[\"archived_total\"] = archived_queryset.count()\n\n real_archived_queryset = archived_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n synthetic_archived_queryset = archived_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n counts_returned[\"archived_real_deduped\"] = real_archived_queryset.distinct().count()\n counts_returned[\n \"archived_synthetic_deduped\"\n ] = synthetic_archived_queryset.distinct().count()\n counts_returned[\"archived_deduped_elapsed\"] = round(\n datetime.utcnow().timestamp() - start_time, 3\n )\n\n # Both Grant and Archived grant (UNION) real/synth bene distinct counts\n start_time = datetime.utcnow().timestamp()\n\n real_union_queryset = real_grant_queryset.union(real_archived_queryset)\n synthetic_union_queryset = synthetic_grant_queryset.union(\n synthetic_archived_queryset\n )\n\n # Django 3.2.13 upgrade: seems need to re-write the query to work around?\n # django.db.utils.NotSupportedError: Calling QuerySet.distinct() after union() is not supported\n # and below is the quote from Django doc:\n #\n # union()\n # union(*other_qs, all=False)\n # Uses SQL UNION operator to combine the results of two or more QuerySets. For example:\n #\n # >>> qs1.union(qs2, qs3)\n # The UNION operator selects only distinct values by default. 
To allow duplicate values, use the all=True argument.\n\n # counts_returned[\n # \"grant_and_archived_real_deduped\"\n # ] = real_union_queryset.distinct().count()\n counts_returned[\n \"grant_and_archived_real_deduped\"\n ] = real_union_queryset.count()\n counts_returned[\n \"grant_and_archived_synthetic_deduped\"\n ] = synthetic_union_queryset.count()\n counts_returned[\"grant_and_archived_deduped_elapsed\"] = round(\n datetime.utcnow().timestamp() - start_time, 3\n )\n\n return counts_returned",
"def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])",
"def get_amount_users() -> User:\n return User.objects.all().count()",
"def get_count(username):\n return get_contributor(username)[\"count\"]",
"def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: Display counts of user types\r\n user_types =df['User Type'].value_counts()\r\n print(user_types)\r\n\r\n\r\n # TO DO: Display counts of gender\r\n Gender =df['Gender'].value_counts()\r\n print(Gender)\r\n\r\n\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n print('Earliest year of birth:\\n', df['Birth Year'].min())\r\n print('Most recent year of birth:\\n', df['Birth Year'].max())\r\n print('Most common year of birth:\\n', df['Birth Year'].mean())\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types)\n\n # display counts of gender\n try:\n gender = df['Gender'].value_counts()\n print(gender)\n except KeyError:\n print(\"Gender data is not provided.\")\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_year = df['Birth Year'].min()\n recent_year = df['Birth Year'].max()\n common_year = df['Birth Year'].mode()[0]\n print(\"The earliest year is: \", int(earliest_year))\n print(\"The most recent year is: \", int(recent_year))\n print(\"The most common birth year is: \", int(common_year))\n except KeyError:\n print('Birth year data is not provided.')\n finally:\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n # TO DO: Display counts of user types\r\n df = ['user type'].value_counts()\r\n print('count of user typs:\\n')\r\n # TO DO: Display counts of gender\r\n df = ['grnder'].value_counts()\r\n if 'Gender' in df:\r\n print('count of gender:\\n')\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n year = df['birth year'].value_counts()\r\n if 'birth year' in df:\r\n print('earliset birth year is:{year.min()}\\nmost recent is: {year.max()}\\nand most common birth year is: (year.mode()[0]')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n num_types=df['User Type'].groupby(df['User Type']).count()\n print(num_types)\n\n # Display counts of gender\n num_gender=df['Gender'].groupby(df['Gender']).count()\n print(num_gender)\n\n # Display earliest, most recent, and most common year of birth\n b_year=df['Birth Year']\n print(\"earliest year of birth :{}\".format(b_year.min()))\n print(\"most recent year of birth : {}\".format(b_year.max()))\n print(\"most common year of birth : {}\".format(b_year.mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n \n start_time = time.time()\n\n # TO DO: Display counts of user types\n \n \n #print(user_types)\n print('User Types:\\n', df['User Type'].value_counts())\n\n\n # TO DO: Display counts of gender\n if('Gender' in df):\n number_females = df['Gender'].str.count('Female').sum()\n \n number_of_males = df['Gender'].str.count('Male').sum()\n \n print('\\nThere are {} male users\\n'.format(int(number_of_males)))\n \n print('\\nThere are {} female users\\n'.format(int(number_females)))\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if('Birth Year' in df):\n most_common_year = df['Birth Year'].value_counts().idxmax()\n \n earliest_year = df['Birth Year'].min()\n \n most_recent_year = df['Birth Year'].max()\n \n print('\\n Oldest Birth Year is {}\\n Youngest Birth Year is {}\\n Most popular Birth Year is {}\\n'.format(int(earliest_year), int(most_recent_year), int(most_common_year)))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types:\\n\")\n user_counts = df['User Type'].value_counts()\n # printing out the total numbers of user types\n for index, user_count in enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n\n # Display counts of gender",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Trip Count By User Type:')\n for index, value in zip(df['User Type'].value_counts().index, df['User Type'].value_counts().values):\n print(index, '=', value)\n\n\n # Display counts of gender\n if 'Gender' in df.columns:\n print()\n print('Trip Count By Gender:')\n for index, value in zip(df['Gender'].value_counts().index, df['Gender'].value_counts().values):\n print(index, '=', value)\n print()\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('Earliest Year of Birth:', df['Birth Year'].min())\n print('Most Recent Year of Birth:', df['Birth Year'].max())\n print('Most Common Year of Birth:', df['Birth Year'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_count = df['User Type'].value_counts()\n print(f'{user_count}\\n')\n\n # Display counts of gender if only the data contains that column\n if 'Gender' in df:\n gender_count = df['Gender'].value_counts()\n print(gender_count, '\\n')\n\n # Display earliest, most recent, and most common year of birth if \"Birth Year\" column is found\n earliest = df['Birth Year'].min()\n print('Most earliest birth year:', int(earliest))\n recent = df['Birth Year'].max()\n print('Most recent birth year', int(recent))\n common = df['Birth Year'].value_counts().idxmax()\n print('Most common birth year', int(common))\n\n print(\"\\nTotal time taken: %s seconds.\" % (round(time.time() - start_time, 2)))\n print('-' * 40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n print('The number of subscribers and customers are:', user_types)\n \n # TO DO: Display counts of gender (Male / Female / Unknown)\n if 'Gender' in df: # perform gender related calculation\n gender = df['Gender'].value_counts()\n print('The number of males and females is:', gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df: # perform gender related calculation\n earliest_year = df['Birth Year'].min()\n print('The earliest year of birth is', earliest_year)\n\n recent_year = df['Birth Year'].max()\n print('The most recent year of birth is', recent_year)\n\n common_year = df['Birth Year'].mode()[0]\n print('The most common year of birth is', common_year)\n\n print(\"\\nRunning this code took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\n#4 USER INFO\\nCalculating User Stats...\\n')\n start_time = time.time()\n \n # TO DO: Display counts of user types\n print('Count of each User type:')\n print(df['User Type'].value_counts(dropna=False))\n \n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n print('\\nCount of each Gender type:')\n print(df['Gender'].value_counts(dropna=False))\n\n \n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('\\nBirth Year Statistics:')\n print(df['Birth Year'].value_counts(sort=True).head(1))\n print(df['Birth Year'].min())\n print(df['Birth Year'].max())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_counts(user_id):\n return _fetch_records(f\"SELECT rating_type, count FROM counts_by_rating_type WHERE user_id = {user_id} AND count > 0\")",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n\n user_type_count = df['User Type'].value_counts()\n print('The number of rentals per user type:')\n print(user_type_count.to_string())\n\n # TO DO: Display counts of gender\n \n if 'Gender' in df.columns:\n \n gender_type_count = df['Gender'].value_counts()\n print('\\nThe number of rental per gender:')\n print(gender_type_count.to_string())\n \n else:\n \n print('\\nNo data about gender available.\\n')\n \n # TO DO: Display earliest, most recent, and most common year of birth\n\n if 'Birth Year' in df.columns:\n \n youngest_user = df['Birth Year'].max()\n common_user = df['Birth Year'].mode()[0]\n oldest_user = df['Birth Year'].min()\n print('\\nThe youngest user was born', int(youngest_user))\n print('The most common user was born', int(common_user))\n print('The oldest user was born', int(oldest_user))\n \n else:\n \n print('\\nNo data about age available.\\n')\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # Display counts of user types \r\n count_user_types = df['User Type'].value_counts()\r\n print('The count of user types are:', count_user_types)\r\n\r\n # Display counts of gender - Chicago and New York ONLY were having Gender column\r\n try:\r\n gender_counts = df['Gender'].value_counts()\r\n print('The count of gender types are:', gender_counts)\r\n \r\n # Displays the earliest, the most recent, and most common year of birth\r\n # Chicago and New York ONLY were having Birth Year column\r\n \r\n earliest_year = df['Birth Year'].min()\r\n print('Earliest Year of Birth is:', earliest_year)\r\n most_recent_year = df['Birth Year'].max()\r\n print('Most Recent Year of Birth is:', most_recent_year)\r\n common_year = df['Birth Year'].mode()[0]\r\n print('Most Common Year of Birth is:', common_year)\r\n \r\n except KeyError:\r\n print('Sorry, Gender & Birth year data is not available for Washington')\r\n \r\n print(\"\\nThis process took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n a = len(df['User Type'].unique())\n print('counts of user types', a)\n\n # TO DO: Display counts of gender\n b = len(df['Gender'].unique())\n print('counts of gender', b)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n max = df['Birth Year'].max()\n min = df['Birth Year'].min()\n common = df['Birth Year'].mode()[0]\n print('earliest of birth is %s, most recent of birth is %s, and most common year of birth is %s' % (min, max, common))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types: \\n{}\\n\".format(df[\"User Type\"].value_counts()))\n\n # Display counts of gender\n if 'Gender' in df:\n print(\"Counts of user types: \\n{}\\n\".format(df['Gender'].value_counts()))\n else:\n print(\"Given data doesn't contain gender data.\\n\")\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df:\n print(\"The earliest birth year is: {}.\".format(df[\"Birth Year\"].min()))\n print(\"The most recent birth year is: {}.\".format(df[\"Birth Year\"].max()))\n print(\"The most common birth year is: {}.\".format(df[\"Birth Year\"].mode()[0]))\n else:\n print(\"Given data doesn't contain birth year data.\\n\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type = df[\"User Type\"].value_counts()\n print(\"These are the user types requested: \",user_type)\n\n # TO DO: Display counts of gender\n gender = df[\"Gender\"].value_counts()\n print(\"These are the genders requested: \",gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n early_year = df[\"Birth Year\"].min()\n print(\"The earliest year of birth for this filtered set is: \", int(early_year))\n \n recent_year = df[\"Birth Year\"].max()\n print(\"The most recent year of birth for this set is: \",int(recent_year))\n \n common_year = df[\"Birth Year\"].mode()\n print(\"The most common year of birth is: \",int(common_year))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n Users = df['User Type'].value_counts()\n print(\"What is the breakdown of Users:\\n\")\n print(Users)\n \n # TO DO: Display counts of gender\n print(\"\\nWhat is the breakdown of Gender:\\n\")\n if 'Gender' in df.columns:\n Gender = df['Gender'].value_counts()\n print(Gender)\n else:\n print(\"No gender data to share\")\n \n # TO DO: Display earliest, most recent, and most common year of birth\n print(\"\\nWhat is the oldest, youngest and most popular year of birth:\\n\")\n if 'Birth Year' in df.columns:\n Birth_max = int(df['Birth Year'].max())\n Birth_min = int(df['Birth Year'].min())\n Birth_common = int(df['Birth Year'].mode()[0])\n print(\"The oldest year of birth is: {}, the youngest is: {} and the most popular is: {}\".format(Birth_min, Birth_max, Birth_common))\n else:\n print(\"No birth year data to share\")\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type_count = df['User Type'].value_counts().to_frame()\n print(user_type_count)\n # TO DO: Display counts of gender\n try:\n most_common_gender = df['Gender'].value_counts().to_frame()\n print(most_common_gender)\n # TO DO: Display earliest, most recent, and most common year of birth\n earliest_birth_year = df['Birth Year'].min()\n print('\\nEarliest birth year is : ', int(earliest_birth_year))\n most_recent_birth_year = df['Birth Year'].max()\n print('\\nMost recent birth year is : ', int(most_recent_birth_year))\n most_common_birth_year = df['Birth Year'].mode()[0]\n print('\\nMost common birth year is : ', int(most_common_birth_year))\n except KeyError:\n print('\\nGender and Birth year data is only available in \\'chicago\\' and \\'new york city\\'')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print(df['User Type'].value_counts())\n print('\\n\\n')\n\n # TO DO: Display counts of gender\n if 'Gender' in(df.columns):\n print(df['Gender'].value_counts())\n print('\\n\\n')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in(df.columns):\n year = df['Birth Year'].fillna(0).astype('int64')\n print(f'Earliest birth year is: {year.min()}\\nmost recent is: {year.max()}\\nand most common birth year is: {year.mode()[0]}')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('User type counts:')\n print(df['User Type'].value_counts())\n\n # Display counts of gender\n print('User gender counts:')\n try:\n print(df['Gender'].value_counts())\n except:\n print('This file has no gender data')\n\n # Display earliest, most recent, and most common year of birth\n print('User birth year:')\n try:\n earliest = min(df['Birth Year'])\n most_recent = max(df['Birth Year'])\n most_common = df['Birth Year'].value_counts().index.tolist()[0]\n print('Birth Years:\\nEarliest: {}\\nMost Recent: {}\\nMost Common: {}'\n .format(earliest, most_recent, most_common))\n except:\n print('This file has no birth year data')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Counts of User types is:\\n',df['User Type'].value_counts())\n\n # Display counts of gender with exception for Washington\n while 'Gender' not in df:\n print('No gender data for washington\\n')\n break\n else:\n gender = df['Gender'].value_counts()\n print(gender, '\\n')\n\n # Display earliest, most recent, and most common year of birth with exception for Washington\n while 'Birth Year' not in df:\n print('No birth year data for washington')\n break\n else:\n earliest_year = df['Birth Year'].min()\n recent_year = df['Birth Year'].max()\n common_year = df['Birth Year'].mode()[0]\n print('Earliest year of birth:', earliest_year)\n print('Most recent year of birth:', recent_year)\n print('Most common year of birth:', common_year)\n \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n return user_stats",
"def user_statistics(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('Count of user types is:... \\n')\n user_type_counts=df['User Type'].value_counts()\n \n #loop through to print the total number of user types\n for index, user_count in enumerate(user_type_counts):\n print(' {}: {}'.format(user_type_counts.index[index],user_count))\n \n print(\"..........\")\n \n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n user_gender_statistics(df)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n \n if 'Birth Year' in df.columns:\n user_birth_statistics(df)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)"
] |
[
"0.6891219",
"0.61459666",
"0.6123837",
"0.6058093",
"0.6011557",
"0.5944853",
"0.5926225",
"0.5921975",
"0.5912602",
"0.58535355",
"0.585157",
"0.58273494",
"0.5811506",
"0.58088696",
"0.5805567",
"0.5795218",
"0.5774675",
"0.5768302",
"0.57660484",
"0.57649344",
"0.57579374",
"0.57576025",
"0.575084",
"0.57497394",
"0.5742346",
"0.5736951",
"0.5727711",
"0.5714418",
"0.5713381",
"0.5706608"
] |
0.68389356
|
1
|
Get DataAccessGrant and ArchivedDataAccessGrant counts for beneficiary<->application pairs.
|
def get_beneficiary_grant_app_pair_counts():
    # Init counts dict
    counts_returned = {}

    # Grant real/synth bene counts (includes granted to multiple apps)
    start_time = datetime.utcnow().timestamp()

    # Setup base queryset
    grant_queryset = DataAccessGrant.objects.values("beneficiary", "application")

    real_grant_queryset = grant_queryset.filter(
        ~Q(beneficiary__crosswalk___fhir_id__startswith="-")
        & ~Q(beneficiary__crosswalk___fhir_id="")
        & Q(beneficiary__crosswalk___fhir_id__isnull=False)
    ).values("beneficiary", "application")

    synthetic_grant_queryset = grant_queryset.filter(
        Q(beneficiary__crosswalk___fhir_id__startswith="-")
        & ~Q(beneficiary__crosswalk___fhir_id="")
        & Q(beneficiary__crosswalk___fhir_id__isnull=False)
    ).values("beneficiary", "application")

    counts_returned["grant_total"] = grant_queryset.count()
    counts_returned["real_grant"] = real_grant_queryset.count()
    counts_returned["synthetic_grant"] = synthetic_grant_queryset.count()

    # Setup base queryset
    grant_archived_queryset = ArchivedDataAccessGrant.objects.values(
        "beneficiary", "application"
    )

    real_grant_archived_queryset = grant_archived_queryset.filter(
        ~Q(beneficiary__crosswalk___fhir_id__startswith="-")
        & ~Q(beneficiary__crosswalk___fhir_id="")
        & Q(beneficiary__crosswalk___fhir_id__isnull=False)
    ).values("beneficiary", "application")

    synthetic_grant_archived_queryset = grant_archived_queryset.filter(
        Q(beneficiary__crosswalk___fhir_id__startswith="-")
        & ~Q(beneficiary__crosswalk___fhir_id="")
        & Q(beneficiary__crosswalk___fhir_id__isnull=False)
    ).values("beneficiary", "application")

    # Get total table count
    counts_returned["grant_archived_total"] = grant_archived_queryset.count()
    counts_returned["real_grant_archived"] = real_grant_archived_queryset.count()
    counts_returned[
        "synthetic_grant_archived"
    ] = synthetic_grant_archived_queryset.count()

    """
    Bene<->App pair differences
    """
    # Pairs in Grant but not in ArchivedGrant.
    counts_returned["grant_vs_archived_difference_total"] = grant_queryset.difference(
        grant_archived_queryset
    ).count()
    counts_returned[
        "real_grant_vs_archived_difference_total"
    ] = real_grant_queryset.difference(real_grant_archived_queryset).count()
    counts_returned[
        "synthetic_grant_vs_archived_difference_total"
    ] = synthetic_grant_queryset.difference(synthetic_grant_archived_queryset).count()

    # Pairs in ArchivedGrant but not in Grant.
    counts_returned[
        "archived_vs_grant_difference_total"
    ] = grant_archived_queryset.difference(grant_queryset).count()
    counts_returned[
        "real_archived_vs_grant_difference_total"
    ] = real_grant_archived_queryset.difference(real_grant_queryset).count()
    counts_returned[
        "synthetic_archived_vs_grant_difference_total"
    ] = synthetic_grant_archived_queryset.difference(synthetic_grant_queryset).count()

    counts_returned["elapsed"] = round(datetime.utcnow().timestamp() - start_time, 3)

    return counts_returned
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_grant_bene_counts(application=None):\n # Init counts dict\n counts_returned = {}\n\n # Grant real/synth bene counts (includes granted to multiple apps)\n start_time = datetime.utcnow().timestamp()\n\n # Setup base queryset\n grant_queryset = DataAccessGrant.objects\n\n if application:\n grant_queryset = grant_queryset.filter(application=application)\n\n # Get total table count\n counts_returned[\"total\"] = grant_queryset.count()\n\n real_grant_queryset = grant_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n synthetic_grant_queryset = grant_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n counts_returned[\"real\"] = real_grant_queryset.count()\n counts_returned[\"synthetic\"] = synthetic_grant_queryset.count()\n counts_returned[\"elapsed\"] = round(datetime.utcnow().timestamp() - start_time, 3)\n\n # Grant real/synth bene distinct counts (excludes granted to multiple apps)\n if application is None:\n start_time = datetime.utcnow().timestamp()\n\n counts_returned[\"real_deduped\"] = real_grant_queryset.distinct().count()\n counts_returned[\n \"synthetic_deduped\"\n ] = synthetic_grant_queryset.distinct().count()\n counts_returned[\"deduped_elapsed\"] = round(\n datetime.utcnow().timestamp() - start_time, 3\n )\n\n # Archived grant real/synth bene distinct counts (excludes granted to multiple apps and multiple archived records)\n start_time = datetime.utcnow().timestamp()\n\n # Setup base queryset\n archived_queryset = ArchivedDataAccessGrant.objects\n\n if application:\n archived_queryset = archived_queryset.filter(application=application)\n\n # Get total table count\n counts_returned[\"archived_total\"] = archived_queryset.count()\n\n real_archived_queryset = archived_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n synthetic_archived_queryset = archived_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\")\n\n counts_returned[\"archived_real_deduped\"] = real_archived_queryset.distinct().count()\n counts_returned[\n \"archived_synthetic_deduped\"\n ] = synthetic_archived_queryset.distinct().count()\n counts_returned[\"archived_deduped_elapsed\"] = round(\n datetime.utcnow().timestamp() - start_time, 3\n )\n\n # Both Grant and Archived grant (UNION) real/synth bene distinct counts\n start_time = datetime.utcnow().timestamp()\n\n real_union_queryset = real_grant_queryset.union(real_archived_queryset)\n synthetic_union_queryset = synthetic_grant_queryset.union(\n synthetic_archived_queryset\n )\n\n # Django 3.2.13 upgrade: seems need to re-write the query to work around?\n # django.db.utils.NotSupportedError: Calling QuerySet.distinct() after union() is not supported\n # and below is the quote from Django doc:\n #\n # union()\n # union(*other_qs, all=False)\n # Uses SQL UNION operator to combine the results of two or more QuerySets. For example:\n #\n # >>> qs1.union(qs2, qs3)\n # The UNION operator selects only distinct values by default. 
To allow duplicate values, use the all=True argument.\n\n # counts_returned[\n # \"grant_and_archived_real_deduped\"\n # ] = real_union_queryset.distinct().count()\n counts_returned[\n \"grant_and_archived_real_deduped\"\n ] = real_union_queryset.count()\n counts_returned[\n \"grant_and_archived_synthetic_deduped\"\n ] = synthetic_union_queryset.count()\n counts_returned[\"grant_and_archived_deduped_elapsed\"] = round(\n datetime.utcnow().timestamp() - start_time, 3\n )\n\n return counts_returned",
"def get_beneficiary_counts():\n User = get_user_model()\n\n # Init counts dict\n counts_returned = {}\n\n start_time = datetime.utcnow().timestamp()\n\n queryset = (\n User.objects.select_related()\n .filter(userprofile__user_type=\"BEN\")\n .annotate(\n fhir_id=Min(\"crosswalk___fhir_id\"),\n grant_count=Count(\"dataaccessgrant__application\", distinct=True),\n grant_archived_count=Count(\n \"archiveddataaccessgrant__application\", distinct=True\n ),\n )\n .all()\n )\n\n # Count should be equal to Crosswalk\n counts_returned[\"total\"] = queryset.count()\n\n # Setup base Real queryset\n real_queryset = queryset.filter(~Q(fhir_id__startswith=\"-\") & ~Q(fhir_id=\"\"))\n\n # Setup base synthetic queryset\n synthetic_queryset = queryset.filter(Q(fhir_id__startswith=\"-\") & ~Q(fhir_id=\"\"))\n\n # Real/synth counts. This should match counts using the Crosswalk table directly.\n counts_returned[\"real\"] = real_queryset.count()\n counts_returned[\"synthetic\"] = synthetic_queryset.count()\n\n \"\"\"\n Grant related count section\n \"\"\"\n # Count only if in grant\n counts_returned[\"total_grant\"] = queryset.filter(Q(grant_count__gt=0)).count()\n counts_returned[\"real_grant\"] = real_queryset.filter(Q(grant_count__gt=0)).count()\n counts_returned[\"synthetic_grant\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0)\n ).count()\n\n # Count only if in grant archived\n counts_returned[\"total_grant_archived\"] = queryset.filter(\n Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_archived\"] = real_queryset.filter(\n Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_archived\"] = synthetic_queryset.filter(\n Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in grant OR archived\n counts_returned[\"total_grant_or_archived\"] = queryset.filter(\n Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_or_archived\"] = real_queryset.filter(\n Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_or_archived\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0) | Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in grant AND archived\n counts_returned[\"total_grant_and_archived\"] = queryset.filter(\n Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_and_archived\"] = real_queryset.filter(\n Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_and_archived\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in grant NOT archived\n counts_returned[\"total_grant_not_archived\"] = queryset.filter(\n Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_grant_not_archived\"] = real_queryset.filter(\n Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_grant_not_archived\"] = synthetic_queryset.filter(\n Q(grant_count__gt=0) & ~Q(grant_archived_count__gt=0)\n ).count()\n\n # Count only if in archived NOT grant\n counts_returned[\"total_archived_not_grant\"] = queryset.filter(\n ~Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"real_archived_not_grant\"] = real_queryset.filter(\n ~Q(grant_count__gt=0) & Q(grant_archived_count__gt=0)\n ).count()\n counts_returned[\"synthetic_archived_not_grant\"] = synthetic_queryset.filter(\n ~Q(grant_count__gt=0) & 
Q(grant_archived_count__gt=0)\n ).count()\n\n \"\"\"\n Bene grants to applications break down count section\n \"\"\"\n counts_returned[\"real_grant_to_apps_eq_1\"] = real_queryset.filter(\n Q(grant_count=1)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_1\"] = synthetic_queryset.filter(\n Q(grant_count=1)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_2\"] = real_queryset.filter(\n Q(grant_count=2)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_2\"] = synthetic_queryset.filter(\n Q(grant_count=2)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_3\"] = real_queryset.filter(\n Q(grant_count=3)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_3\"] = synthetic_queryset.filter(\n Q(grant_count=3)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_4thru5\"] = real_queryset.filter(\n Q(grant_count__gte=4) & Q(grant_count__lte=5)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_4thru5\"] = synthetic_queryset.filter(\n Q(grant_count__gte=4) & Q(grant_count__lte=5)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_6thru8\"] = real_queryset.filter(\n Q(grant_count__gte=6) & Q(grant_count__lte=8)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_6thru8\"] = synthetic_queryset.filter(\n Q(grant_count__gte=6) & Q(grant_count__lte=8)\n ).count()\n\n counts_returned[\"real_grant_to_apps_eq_9thru13\"] = real_queryset.filter(\n Q(grant_count__gte=9) & Q(grant_count__lte=13)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_eq_9thru13\"] = synthetic_queryset.filter(\n Q(grant_count__gte=9) & Q(grant_count__lte=13)\n ).count()\n\n counts_returned[\"real_grant_to_apps_gt_13\"] = real_queryset.filter(\n Q(grant_count__gt=13)\n ).count()\n counts_returned[\"synthetic_grant_to_apps_gt_13\"] = synthetic_queryset.filter(\n Q(grant_count__gt=13)\n ).count()\n\n \"\"\"\n Bene archived grants to applications break down count section\n \"\"\"\n counts_returned[\"real_grant_archived_to_apps_eq_1\"] = real_queryset.filter(\n Q(grant_archived_count=1)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_1\"\n ] = synthetic_queryset.filter(Q(grant_archived_count=1)).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_2\"] = real_queryset.filter(\n Q(grant_archived_count=2)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_2\"\n ] = synthetic_queryset.filter(Q(grant_archived_count=2)).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_3\"] = real_queryset.filter(\n Q(grant_archived_count=3)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_3\"\n ] = synthetic_queryset.filter(Q(grant_archived_count=3)).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_4thru5\"] = real_queryset.filter(\n Q(grant_archived_count__gte=4) & Q(grant_archived_count__lte=5)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_4thru5\"\n ] = synthetic_queryset.filter(\n Q(grant_archived_count__gte=4) & Q(grant_archived_count__lte=5)\n ).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_6thru8\"] = real_queryset.filter(\n Q(grant_archived_count__gte=6) & Q(grant_archived_count__lte=8)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_eq_6thru8\"\n ] = synthetic_queryset.filter(\n Q(grant_archived_count__gte=6) & Q(grant_archived_count__lte=8)\n ).count()\n\n counts_returned[\"real_grant_archived_to_apps_eq_9thru13\"] = real_queryset.filter(\n Q(grant_archived_count__gte=9) & Q(grant_archived_count__lte=13)\n ).count()\n 
counts_returned[\n \"synthetic_grant_archived_to_apps_eq_9thru13\"\n ] = synthetic_queryset.filter(\n Q(grant_archived_count__gte=9) & Q(grant_archived_count__lte=13)\n ).count()\n\n counts_returned[\"real_grant_archived_to_apps_gt_13\"] = real_queryset.filter(\n Q(grant_archived_count__gt=13)\n ).count()\n counts_returned[\n \"synthetic_grant_archived_to_apps_gt_13\"\n ] = synthetic_queryset.filter(Q(grant_archived_count__gt=13)).count()\n\n counts_returned[\"elapsed\"] = round(datetime.utcnow().timestamp() - start_time, 3)\n\n return counts_returned",
"def number_of_connections(self, asn):\n customer_count = 0\n provider_count = 0\n peer_count = 0\n\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == asn:\n customer_count += 1\n elif edge_data[\"relationship\"] == -1 and edge_data[\"as2\"] == asn:\n provider_count += 1\n elif edge_data[\"relationship\"] == 0:\n peer_count += 1\n return customer_count, provider_count, peer_count",
"def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users",
"def count_common_connections(network, user_A, user_B):\n count = 0\n if user_A not in network or user_B not in network:\n return False\n for person in network[user_A][0]:\n if person in network[user_B][0]:\n count += 1\n return count",
"def getCombinationCounts(self, attributeList):\n cD = {}\n try:\n idxL = [self._attributeNameList.index(atName) for atName in attributeList]\n #\n for row in self.data:\n ky = tuple([row[jj] for jj in idxL])\n cD[ky] = cD[ky] + 1 if ky in cD else 1\n except Exception as e:\n if self.__verbose:\n logger.exception(\"Selection failure\")\n if self._raiseExceptions:\n raise e\n return cD",
"def get_attendance_counts(attendance):\n count_a = 0\n count_p = 0\n count_d = 0\n for a in attendance:\n if a.ATT_STATUS == 'A':\n count_a+=1\n elif a.ATT_STATUS == 'D':\n count_d+=1\n elif a.ATT_STATUS == 'P':\n count_p+=1\n return (count_p,count_a,count_d)",
"def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)",
"def get_all_dataset_counts(\n self,\n ) -> Dict[Tuple[str, int, int], int]:\n res = self._engine.execute(\n select(\n [\n PRODUCT.c.name,\n TIME_OVERVIEW.c.start_day,\n TIME_OVERVIEW.c.period_type,\n TIME_OVERVIEW.c.dataset_count,\n ]\n )\n .select_from(TIME_OVERVIEW.join(PRODUCT))\n .where(TIME_OVERVIEW.c.product_ref == PRODUCT.c.id)\n .order_by(\n PRODUCT.c.name, TIME_OVERVIEW.c.start_day, TIME_OVERVIEW.c.period_type\n )\n )\n\n return {\n (\n r.name,\n *TimePeriodOverview.from_flat_period_representation(\n r.period_type, r.start_day\n )[:2],\n ): r.dataset_count\n for r in res\n }",
"def num_applicants(driver):\n # use two selectors since LI has two methods of showing number\n # of applicants in the applicants-insights driver\n num_applicant_selectors = [\n \"span.applicant-rank-header-text\",\n \"table.other-applicants-table.comparison-table tr td\",\n \"p.number-of-applicants\"\n ]\n for selector in num_applicant_selectors:\n try:\n num_applicants = driver.find_element_by_css_selector(selector).text\n except Exception as e:\n pass\n else:\n return ''.join(list(filter(lambda c: c.isdigit(), num_applicants)))\n return ''",
"def analysis():\r\n data_frame = load_from_mysql('core', 'BDFMHQAA_D')\r\n data_frame.registerTempTable('business')\r\n gd = data_frame.select('AA03CSNO', 'AA08PRON')\r\n\r\n def merge_count(a, b):\r\n r = {}\r\n for p, c in a.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n for p, c in b.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n return r\r\n result = gd.map(lambda row: (row.AA03CSNO, {row.AA08PRON: 1})).reduceByKey(merge_count)\r\n pron_count = gd.map(lambda row: (row.AA08PRON, 1)).reduceByKey(lambda a, b: a + b)\r\n\r\n # result = gd.map(lambda row: (row.AA03CSNO, row.AA08PRON))\r\n print(result.take(10))\r\n print('----------------pron count-----------------')\r\n print(pron_count.collect())\r\n\r\n print(gd)",
"def assignments_count(request):\r\n count_assignments = 0\r\n # check if current user is authenticated or not.\r\n if request.user.is_authenticated:\r\n try:\r\n # get assignments count for students.\r\n student = request.user.student\r\n count_assignments = Assignment.objects.assignments_count(student)\r\n except:\r\n # get assignments count for adminhod.\r\n try: \r\n if request.user.adminhod:\r\n count_assignments = Assignment.objects.assignments_count()\r\n except:\r\n pass \r\n return {'count_assignments':count_assignments}",
"def compute_numbers_of_approvals(self, committees: list[list[int]], ballots: list[set[int]]) -> dict:\n number_of_approvals={}\n for committee in committees:\n for ballot in ballots:\n key = [entry for item in [committee, ballot] for entry in item]\n 'We use the combination of the committee and the ballot to have a unique and hashable key.'\n number_of_approvals[tuple(key)]=len([party for party in committee if party in ballot])\n return number_of_approvals",
"def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })",
"def count_common_connections(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n common_connections = 0\n for conn in network[user_A]['connections']:\n if conn in network[user_B]['connections']:\n common_connections += 1\n return common_connections",
"def analyse_by_applications(self, dataframe, applicationslist, cwe_filter):\r\n result = []\r\n\r\n for app in applicationslist:\r\n df_analysis = dataframe[(dataframe['project'] == app)]\r\n counts = 0\r\n counts += len(df_analysis[(df_analysis['CWE'].isin(cwe_filter))].index)\r\n result.append([app, counts])\r\n\r\n return result",
"def numberActivities(self):\n if self.use_dic:\n nb_data = self.dic.keys()\n nb_act = (self.dic[nb_data[0]]).keys()\n return len(nb_data)*len(nb_act)\n else:\n return -1",
"def has_active_loan(user_application, approved_loans):\n return len(filter(lambda item: item['customer_id'] == user_application['customer_id'] and len(intersected_applications(user_application, approved_loans)), approved_loans))",
"def all_categories_for_phrase(db, phrase, access_codes):\n ratings = [0, 0, 0]\n for access_code in access_codes:\n category_index = annotator_category_for_phrase(db, phrase, access_code)\n ratings[category_index] += 1\n return ratings",
"def get_case_counts_for_primary_sites():\n cases_endpt = 'https://api.gdc.cancer.gov/cases'\n headers = {'Content-Type': 'application/json'}\n data2 = {\n \"size\":\"0\",\n \"facets\":\"primary_site\",\n }\n\n response = requests.post(cases_endpt, headers=headers, data = json.dumps(data2))\n response_dic = response.json()\n count_dic = {}\n\n for bucket in response_dic[\"data\"][\"aggregations\"][\"primary_site\"][\"buckets\"]:\n count_dic[bucket[\"key\"]] = count_dic.get(bucket[\"key\"], 0) + bucket[\"doc_count\"]\n\n return count_dic",
"def keycount(self, essid):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.key)\n q = q.join(PYR2_DBObject).join(ESSID_DBObject)\n q = q.filter(ESSID_DBObject.essid == essid)\n return q.count()",
"def customer_acccounting(customer_orders):",
"def people(self):\n count = self.db.query(\n 'select count(id) as people_count from \\\n (select id from staff union all select id from fellows)')\n return count.all()[0]['people_count']",
"def count():\r\n return Activation.query.count()",
"def _do_get_provider_count_and_objs(self, **kw):\n if AlgunPermiso(tipo=\"Sistema\").is_met(request.environ):\n count, lista = super(RolTableFiller,\n self)._do_get_provider_count_and_objs(**kw)\n filtrados = []\n for rol in lista:\n if rol.tipo == u\"Sistema\":\n filtrados.append(rol)\n return len(filtrados), filtrados\n return 0, []",
"def ranking(availability_info,mapds):\n rank=Counter(dict())\n for key in availability_info.keys():\n rank[mapds[key]]=len(availability_info[key])\n #print rank\n return rank",
"def resource_discovery_association_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"resource_discovery_association_count\")",
"def passengersRemaining():\n passengers_remaining = 0\n # loop through both dictionaries and count all people\n for i in range(0,len(building.up_dictionary),1):\n passengers = building.up_dictionary[i]\n if passengers:\n passengers_remaining = passengers_remaining + len(passengers)\n else:\n continue\n for i in range(0,len(building.down_dictionary),1):\n passengers = building.down_dictionary[i]\n if passengers:\n passengers_remaining = passengers_remaining + len(passengers)\n else:\n continue\n\n return passengers_remaining",
"def user_bookmark_count_authorization(self):\r\n self.testapp.get(u'/api/v1/admin/stats/bmarkcount',\r\n status=403)",
"def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res"
] |
[
"0.6973483",
"0.66911924",
"0.5162412",
"0.51583827",
"0.5151415",
"0.5109306",
"0.5107106",
"0.4989022",
"0.4976905",
"0.49694315",
"0.4954362",
"0.49121442",
"0.4887774",
"0.48872083",
"0.48740187",
"0.48256645",
"0.4821141",
"0.48171303",
"0.48086876",
"0.48056912",
"0.4804997",
"0.4784676",
"0.47727165",
"0.47698194",
"0.47592777",
"0.4748463",
"0.47259825",
"0.47203213",
"0.47202688",
"0.47094253"
] |
0.79986787
|
0
|
Make an API call to Wild Apricot to retrieve contact info for all active members.
|
def get_all_active_members(debug, contactsUrl):
valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format
#params = {'$filter': 'member eq true AND Status eq Active',
# '$async': 'false'}
params = {'$filter': "member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge " + valid_date + "))",
'$async': 'false'}
request_url = contactsUrl + '?' + urllib.parse.urlencode(params)
if debug: print('Making api call to get contacts')
return api.execute_request(request_url).Contacts
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_contacts():\n return jsonify(g.driver.get_contacts())",
"def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise",
"def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df",
"def get(self):\n args = GET_PARSER.parse_args()\n print(f'args={args}')\n\n return Contacts().get_all(\n args[\"phonetypeOne\"],\n args[\"phonetypeTwo\"],\n args[\"phonetypeThree\"],\n args[\"firstName\"],\n args[\"lastName\"],)",
"def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)",
"def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts",
"async def get(self):\n await self.handle_request(self.contacts_new_api, 1)",
"def Run(self):\n return self.ListAllContacts()",
"def test_get_contact(self):\n pass",
"def list_contacts(self):\n return self.contacts",
"def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def get_contacts(self):\n\n\t\treturn self.__contacts",
"def get_info(self,who=None):\n alluri = []\n if who == None:\n return self.get_personal_info()\n\n if type(who) is not list:\n alluri.append(who) \n else:\n alluri = who\n \n self.get(\"INFO\",\"GetContactsInfo\",alluri)\n response = self.send()\n return response",
"def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts",
"async def test_list_contacts(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/contacts',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_get_contacts(self):\n pass",
"def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts",
"def contacts(self):\n return ContactCollection(self.request)",
"def GetMembers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_contacts():\n # Parse command line options\n try:\n opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'password='])\n except getopt.error, msg:\n print 'python contacts_example.py --user [username] --password [password]'\n sys.exit(2)\n user = ''\n password = ''\n # Process options\n for option, arg in opts:\n if option == '--user':\n user = arg\n elif option == '--password':\n password = arg\n\n while not user:\n print 'NOTE: Please run these tests only with a test account.'\n user = raw_input('Please enter your username: ')\n while not password:\n password = getpass.getpass()\n if not password:\n print 'Password cannot be blank.'\n try:\n contacts = GoogleContacts(user, password)\n except gdata.client.BadAuthentication:\n print 'Invalid user credentials given.'\n exit(1)\n contacts_list = contacts.Run()\n return contacts_list",
"def get_people(self):\n url = self.base_url + 'memberships'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()",
"def get_queryset(self):\n return self.request.user.contacts.all()",
"async def test_get_organization_contact(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/contacts/{contact_id}'.format(contact_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def get_contacts(userid):\n return 'get contacts - ' + userid",
"def test_projects_id_contacts_get(self):\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_get_contact_objects(self):\n\n contacts = MessageController.get_contact_objects(['2'])\n self.assertEqual(contacts[0].contact_first_name, 'Contact2')\n self.assertEqual(contacts[0].contact_phone, '4153417706')\n self.assertEqual(contacts[0].user_id, 1)\n self.assertEqual(contacts[0].lang_id, 1)",
"def test_get_members():\n client = Client\n members_res = get_members(client, {})\n assert members_res['Contents'] == [{'AccountId': 1, 'DetectorId': 1, 'MasterId': 1}]"
] |
[
"0.6713617",
"0.6643783",
"0.64085954",
"0.6258809",
"0.6256413",
"0.6255006",
"0.6143721",
"0.6133915",
"0.6131055",
"0.61093426",
"0.60089076",
"0.5980703",
"0.5980703",
"0.5980703",
"0.5972099",
"0.5960462",
"0.59564537",
"0.59230226",
"0.58822167",
"0.5870821",
"0.5854786",
"0.5840642",
"0.5838896",
"0.579027",
"0.57576066",
"0.5756036",
"0.5754075",
"0.5734978",
"0.57318556",
"0.57311976"
] |
0.6996974
|
0
|
Maps RFID strings to integers
|
def fix_RFID(r):
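    # int() handles leading zeros on its own, so the manual stripping below stays commented out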
#f = str(r).strip()
#while f[0] == '0':
# f = f[1:]
return int(r)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def phone_int_mapping(path_to_phone_char_map):\n mapping = dict()\n with open(path_to_phone_char_map) as f:\n for line in f:\n m = line.strip().split('\\t')\n mapping[m[0]] = int(m[1])\n\n return mapping",
"def __file_from_str_to_int(rank: str) -> int:\n # Warning, my own, not very well tested implementation of base26 converter\n values = []\n for letter in rank:\n values.append(ascii_lowercase.index(letter.lower()))\n index_value = 0\n counter = 0\n for value in reversed(values):\n if counter < 1:\n index_value += value\n else:\n index_value += (value * 26) ** counter\n counter += 1\n return index_value",
"def hex2int(r: str) -> int:",
"def Illumina_ID(rid):\n index = rid.find(\":\") # finds the first occurance of ':'\n new_id = rid[:index] + \":1:12345\" + rid[index:]\n new_id_split = re.split(\"#|/\", new_id)\n new_id = new_id_split[0] + \" \" + new_id_split[2] + \":Y:0:\" + new_id_split[1]\n return new_id",
"def get_id(self):\n id_num = []\n i = 0\n while True:\n serial_data = self.rfid_serial_port.read()\n data = serial_data.decode('utf-8')\n i = i + 1\n if i == 12:\n i = 0\n ID = \"\".join(map(str, id_num))\n return ID\n else:\n id_num.append(data)",
"def bin2int(r: str) -> int:",
"def comma_format_to_ten_digit(badge):\n\tif badge > 25565535: # the 8 digits correspond to a set of two and four hex values, so the max is the decimal version of FF and FFFF concatenated\n\t\tException(\"Error: Invalid RFID Number\")\n\tbadge = str(badge).zfill(8)\n\tformattedID = \"{0:x}\".format(int(badge[:-5])).zfill(2) + \"{0:x}\".format(int(badge[-5:])).zfill(4) # splits dec at last 5 digits and everything except last 5, converts each section to hex, then combines\n\treturn int(formattedID, 16) # converts combined hex string to int",
"def _converter(self,string_representation):\n assert len(string_representation) == 1\n\n hash_dic = {'T':10,'J':11,'Q':12,'K':13,'A':14}\n\n try:\n integer_representation=int(string_representation)\n except:\n integer_representation=hash_dic[string_representation]\n\n return integer_representation",
"def map_base_to_int(base):\n return __BASES_MAP__[base]",
"def dec2int(r: str) -> int:",
"def from_trace_codes_text(codes_text: str) -> Mapping[int, str]:\n return {int(s[0], 16): s[1] for s in map(lambda l: l.split(), codes_text.splitlines())}",
"def sensor_id(raft, ccd):\n return 'R%s%s_S%s%s' % (raft[2], raft[4], ccd[2], ccd[4])",
"def mapped_char_to_id(self, mapped_char):\n keys = sorted(list(self.char_map.keys()))\n return 1 if mapped_char not in keys else keys.index(mapped_char) + 4",
"def read_RFID(self):\n # flushes to remove any remaining bytes\n self.serial_conn.flushInput()\n self.data = b\"\"\n\n while True:\n while self.serial_conn.inWaiting() > 0:\n self.data += self.serial_conn.read(1)\n\n if self.data:\n str_data = str(self.data, 'utf-8')\n code = self.find_key_code(str_data)\n if code:\n return code",
"def convert_str_encoded_cards_to_int_encoded(cards: List[str]) -> List[int]:\n return [card_ids[card] for card in cards]",
"def map_bitstring_digit(bitstring):\n\n\tcounter_one = 0\n\tcounter_zero = 0\n\n\tfor bit in bitstring:\n\t\t# print(\"bit:\", bit)\n\t\tif (int(bit) == 1):\n\t\t\tcounter_one += 1\n\n\t\telif (int(bit) == 0):\n\t\t\tcounter_zero += 1\n\n\tmapped_value = 2\n\n\t\t\n\tif (counter_zero > counter_one): #maps to 0\n\t\tmapped_value = 0\n\n\telse: #maps to 1\n\t\tmapped_value = 1\n\n\n\treturn mapped_value",
"def parse_ircounter(hex_str, port=None):\n data = None\n\n if (hex_str[4:6] == \"07\"):\n data = {\n 'voltage': int(hex_str[6:10], 16), # millivolts in pcb not at car battery\n 'in': int(hex_str[12:16]),\n 'out': int(hex_str[16:20]),\n }\n\n if (hex_str[4:6] == \"37\"):\n data = {\n 'in': int(hex_str[6:10]),\n 'out': int(hex_str[10:14]),\n }\n\n return data",
"def convert_ipv4_to_int(n_str):\n return reduce(lambda a, b: a << 8 | b, [int(x) for x in n_str.split(\".\")])",
"def load_uuid_map(input_fn):\n with open(input_fn) as input_fh:\n fieldnames = ['barcode', 'uuid']\n reader = csv.DictReader(input_fh, fieldnames=fieldnames)\n uuids = {}\n for row in reader:\n uuids[row['barcode']] = row['uuid']\n return uuids",
"def readNumbers(instrProgram):\n if((instrProgram in range(24,32) or instrProgram in range(40, 52))):\n return 'Strings'\n elif((instrProgram in range(80,96))):\n return 'Chords'\n elif((instrProgram in range(56, 80))):\n return 'Winds'\n else:\n return None",
"def string_id_to_integer(front_type_string):\n\n check_front_type(front_type_string)\n if front_type_string == WARM_FRONT_STRING_ID:\n return WARM_FRONT_INTEGER_ID\n\n return COLD_FRONT_INTEGER_ID",
"def get_id(f):\n name = f.split('/')[-1][-10:-4]\n\n score = int(''.join(filter(str.isdigit, f.split('/')[-1][2:]))) * 3 + 1\n if name == 'real_A':\n score += 1\n if name == 'real_B':\n score -= 1\n\n return score",
"def _convert_to_integer(srs, d):\n return srs.map(lambda x: d[x])",
"def remap_ids(self, id_map: Dict[int, int]) -> None:",
"def convert_to_numeric(sequence):\n \n int_mapped_seq=[]\n DNA_to_numeric = get_DNA_to_numeric()\n \n for n in sequence:\n int_mapped_seq.append(DNA_to_numeric[n])\n return int_mapped_seq",
"def RouteID(self, value):\n if not value:\n self.SR, self.RRT, self.RRQ = (None,)*3\n elif len(value) <= 3:\n self.SR = \"%03d\" % int(value)\n elif len(value) >= 5:\n self.SR = value[:3]\n self.RRT = value[3:5]\n self.RRQ = value[5:]",
"def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1",
"def convert_smi_to_id(smi_str):\n\n if smi_str in self.refsmi_dict:\n frag_id = self.refsmi_dict[smi_str]\n\n else:\n self.refsmi_id += 1\n self.refsmi_dict[smi_str] = self.refsmi_id\n self.refsmi_dict[self.refsmi_id] = smi_str\n frag_id = self.refsmi_id\n\n return frag_id",
"def unpack_uid(uid):\n return ''.join([str(i-1) if i < 11 else '.' for pair in [(ord(c) >> 4, ord(c) & 15) for c in uid] for i in pair if i > 0])",
"def trans_temperature(string):\r\n\treturn int(string[:2])"
] |
[
"0.6201763",
"0.5783668",
"0.5724338",
"0.57200146",
"0.5689862",
"0.5614756",
"0.560068",
"0.55919224",
"0.55771476",
"0.55649203",
"0.55375004",
"0.55303115",
"0.55156255",
"0.5489265",
"0.54641",
"0.54608136",
"0.54566324",
"0.5437163",
"0.5405148",
"0.53972507",
"0.5390915",
"0.53667754",
"0.5363352",
"0.5354195",
"0.53453636",
"0.53229654",
"0.5320906",
"0.53165734",
"0.5314102",
"0.5310148"
] |
0.64756197
|
0
|
Given a contact from the Wild Apricot member database, pulls out the list of RFIDs and privileges (ACLs) and adds them to the global RFID_list
|
def grab_RFID(debug, contact):
global RFID_list
global NAME_list
priv = ['door', 'beltsander', 'spindlesander'] # everyone gets in the door and access to some basic tools
# change: only folks that have taken the orientation get in the door!
#
#priv = []
#for field in contact.FieldValues:
# if (field.FieldName == 'Training'):
# for training in field.Value:
# if (training.Label == 'Orientation'):
# priv.append('door')
rfid = ''
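    # scan the contact's custom fields for the stored RFID value and any privilege labels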
for field in contact.FieldValues:
if (field.FieldName == 'RFID ID') and (field.Value is not None):
rfid = field.Value
if (field.FieldName == 'Privileges'):
for privilege in field.Value:
priv.append(map_acl(privilege.Label))
rfids = []
if rfid == '':
return
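    # a member may carry several badges, stored as a comma-separated list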
if ',' in rfid:
for r in rfid.split(','):
RFID_list.append({'rfid':fix_RFID(r), 'priv':priv})
rfids.append(str(fix_RFID(r)))
if debug: print ('Appending ACL - rfid:', r, 'priv:', priv)
else:
RFID_list.append({'rfid':fix_RFID(rfid), 'priv':priv})
rfids.append(str(fix_RFID(rfid)))
if debug: print ('Appending ACL - rfid:', rfid, 'priv:', priv)
rfid_str = ','.join(rfids)
NAME_list.append({'name':(contact.LastName + ',' + contact.FirstName), 'rfids':rfid_str})
if debug: print ('Appending Name: ' + contact.LastName + ',' + contact.FirstName + ' rfids: ' + rfid_str)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_fedcm_account_list(self):\n pass",
"def receiveContactList(self, contactList):",
"def read_contacts(contacts_file, uidlist, used_uid_names):\n # Read in out values to a list\n lines = read_contacts_to_list(contacts_file)\n\n # Convert our list to dict\n contactsdict = contacts_from_list_to_dict(lines, uidlist, used_uid_names)\n\n #logging.debug(\"Aliasdict: %s\" % aliasdict)\n return contactsdict",
"def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise",
"def Run(self):\n return self.ListAllContacts()",
"def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result",
"def populate_fru_list(self):\n\n # Clear the list each time this runs. Allows a user-requested\n # refresh of the list.\n self.frus_inited = False\n self.frus = {}\n\n result = \"\"\n\n #print('populate_fru_list: frus_inited = {}'.format(self.frus_inited))\n #print('populate_fru_list: crate_resetting = {}'.format(self.crate_resetting))\n #print('populate_fru_list: mch_comms.connected = {}'.format(self.mch_comms.connected))\n if (self.host != None\n and self.user != None\n and self.password != None\n and not self.crate_resetting\n and self.mch_comms.connected):\n\n # Need to repeat this until we get a proper reponse to the FRU list\n while len(result) <= 0:\n try:\n result = self.mch_comms.call_ipmitool_direct_command([\"sdr\", \"elist\", \"fru\"]).decode('ascii')\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n print(\"populate_fru_list: caught TimeoutExpired exception: {}\".format(e))\n\n # Wait a short whlie before trying again\n time.sleep(1.0)\n\n #print('populate_fru_list: result = {}'.format(result))\n\n for line in result.splitlines():\n try:\n name, ref, status, id, desc = line.split('|')\n\n # Get the AMC slot number\n bus, slot = id.strip().split('.')\n bus, slot = int(bus), int(slot)\n\n slot -= SLOT_OFFSET\n if (bus, slot) not in self.frus.keys():\n self.frus[(bus, slot)] = FRU(\n name = name.strip(),\n id = id.strip(),\n slot = slot,\n bus = bus,\n crate = self)\n except ValueError:\n print (\"Couldn't parse {}\".format(line))\n self.frus_inited = True\n # Get the MCH firmware info\n self.read_fw_version()",
"def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список контактов.')",
"def main(token, customerid=None):\n\n # All availavle schedules to user\n all_schedules = [\"All the time\"]\n\n # Collects existin schedules from user's account\n cust_schedules = schedules.get_schedule(token, customerid=customerid)\n\n # Gets the names of the schedules\n for key, _value in cust_schedules.items():\n all_schedules.append(key)\n\n single_contacts = contacts.get_all(token, customerid=customerid)\n contactgroups = group_contacts.get_all(token, customerid=customerid)\n single_contacts.update(contactgroups)\n\n single_contacts = format_contacts(single_contacts)\n\n contact_info = choose_contacts(single_contacts, all_schedules)\n\n return contact_info",
"def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list",
"def get_a_contact(self, uid):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} WHERE (id=?) ORDER BY id;\".format(\n \", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query, (uid,))\n\n return [Contact(*item) for item in data]",
"def list_contacts(self, prefix):\n sub_trie = self.find(prefix.lower())\n _crawl_trie(sub_trie, prefix)",
"def dump_RFIDs(debug, acl_dir, ts):\n # get existing list of acl files\n old_files = os.listdir(acl_dir)\n \n # sort RFIDs and generate list of privileges - we need the list to know what files to write\n privileges = set()\n RFID_list.sort(key=lambda x:x['rfid'])\n for e in RFID_list:\n for p in e['priv']:\n privileges.add(p)\n\n # open one file per privilege\n File = {}\n for f in privileges:\n fname = acl_fname_prefix + f\n fname_tmp = os.path.join(acl_dir, '.' + fname + '.tmp')\n File[f] = open (fname_tmp, 'w')\n if debug: print('Opened file:',fname_tmp)\n print('# Generated at', ts, file=File[f])\n try:\n old_files.remove(fname)\n except:\n pass\n\n # go back through list of RFIDs and write them to the appropriate file(s)\n #\n for e in RFID_list:\n for p in e['priv']:\n File[p].write(str(e['rfid'])+'\\n')\n\n # all done writing\n for f in File:\n File[f].close()\n\n # rename files just written\n for f in privileges:\n fname = os.path.join(acl_dir, acl_fname_prefix + f)\n fname_tmp = os.path.join(acl_dir, '.' + acl_fname_prefix + f + '.tmp') \n os.rename(fname_tmp, fname)\n \n # remove obsolete files\n for fname in old_files:\n if not fname.startswith(acl_fname_prefix):\n continue\n fpath = os.path.join(acl_dir, fname)\n os.unlink(fpath)",
"def contact_list(self):\n return self._contact_list",
"def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts",
"def main2():\n\t\n\tcu_locations = cu_locations_data();\n\t\n\tfor row in cu_locations:\n\t\tprint \"INSERT INTO contact ('ref_id') VALUES (%s);\" % ( row['location_id'] );",
"def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()",
"def members(self, uid=\"*\"):\n entries = self.search(uid='*')\n result = []\n for entry in entries:\n result.append(entry[1])\n return result",
"def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)",
"def contacts_from_list_to_dict(lines, uidlist, used_uid_names):\n contactdict = {}\n \n # Our dict keys are the PrimarySmtpAddresses\n for i in range(len(lines)):\n if i % 2 != 0:\n contact_key_value = extract_contact_key(lines[i-1])\n contact_list_string = lines[i]\n\n if contact_list_string.endswith(\"futurice.com.test-google-a.com\"):\n #contactdict.pop(contact_key_value)\n continue\n else:\n mail = str(contact_list_string.split(\"SMTP:\")[1]).lower()\n displayname = extract_contact_name(lines[i-1]).encode(\"ascii\", \"ignore\")\n \n if len(displayname.split()) >= 2:\n sn = displayname.split()[-1]\n else:\n sn = displayname\n \n if contact_key_value in used_uid_names:\n logging.warn(\"UID '%s' was already taken, check manually if it is a collision or the same person.\" % contact_key_value)\n continue\n \n uidNumber = get_free_uidNumber(uidlist)\n uidlist.append(uidNumber)\n \n contactdict[contact_key_value] = {\n \"uid\": contact_key_value, \n \"mail\": mail, \n \"cn\": displayname,\n #rdn_value', 'cn', 'title', 'sn', 'display\n \"displayName\" : displayname,\n \"title\" : \"customer\",\n \"sn\": sn,\n \"ntUserDomainId\" : contact_key_value,\n \"gidNumber\" : \"2000\",\n \"homeDirectory\" : \"/home/\" + contact_key_value[0] + \"/\" + contact_key_value,\n \"uidNumber\" : str(uidNumber),\n \"sambaSID\" : 'S-1-5-21-1049098856-3271850987-3507249052-%s' % (uidNumber * 2 + 1000),\n \"shadowLastChange\" : \"0\",\n #\"userPassword\" : \"!\",\n \"googlePassword\" : \"!\"\n #\"shadowMaxChange\" : \"0\"\n }\n \n return contactdict",
"def get_reagent_list(self) -> DBRecList:\n raise NotImplementedError('not implemented')",
"def list_contacts(self):\n return self.contacts",
"def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts",
"def do_list(self, arg):\n print('The roster includes the following members:\\n')\n lines = formatTable(\n map(self.memberToList, self.roster),\n [\n ColumnFormat('id', 4),\n ColumnFormat('name', 30),\n ColumnFormat('introduced', 12)\n ]) \n for line in lines: \n print(line)",
"def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})",
"def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts",
"def modify_membership(self, gfd_info):\n for member, status in gfd_info.items():\n if status:\n if member not in self.membership:\n self.membership.append(member)\n else:\n self.membership.remove(member)\n\n # Send change_replica_ips request to the client \n self.send_replica_IPs()\n\n # Elect a new primary if running on passive mode.\n if self.mode == 'passive':\n if member == self.primary:\n self.pick_primary()\n print(\"\\n The current membership is :\")\n print(self.membership)\n \n return",
"def main():\n nfc = pyfare.NFC(settings.ENTER)\n prev = None, None, None\n prevprev = None, None, None\n while True:\n target = nfc.waitfortarget()\n\n #3s cool down for same card after valid\n if prev[2] and prev[0] > time.time()-3 and prev[1] == _id(target):\n continue\n\n #Throttle logs for retries\n if prev[0] < time.time()-3 or prev[1] != _id(target):\n LOG.info(\"Reading card: \" + _idprint(target))\n\n #Check for valid card\n try:\n with open('/home/gilliam/bridgeofdeath/' + _id(target) + '.dat', 'rb') as infile:\n token = infile.read()\n valid = True\n except IOError:\n pass\n else:\n if nfc.verify(token):\n LOG.info(\"Valid card\")\n bad = None, None\n http = requests.get('https://members.rlab.org.uk/member/name/',auth=(_idprint(target),''),cert='/home/gilliam/client.pem')\n LOG.info(\"Member name: \" + http.text)\n #_touch('SESAME')\n #nfc.unlock()\n\n #Check for magic three card sequence to auth a new card\n if prevprev[2] and not(prev[2]) and prevprev[0] > time.time()-6 and prevprev[1] == _id(target):\n LOG.info(\"Three card sequence\")\n try:\n open('/home/gilliam/bridgeofdeath/admin/' + _id(target) + '.dat', 'rb')\n LOG.info(\"Admin card\")\n try:\n shutil.move('/home/gilliam/bridgeofdeath/new/' + prev[1] + '.dat', '/home/gilliam/bridgeofdeath/' + prev[1] + '.dat')\n LOG.info(\"New card authorized:\" + prev[1])\n except:\n LOG.warn(\"Not pre-authed card\")\n except IOError:\n LOG.info(\"Not admin\")\n\n #Track last two cards for magic three card sequence\n if prev[1] != _id(target) or prev[0] < time.time()-3 or prev[2] != valid:\n prevprev = prev\n prev = time.time(), _id(target), valid",
"def get_members(\n db: sqlite3.Cursor,\n addressbook: Addressbook,\n thread_id: int,\n versioninfo: VersionInfo,\n) -> List[Recipient]:\n thread_rid_column = versioninfo.get_thread_recipient_id_column()\n if versioninfo.is_addressbook_using_rids():\n query = db.execute(\n \"SELECT r._id, g.members \"\n \"FROM thread t \"\n \"LEFT JOIN recipient r \"\n f\"ON t.{thread_rid_column} = r._id \"\n \"LEFT JOIN groups g \"\n \"ON g.group_id = r.group_id \"\n \"WHERE t._id = :thread_id\",\n {\"thread_id\": thread_id},\n )\n query_result = query.fetchall()\n recipient_id, thread_members = query_result[0]\n else:\n query = db.execute(\n \"SELECT t.recipient_ids, g.members \"\n \"FROM thread t \"\n \"LEFT JOIN groups g \"\n \"ON t.recipient_ids = g.group_id \"\n \"WHERE t._id = :thread_id\",\n {\"thread_id\": thread_id},\n )\n query_result = query.fetchall()\n recipient_id, thread_members = query_result[0]\n\n if not thread_members is None:\n member_addresses = thread_members.split(\",\")\n members = []\n for address in member_addresses:\n recipient = addressbook.get_recipient_by_address(address)\n members.append(recipient)\n else:\n members = [addressbook.get_recipient_by_address(recipient_id)]\n return members",
"def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )"
] |
[
"0.5793463",
"0.5556666",
"0.55314684",
"0.5385785",
"0.5292811",
"0.528041",
"0.5224662",
"0.5192124",
"0.51767176",
"0.5069077",
"0.5021138",
"0.5014443",
"0.500264",
"0.4993337",
"0.4978985",
"0.4968212",
"0.4967215",
"0.49658525",
"0.4953307",
"0.4927711",
"0.49252343",
"0.49188384",
"0.49067912",
"0.4906261",
"0.49051446",
"0.4887335",
"0.48848403",
"0.48848024",
"0.48802987",
"0.48766404"
] |
0.7778988
|
0
|
Reads the stored list of RFIDs and writes ACL files with that data
|
def dump_RFIDs(debug, acl_dir, ts):
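    # relies on the module-level RFID_list built by grab_RFID() and on the acl_fname_prefix constant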
# get existing list of acl files
old_files = os.listdir(acl_dir)
# sort RFIDs and generate list of privileges - we need the list to know what files to write
privileges = set()
RFID_list.sort(key=lambda x:x['rfid'])
for e in RFID_list:
for p in e['priv']:
privileges.add(p)
# open one file per privilege
File = {}
for f in privileges:
fname = acl_fname_prefix + f
fname_tmp = os.path.join(acl_dir, '.' + fname + '.tmp')
File[f] = open (fname_tmp, 'w')
if debug: print('Opened file:',fname_tmp)
print('# Generated at', ts, file=File[f])
try:
old_files.remove(fname)
except:
pass
# go back through list of RFIDs and write them to the appropriate file(s)
#
for e in RFID_list:
for p in e['priv']:
File[p].write(str(e['rfid'])+'\n')
# all done writing
for f in File:
File[f].close()
# rename files just written
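    # write-to-temp-then-rename keeps readers from ever seeing a half-written ACL file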
for f in privileges:
fname = os.path.join(acl_dir, acl_fname_prefix + f)
fname_tmp = os.path.join(acl_dir, '.' + acl_fname_prefix + f + '.tmp')
os.rename(fname_tmp, fname)
# remove obsolete files
for fname in old_files:
if not fname.startswith(acl_fname_prefix):
continue
fpath = os.path.join(acl_dir, fname)
os.unlink(fpath)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def dump_NAMEs(debug, acl_dir, ts):\n NAME_list.sort(key=lambda x:x['name'])\n\n fname_tmp = os.path.join(acl_dir, 'name_to_rfid')\n f = open (fname_tmp, 'w')\n if debug: print('Opened file:',fname_tmp)\n\n for e in NAME_list:\n f.write(e['name']+':'+str(e['rfids'])+'\\n')\n\n # all done writing\n f.close()",
"def grab_RFID(debug, contact):\n global RFID_list\n global NAME_list\n\n priv = ['door', 'beltsander', 'spindlesander'] # everyone gets in the door and access to some basic tools\n # change: only folks that have taken the orientation get in the door!\n #\n #priv = []\n #for field in contact.FieldValues:\n # if (field.FieldName == 'Training'):\n # for training in field.Value:\n # if (training.Label == 'Orientation'):\n # priv.append('door')\n\n rfid = ''\n for field in contact.FieldValues:\n if (field.FieldName == 'RFID ID') and (field.Value is not None):\n rfid = field.Value\n if (field.FieldName == 'Privileges'):\n for privilege in field.Value:\n priv.append(map_acl(privilege.Label))\n rfids = []\n if rfid == '':\n return\n if ',' in rfid:\n for r in rfid.split(','):\n RFID_list.append({'rfid':fix_RFID(r), 'priv':priv})\n rfids.append(str(fix_RFID(r)))\n if debug: print ('Appending ACL - rfid:', r, 'priv:', priv)\n else:\n RFID_list.append({'rfid':fix_RFID(rfid), 'priv':priv})\n rfids.append(str(fix_RFID(rfid)))\n if debug: print ('Appending ACL - rfid:', rfid, 'priv:', priv)\n\n rfid_str = ','.join(rfids)\n NAME_list.append({'name':(contact.LastName + ',' + contact.FirstName), 'rfids':rfid_str})\n if debug: print ('Appending Name: ' + contact.LastName + ',' + contact.FirstName + ' rfids: ' + rfid_str)",
"def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)",
"def create_ta_list(ta_list):\n with open(ta_list, \"r\") as ta_file:\n user_list = ta_file.readlines()\n add_to_db(\"ta_list\", user_list[1:])\n add_to_online_db(\"online_ta\", user_list[1:])\n add_to_rating_db(\"ta_rating\", user_list[1:])",
"def add_from_uuid_list(self):\n\n uuids = self._read_file()\n if not uuids:\n return\n\n for uuid in uuids:\n uuid = uuid.split('\\n')[0]\n\n # Checks if lenght of the uuid is correct\n if not check_uuid_authenticity(uuid):\n self.report.add('Invalid uuid lenght.')\n continue\n \n self.add_record.push_record_by_uuid(self.global_counters, uuid)\n return",
"def __storeToFile(self, allR):\n with open(self.__fileR, \"w\") as f:\n for bk in allR:\n f.write(bk.get_idBook()+\",\"+bk.get_idCustomer()+\",\"+bk.get_flag()+\",\"+bk.get_id()+\"\\n\")",
"def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i",
"def get_acls():\n return config.get_cfg_storage(ID_ACL)",
"def write_guids(guidlist):\n guid_file = os.path.join(os.path.expanduser(\"~\"), REQALL_GUIDS_FILE)\n try:\n logfile = open(guid_file, 'w')\n cPickle.dump(guidlist, logfile)\n except IOError:\n print \"Error writing to %\" % (guid_file)\n\n logfile.close()",
"def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)",
"def write_manifests( file_lists, target_dir, output_dir ):\n for i, lst in enumerate( file_lists ):\n with open( os.path.join( output_dir, \"manifest-{}.txt\".format( i ) ), \"w\" ) as fout:\n for r in lst:\n fout.write( insert_rsync_marker( r, target_dir ) + \"\\n\" )",
"def export_list_to_xacro(list, filename):\n global robot, OUTPUT\n doc = Document()\n root = doc.createElement('robot')\n doc.appendChild(root)\n root.setAttribute(\"xmlns:xacro\", \"http://www.ros.org/wiki/xacro\")\n print ('exporting ' + os.path.basename(filename))\n for string in list:\n for link in robot.links:\n if robot.links[link].name.find(string) != -1:\n root.appendChild(robot.links[link].to_xml(doc))\n for joint in robot.joints:\n if robot.joints[joint].child == robot.links[link].name:\n root.appendChild(robot.joints[joint].to_xml(doc))\n write_comments_in_xacro(doc, filename)",
"def read_ls(listfile, root_path, blocksize=750000, buffer_length=1.e5,debug_interval=1000):\n chunksize=int(50.e6)\n blocksize = int(blocksize)\n blanks = re.compile('\\s+')\n stripQuotes = re.compile('.*\\\"(.*)\\\".*')\n getName = re.compile('(?P<left>.*)\\\"(?P<name>.*)\\\".*')\n\n columnNames = ['permission', 'links', 'owner', 'theGroup', 'size', 'date',\n 'directory', 'name', 'hash']\n frame_list = []\n mylog = logging.getLogger('main')\n mylog.propagate = False\n print('here are the handlers', mylog.handlers)\n new_dask_file=True\n with open(listfile, 'r', encoding='utf-8') as f:\n errlist = []\n counter = 0\n collect = []\n frame_list=[]\n debug_counter=0\n for newline in f:\n if (counter > 0) & (counter % blocksize == 0):\n if len(collect) == 0:\n counter+=1\n continue\n print(f\"new frame creation: linecount: {counter}\")\n new_frame = pd.DataFrame.from_records(collect)\n frame_list.append(new_frame)\n # if len(frame_list) > 2:\n # collect=[]\n # break\n collect = []\n if counter % debug_interval == 0:\n mylog.info('debug %s %d', 'dump: ', counter)\n newline = newline.strip()\n if len(newline) > 0:\n if newline[-1] == \":\":\n # found a directory name\n # \"/Users/phil/www.physics.mcgill.ca/~gang/ftp.transfer\":\n dirname = stripQuotes.match(newline)\n dirname_capture = dirname.group(1)\n continue\n else:\n test = getName.match(newline)\n # -rw-r--r-- 1 phil users 0 2005-10-06 12:28:09.000000000 -0700 \"/home/phil/eosc211_fall2005.txt~\"\n if test:\n #\n # skip directories and symbolic links\n #\n if test.group(\"left\")[0] == 'd' or test.group(\"left\")[\n 0] == 'l':\n continue\n #\n # check for a path name like /home/phil/eosc211_fall2005.txt\n #\n head_path, filename = os.path.split(test.group(\"name\"))\n if len(head_path) != 0:\n raise ValueError(\n \"expecting a naked filename, got {}\".format(\n test.group(\"name\")))\n try:\n permission,links,owner,theGroup,size,date,time,offset =\\\n blanks.split(test.group(\"left\").strip())\n #the_hash=hashlib.sha256('{}/{}'.format(dirname,filename).encode('utf-8')).hexdigest()\n # the_hash = check_md5('{}/{}'.format(\n # dirname_capture, filename),\n # buffer_length=buffer_length)\n the_hash = 999.\n except ValueError:\n saveit = dict(newline=newline,\n splitout=repr(blanks.split(\n test.group(\"left\").strip())),\n dirname=head_path,\n filename=filename,\n counter=counter)\n errmsg=\\\n \"\"\"\n __________\n caught ValueError trying to split {newline:s}\n output of split is {splitout:s}\n filename is {dirname:s}/{filename:s}\n counter value is {counter:d}\n __________\n \"\"\".format(**saveit)\n errlist.append(errmsg)\n continue\n size = int(size)\n string_date = \" \".join([date, time, offset])\n date_with_tz = du.parse(string_date)\n date_utc = date_with_tz.astimezone(timezone('UTC'))\n timestamp = int(date_utc.strftime('%s'))\n #columnNames=['permission','links','owner','theGroup','size','date','directory','name','hash']\n if dirname_capture.find(root_path) < 0:\n raise ValueError(\n f'dirname root error for dirname= {dirname_capture} and rootpath= {root_path} with counter={counter}')\n if dirname_capture == root_path:\n dirname = '.'\n else:\n #\n # add two spaces for slashes: /Users/\n #\n dirname = dirname_capture[len(root_path)+2:]\n if debug_counter < 10:\n print('debug: ',dirname)\n debug_counter+=1\n out = (permission, links, owner, theGroup, size,\n timestamp, dirname, filename, the_hash)\n\n collect.append(dict(list(zip(columnNames, out))))\n ## print string_date\n ## print date_utc\n ## print 
dt.datetime.fromtimestamp(timestamp)\n counter += 1\n if len(collect) != 0:\n print(\"linecount: \", counter)\n new_frame=pd.DataFrame.from_records(collect)\n frame_list.append(new_frame)\n print('inserting final {} lines'.format(len(collect)))\n return counter, frame_list, errlist",
"def _init_serial(self):\n index_name = self.ca_dir + '/index.txt'\n serial_name = self.ca_dir + '/serial'\n with open(index_name, 'w'):\n pass\n with open(serial_name, 'w') as serial:\n serial.writelines(['%d' % CA_SERIAL])",
"def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))",
"def write_ids(ids,fname) :\n\twith open(fname,'w') as fout : \n\t\tfor id in ids: \n\t\t\tfout.write(id+\"\\n\")",
"def file_update(self, data):\n file = open(\"../util/LinkedList_File\", \"r+\")\n file.truncate(0)\n file.close()\n if self.search_item(data) == True:\n self.remove(data)\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \", )\n file.close()\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()\n else:\n self.add(data)\n\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \")\n file.close()\n\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()",
"def _add_acl_sequence_numbers(self):\n\n ipv4_acl_sw = 'ip access-list'\n # ipv6_acl_sw = ('ipv6 access-list')\n if self.host.os in ['ios']:\n acl_line_sw = ('permit', 'deny')\n else:\n acl_line_sw = ('permit', 'deny', 'remark')\n for child in self.children:\n if child.text.startswith(ipv4_acl_sw):\n sn = 10\n for sub_child in child.children:\n if sub_child.text.startswith(acl_line_sw):\n sub_child.text = \"{} {}\".format(sn, sub_child.text)\n sn += 10\n\n return self",
"def write_data(data):\r\n\r\n db = open(os.getcwd() + \"/www/access_list.txt\", 'w')\r\n json.dump(dict(data), db)",
"def create_tffile(self, path, fl_list, outpath):\n with tf.python_io.TFRecordWriter(outpath) as wr:\n for vl in fl_list:\n exam = self.create_tfrec(path, vl[0], vl[1])\n wr.write(exam.SerializeToString())",
"def _write(self):\n raw_data = {'file_version': 1}\n raw_creds = []\n raw_data['data'] = raw_creds\n for (cred_key, cred) in self._data.items():\n raw_key = dict(cred_key)\n raw_cred = json.loads(cred.to_json())\n raw_creds.append({'key': raw_key, 'credential': raw_cred})\n self._locked_json_write(raw_data)",
"def sample():\n write_drive_catalog_file(\"j:\\\\\", \"SANSA2_1G\", r\"c:\\SANSA2_1G.txt\")\n write_drive_catalog_file(\"k:\\\\\", \"8GB\", r\"c:\\8GB.txt\")\n write_master_catalog_file([r\"c:\\SANSA2_1G.txt\", r\"c:\\8GB.txt\"], r\"c:\\Master_Catalog.txt\")\n entries = read_catalog_file_entries(r\"c:\\Master_Catalog.txt\")\n for entry in entries:\n println(entry_to_line(entry))",
"def write_filelist_to_db(files_online):\n db.write_exchange_log_to_db(ac, files_online, \"write_also_to_console\")\n db.write_log_to_db(ac, ac.app_desc + \" Filelist in db geschrieben\", \"t\")",
"def createLists(listDirectory, dataset, name=\"\"):\n if name==\"\": \n \tname = getDatasetNameFromPath(dataset)\n files = getFileListDAS(dataset)\n \n fileName = listDirectory+\"/\"+name+\".txt\"\n with open(fileName, \"w\") as f:\n for l in files:\n f.write(\"%s\\n\" % l)\n print \"Wrote file list: \", fileName\n return",
"def write_out_account_numbers_and_balances(list_of_all_accounts_known):\n with open('./practise_accounts.txt', mode='wt') as accounts_and_balances_to_write_out:\n for accounts in list_of_all_accounts_known:\n accounts_and_balances_to_write_out.writelines('{0} {1}\\n'.format(accounts.account_id, accounts.balance));\n # end of withblock, close open file writing",
"def post_access_control_list_read(self, resource_id, resource_dict):\n pass",
"def store_list( q_frame, final, path_out):\n if not os.path.exists( path_out ):\n os.makedirs( path_out )\n\n fid = open( os.path.join(path_out, os.path.basename(q_frame) +'.txt'), \"wb\" )\n for k in final:\n fid.write( \"{}\\n\".format( k ) )\n fid.close()",
"def file_update(self, data):\n file = open(\"../util/LinkedList_File\", \"r+\")\n file.truncate(0)\n file.close()\n if self.search_item(data) == True:\n self.remove(data)\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n linkedlist_content = []\n linkedlist_content = self.display_content()\n\n for i in linkedlist_content:\n file.write(i + \" \", )\n file.close()\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()\n else:\n self.append(data)\n\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n linkedlist_content = []\n linkedlist_content = self.display_content()\n\n for i in linkedlist_content:\n file.write(i + \" \")\n file.close()\n\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()",
"def _reload_acls(self):\n\t\tself.acls = ACLs()",
"def cmd_list (self, line):\r\n try:\r\n dir_list_producer = self.get_dir_list (line, 1)\r\n except os.error as why:\r\n self.respond ('550 Could not list directory: %s' % why)\r\n return\r\n self.respond (\r\n '150 Opening %s mode data connection for file list' % (\r\n self.type_map[self.current_mode]\r\n )\r\n )\r\n self.make_xmit_channel()\r\n self.client_dc.push_with_producer (dir_list_producer)\r\n self.client_dc.close_when_done()"
] |
[
"0.62157315",
"0.58563346",
"0.5717289",
"0.5391716",
"0.5216398",
"0.5095185",
"0.5009426",
"0.5003885",
"0.49178278",
"0.49156103",
"0.49055976",
"0.48633957",
"0.48611525",
"0.4849756",
"0.48304555",
"0.4824907",
"0.47930318",
"0.4769174",
"0.47683936",
"0.47630203",
"0.47553477",
"0.47551405",
"0.47545886",
"0.4750818",
"0.4718713",
"0.47120795",
"0.4702957",
"0.46924016",
"0.46918607",
"0.46904165"
] |
0.72992396
|
0
|
Make symlinks in this project that point to the project's resource folders
|
def link_resources(ctx):
for resource in RESOURCES:
command = "ln -s -r -f -T {res}/{resource} {proj}/{resource}".format(
res=RESOURCE_DIR,
proj=PROJECT_DIR,
resource=resource)
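        # ln flags: -r makes the link relative, -f overwrites an existing link, -T treats the destination name literally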
print("Running")
print(command)
print("-----------------------------")
ctx.run(command)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def resources(self):",
"def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))",
"def project_linkage():\n current_dir = os.getcwd()\n ve_lib = os.path.join(current_dir, 'fabric_factory', 've', 'lib')\n \n python_version = os.listdir(ve_lib).pop()\n for target_dir in [\"project\", \"worker\", \"factory\"]:\n if not os.path.islink(\n os.path.join(ve_lib, python_version,\n \"site-packages\", target_dir)):\n local('ln -s %s %s' %\n (\n os.path.join(current_dir,\"fabric_factory\", \"src\", target_dir),\n os.path.join(ve_lib, python_version,\n \"site-packages\", target_dir)\n )\n )\n else:\n print 'link to %s already exists' %target_dir",
"def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path",
"def resource_path(self,relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\\\\Visual_Ressources\\\\\"+self.language+\"\\\\\") \n # \".\"\n # 'Content\\\\Back_End\\\\'\n return os.path.join(base_path, relative_path)",
"def make_links(self):\n for filepath in list(self):\n self.make_link(filepath)",
"def getProjectURL():",
"def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]",
"def linking_library_dirs(self):",
"def _set_rel_paths(self):\n if self.working_dir is not None:\n self._rel_working_dir = os.path.relpath(self.working_dir)\n if self.alignment is not None:\n self._rel_alignment = os.path.relpath(self.alignment, \n self.working_dir)\n if self.out_file is not None:\n self._rel_out_file = os.path.relpath(self.out_file, \n self.working_dir)",
"def resources_path(*args):\n path = os.path.dirname(__file__)\n path = os.path.abspath(\n os.path.join(path, os.path.pardir, 'resources'))\n for item in args:\n path = os.path.abspath(os.path.join(path, item))\n\n return path",
"def add_static_paths(app):\n app.env.book_theme_resources_changed = False\n\n output_static_folder = Path(app.outdir) / \"_static\"\n theme_static_files = resources.contents(theme_static)\n\n if (\n app.config.html_theme_options.get(\"theme_dev_mode\", False)\n and output_static_folder.exists()\n ):\n # during development, the JS/CSS may change, if this is the case,\n # we want to remove the old files and ensure that the new files are loaded\n for path in output_static_folder.glob(\"sphinx-book-theme*\"):\n if path.name not in theme_static_files:\n app.env.book_theme_resources_changed = True\n path.unlink()\n # note sphinx treats theme css different to regular css\n # (it is specified in theme.conf), so we don't directly use app.add_css_file\n for fname in resources.contents(theme_static):\n if fname.endswith(\".css\"):\n if not (output_static_folder / fname).exists():\n (output_static_folder / fname).write_bytes(\n resources.read_binary(theme_static, fname)\n )\n app.env.book_theme_resources_changed = True\n\n # add javascript\n for fname in resources.contents(theme_static):\n if fname.endswith(\".js\"):\n app.add_js_file(fname)",
"def _create_links(self):\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))",
"def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")",
"def _update_urls(self):\n\n to_fix = [\n # We fix the urls in the README file.\n PyFunceble.CONFIG_DIRECTORY + \"README.rst\",\n # We fix the urls in the configuration file.\n PyFunceble.CONFIG_DIRECTORY + \".PyFunceble_production.yaml\",\n # We fix the urls in the setup.py file.\n PyFunceble.CONFIG_DIRECTORY + \"setup.py\",\n # We fix the urls in the documentation index.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"index.rst\",\n # We fix the urls in the documentation logic representation.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"code\"\n + directory_separator\n + \"logic-representation.rst\",\n # We fix the urls in the usage documentation.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"usage\"\n + directory_separator\n + \"from-a-terminal.rst\",\n # We fix the urls in the links configuration documentation.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"configuration\"\n + directory_separator\n + \"links.rst\",\n ]\n\n for fix_it in to_fix:\n if PyFunceble.helpers.File(fix_it).exists():\n self._update_docs(fix_it)\n elif PyFunceble.helpers.Directory(fix_it).exists():\n for root, _, files in walk(fix_it):\n for file in files:\n self._update_docs(root + directory_separator + file)\n else:\n raise FileNotFoundError(fix_it)",
"def resources(filename):\n return send_from_directory(\"resources\", filename)",
"def resources(self, resources):\n self._resources = resources",
"def project():",
"def project():",
"def project():",
"def to_projectlink(self):\n\n thumb_image_url = reverse('project_serve_file', args=[self.short_name,self.logo])\n\n args = {\"abreviation\":self.short_name,\n \"title\":self.short_name,\n \"description\":self.description,\n \"URL\":reverse('comicsite.views.site', args=[self.short_name]),\n \"download URL\":\"\",\n \"submission URL\":self.get_submission_URL(),\n \"event name\":self.event_name,\n \"year\":\"\",\n \"event URL\":self.event_url,\n \"image URL\":self.logo,\n \"thumb_image_url\":thumb_image_url,\n \"website section\":\"active challenges\",\n \"overview article url\":self.publication_url,\n \"overview article journal\":self.publication_journal_name,\n \"overview article citations\":\"\",\n \"overview article date\":\"\",\n \"submission deadline\":\"\",\n \"workshop date\":self.workshop_date,\n \"open for submission\":\"yes\" if self.is_open_for_submissions else \"no\",\n \"data download\":\"yes\" if self.offers_data_download else \"no\",\n \"dataset downloads\":self.number_of_downloads,\n \"registered teams\":\"\",\n \"submitted results\":self.number_of_submissions,\n \"last submission date\":self.last_submission_date,\n \"hosted on comic\":True,\n \"created at\":self.created_at\n }\n\n projectlink = ProjectLink(args)\n return projectlink",
"def run(self):\n logging.debug('Relinking All Programs')\n\n dst = pakit.conf.CONFIG.path_to('link')\n walk_and_unlink_all(dst, pakit.conf.CONFIG.path_to('prefix'))\n\n for _, recipe in pakit.recipe.RDB:\n walk_and_link(recipe.install_dir, dst)",
"def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )",
"def get_resources_abs_path() -> pathlib.Path:\n return PathManager._ROOT.joinpath(\n PathManager._TILINGS_GUI, PathManager._RESOURCES\n )",
"def symlink():\n releases()\n env.current_path = '/root/your_project/current'\n run('rm %(current_path)s' % env)\n run('ln -s %(current_release)s %(current_path)s' % env)",
"def create_links(self, name):\n for target, linknames in self._link_map.iteritems():\n for linkname in linknames:\n self._api.path.mock_copy_paths(target, linkname)\n self._api.python(\n name,\n self._resource,\n args = [\n '--link-json',\n self._api.json.input({str(target) : linkname\n for target, linkname in self._link_map.iteritems()\n }),\n ],\n infra_step=True)",
"def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)",
"def __init__(self, root, api, symlink_resource):\n assert root and isinstance(root, config_types.Path)\n self._root = root\n self._api = api\n self._resource = symlink_resource\n # dict[Path]list(Path): Maps target to a list of linknames.\n self._link_map = {}",
"def linkAssets(des, Xrc):\n with open(des, 'r') as f:\n body = f.read()\n f.close()\n with open(des, 'w') as f:\n body = body.replace(\"custom.css\", \"\\\\\" + Xrc[\"gh_repo_name\"] + \"/Assets\" + \"/css\" + \"/custom.css\")\n f.write(body)\n f.close()\n ccc.success(\"linking assets to \" + des)",
"def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)"
] |
[
"0.6542292",
"0.65333784",
"0.6218728",
"0.6017278",
"0.5855742",
"0.58258486",
"0.57360697",
"0.57241046",
"0.57001704",
"0.5696988",
"0.56854105",
"0.5660929",
"0.56329405",
"0.5610566",
"0.5601242",
"0.5526083",
"0.55225146",
"0.55170727",
"0.55170727",
"0.55170727",
"0.5509046",
"0.5458762",
"0.5443087",
"0.5442857",
"0.5425645",
"0.54186803",
"0.541784",
"0.54053885",
"0.5369334",
"0.53625154"
] |
0.72876143
|
0
|
Return enumerated responses for various datasets. Raises NotImplementedError if the dataset doesn't support enumerated responses.
|
def get_responses(data_folder):
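    # only digit-style datasets are covered here: data_folder is ignored and the labels '0'..'9' are returned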
return [str(i) for i in range(10)]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def get_datasets(db_pool, query_parameters, include_dataset):\n hit_datasets = []\n miss_datasets = []\n response = []\n dataset_ids = query_parameters[-2]\n\n # Fetch datasets where the variant is found\n hit_datasets = await fetch_resulting_datasets(db_pool, query_parameters)\n LOG.debug(f\"hit_datasets: {hit_datasets}\")\n\n # If the response has to include the datasets where the variant is not found, \n # we want to fetch info about them and shape them to be shown\n if include_dataset in ['ALL', 'MISS']:\n list_all = list(map(int, dataset_ids.split(\",\")))\n LOG.debug(f\"list_all: {list_all}\")\n list_hits = [dict[\"internalId\"] for dict in hit_datasets]\n LOG.debug(f\"list_hits: {list_hits}\")\n accessible_missing = [int(x) for x in list_all if x not in list_hits]\n LOG.debug(f\"accessible_missing: {accessible_missing}\")\n miss_datasets = await fetch_resulting_datasets(db_pool, query_parameters, misses=True, accessible_missing=accessible_missing)\n response = hit_datasets + miss_datasets\n return response",
"def GetDataEncodings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def GetDataEncodings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __iter__(self):\n return iter(self._datasets)",
"async def values(self) -> Iterable[ResponseOrKey]:",
"def indicators(self) -> 'outputs.TIDataConnectorDataTypesResponseIndicators':\n return pulumi.get(self, \"indicators\")",
"def datasets(self):\n return [Dataset.ENSEMBL]",
"def download_dataset(self):\n raise NotImplementedError",
"def get_responses(self, scaled=True, use_indices=True):\n return self._get_variables_of_type('response', scaled, use_indices)",
"def _create_response_objects(self) -> list[JsonDict]:\n responses = []\n for feat_type, feat_name, _ in self.features:\n if feat_type.is_array():\n feat_name = cast(str, feat_name) # can only be string since it's an array type\n responses.append(SentinelHubRequest.output_response(feat_name, MimeType.TIFF))\n elif feat_type.is_meta():\n responses.append(SentinelHubRequest.output_response(\"userdata\", MimeType.JSON))\n else:\n # should not happen as features have already been validated\n raise ValueError(f\"{feat_type} not supported!\")\n\n return responses",
"def get_response_data(self):\r\n raise NotImplementedError",
"def get_data_set(self):\n raise exceptions.NotImplemented",
"def GetDataEncodings(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")",
"async def test_25() -> None:\n LOG.debug(\"Test query for targeting three datasets, using ALL. (expect data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 10,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\", \"urn:hg:1000genome:registered\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 3, sys.exit(\"Should be able to retrieve data for all datasets.\")",
"def _get_responses(self):\n result: Dict[Type[BaseModel], Dict[int, ResponseData]] = defaultdict(lambda: {})\n\n for response_class, code, data in chain(\n self._get_response_from_annotation(),\n self._get_responses_from_decorators(),\n self._get_responses_from_raises(),\n ):\n if code in result[response_class].keys():\n raise TypeError(\"Multiple responses declared with the same schema and code\")\n\n result[response_class][code] = data\n\n return result",
"def _get_responses_from_decorators(self) -> Generator[Tuple[Type[BaseModel], int, ResponseData], None, None]:\n for decorator in self._find_decorators(RespondsWithDecorator):\n yield decorator.response_class, decorator.code, ResponseData(decorator.description, decorator.mimetype)",
"def download_dataset(\n dataset: Literal['penntreebank', 'wikitext2', 'wikitext103'],\n data_dir: str,\n) -> tuple[IterableDataset[str], IterableDataset[str], IterableDataset[str]]:\n datasets = {}\n for split in ('train', 'valid', 'test'):\n if dataset.lower() == 'penntreebank':\n datasets[split] = PennTreebank(root=data_dir, split=split)\n elif dataset.lower() == 'wikitext2':\n datasets[split] = WikiText2(root=data_dir, split=split)\n elif dataset.lower() == 'wikitext103':\n datasets[split] = WikiText103(root=data_dir, split=split)\n else:\n raise AssertionError(f'Unsupported dataset {dataset}.')\n\n return (datasets['train'], datasets['valid'], datasets['test'])",
"def datasets(self):\n return [Dataset.GWAS_CATALOG, Dataset.CLINVAR, Dataset.EFO]",
"def get(datasets_identifiers, identifier_type='hid', history_id=None, retrieve_datatype=None):\n history_id = history_id or os.environ['HISTORY_ID']\n # The object version of bioblend is to slow in retrieving all datasets from a history\n # fallback to the non-object path\n gi = get_galaxy_connection(history_id=history_id, obj=False)\n file_path_all = []\n datatypes_all = []\n\n if type(datasets_identifiers) is not list:\n datasets_identifiers = [datasets_identifiers]\n\n if identifier_type == \"regex\":\n datasets_identifiers = find_matching_history_ids(datasets_identifiers)\n identifier_type = \"hid\"\n\n\n for dataset_id in datasets_identifiers:\n file_path = '/import/%s' % dataset_id\n log.debug('Downloading gx=%s history=%s dataset=%s', gi, history_id, dataset_id)\n # Cache the file requests. E.g. in the example of someone doing something\n # silly like a get() for a Galaxy file in a for-loop, wouldn't want to\n # re-download every time and add that overhead.\n if not os.path.exists(file_path):\n hc = HistoryClient(gi)\n dc = DatasetClient(gi)\n history = hc.show_history(history_id, contents=True)\n datasets = {ds[identifier_type]: ds['id'] for ds in history}\n if retrieve_datatype:\n datatypes_all.append({ds[identifier_type]: ds['extension'] for ds in history})\n if identifier_type == 'hid':\n dataset_id = int(dataset_id)\n dc.download_dataset(datasets[dataset_id], file_path=file_path, use_default_filename=False)\n else:\n hc = HistoryClient(gi)\n dc = DatasetClient(gi)\n history = hc.show_history(history_id, contents=True)\n datatypes_all.append({ds[identifier_type]: ds['extension'] for ds in history})\n log.debug('Cached, not re-downloading')\n\n file_path_all.append(file_path)\n\n ## First path if only one item given, otherwise all paths.\n ## Should not break compatibility.\n if retrieve_datatype:\n if len(file_path_all) == 1:\n dataset_number = int(file_path_all[0].strip().split(\"/\")[-1])\n return file_path_all, datatypes_all[0][dataset_number]\n else:\n datatype_multi = dict()\n for i in file_path_all:\n dataset_number = int(i.strip().split(\"/\")[-1])\n datatype_multi[dataset_number] = datatypes_all[0][dataset_number]\n return file_path_all, datatype_multi\n else:\n return file_path_all[0] if len(file_path_all) == 1 else file_path_all",
"def list_datasets(self):\n if self.list_type == \"base\":\n ds = Dataset(f\"{self.pool}/iocage/releases\").get_dependents()\n elif self.list_type == \"template\":\n ds = Dataset(\n f\"{self.pool}/iocage/templates\").get_dependents()\n else:\n ds = Dataset(f\"{self.pool}/iocage/jails\").get_dependents()\n\n ds = list(ds)\n\n if self.list_type in ('all', 'basejail', 'template'):\n if self.quick:\n _all = self.list_all_quick(ds)\n else:\n _all = self.list_all(ds)\n\n return _all\n elif self.list_type == \"uuid\":\n jails = {}\n\n for jail in ds:\n uuid = jail.name.rsplit(\"/\", 1)[-1]\n try:\n jails[uuid] = jail.properties[\"mountpoint\"]\n except KeyError:\n iocage_lib.ioc_common.logit(\n {\n 'level': 'ERROR',\n 'message': f'{jail.name} mountpoint is '\n 'misconfigured. Please correct this.'\n },\n _callback=self.callback,\n silent=self.silent\n )\n\n template_datasets = Dataset(\n f'{self.pool}/iocage/templates').get_dependents()\n\n for template in template_datasets:\n uuid = template.name.rsplit(\"/\", 1)[-1]\n jails[uuid] = template.properties['mountpoint']\n\n return jails\n elif self.list_type == \"base\":\n bases = self.list_bases(ds)\n\n return bases",
"def get_iterators(data: Dict[str, Dict[str, Dataset]], include_domains: List[str], split: str = 'train', \n collapse_domains: bool = False, batch_size: int = 64, device='cpu', **kwargs) -> Dict[str, 'Iterator']:\n iterators = {}\n if collapse_domains:\n # collect instances from `split` of every domain\n all_examples = [example for domain, splits in data.items()\\\n for example in splits[split].examples\\\n if domain in include_domains]\n arbitrary_split_fields = list(data.values())[0][split].fields\n all_dataset = Dataset(all_examples, fields=arbitrary_split_fields)\n iterators['all'] = Iterator(all_dataset, batch_size=batch_size, device=device)\n else:\n for domain, splits in data.items():\n if domain in include_domains:\n iterators[domain] = Iterator(splits[split], batch_size=batch_size, device=device)\n return iterators",
"def all_data(\n request: Any,\n data: ExtensionArray,\n data_missing: ExtensionArray,\n) -> ExtensionArray:\n if request.param == \"data\":\n return data\n elif request.param == \"data_missing\":\n return data_missing",
"def enum(self, path):\n req = self.session.get(self.url + path + '/enumerate',\n verify=False)\n jdata = req.json()\n if jdata['status'] != 'ok':\n raise Exception(\"Failed to query enumerate: \\n\" + req.text)\n\n return jdata['data']",
"def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData",
"def enumeration(self):\n raise exceptions.NotImplementedError()",
"def get_bulk_compliant_imeis(cls, response):\n compliant_imeis = []\n non_compliant_imeis = []\n response_list = response.json().get('results')\n for response in response_list:\n is_compliant, imei = cls.check_imei_status(response)\n if is_compliant:\n compliant_imeis.append(imei)\n else:\n non_compliant_imeis.append(imei)\n return compliant_imeis, non_compliant_imeis",
"def sitetotalresponses(self) :\n\t\ttry :\n\t\t\treturn self._sitetotalresponses\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_enumerations( self ):\n with self.app.app_context():\n url = '/donation/enumeration/{}/{}'\n\n response = self.test_client.get( url.format( 'giftmodel', 'given_to' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n response = self.test_client.get( url.format( 'transactionmodel', 'type' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n response = self.test_client.get( url.format( 'transactionmodel', 'status' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n response = self.test_client.get( url.format( 'agentmodel', 'type' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )",
"def data_kinds():\n\n return ..."
] |
[
"0.5544492",
"0.53126925",
"0.52980965",
"0.5260126",
"0.52431834",
"0.517832",
"0.517747",
"0.51302063",
"0.5119488",
"0.5118657",
"0.50919074",
"0.5065844",
"0.5031305",
"0.50046426",
"0.4993167",
"0.49779087",
"0.49499705",
"0.49407813",
"0.49365756",
"0.49354747",
"0.49096364",
"0.490708",
"0.49005628",
"0.48866433",
"0.48829192",
"0.4882693",
"0.4877323",
"0.4869223",
"0.484193",
"0.48290634"
] |
0.594185
|
0
|
For some reason, if a RecipeForm is saved with only `Recipe`-specific data, it somehow copies the ingredients/instructions from some other recipe. This test specifically checks for that.
|
def test_recipe_no_related_stays_no_related(self):
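    # form_data_recipe_no_related is assumed to be prepared in this TestCase's setUp with Recipe fields only.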
form = RecipeForm(data=self.form_data_recipe_no_related)
recipe = form.save()
# Check that the resulting recipe has no related items
self.assertEqual(len(recipe.ingredient_groups.all()), 0)
self.assertEqual(len(recipe.instructions.all()), 0)
self.assertEqual(len(recipe.notes.all()), 0)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_item_form_ingredients(self):\n\n test_ingredients = [Ingredient.objects.create(name='chocolate').id]\n\n form_data = {'name': 'test_item2',\n 'description': 'Description is longer than 10 characters',\n 'chef': self.test_user.id,\n 'ingredients': test_ingredients\n }\n form = ItemForm(data=form_data)\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form['ingredients'].errors[0],\n 'An item must have at least two ingredients.'\n )",
"def form_valid(self, form):\n context = self.get_context_data()\n ingredients = context['ingredients']\n # .atomic() - If there is an exception, the changes are rolled back.\n with transaction.atomic():\n form.instance.author = self.request.user\n self.object = form.save()\n if ingredients.is_valid():\n ingredients.instance = self.object\n ingredients.save()\n return super(RecipeCreate, self).form_valid(form)",
"def create_recipe(request, pk):\n recipeform = RecipeForm()\n IngredientFormSet = formset_factory(IngredientForm)\n InstructionFormSet = formset_factory(InstructionForm)\n cookbook = CookBook.objects.get(pk=pk)\n if request.method == \"POST\":\n recipeform = RecipeForm(request.POST, request.FILES)\n ingredientformset = IngredientFormSet(request.POST)\n instructionformset = InstructionFormSet(request.POST)\n if recipeform.is_valid() and ingredientformset.is_valid() and instructionformset.is_valid():\n new_ingredients = []\n picture = recipeform['image']\n for letter in picture:\n if letter in [' ', '20', '%']:\n letter.replace(letter, '_')\n new_recipe = Recipe(\n user=request.user,\n cookbook=cookbook,\n title=recipeform.cleaned_data['title'],\n image=picture,\n prep_time=recipeform.cleaned_data['prep_time'],\n cook_time=recipeform.cleaned_data['cook_time'],\n tags=recipeform.cleaned_data['tags'],\n )\n new_recipe.save()\n for ingredient_form in ingredientformset:\n description = ingredient_form.cleaned_data['ingredient']\n if ingredient_form:\n new_ingredients.append(Ingredient.objects.create(recipe=new_recipe, ingredient=description))\n Instruction.objects.create(recipe=new_recipe, direction=request.POST.get('direction'))\n return HttpResponseRedirect(reverse('list_cookbooks'))\n else:\n recipe_form = RecipeForm()\n ingredient_form_set = IngredientFormSet()\n instruction_form_set = InstructionFormSet()\n return render(request, 'cookbook/recipe_form.html', {'recipe_form': recipe_form,\n 'ingredient_formset': ingredient_form_set,\n 'instruction_formset': instruction_form_set})",
"def test_full_update_recipe(self):\n recipe = sample_recipe()\n recipe.ingredients.create(name='Eggs')\n original_description = recipe.description\n\n payload = {\n 'name': 'Vegan gnocchi',\n 'ingredients': [{'name': 'Vegegg'}]\n }\n url = recipe_detail_url(recipe.id)\n self.client.put(url, payload, format='json')\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.name, payload['name'])\n self.assertEqual(recipe.description, original_description)\n self.assertEqual(recipe.ingredients.count(), 1)\n self.assertTrue(recipe.ingredients.first().name, 'Eggs')",
"def save_recipe(self, recipe):\n\n if self.photo.data:\n recipe.photo = photos.save(self.photo.data.stream)\n\n recipe.title = self.title.data\n recipe.title_slug = slugify(self.title.data)\n recipe.description = self.description.data\n recipe.instructions = self.instructions.data\n recipe.general_ingredients = [\n i.to_model() for i in self.general_ingredients]\n recipe.ingredient_groups = [\n g.to_model() for g in self.ingredient_groups]\n recipe.tags = self.tags.data\n\n recipe.save()",
"def test_products_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Recipe))\n self.assertEqual(str(data), 'django beginners')",
"def test_item_form_description(self):\n\n test_ingredients = [Ingredient.objects.create(name='chocolate').id, Ingredient.objects.create(name='cherry').id]\n\n form_data = {'name': 'test_item2',\n 'description': 'Short',\n 'chef': self.test_user.id,\n 'ingredients': test_ingredients\n }\n form = ItemForm(data=form_data)\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form['description'].errors[0],\n 'Description must be at least 10 characters.'\n )",
"def test_create_recipe_with_ingredients(self):\n\n payload = {\n 'name': 'Gnocchi',\n 'description': 'A detailed description of a yummy recipe!',\n 'ingredients': [\n {'name': 'Potatoes'},\n {'name': 'Flour'},\n {'name': 'Nutmeg'}\n ]\n }\n\n res = self.client.post(RECIPES_URL, payload, format='json')\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n\n self.assertEqual(payload['name'], recipe.name)\n self.assertEqual(payload['description'], recipe.description)\n self.assertEqual(recipe.ingredients.count(), 3)\n self.assertEqual(recipe.ingredients.first().name, 'Potatoes')",
"def test_create_recipe_with_ingredients(self):\n ingred1 = sample_ingredient(self.user)\n ingred2 = sample_ingredient(self.user, name='sugar')\n payload = {\n 'title': 'cake',\n 'time_minutes': 39,\n 'price': 39,\n 'ingredients': [ingred1.id, ingred2.id]\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingred1, ingredients)\n self.assertIn(ingred2, ingredients)",
"def test_creating_recipe_with_ingredients(self):\n ingredient1 = create_sample_ingredient(user=self.user, name=\"Paprika\")\n ingredient2 = create_sample_ingredient(user=self.user, name=\"Salad\")\n\n payload = {\n \"title\": \"Green Salad\",\n \"time_minutes\": 34,\n \"price\": 4.66,\n \"ingredients\": [ingredient1.id, ingredient2.id]\n }\n res = self.client.post(RECIPE_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingredient2, ingredients)",
"def test_patch_recipe(self):\n recipe = sample_recipe(self.user)\n recipe.tags.add(sample_tag(self.user))\n tag = sample_tag(self.user, name='bacon')\n\n payload = {\n 'title': 'Ham hack',\n 'tags': tag.id\n }\n res = self.client.patch(detail_url(recipe.id), payload)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n recipe.refresh_from_db()\n serializer = RecipeSerializer(recipe)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(serializer.data['tags'], [payload['tags']])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(tag, tags)",
"def test_create_recipe_with_ingredients(self):\n ingredient1 = sample_ingredient(user=self.user, name = 'bla')\n ingredient2 = sample_ingredient(user=self.user, name = 'blaa')\n payload = {\n 'title': 'red curry',\n 'ingredients': [ingredient1.id, ingredient2.id],\n 'time_minutes': 30,\n 'price': 30.00\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingredient2, ingredients)",
"def test_partial_update_recipe(self):\n recipe = sample_recipe()\n original_description = recipe.description\n payload = {'name': 'Panqueques con dulce de leche'}\n\n url = recipe_detail_url(recipe.id)\n res = self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(recipe.name, payload['name'])\n self.assertEqual(recipe.description, original_description)",
"def test_cleaned_data_worked(self):\n pass",
"def test_get_recipe_information(self):\n pass",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tag.add(sample_tag(user=self.user))\n recipe.ingredient.add(sample_ingredient(user=self.user))\n new_tag = sample_tag(user=self.user,name='curry')\n payload = {\n 'title':'chicken tikka recipe',\n 'tag' : [new_tag.id]\n }\n url = detail_url(recipe.id)\n res = self.client.patch(url,payload)\n recipe.refresh_from_db();\n self.assertEqual(recipe.title,payload['title'])\n self.assertEqual(len(recipe.tag.all()),1)\n self.assertIn(new_tag,recipe.tag.all())",
"def save(self, commit=True):\n\t\t\n\t\tm = super(RecipeIngredientForm, self).save(commit=False)\n\t\tingredient_name = self.cleaned_data['ingredient_name']\n\t\tunit_name = self.cleaned_data['unit_name']\n\t\toptional = self.cleaned_data['optional']\n\t\tingredient = Ingredient.objects.get_or_create(name__iexact=ingredient_name, defaults={'name': ingredient_name, 'slug': slugify(ingredient_name)})[0]\n\t\tunit = MeasurementUnit.objects.get_or_create(name__iexact=unit_name, defaults={'name': unit_name, 'slug': slugify(unit_name)})[0]\n\t\tm.ingredient = ingredient\n\t\tm.unit = unit\n\t\tm.optional = optional\n\t\tif commit:\n\t\t\tm.save()\n\t\treturn m",
"def test_create_basic_recipe(self):\n\n payload = {'name': 'Focaccia', 'description': 'Detailed description'}\n\n res = self.client.post(RECIPES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n\n self.assertEqual(payload['name'], recipe.name)\n self.assertEqual(payload['description'], recipe.description)",
"def test_full_update(self):\n recipe = create_sample_recipe(user=self.user)\n recipe.ingredients.add(create_sample_ingredient(\n user=self.user,\n name='Fries'\n ))\n payload = {\n \"title\": \"New Cuisine\",\n \"price\": 5.00,\n \"time_minutes\": 90\n }\n recipe_url = create_detail_url(recipe.id)\n self.client.put(recipe_url, payload)\n recipe.refresh_from_db()\n ingredients = recipe.ingredients.all()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(len(ingredients), 0)",
"def test_create_new_recipe(self):\n payload = {\n 'title': 'Cheescake',\n 'time_taken': 35,\n 'price': 5\n }\n\n res = self.client.post(RECIPE_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload.keys():\n self.assertEqual((payload)[key], getattr(recipe, key))\n\n # recipe = get_sample_recipe(self.sample_user)\n # db_recipe =\n\n # self.assertEqual(recipe.title, )",
"def test_user_own_recipes(self):\n\n recipe1 = Recipe(uri=\"testuri\", name=\"testname\", image_url=\"test_image_url\", user_id=self.uid)\n recipe2 = Recipe(uri=\"testuri2\", name=\"testname2\", image_url=\"test_image_url2\")\n\n db.session.add_all([recipe1, recipe2])\n db.session.commit()\n\n self.assertEqual(recipe1.user_id, self.uid)\n self.assertNotEqual(recipe2.user_id, self.uid)\n self.assertEqual(recipe2.user_id, None)",
"def test_recipe_model(self):\n recipe = Recipe(uri=\"testuri\", name=\"testname\", image_url=\"test_image_url\")\n\n db.session.add(recipe)\n db.session.commit()\n\n recipes = Recipe.query.all()\n\n self.assertEqual(len(recipes), 1)\n self.assertEqual(recipes[0].uri, \"testuri\")\n self.assertEqual(recipes[0].name, \"testname\")\n self.assertEqual(recipes[0].image_url, \"test_image_url\")",
"def test_create_recipe_with_ingredients(self):\n ing1 = sample_ingredient(user=self.user,name=\"ginger\")\n ing2 = sample_ingredient(user=self.user, name=\"Prawn\")\n payload = {\n 'title':'Prawn curry',\n 'ingredient':[ing1.id,ing2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ing1,ingredients)\n self.assertIn(ing2,ingredients)",
"def test_create_recipe_with_ingredient(self):\n ingredient1 = sample_ingredient(user=self.user, name='Prawns')\n ingrident2 = sample_ingredient(user=self.user, name ='Ginger')\n\n payload = {\n 'title': 'Thai prawn and curry',\n 'ingredient': [ingredient1.id,ingrident2.id],\n 'time_minuts':60,\n 'price': 250\n }\n res = self.client.post(RECIPE_URL,payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingrident2,ingredients)",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tags(user=self.user))\n new_tag = sample_tags(user=self.user, name='Cabbage')\n\n payload = {'title': 'Salad', 'tags': [new_tag.id]}\n url = detail_url(recipe_id=recipe.id)\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)",
"def test_partial_update_recipe(self):\n\n recipe = create_sample_recipe(user=self.sample_user)\n recipe.tag.add(create_sample_tag(user=self.sample_user, name=\"Curry\"))\n new_tag = create_sample_tag(user=self.sample_user, name=\"bread\")\n\n payload = {\n 'title': 'Chicken Tikka with Bread',\n 'tag': [new_tag.id]\n }\n url = get_detail_URL(recipe.id)\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)",
"def test_put_recipe(self):\n recipe = sample_recipe(self.user)\n recipe.tags.add(sample_tag(self.user))\n payload = {\n 'title': 'Ham hack',\n 'time_minutes': 38,\n 'price': 33.00\n }\n res = self.client.put(detail_url(recipe.id), payload)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n recipe.refresh_from_db()\n serializer = RecipeSerializer(recipe)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(recipe.title, payload['title'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)",
"def test_recipes_create(self):\n app = self.create_app()\n c = app.test_client()\n\n # test if authorization is required to create a recipe\n rv = c.get('/recipes/create')\n self.assertRedirects(rv, \"/auth/login\")\n\n # test recipe page\n register(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n login(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n c.get('/recipes/create')\n self.assert_template_used(\"recipes/create.html\")\n\n # test adding recipe\n recipe = {'author_id': \"unittest\", 'title': \"recipe_unittest2\", 'body': \"Empty body\",\n 'servings': 4, 'tag': \"dessert\", 'ingredients': [{'ingName': \"ing_unittest3_solid\", 'quantity': 180, 'portion': 'g'}, {\n 'ingName': \"ing_unittest1_liquid\", 'quantity': 2, 'portion': 'cup'}]}\n with app.app_context():\n create_recipe(c, recipe)\n self.assert_template_used(\"recipes/index.html\")",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name = 'Curry')\n\n payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}\n url = detail_url(recipe.id) # to update an object you have to use the detail endpoint(with the pk of the specific recipe)\n self.client.patch(url, payload)\n\n recipe.refresh_from_db() # we always need this when we update an object\n self.assertEqual(recipe.title, payload['title'])\n\n tags = recipe.tags.all()\n self.assertEqual(tags.count(), 1)\n self.assertIn(new_tag, tags)",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name= 'curry')\n\n payload = {\n 'title':'chicken tikka', 'tags':[new_tag.id]\n\n }\n url = detail_url(recipe.id)\n\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)"
] |
[
"0.6628717",
"0.6450618",
"0.6439353",
"0.641164",
"0.6409625",
"0.63565224",
"0.6294549",
"0.6208296",
"0.6173742",
"0.61556935",
"0.6142231",
"0.6087612",
"0.6058639",
"0.6040254",
"0.6011858",
"0.59933704",
"0.5978738",
"0.59578437",
"0.5951923",
"0.5949646",
"0.59408957",
"0.59190494",
"0.59175134",
"0.59128505",
"0.589327",
"0.5879715",
"0.58784735",
"0.58609986",
"0.5852514",
"0.5846253"
] |
0.67777187
|
0
|
Fits the data assuming a normal distribution; returns the mean and standard deviation.
|
def gauss_fit(self, data=''):
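    # norm is assumed to be scipy.stats.norm, imported at module level.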
mean, std = norm.fit(data)
return mean, std
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_mean_std(self, data):\n if self._data_mean is None:\n self._data_mean = np.mean(data)\n if self._data_std is None:\n self._data_std = np.std(data)",
"def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)",
"def normalization_stats(completeData):\n data_mean = np.mean(completeData, axis=0)\n data_std = np.std(completeData, axis=0)\n\n dimensions_to_ignore = []\n dimensions_to_use = []\n\n dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))\n dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))\n\n data_std[dimensions_to_ignore] = 1.0\n\n return data_mean, data_std, dimensions_to_ignore, dimensions_to_use",
"def standard_deviation(data):\n\n return np.sqrt(variance(data))",
"def standard_deviation(data):\n\n return np.sqrt(variance(data))",
"def stdev(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmean_values=column_matrix.std(0)\n\tstd_values=mean_values.tolist()\n\treturn std_values",
"def get_stddev(self):\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i]))",
"def _std(self, data):\n var = stats.var(data)\n if var>0.0:\n sd = math.sqrt(var)\n else:\n sd = 0.0\n return sd",
"def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))",
"def get_std_dev(self, data):\n mean = 0\n data_arr = []\n for i in data:\n data_arr.append(i[1])\n return statistics.stdev(data_arr)",
"def fit(sample):\r\n if not hasattr(sample, \"stddev\"):\r\n sample = Sample(sample)\r\n return Normal(sample.mean, sample.stddev)",
"def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean",
"def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std",
"def standardize(data):\r\n mean = data.mean(axis=0)\r\n std = data.std(axis=0)\r\n return (data - mean)/std",
"def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n self.stdval = self.data.std()",
"def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std",
"def _gen_normal(self, count, **kwargs):\n normal = scipy.stats.norm(loc=kwargs['mean'], scale=kwargs['stdev'])\n rvs = normal.rvs(count)\n return rvs",
"def __call__(self, shape):\n return np.random.normal(loc=self.mean, scale=self.stddev, size=shape)",
"def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std",
"def normal(mean, std):\n\n return random.gauss(mean, std)",
"def test_2_normal(self):\n print(\"test 2: normal distributions\")\n\n mean = self.means[0]\n dispersion = self.dispersions[0]\n\n for i, x in enumerate(self.X):\n print(i+1, normal(x, mean, dispersion), sep=' : ')",
"def Std(data):\n return data.std()",
"def transform(self, data):\n data -= self.mean\n if 0.0 in self.std:\n self.std = np.where(self.std == 0.0, 1.0, self.std)\n data /= self.std\n return data",
"def stdDev(data):\r\n sum = 0\r\n ave = average(data)\r\n for i in data:\r\n sum += (i-ave)**2\r\n return math.sqrt(sum/len(data))",
"def _get_normalisation_stats(self):\n p_net_datasets = [self.pdf_dataset] + [self.PDE_dataset] + [self.BC_dataset]\n p_net_means, p_net_stds = get_mean_std_from_datasets(p_net_datasets)\n\n D_net_datasets = [self.PDE_dataset]\n D_net_means, D_net_stds = get_mean_std_from_datasets(D_net_datasets)\n\n U_net_datasets = [self.PDE_dataset]\n U_net_means, U_net_stds = get_mean_std_from_datasets(U_net_datasets)\n\n return p_net_means, p_net_stds, D_net_means, D_net_stds, U_net_means, U_net_stds",
"def unnormalize_multivariate_data(normed_data, scaling_values):\n data = np.zeros(normed_data.shape, dtype=normed_data.dtype)\n for i in range(normed_data.shape[-1]):\n data[:, :, :, i] = normed_data[:, :, :, i] * scaling_values.loc[i, \"std\"] + scaling_values.loc[i, \"mean\"]\n return data",
"def get_std_dev(data, n = -1):\n mean = get_mean(data, n =n)\n\n deviations = []\n\n for i in range(0,n):\n deviations.append( (data[i] - mean)**2 )\n\n std_dev = sqrt( sum(deviations)/n )\n\n return std_dev",
"def calculate_std(self) -> float:\n\n if self.data:\n return np.std(self.data)\n else:\n return self.sigma",
"def standardize(data):\n stddev = data.std()\n #if stddev == 0.:\n # sys.exit(\"data.std() == 0. !\")\n if stddev != 0.:\n data = (data - data.mean()) / (data.std())\n\n return data",
"def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data"
] |
[
"0.6888555",
"0.68504137",
"0.6841606",
"0.6830163",
"0.6830163",
"0.68180627",
"0.6798879",
"0.6758738",
"0.67473656",
"0.6739367",
"0.6725009",
"0.6633292",
"0.6610731",
"0.6590639",
"0.6556199",
"0.6534529",
"0.6516616",
"0.6500733",
"0.6474724",
"0.64506733",
"0.643497",
"0.6424185",
"0.64206094",
"0.64031976",
"0.6395287",
"0.63816303",
"0.6353818",
"0.6347244",
"0.6346125",
"0.6336182"
] |
0.69635624
|
0
|
Given the list of covariance matrices and the index of the chosen matrix, returns an averaged matrix. The average is calculated over the N following matrices. Only the entries with actual values are used; NaN values are skipped.
|
def average_matrix(self, matrices_list='', N='', matrix_index=""):
    averaged = np.empty([16, 16], dtype=float)  # initialize an empty 16*16 matrix (for the 16 standard pressure levels)
    new_mlist = matrices_list[matrix_index:matrix_index+N]  # select the subset of matrices from index=matrix_index to matrix_index+N
    for x in range(16):
        for y in range(16):
            valueslist = self.select_ijentry(matrices=new_mlist, i=x, j=y)
            cleaned, outliers, lower, upper, median = self.data.remove_outliers(valueslist, cut=1.5)
            not_nan = [v for v in cleaned if not np.isnan(v)]
            n_valid = len(not_nan)  # counted separately to avoid shadowing the N parameter used for the slice above
            try:
                averaged[x, y] = sum(not_nan) / n_valid
            except ZeroDivisionError:
                averaged[x, y] = np.nan
    return averaged
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __average__(self,indices):\n # first, do a sanity check on the X parameters\n for index in indices[1:]:\n # this assertion expects data to be a numpy array\n assert (self.data[0][0] == self.data[0][index]).all()\n # first, build the matrix of quantities to be averaged over\n tmp = [[] for j in range(4)]\n for ch in self.chs:\n for index in indices:\n tmp[ch-1].append(self.data[1][index][ch-1])\n # compute the average of dependent quantities, as well as the\n # standard deviation\n Y = [[[],[]] for j in range(4)]\n for ch in self.chs:\n Y[ch-1][0] = numpy.mean(tmp[ch-1],0)\n Y[ch-1][1] = numpy.std(tmp[ch-1],0)\n return (self.data[0][0], Y)",
"def nanMeanByIndex(data, index, largeGroupMean=True):\n th = int((3.2e10 / 32) / np.prod(data.shape[1:])) // 2\n data = data.tordd()\n index = index.astype(int)\n\n def map_keys(x):\n if isinstance(x[0], tuple):\n x = (int(x[0][0]), x[1])\n y = ((index[int(x[0])],), x[1])\n return y\n\n data = data.map(map_keys)\n\n def comb1(x, y):\n if len(x.shape) == 1:\n return y\n elif len(y.shape) == 1:\n return x\n elif len(x.shape) == 3 and len(y.shape) == 3:\n return np.stack([x, y])\n else:\n if len(x.shape) < len(y.shape):\n x = np.expand_dims(x, 0)\n elif len(y.shape) < len(x.shape):\n y = np.expand_dims(y, 0)\n z = np.concatenate((x, y))\n # if len(z.shape) == 4 and z.shape[0] > 100:\n # if largeGroupMean:\n # z = np.expand_dims(np.nanmean(z, axis=0), 0)\n return z\n\n def getTargetLocal(array):\n sz = array.shape\n if len(sz) == 3:\n return array\n result = np.zeros(shape=(sz[1], sz[2], sz[3]), dtype=array.dtype)\n Mean = np.nan_to_num(copy.deepcopy(\n np.nanmean(array, axis=0).reshape(sz[1] * sz[2], sz[3])))\n array2 = np.nan_to_num(copy.deepcopy(array.reshape(sz[0], sz[1] * sz[2], sz[3])))\n for i in range(sz[3]):\n CC = list()\n for k in range(sz[0]):\n CC.append(np.corrcoef(array2[k, :, i], Mean[:, i])[0, 1])\n CC = np.array(CC)\n if sz[0] < 30:\n points = sz[0]\n elif sz[0] < 100:\n points = np.round(sz[0] / 2).astype(int)\n elif sz[0] < 200:\n points = np.round(sz[0] / 3).astype(int)\n elif sz[0] < 300:\n points = np.round(sz[0] / 4).astype(int)\n else:\n points = np.round(sz[0] / 5).astype(int)\n ind = np.argpartition(CC, -points)[-points:]\n result[:, :, i] = np.nanmean(array[ind, :, :, i], axis=0).astype('float32')\n return result\n\n def getMean(array):\n sz = array.shape\n if len(sz) == 3:\n return array\n else:\n return np.nanmean(array, axis=0)\n\n # If the data (i.e. a single group) is bigger then 4GB (3.2e10 bits) the aggregation will fail in spark,\n # so split it into two or more group and average the result\n\n index2 = copy.deepcopy(index)\n counts = np.bincount(index2)\n bigGroups = np.where(counts > th)[0]\n fixList = []\n for bigGroup in bigGroups:\n index2 = np.where(index == bigGroup)[0]\n numGroups = len(index2) // th\n for k in range(1, numGroups):\n newVal = np.max(index) + 1\n index[index2[k::numGroups]] = newVal\n fixList.append((bigGroup, copy.deepcopy(index2[k::numGroups])))\n\n data = data.aggregateByKey(np.array([]), comb1, comb1)\n if largeGroupMean:\n data = data.mapValues(getMean).collectAsMap()\n else:\n data = data.mapValues(getTargetLocal).collectAsMap()\n r = np.array([data[idx] for idx in sorted(data.keys())])\n extraIndex = r.shape[0] - len(fixList)\n extra = r[extraIndex:, ...]\n r = r[:extraIndex, ...]\n for k, (bigGroup, index2) in enumerate(fixList):\n comb = np.nanmean(np.stack((r[bigGroup, ...], extra[k, ...]), 0), 0)\n r[bigGroup, ...] = comb\n return r",
"def _arrays_mean(array_list):\n dims = array_list[0].shape[2]\n out = np.zeros(array_list[0].shape)\n var_out = out.copy()\n\n# i = 1\n for i in range(dims):\n temp = [j[:, :, i] for j in array_list]\n\n # calculate mean\n means_out = np.zeros(temp[0].shape)\n for k in temp:\n means_out += k # sum\n\n out[:, :, i] = means_out / len(array_list) # mean\n\n return(out)",
"def calcVmoy(self, V, idx, n):\n i = max(idx-n, 0)\n f = min(idx+n+1, V.shape[0])\n av = np.mean(V[i:f])\n return av",
"def get_average_n(Z,Z_occam,valid_idxs,N=range(1,3)):\n averages = []\n for n in N:\n def topn_fitter(z,z_occam):\n \"\"\"This fitter performs a change-of-basis to a more appropriate basis for scaling\"\"\"\n return fitters.TopNFitter(z,z_occam,use_relative_scaling=True,is_pooled=True,is_robust=True,N=n)\n ev = evaluators.EvaluatorWithFiltering(Z[:,:z_dim],Z_occam[:,:z_dim],leave_out=True,fitter_class=topn_fitter,valid_idxs=valid_idxs)\n averages.append(ev.weighted_average)\n return averages",
"def sim_avg(sim_mats):\n return np.array(sim_mats).mean(axis=0)",
"def get_cluster_average(cls, indices, dist_mat):\n distances = cls.get_all_distances(indices, dist_mat)\n return np.mean(distances)",
"def average_matrix(self, groups=[\"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\",\"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\", \"FL\"]):\n\n # will only work if genes are columns in matrix\n revert = False\n if self._genes_as_rows:\n self.invert_matrix()\n revert = True\n\n limits = self.reorder_matrix(groups=groups)\n\n v = numpy.ones((len(groups),self.genenumber)) # create new array of size (y groups) and (n genes)\n\n for g in range(self.genenumber):\n v[0,g] = numpy.average(self.data_matrix[:limits[0],g])\n for i in range(len(groups)-1):\n v[i + 1,g] = numpy.average(self.data_matrix[limits[i]:limits[i + 1],g])\n\n self.row_header = groups\n self.column_header = self.gene_header\n self.data_matrix = v\n self.refresh_headers()\n\n # if matrix was inverted for gene removal, restore to its previous orientation:\n if revert:\n self.invert_matrix()\n self.averaged = True",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def score_APC(scores_matrix, N, s_index):\n \n scores = scores_matrix2dict(scores_matrix, s_index)\n # compute the average score of each site\n print('scores for %d pairs ' % len(scores))\n av_score_sites = list()\n for i in range(len(s_index)):\n i_scores = [score for pair, score in scores if s_index[i] in pair]\n assert len(i_scores) == len(s_index) - 1\n i_scores_sum = sum(i_scores)\n i_scores_ave = i_scores_sum/float(len(s_index) - 1)\n av_score_sites.append(i_scores_ave)\n # compute average product corrected DI\n av_all_scores = sum(av_score_sites)/float(len(s_index))\n sorted_score_APC = list()\n for pair, score in scores:\n i, j = pair\n i = np.where(s_index==i)[0][0]\n j = np.where(s_index==j)[0][0]\n score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)\n sorted_score_APC.append((pair, score_apc))\n # sort the scores as doing APC may have disrupted the ordering\n sorted_score_APC = sorted(sorted_score_APC, key = lambda k : k[1], reverse=True)\n return sorted_score_APC",
"def _local_pairwise_cov(class_mean, neighbor_mean):\n covs = []\n for i in range(0, len(class_mean)):\n local_pair = np.vstack((class_mean[i], neighbor_mean[i]))\n covs.append(np.atleast_2d(_cov(local_pair)))\n return np.average(covs, axis=0)",
"def matrix_mean(matrix):\n return sum(map(mean,matrix))",
"def n_finder(gt_mat, x, eps):\n numsnps = int(0.9*gt_mat.shape[0])\n assert(x < numsnps)\n assert(eps > 0 and eps<= 1.0)\n indices = np.random.choice(numsnps, size=x, replace=False)\n n = 0\n avg_array = np.zeros(gt_mat.shape[0])\n going = True\n while going:\n r2_list = [np.corrcoef(gt_mat[i,:],gt_mat[i+n,:])[0,1]**2 for i in indices]\n avg_array[n] = np.nanmean(r2_list)\n n += 1\n if np.mean(r2_list) < eps:\n going = False \n return n,avg_array[:n]",
"def averageTrialsByTriggers(trigger_indices, np_data):\n trialLen = trigger_indices[1] -trigger_indices[0] -1\n data_avg = [] \n data_std = [] \n\n for i in trigger_indices:\n data_avg.append(numpy.average(np_data[i+1:i+trialLen-1])) \n data_std.append(numpy.std(np_data[i+1:i+trialLen-1])) \n \n return (data_avg, data_std)",
"def meanm(mats, max_iter=1024, tol=1e-20):\n\n # Authors\n # -------\n # .. John Ashburner <[email protected]> : original Matlab code\n # .. Mikael Brudfors <[email protected]> : Python port\n #\n # License\n # -------\n # The original Matlab code is (C) 2012-2019 WCHN / John Ashburner\n # and was distributed as part of [SPM](https://www.fil.ion.ucl.ac.uk/spm)\n # under the GNU General Public Licence (version >= 2).\n\n mats = np.asarray(mats)\n dim = mats.shape[2]-1\n in_dtype = mats.dtype\n acc_dtype = np.float64\n\n mean_mat = np.eye(dim+1, dtype=acc_dtype)\n zero_mat = np.zeros((dim+1, dim+1), dtype=acc_dtype)\n for n_iter in range(max_iter):\n mean_log_mat = zero_mat.copy()\n for mat in mats:\n mean_log_mat += logm(lmdiv(mean_mat, mat.astype(acc_dtype)))\n mean_log_mat /= mats.shape[0]\n mean_mat = np.matmul(mean_mat, expm(mean_log_mat))\n if (mean_log_mat ** 2).sum() < tol:\n break\n return mean_mat.astype(in_dtype)",
"def __array_mean_indices(a, indices, func_axis=None, dtype=None):\n if func_axis == None:\n return (numpy.mean(a.flat[indices], dtype=dtype), )\n else:\n return tuple(numpy.mean(numpy.reshape(numpy.take(a, [j,], axis=func_axis), -1)[indices]) for j in range(a.shape[func_axis]))",
"def cluster_matrix_average(M, cluster_assignments):\n\n# #TODO FIGURE OUT TEST FOR THIS FUNCTION\n# \n# ## from individual_group_clustered_maps(indiv_stability_list, clusters_G, roi_mask_file)\n# \n# indiv_stability_set = np.asarray([np.load(ism_file) for ism_file in indiv_stability_list])\n# #\n# \n# cluster_voxel_scores = np.zeros((nClusters, nSubjects, nVoxels))\n# for i in range(nSubjects):\n# cluster_voxel_scores[:,i] = utils.cluster_matrix_average(indiv_stability_set[i], clusters_G)\n# ##\n# \n \n\n if np.any(np.isnan(M)):\n #np.save('bad_M.npz', M)\n raise ValueError('M matrix has a nan value')\n\n cluster_ids = np.unique(cluster_assignments)\n vox_cluster_label = np.zeros((cluster_ids.shape[0], cluster_assignments.shape[0]), dtype='float64')\n s_idx = 0\n K_mask=np.zeros(M.shape)\n for cluster_id in cluster_ids:\n #import pdb;pdb.set_trace()\n vox_cluster_label[s_idx, :] = M[:,cluster_assignments == cluster_id].mean(1)\n \n \n \n k = (cluster_assignments == cluster_id)[:, np.newaxis]\n k=k*1\n print('Cluster %i size: %i' % (cluster_id, k.sum()))\n K = np.dot(k,k.T)\n K[np.diag_indices_from(K)] = False\n Ktemp=K*1\n K_mask=K_mask+Ktemp\n #import pdb;pdb.set_trace()\n if K.sum() == 0: # Voxel with its own cluster\n #import pdb; pdb.set_trace()\n vox_cluster_label[k[:,0]] = 0.0\n s_idx += 1\n else:\n Kbool=K.astype(bool)\n vox_cluster_label[s_idx,k[:,0].T] = M[Kbool].mean()\n s_idx += 1\n #import pdb; pdb.set_trace()\n return vox_cluster_label, K_mask",
"def computeMeans(X, idx, K):\n\tm, n = X.shape\n\tcentroids = np.zeros((K, n))\n\tcount = np.zeros(K)\n\n\tfor j in range(m):\n\t\tcentroids[int(idx[j])] += X[j]\n\n\tfor i in range(m):\n\t\tcount[int(idx[i])] += 1\n\n\treturn centroids / np.tile(count.reshape((K, 1)), n)",
"def _energy_test_statistic_multivariate_from_distance_matrix(\n distance: Array,\n indexes: Sequence[int],\n sizes: Sequence[int],\n average: Callable[[Array], Array] | None = None,\n estimation_stat: EstimationStatisticLike = EstimationStatistic.V_STATISTIC,\n) -> Array:\n first_iter = True\n\n for i, _ in enumerate(indexes):\n for j in range(i + 1, len(indexes)):\n slice_i = slice(indexes[i], indexes[i] + sizes[i])\n slice_j = slice(indexes[j], indexes[j] + sizes[j])\n\n n = sizes[i]\n m = sizes[j]\n\n distance_xx = distance[slice_i, slice_i]\n distance_yy = distance[slice_j, slice_j]\n distance_xy = distance[slice_i, slice_j]\n\n pairwise_energy = _energy_test_statistic_from_distance_matrices(\n distance_xx=distance_xx,\n distance_yy=distance_yy,\n distance_xy=distance_xy,\n n=n,\n m=m,\n average=average,\n estimation_stat=estimation_stat,\n )\n\n if first_iter:\n energy = pairwise_energy\n first_iter = False\n else:\n energy += pairwise_energy\n\n return energy",
"def mean_matrix(datasets, axes = None, label = None):\n fn = lambda fd, axis: fd[axis].mean()\n \n return fn_matrix(datasets, fn, axes, label)",
"def MM_n(N, data):\n out = np.zeros(len(data))\n\n for j in range(N):\n out[j] = np.average(data[:j+1])\n for (j,d) in enumerate(data[N-1:]):\n out[j+N-1] = np.average(data[j:j+N])\n\n return out",
"def _foi_average(conn, foi_idx):\n # get the number of foi\n n_foi = foi_idx.shape[0]\n\n # get input shape and replace n_freqs with the number of foi\n sh = list(conn.shape)\n sh[-2] = n_foi\n\n # compute average\n conn_f = np.zeros(sh, dtype=conn.dtype)\n for n_f, (f_s, f_e) in enumerate(foi_idx):\n conn_f[..., n_f, :] = conn[..., f_s:f_e, :].mean(-2)\n return conn_f",
"def get_A(vs,neighbours,CV_matrix,n_c):\n AA_mat = np.empty((neighbours.shape[0], neighbours.shape[1]))\n for i in range(3):\n Neighbours = neighbours[:, np.mod(i + 2, 3)]\n AA_mat[:, i] = 0.5 * (Neighbours[:, 0] * vs[:, 1] - Neighbours[:, 1] * vs[:, 0])\n AA_flat = AA_mat.ravel()\n AA_flat[np.isnan(AA_flat)] = 0\n AA_mat = AA_flat.reshape(AA_mat.shape)\n A = np.zeros((n_c))\n for i in range(3):\n A += np.asfortranarray(CV_matrix[:, :, i])@np.asfortranarray(AA_mat[:, i])\n return A",
"def manual_mean(arr):\n my_sum = 0\n for i in range(0, arr.shape[0]):\n for j in range(0, arr.shape[1]):\n my_sum += arr[i,j]\n return my_sum / arr.size",
"def get_avg_correlation_from_matrix(zz):\n L=zz.shape[0]\n ns=L-1\n #zzbar = np.zeros((ns, *zz.shape[2:]))\n zzbar = np.zeros_like(zz)\n for i in range(ns):\n s=i+1\n zzbar[i, ...] = np.mean(np.asarray([zz[ii, ii+s, ...] for ii in range(L-s)]), axis=0)\n return zzbar",
"def _compute_jn_pcoa_avg_ranges(jn_flipped_matrices, method):\r\n x, y = shape(jn_flipped_matrices[0])\r\n all_flat_matrices = [matrix.ravel() for matrix in jn_flipped_matrices]\r\n summary_matrix = vstack(all_flat_matrices)\r\n matrix_sum = np_sum(summary_matrix, axis=0)\r\n matrix_average = matrix_sum / float(len(jn_flipped_matrices))\r\n matrix_average = matrix_average.reshape(x, y)\r\n if method == 'IQR':\r\n result = matrix_IQR(summary_matrix)\r\n matrix_low = result[0].reshape(x, y)\r\n matrix_high = result[1].reshape(x, y)\r\n elif method == 'ideal_fourths':\r\n result = idealfourths(summary_matrix, axis=0)\r\n matrix_low = result[0].reshape(x, y)\r\n matrix_high = result[1].reshape(x, y)\r\n elif method == \"sdev\":\r\n # calculate std error for each sample in each dimension\r\n sdevs = zeros(shape=[x, y])\r\n for j in xrange(y):\r\n for i in xrange(x):\r\n vals = array([pcoa[i][j] for pcoa in jn_flipped_matrices])\r\n sdevs[i, j] = vals.std(ddof=1)\r\n matrix_low = -sdevs / 2\r\n matrix_high = sdevs / 2\r\n\r\n return matrix_average, matrix_low, matrix_high",
"def mean(self, indices = None):\n\n # make sure that data is well-formed\n for j in range(len(self.data)-1):\n assert len(self.data[j]) == len(self.data[j+1])\n\n # populate indices, if not given:\n if (indices == None):\n indices = range(len(self.data[0]))\n \n # __average__() need to be defined in child classes\n # the child class also knows what needs to be averaged\n # and what needs to be sanity checked.\n return self.__average__(indices)",
"def v2v3mean(vn_event_sums, nevents):\n if nevents > 0:\n print \"Events:\", nevents\n meanv2 = vn_event_sums[0] / nevents\n meanv2sqr = vn_event_sums[1] / nevents\n meanv3 = vn_event_sums[2] / nevents\n meanv3sqr = vn_event_sums[3] / nevents\n\n errv2 = math.sqrt((meanv2sqr - meanv2**2) / (nevents - 1))\n errv3 = math.sqrt((meanv3sqr - meanv3**2) / (nevents - 1))\n\n print \"Uncorrected v2:\", meanv2, \"err:\", errv2,\n print \"v3:\", meanv3, \"err:\", errv3\n\n submean = [0.0, 0.0, 0.0, 0.0]\n\n for i in range(0, len(submean)):\n if vn_event_sums[i+4] > 0.0:\n submean[i] = math.sqrt(vn_event_sums[i+4] / nevents)\n\n print \"Rsub2:\", submean[0], \"err:\", submean[1],\n print \"Rsub3:\", submean[2], \"err:\", submean[3]\n\n v2corr = 0.0\n v2err = 0.0\n v3corr = 0.0\n v3err = 0.0\n\n print \"R2_full chi2 R3_full chi3\"\n v2 = True\n for vnsub in [submean[0], submean[2]]:\n chisub = 2.0\n delta = 8.0\n for iteration in range(0, 20):\n arg = chisub**2 / 4\n result = (math.sqrt(math.pi / 2) / 2 * chisub\n * math.exp(-arg) * (bessel.i0(arg) + bessel.i1(arg)))\n if result < vnsub:\n chisub += delta\n elif result > vnsub:\n chisub -= delta\n else:\n break\n delta /= 2\n\n chifull = math.sqrt(2) * chisub\n arg = chifull**2 / 4\n reso_corr = (math.sqrt(math.pi / 2) / 2 * chifull\n * math.exp(-arg) * (bessel.i0(arg) + bessel.i1(arg)))\n print reso_corr, chifull,\n\n if v2:\n if reso_corr > 0.0:\n v2corr = meanv2 / reso_corr\n v2err = errv2 / reso_corr\n v2 = False\n elif reso_corr > 0.0:\n v3corr = meanv3 / reso_corr\n v3err = errv3 / reso_corr\n\n print \"\"\n print \"Corrected v2 err v3 err\"\n print v2corr, v2err, v3corr, v3err\n\n else:\n print \"No events found for event plane v_n determination.\"",
"def calCentroids(X, idx, K):\r\n # Useful variables\r\n m, n = X.shape\r\n # You need to return the following variables correctly.\r\n centroids = np.zeros((K, n))\r\n\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in np.arange(K):\r\n centroids[i] = np.mean(X[idx == i], axis = 0)\r\n\r\n\r\n # =============================================================\r\n return centroids"
] |
[
"0.64237404",
"0.59842265",
"0.5794853",
"0.57721573",
"0.5706617",
"0.5686782",
"0.56770647",
"0.56623095",
"0.56601936",
"0.56601936",
"0.5605636",
"0.5566386",
"0.55588996",
"0.5492407",
"0.5470992",
"0.5452519",
"0.5416029",
"0.5415425",
"0.53965974",
"0.53833455",
"0.5372797",
"0.53465337",
"0.5307091",
"0.5291941",
"0.5289259",
"0.5275433",
"0.52728945",
"0.5269501",
"0.5262138",
"0.52471656"
] |
0.7203003
|
0
|
Used to delete the document. This method will return a folder object of the parent folder that this document resided in. Once the document has been deleted, it is added to this document's graveyard, which is used to restore the deleted document. Deleting this document sets metadata.deleted to true, which prevents calling any method except restore.
|
def delete(self):
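    # Follow the "delete" link advertised in this document's metadata.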
documentUrl = self.metaData.getLink("delete")
assert documentUrl is not None
response = self._adapter.deleteRequest(documentUrl, self._baseHeader)
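    # Keep the deleted metadata in the graveyard so restore() can bring the document back;
    # the response's link header points at the parent folder that held this document.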
self.metaData.graveyard.append(self.metaData)
return Folder(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_document(self):\n pass",
"def delete_document(self, document):\n assert isinstance(document, pylastica.document.Document), \"document must be a Document object: %r\" % document\n options = document.get_options([\n 'version',\n 'version_type',\n 'routing',\n 'parent',\n 'replication',\n 'consistency',\n 'refresh',\n 'timeout'\n ])\n return self.delete_by_id(document.doc_id, options)",
"def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))",
"def deleteDocument(self, document):\n data = self.deleteDocumentAll([document])\n try:\n return data[0]\n except: pass",
"def deleteFolder(self, index=None):\n assert isinstance(index, int) or index is None\n\n try:\n if index is None:\n url = self.metaData.getLink(\"delete\")\n else:\n url = self.getFolders()[index].metaData().getLink(\"delete\")\n\n assert url is not None\n\n response = self._adapter.deleteRequest(url, self._baseHeader)\n\n self.metaData.graveyard.append(self.metaData)\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n\n return Folder(self._client, newLink)\n\n except IndexError:\n print(\"the index: \" + str(index) + \" does not exist in the list of folder numbers we have\")",
"def delete(self, request, *args, **kwargs):\n\n return super(APIFolderDocumentView, self).delete(request, *args, **kwargs)",
"def restore(self):\n documentUrl = self.metaData.graveyard[0].selfLink + \"/restore\"\n response = self._adapter.putRequest(documentUrl, self._baseHeader, \"{}\")\n self.metaData.graveyard.pop()\n\n return Document(self._client, response['Headers']['location'])",
"def restoreFolder(self, index):\n assert isinstance(index, int)\n\n try:\n url = self.metaData.graveyard[index].selfLink + \"/restore\"\n response = self._adapter.putRequest(url, self._baseHeader, \"{}\")\n\n self.metaData.graveyard.pop(index)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + str(index) + \" does not exist in the graveyard\")",
"def delete_folder(self, name):\n return self.DeleteFolder(name, 0)",
"def check_delete_parent_folder(cls, parent):\n folder = os.path.join(\n settings.MEDIA_ROOT, cls.parent_base_upload_to(parent))\n\n def rmdir(target):\n items = os.listdir(target)\n if len(items) == 0:\n os.rmdir(target)\n else:\n for item in items:\n path = os.path.join(target, item)\n if not os.path.isdir(path):\n msg = 'The folder %s contains some file' % path\n raise FolderNotEmptyException(msg)\n for item in items:\n path = os.path.join(target, item)\n rmdir(path)\n os.rmdir(target)\n\n try:\n rmdir(folder)\n except FileNotFoundError as e:\n logger.debug(e)\n except Exception as e:\n logger.warning(e)",
"def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)",
"def delete(self):\n if self.parent:\n assert isinstance(self.parent, Collection) # only know how to delete from Collection parents\n self.parent.delete_child(self)\n else:\n self._mark_deleted()",
"def delete(self, *args, **kwargs):\n return super(APIFolderView, self).delete(*args, **kwargs)",
"def del_object_from_parent(self):\n if self.parent:\n self.parent.objects.pop(self.ref)",
"def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)",
"def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])",
"def delete(self):\n if self.left is None or self.right is None:\n if self is self.parent.left:\n self.parent.left = self.left or self.right\n if self.parent.left is not None:\n self.parent.left.parent = self.parent\n else:\n self.parent.right = self.left or self.right\n if self.parent.right is not None:\n self.parent.right.parent = self.parent\n else:\n s = self.successor()\n self.key, s.key = s.key, self.key\n return s.delete()",
"def delete_object(self):\n qry = DeleteEntityQuery(self)\n self.context.add_query(qry)\n self.remove_from_parent_collection()\n return self",
"def delete(self):\n pdbox._args.get(\"dryrun\") or shutil.rmtree(self.path)\n pdbox.info(\"Deleted %s/\" % self.path)",
"def delete(self):\n if not self.id:\n raise AttributeError(\n \"Provide Document ID to delete a document.\"\n \"Assign it to AutomatedDocument object `id` attribute or pass to class constructor.\"\n )\n\n return self._client.delete(\"{}{}/\".format(self._path, self.id))",
"def parent_folder(self):\n return self.properties.get(\"ParentFolder\",\n Folder(self.context, ResourcePath(\"ParentFolder\", self.resource_path)))",
"def delete_folder_from_datastore(content, datacenter_name, folder):\n datacenter = get_obj(content, [vim.Datacenter], datacenter_name)\n task = vim.FileManager.DeleteDatastoreFile_Task(\n content.fileManager,\n folder,\n datacenter\n )\n wait_for_task(task)",
"def parent_document(cls):\n return cls.parent_resources()[0]",
"def _delete_root_dir(self):\n\n staf_request = ('DELETE ENTRY \"{0}\" RECURSE '\n 'CONFIRM '.format(unix_style_path(self._sut.bespoke_root)))\n\n result = self._staf_handle.submit(self._sut.network_address, 'fs', staf_request)\n\n if result.rc not in [result.Ok, result.DoesNotExist]:\n raise CoreError(result.result)",
"def get_parent(self):\n parent_id = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"parentId\", None)\n if parent_id is None:\n return None\n else:\n return DSSProjectFolder(self.client, parent_id)",
"def remove(self, document):\n return self.db.pop(document['id'], None)",
"def delete(self):\n self.parent.delete_node(self)",
"def delete_folder(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n rmdir(path)",
"def delete(self):\n return self.parent.delete_instance(self.name)",
"def document_delete(document_id):\n\n log(session['login'], 'deleted', 'document {}'.format(document_id))\n\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n db.session.delete(doc)\n db.session.commit()\n return redirect(request.referrer)"
] |
[
"0.6484638",
"0.61606187",
"0.60621583",
"0.6035355",
"0.5832543",
"0.5816837",
"0.57378757",
"0.57177037",
"0.57134974",
"0.5706775",
"0.57032114",
"0.56466174",
"0.5635844",
"0.56301296",
"0.5602695",
"0.55812174",
"0.5522459",
"0.5512628",
"0.5492893",
"0.54907286",
"0.5454022",
"0.5429797",
"0.53619725",
"0.53122944",
"0.5306263",
"0.5286135",
"0.52723247",
"0.5268911",
"0.5261525",
"0.5221269"
] |
0.74119663
|
0
|
Restores the document from this object's graveyard. If the document is deleted but not in the graveyard, there is no way to restore it without knowing the full URI of the deleted document's restore link. Once the document has been restored, it is removed from the graveyard. This method returns the document object of the restored document.
|
def restore(self):
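    # Build the restore link from the graveyard entry's self link.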
documentUrl = self.metaData.graveyard[0].selfLink + "/restore"
response = self._adapter.putRequest(documentUrl, self._baseHeader, "{}")
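    # Drop the restored entry from the graveyard and return the document found at the location header.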
    self.metaData.graveyard.pop(0)
return Document(self._client, response['Headers']['location'])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def onDocumentRestored(self, obj):\n\n ArchComponent.Component.onDocumentRestored(self, obj)\n self.setProperties(obj)",
"def restoreFolder(self, index):\n assert isinstance(index, int)\n\n try:\n url = self.metaData.graveyard[index].selfLink + \"/restore\"\n response = self._adapter.putRequest(url, self._baseHeader, \"{}\")\n\n self.metaData.graveyard.pop(index)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + str(index) + \" does not exist in the graveyard\")",
"def restore(self):\n return self._restore",
"def Restore(self):\n\n return self._persistentHandler.Restore()",
"def _restore(self, restore_folder):\n tf.reset_default_graph()\n self.init_session()\n ckpt = tf.train.get_checkpoint_state(restore_folder)\n self.saver = tf.train.import_meta_graph('{}.meta'.format(ckpt.model_checkpoint_path))\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\n print(\"Model restored from {}\".format(restore_folder))",
"def restore(self):\n if self._restored_model:\n return\n with self.eval_graph.graph.as_default():\n last_checkpoint = self._find_last_checkpoint()\n # TODO(rbharath): Is setting train=False right here?\n saver = tf.train.Saver()\n saver.restore(self._get_shared_session(train=False), last_checkpoint)\n self._restored_model = True",
"def mos_object(self):\n return self._restore_fn(*self._restore_args)",
"def restore(self, obj):\n return obj",
"def restore(self, restore):\n self._restore = restore",
"def restore(self):\n self.abstract_obj.restore()",
"def restore_deleted_site(self, site_url):\n result = SpoOperation(self.context)\n qry = ServiceOperationQuery(self, \"RestoreDeletedSite\", [site_url], None, None, result)\n self.context.add_query(qry)\n return result",
"def restore(self, session, **attrs):\n body = {\"instance\": {\"restorePoint\": {\"backupRef\": self.id}}}\n body.update(attrs)\n resp = session.post('instances', service=self.service, json=body).body\n return resp['instance']",
"def load(self) -> None:\n doc_ref = self.doc_ref\n if not isinstance(doc_ref, DocumentReference):\n return\n\n doc = doc_ref.get()\n if doc.exists:\n self.load_storage_model(doc.to_dict())",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def restore(self):\n\t\treturn Job(SDK.PrlVm_Restore(self.handle)[0])",
"def restore(self):\n # For multi-worker training, it should not restore a model in certain\n # worker setting (e.g. non-chief worker in ParameterServerStrategy).\n # pylint: disable=protected-access\n if self._model._in_multi_worker_mode() and not multi_worker_util.should_load_checkpoint():\n return\n self.read_checkpoint_manager.restore_or_initialize()",
"def __restoreBackup(self):\n pass #FIXME!!!",
"def Restore(self, window):\n\n if not self._doRestore:\n return False\n\n if not self.Find(window):\n return False\n\n name = window.GetName()\n return self._persistentObjects[name].Restore()",
"def deleteDocument(self, document):\n data = self.deleteDocumentAll([document])\n try:\n return data[0]\n except: pass",
"def restore_location(self, id_, uri):\n with self._db_connection() as connection:\n was_restored = connection.restore_location(id_, uri)\n return was_restored",
"def restore_backup(self):\n print \"Restoring backup for database: %s\" % self.database['NAME']\n # Fetch the latest backup if filepath not specified\n if not self.filepath:\n print \" Finding latest backup\"\n filepaths = self.storage.list_directory()\n filepaths = self.dbcommands.filter_filepaths(filepaths, self.servername)\n if not filepaths:\n raise CommandError(\"No backup files found in: %s\" % self.storage.backup_dir())\n self.filepath = filepaths[-1]\n # Restore the specified filepath backup\n print \" Restoring: %s\" % self.filepath\n backupfile = self.storage.read_file(self.filepath)\n print \" Restore tempfile created: %s\" % utils.handle_size(backupfile)\n self.dbcommands.run_restore_commands(backupfile)",
"def restore_original_ehr(self, ehr_record):\n self._check_unecessary_restore(ehr_record)\n return self.version_manager.restore_original(ehr_record.record_id)",
"def restore_from_snapshot(SnapshotId=None):\n pass",
"def restore(self):\n raise NotImplementedError",
"def restore(self, sess, model_path, restore_mode=RestoreMode.COMPLETE_SESSION):\n logging.info('{} Restoring from tf checkpoint: {}'.format(datetime.now(), model_path))\n if restore_mode == RestoreMode.COMPLETE_SESSION:\n logging.info('{} Resuming complete session: {}'.format(datetime.now(), model_path))\n saver = tf.train.Saver()\n elif restore_mode == RestoreMode.COMPLETE_NET:\n logging.info('{} Restoring all variables for for Network: {}'.format(datetime.now(), model_path))\n saver = tf.train.Saver(self.variables)\n elif restore_mode == RestoreMode.ONLY_GIVEN_VARS:\n logging.info('{} Restoring given layers for Network: {}'.format(datetime.now(), model_path))\n saver = tf.train.Saver(self.variables_to_restore)\n else:\n raise ValueError()\n\n saver.restore(sess, model_path)\n logging.info(\"Model restored from file: %s\" % model_path)",
"def restore(self, checkpoint):\n raise NotImplementedError",
"def restore(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def restore(self):\n restored_brain = self\n try:\n with open(self.pickle_filename, 'rb') as brain_data:\n loaded_brain = pickle.load(brain_data)\n \"\"\"\n Compare the number of channels in the restored brain with \n those in the already initialized brain. If it matches, \n accept the brain. If it doesn't,\n print a message, and keep the just-initialized brain.\n \"\"\"\n if ((loaded_brain.num_sensors == self.num_sensors) and \n (loaded_brain.num_actions == self.num_actions)):\n print('Brain restored at timestep {0} from {1}'.format(\n str(loaded_brain.timestep), self.pickle_filename))\n restored_brain = loaded_brain\n else:\n print('The brain {0} does not have the same number'.format(\n self.pickle_filename)) \n print('of input and output elements as the world.')\n print('Creating a new brain from scratch.')\n except IOError:\n print('Couldn\\'t open {0} for loading'.format(\n self.pickle_filename))\n except pickle.PickleError, e:\n print('Error unpickling world: {0}'.format(e))\n return restored_brain",
"def restore_part(self, other_model_dir, remap):\n load_and_map_checkpoint(self._model, other_model_dir, remap)",
"def restore_object(Bucket=None, Key=None, VersionId=None, RestoreRequest=None, RequestPayer=None):\n pass"
] |
[
"0.64388806",
"0.6095156",
"0.6062036",
"0.57751065",
"0.5687914",
"0.5608966",
"0.5519199",
"0.5485542",
"0.53546196",
"0.5215274",
"0.514078",
"0.5119221",
"0.5116823",
"0.5115562",
"0.5107034",
"0.5098345",
"0.5090812",
"0.50686884",
"0.5038821",
"0.50364536",
"0.50192696",
"0.501246",
"0.49852946",
"0.49700102",
"0.49606317",
"0.49593526",
"0.4925947",
"0.49067473",
"0.48984545",
"0.48893303"
] |
0.7713666
|
0
|
Updates the metadata about this document. This method returns the Document object of the updated document. It allows you to edit the title and description of the document; any argument you leave out is not updated.
|
def update(self, title=None, description = None):
jsonData = self.metaData.jsonObj
header = self._baseHeader.copy()
header['Content-type'] = "application/vnd.huddle.data+json"
url = self.metaData.getLink("edit")
assert url is not None
if title is not None: jsonData['title'] = title
if description is not None: jsonData['description'] = description
response = self._adapter.putRequest(url, header, json.dumps(jsonData))
return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))
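A short usage sketch for update(), assuming doc is an already constructed api.document.Document instance.
# Hypothetical usage: rename the document while leaving its description untouched.
renamed = doc.update(title="Q3 report")   # doc: an existing api.document.Document (assumed)
# description stays None, so the existing description in the document's metadata is kept.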
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_document(self):\n pass",
"def updateDocument(self, document):\n data = self.updateDocumentAll([document])\n try:\n return data[0]\n except: pass",
"def update(self, **kwargs):\n return self._object.update(meta=kwargs)",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.xml_content = validated_data.get('xml_content', instance.xml_content)\n return data_api.upsert(instance, validated_data['user'])",
"def update(self, request, *args, **kwargs):\n response = super(NoteViewSet, self).update(request, *args, **kwargs)\n obj = self.get_object()\n instance = obj.instance\n # update mongo data\n instance.parsed_instance.save()\n return response",
"def edit_document():",
"def update(self, instance, validated_data):\n instance.href = validated_data.get('href', instance.href)\n instance.title = validated_data.get('title', instance.title)\n instance.datetime = validated_data.get('datetime', instance.datetime)\n instance.content = validated_data.get('content', instance.content)\n instance.coverimg = validated_data.get('coverimg', instance.title)\n instance.save()\n return instance",
"def update_document(self, database, collection, _id, document):\n r = self.__get_response(settings.UPD_DOC,\n {\"db\": database, \"col\": collection, \"id\": str(_id)},\n data=document)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.owner = validated_data.get('owner', instance.owner)\n instance.language = validated_data.get('language', instance.language)\n instance.classes = validated_data.get('classes', instance.classes)\n instance.methods = validated_data.get('methods', instance.methods)\n instance.style = validated_data.get('style', instance.style)\n instance.example = validated_data.get('example', instance.example)\n instance.save()\n return instance",
"def update_doc(doc):\n doc.long_name = \"X\"",
"def edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True",
"def update(self, doc):\n if app.config.get(\"READ_ONLY_MODE\", False) and app.config.get(\"SCRIPTS_READ_ONLY_MODE\", False):\n app.logger.warn(\"System is in READ-ONLY mode, update command cannot run\")\n return\n\n return requests.post(self.target() + self.id + \"/_update\", data=json.dumps({\"doc\": doc}))",
"def save(self):\n if self.uuid is None:\n logger.info('Saving \"{}\" metadata: {}'.format(self.name, self.request_body))\n result = self._agave.meta.addMetadata(body=self.request_body)\n else:\n logger.info('Updating \"{}\" metadata {}: {}'.format(self.name, self.uuid,\n self.request_body))\n result = self._agave.meta.updateMetadata(uuid=self.uuid,\n body=self.request_body)\n self._wrapped.update(**result)\n return self",
"def set_meta(api, name = \"Docs\", intro_text = \"\", **kwargs):\n registry.set_api_meta(api, name = name, intro_text = intro_text, **kwargs)",
"def update_document(self, data):\n if not isinstance(data, pylastica.document.Document) and not isinstance(data, pylastica.script.Script):\n raise TypeError(\"data must be an instance of Document or Script: %r\" % data)\n if not data.has_id():\n raise pylastica.exception.InvalidException(\"Document id is not set.\")\n return self.index.client.update_document(data.doc_id, data, self.index.name, self.name)",
"def update_document(self, portal_name, content_id, document):\n if isinstance(document, dict):\n document = json.dumps(document)\n r = requests.put('/'.join([self.base_url,\n self.DOCUMENTS_ENDPOINT,\n portal_name,\n str(content_id)]),\n data=document,\n headers={'Content-Type': 'application/json'})\n return r.json()",
"def document_update(index_name, doc_type, doc_id, doc=None, new=None):\n if doc:\n resp = es.index(index=index_name, doc_type=doc_type,\n id=doc_id, body=doc)\n print(resp)\n else:\n resp = es.update(index=index_name, doc_type=doc_type,\n id=doc_id, body={\"doc\": new})",
"def update_document(obj):\n index = obj.get_index_name()\n doc_type = obj.get_document_type()\n body = dict(doc=obj.get_document_body())\n try:\n ES.update(index=index, doc_type=doc_type, body=body, id=obj.pk)\n except NotFoundError:\n raise DocumentNotFound(obj.get_index_name(), obj.pk)",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.url = validated_data.get('url', instance.url)\n instance.star = validated_data.get('star', instance.star)\n instance.save()\n return instance",
"def add_document(self, doc):\n assert isinstance(doc, pylastica.document.Document), \"doc must be of type Document: %r\" % doc\n path = urllib.quote_plus(str(doc.doc_id))\n request_type = pylastica.request.Request.PUT\n if path is None or path == '':\n #no doc id has been given; use post so that an id is automatically created\n request_type = pylastica.request.Request.POST\n options = doc.get_options([\n 'version',\n 'version_type',\n 'routing',\n 'percolate',\n 'parent',\n 'ttl',\n 'timestamp',\n 'op_type',\n 'consistency',\n 'replication',\n 'refresh',\n 'timeout'\n ])\n response = self.request(path, request_type, doc.data, options)\n data = response.data\n if (doc.auto_populate or self.index.client.get_config_value(['document', 'autoPopulate'], False)) and response.is_ok():\n if doc.has_id():\n if '_id' in data:\n doc.doc_id = data['_id']\n if '_version' in data:\n doc.version = data['_version']\n return response",
"def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.code = validated_data.get('code', instance.code)\n instance.linenos = validated_data.get('linenos', instance.linenos)\n instance.language = validated_data.get('language', instance.language)\n instance.style = validated_data.get('style', instance.style)\n instance.save()\n return instance",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.inspection_tag = validated_data.get('inspection_tag', instance.code)\n instance.content = validated_data.get('content', instance.language)\n instance.status = validated_data.get('status', instance.style)\n instance.save()\n return instance",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.inspection_tag = validated_data.get('inspection_tag', instance.code)\n instance.content = validated_data.get('content', instance.language)\n instance.status = validated_data.get('status', instance.style)\n instance.save()\n return instance",
"def update_document(\n self,\n index: str,\n doc_id: str,\n document: Dict[str, Any],\n partial_update: bool = False,\n ):\n if partial_update:\n self.__client__.update(index=index, id=doc_id, body={\"doc\": document})\n self.__client__.index(index=index, id=doc_id, body=document)",
"def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)",
"def update(self, **kwargs):\n return self._update_data(self.put(None, data=kwargs))",
"def update(self, instance: Snippet, validated_data: dict) -> Snippet:\n instance.title = validated_data.get('title', default=instance.title)\n instance.code = validated_data.get('code', default=instance.code)\n instance.language = validated_data.get('language', default=instance.language)\n instance.style = validated_data.get('style', default=instance.style)\n instance.save()\n return instance",
"def update(self, index, id, **kwargs):\n url = f'{self.host}{index}/_doc/{id}/_update'\n data = {'doc': {**kwargs}}\n requests.post(url, json=data)\n self.flush(index)\n return self.get(index, id)",
"def update(self, instance, validated_data):\n\n # Use an atomic transaction for managing dataset and authors\n with transaction.atomic():\n # pop off the authors data\n if \"authors\" in validated_data.keys():\n author_data = validated_data.pop('authors')\n\n instance._change_reason = 'Adding Authors to Dataset Metadata'\n # remove the existing authors\n Author.objects.filter(dataset_id=instance.id).delete() # delete first\n self.add_authors(author_data, instance)\n\n instance._change_reason = 'Update Dataset Metadata'\n\n # Update Dataset metadata\n super(self.__class__, self).update(instance=instance, validated_data=validated_data)\n\n return instance"
] |
[
"0.66688526",
"0.62547284",
"0.60333407",
"0.5880621",
"0.5862949",
"0.5862766",
"0.5861224",
"0.58463687",
"0.5817708",
"0.5811926",
"0.5780793",
"0.5764078",
"0.57550293",
"0.572387",
"0.5709954",
"0.56813335",
"0.5664649",
"0.5654251",
"0.563176",
"0.56187606",
"0.56129867",
"0.5554067",
"0.55450284",
"0.55450284",
"0.55256784",
"0.5523488",
"0.55194",
"0.55165803",
"0.547434",
"0.5457246"
] |
0.7680318
|
0
|
Copies this document to the target folder. This method returns a Document object of the newly copied document.
|
def copyTo(self, folder):
copyUrl = self.metaData.getLink("copy")
if not hasattr(folder, "metaData"): raise TypeError("Your newFolder does not have a metaData property")
assert getattr(folder, "selfLink")
assert copyUrl is not None
header = self._baseHeader.copy()
header['Content-type'] = "application/vnd.huddle.data+json"
body = '{ "targetFolder":{ "link":{ "rel": "self", "href": "' + folder.selfLink + '" } } }'
response = self._adapter.postRequest(copyUrl, header, body)
return Document(self._client, response['Headers']['location'])
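A usage sketch for copyTo(); the Folder import path and construction below are assumptions for illustration.
# Hypothetical usage: copy doc into a target folder.
from api.folder import Folder                                    # assumed import path

target = Folder(client, "https://api.example.com/folders/42")    # client is assumed to exist
copied = doc.copyTo(target)   # POSTs the folder's self link to the document's copy link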
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def copyDoc(self, recursive):\n ret = libxml2mod.xmlCopyDoc(self._o, recursive)\n if ret is None:raise treeError('xmlCopyDoc() failed')\n __tmp = xmlDoc(_obj=ret)\n return __tmp",
"def copydoc(self, doc):\n dst = os.path.join(self.path, os.path.split(doc)[1])\n if not self.rc.force and os.path.isfile(dst):\n raise RuntimeError(dst + \" already exists!\")\n shutil.copy2(doc, dst)\n return dst",
"def copy_doc(self, doc, dest=None, headers=None):\n\n if not headers:\n headers = {}\n\n doc1, schema = _maybe_serialize(doc)\n if isinstance(doc1, six.string_types):\n docid = doc1\n else:\n if '_id' not in doc1:\n raise KeyError('_id is required to copy a doc')\n docid = doc1['_id']\n\n if dest is None:\n destination = self.server.next_uuid(count=1)\n elif isinstance(dest, six.string_types):\n if dest in self:\n dest = self.get(dest)\n destination = \"%s?rev=%s\" % (dest['_id'], dest['_rev'])\n else:\n destination = dest\n elif isinstance(dest, dict):\n if '_id' in dest and '_rev' in dest and dest['_id'] in self:\n destination = \"%s?rev=%s\" % (dest['_id'], dest['_rev'])\n else:\n raise KeyError(\"dest doesn't exist or this not a document ('_id' or '_rev' missig).\")\n\n if destination:\n headers.update({\"Destination\": str(destination)})\n resp = self._request_session.request('copy', self._database_path(docid), headers=headers)\n try:\n resp.raise_for_status()\n except HTTPError as e:\n if e.response.status_code == 404:\n raise ResourceNotFound\n raise\n return resp.json()\n\n return {'ok': False}",
"def copyTo(self, newFolder):\n copyUri = self.metaData.getLink(\"copy\")\n\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your folder object is missing a selfLink\")\n assert copyUri is not None\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n body = '{ \"targetFolder\":{ \"link\":{ \"rel\": \"self\", \"href\": \"' + newFolder.selfLink + '\" } } }'\n response = self._adapter.postRequest(copyUri, header, body)\n\n return Folder(self._client, response['Headers']['location'])",
"def get_document(self):\n return self.document",
"def copy_to_folder(self):\n if \"copyToFolder\" in self._prop_dict:\n return self._prop_dict[\"copyToFolder\"]\n else:\n return None",
"def clone(self):\n return _libsbml.SBMLDocument_clone(self)",
"def GetDocument(self):\n return self.file",
"def createDocument(self, document):\n data = self.createDocumentAll([document])\n try:\n return data[0]\n except: pass",
"def document(self, document_id):\r\n return doc.Document(self, document_id)",
"def copy(self):\n return self.from_string(self.format(), self.filename, ignore_checksum=True)",
"def _to_document(self, document):\n obj = self.document()\n obj._set_from_db(document)\n return obj",
"def copy(self):\n newobj = Corpus(docs=self.docs.copy())\n newobj.doc_paths = self.doc_paths.copy()\n\n return newobj",
"def copy_to(self, target, write_mode=\"OVERWRITE\"):\n dqr = {\n \"targetProjectKey\" : target.project_key,\n \"targetFolderId\": target.odb_id,\n \"writeMode\" : write_mode\n }\n future_resp = self.client._perform_json(\"POST\", \"/projects/%s/managedfolders/%s/actions/copyTo\" % (self.project_key, self.odb_id), body=dqr)\n return DSSFuture(self.client, future_resp.get(\"jobId\", None), future_resp)",
"def document(self):\n query = {\"_id\": ObjectId(self.document_id)}\n return Document(get_collection(\"documents\").find_one(query))",
"def copy (self):\n import copy\n return copy.copy(self)",
"def upload_document_copy(self, url_or_path, file_, content_type,\n filename, document_metadata, gever_document_uid):\n url_or_path = url_or_path.strip('/')\n\n response = self.post(\n url_or_path + '/@upload-document-copy',\n files={'file': (filename, file_, content_type)},\n data={\n 'document_metadata': json.dumps(document_metadata),\n 'gever_document_uid': gever_document_uid,\n }\n )\n return response",
"def copy(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n\n if self.sudo:\n spawn.process(\n f'cp -v -- \"{full_source_path}\" \"{self.path_destination}\"',\n sudo=True,\n )\n else:\n message.info(\n f\"Copied: '{full_source_path}' --> '{self.path_destination}'\"\n )\n shutil.copy(full_source_path, self.path_destination)\n else:\n message.error(f\"'{self.name}' has no source from which to copy from.\")",
"def document(self):\n return self.parent.document()",
"def _create_document(result_dict):\n document = Document(\n name=result_dict['docname'],\n original_id=result_dict['itemid'],\n doctype=result_dict['doctype'],\n language=result_dict['languageisocode'],\n conclusion=result_dict['conclusion'],\n originatingbody=result_dict['originatingbody'],\n application=result_dict['application'],\n )\n return document",
"def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")",
"def getDocument(self, *args):\n return _libsbml.SBMLConverter_getDocument(self, *args)",
"def deepcopy(self):\n return ModelFile(self._key)",
"def clone(self):\n return _libsbml.SBMLDocumentPlugin_clone(self)",
"def copyDtd(self):\n ret = libxml2mod.xmlCopyDtd(self._o)\n if ret is None:raise treeError('xmlCopyDtd() failed')\n __tmp = xmlDtd(_obj=ret)\n return __tmp",
"def copy( self ):\n n = None\n if self.nodeType == 1:\n n = HtmlDomNode( self.nodeName, self.nodeType )\n n.children = self.children\n n.attributes = self.attributes\n elif self.nodeType == 3:\n n = HtmlDomNode()\n n.text = self.text\n return n",
"def get_copy(self) -> Path:\n snapshot_source_dir = PERSISTENCE_SNAPSHOTS_DIR / self.version\n snapshot_copy_dir = Path(TemporaryDirectory().name) / self.version\n copytree(src=snapshot_source_dir, dst=snapshot_copy_dir)\n return snapshot_copy_dir",
"def _copy(self):\n if os.path.isfile(self.source):\n shutil.copy(self.source, self.path)\n elif os.path.isdir(self.source):\n shutil.copytree(self.source, self.path)\n else:\n raise ValueError(\"Local path does not exist\")\n\n self._set_chmod()\n return self.check(force=False)",
"def docCopyNode(self, doc, extended):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n ret = libxml2mod.xmlDocCopyNode(self._o, doc__o, extended)\n if ret is None:raise treeError('xmlDocCopyNode() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp",
"def clone(self):\n return _libsbml.CompSBMLDocumentPlugin_clone(self)"
] |
[
"0.71349126",
"0.6750404",
"0.6204421",
"0.6086825",
"0.60836196",
"0.6018448",
"0.6002959",
"0.5863096",
"0.5853461",
"0.5714051",
"0.56894773",
"0.5596433",
"0.5568087",
"0.54949385",
"0.54816085",
"0.5456911",
"0.5450843",
"0.54444283",
"0.54300004",
"0.53996104",
"0.5374041",
"0.53502953",
"0.5319265",
"0.53117675",
"0.5308167",
"0.5304707",
"0.5298962",
"0.5295114",
"0.5277899",
"0.525256"
] |
0.7200869
|
0
|
Creates a new version of the document. While you are uploading the new version, the document is locked. When you create a new version you can also alter metadata such as the title, the description, and a note about why you are uploading this new version. This method returns a Document object for the new version of the document. Its metadata may differ from that of a normal document because the new version may not have finished uploading. Unlike other API methods, this method handles HuddleConflictErrors, so make sure your httpAdapter raises these when it receives a 409 response.
|
def createNewVersion(self, file, title=None, description =None, versionNote = None):
newVersionUrl = self.metaData.getLink("create-version")
assert newVersionUrl is not None
uploadFile = MultiPartFormRequest(file)
header = self._baseHeader.copy()
header['Content-type'] = "application/vnd.huddle.data+json"
body = json.dumps({"title" : title, "description" : description, "versionNote" : versionNote, "extension" : uploadFile.extension})
#create a lock
lockResponse = self.__createLock()
response = self._adapter.postRequest(newVersionUrl, header, body)
r = HyperLinkResource(response['Body'])
#now that we have a good response get the urls for the next part
uploadUrl = r.getLink("upload")
selfUrl = r.selfLink
assert uploadUrl is not None
#create the multipart form data for the body
boundary = uploadFile.create_boundary_string()
header['Content-Type'] = "mutipart/form-data; boundary=" + boundary
body = uploadFile.encode_mutipart_form_data(boundary)
try:
response = self._adapter.putRequest(uploadUrl, header, body)
except HuddleConflictError:
print("we couldn't upload the document as it has been locked by someone else")
finally:
self.__deleteLock(HyperLinkResource(lockResponse['Body']).getLink("delete"))
return Document(self._client, selfUrl)
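A usage sketch for createNewVersion(); the file path and metadata values are placeholders, and doc is assumed to be an existing Document instance.
# Hypothetical usage: upload a new version of the document with a version note.
new_version = doc.createNewVersion(
    "reports/q3-final.pdf",                  # local file handed to MultiPartFormRequest
    title="Q3 report (final)",
    versionNote="Corrected the revenue table",
)
# The document is locked before the upload and the lock is deleted in the finally block.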
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_document(self, doc):\n assert isinstance(doc, pylastica.document.Document), \"doc must be of type Document: %r\" % doc\n path = urllib.quote_plus(str(doc.doc_id))\n request_type = pylastica.request.Request.PUT\n if path is None or path == '':\n #no doc id has been given; use post so that an id is automatically created\n request_type = pylastica.request.Request.POST\n options = doc.get_options([\n 'version',\n 'version_type',\n 'routing',\n 'percolate',\n 'parent',\n 'ttl',\n 'timestamp',\n 'op_type',\n 'consistency',\n 'replication',\n 'refresh',\n 'timeout'\n ])\n response = self.request(path, request_type, doc.data, options)\n data = response.data\n if (doc.auto_populate or self.index.client.get_config_value(['document', 'autoPopulate'], False)) and response.is_ok():\n if doc.has_id():\n if '_id' in data:\n doc.doc_id = data['_id']\n if '_version' in data:\n doc.version = data['_version']\n return response",
"def _put_assume_new(self, _id=None, **data):\n if _id is None:\n _id = str(uuid4())\n doc = dict(_id=_id, **data)\n try:\n current_doc = self._db.create_document(doc, throw_on_exists=True)\n except couchdb.http.ResourceConflict:\n # TODO: _rev is in header, don't need to get entire doc\n # Don't use self.get, don't want to actually download an attachment\n current_doc = self._db.get(_id)\n current_doc.update(doc)\n current_doc.save()\n return current_doc",
"def create_document(obj):\n index = obj.get_index_name()\n doc_type = obj.get_document_type()\n body = obj.get_document_body()\n exists = ES.exists(index=index, doc_type=doc_type, id=obj.pk)\n\n if not exists:\n ES.create(index=index, doc_type=doc_type, body=body, id=obj.pk)\n return None\n\n return \"Conflict: document already exists for {0} with id {1}.\".format(\n obj.__class__.__name__, obj.pk)",
"def create_document(self):\n # set single values\n if len(self.field_values) > 0:\n self._set_field_values()\n\n # set multi values\n if len(self.block_field_values) > 0:\n self._set_multi_field_values()\n\n self.field_values = {}\n self.block_field_values = {}\n\n self.client.service.CreateDocument()",
"def createDocument(self, document):\n data = self.createDocumentAll([document])\n try:\n return data[0]\n except: pass",
"def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)",
"def save( self, request, idx ) :\n\n if idx != 'None' :\n obj = models.Document.objects.get( id = idx )\n obj.element = self.cleaned_data['element']\n obj.type = self.cleaned_data['type']\n obj.name = self.cleaned_data['name']\n\n else :\n obj = models.Document.objects.get_or_create(element = self.cleaned_data['element'],\n type = self.cleaned_data['type'],\n name = self.cleaned_data['name'],\n author = request.user )[0]\n\n obj.link = self.cleaned_data['link']\n obj.save()\n\n return obj",
"def create_document(document: DocumentIn, db: Session = Depends(get_db)):\n return add_document(db, document)",
"def update_document(self):\n pass",
"def CreateVersion(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def CreateVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def updateDoc(self, path):\n self.db.setDb(self.db_file)\n \n if not self.authd:\n self._authorize()\n \n db_row = self.db.getRowFromPath(path)\n if not db_row:\n return False\n \n resource_id = db_row[0]\n etag = db_row[1]\n title = db_row[2]\n \n ms = gdata.data.MediaSource(file_path=path, content_type=MIMETYPES['ODT'])\n doc = self.client.GetDoc(resource_id.replace(':', '%3A'))\n new_version = self.client.Update(doc, media_source=ms)\n print 'Document pushed:', new_version.GetAlternateLink().href\n \n self.db.resetEtag(new_version)",
"def createDocument(self, file, description=\"\", index=None):\n\n assert isinstance(index, int) or index is None\n header = self._baseHeader.copy().copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n uploadFile = MultiPartFormRequest(file)\n\n skeletonDocument= {\"title\" : uploadFile.title, \"description\" : description, \"extension\" : uploadFile.extension}\n jsonString = json.dumps(skeletonDocument)\n\n try:\n if index is None:\n url = self.metaData.getLink(\"create-document\")\n else:\n url = self.getFolders()[index].metaData.getLink(\"create-document\")\n\n assert url is not None\n\n response = self._adapter.postRequest(url, header, jsonString)\n\n uploadUrl = HyperLinkResource(response['Body']).getLink(\"upload\")\n selfUrl = HyperLinkResource(response['Body']).selfLink\n assert uploadUrl is not None\n\n boundary = uploadFile.create_boundary_string()\n header['Content-Type'] = \"mutipart/form-data; boundary=\" + boundary\n body = uploadFile.encode_mutipart_form_data(boundary)\n header['Content-Length'] = len(body)\n\n response = self._adapter.putRequest(uploadUrl, header, body)\n\n return Document(self._client, selfUrl)\n except IndexError:\n print(\"the index: \" + index + \" does not exist in the list of folder numbers we have\")",
"def document_new():\n\n t = request.form['type']\n if t == 'book':\n doc = Book(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n edition=request.form['edition'],\n publisher=request.form['publisher'],\n publishment_year=request.form['publishment_year'],\n bestseller='bestseller' in request.form,\n reference='reference' in request.form\n )\n elif t == 'av':\n doc = AVMaterial(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors'])\n )\n elif t == 'article':\n doc = JournalArticle(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n issue_editor=request.form['issue_editor'],\n issue_publication_date=request.form['issue_publication_date'],\n journal=request.form['journal']\n )\n\n for i in range(int(request.form['copies'])):\n dc = DocumentCopy(document=doc)\n\n db.session.add(doc)\n db.session.commit()\n\n log(session['login'], 'created', 'document {}'.format(doc.id))\n\n # TODO\n return redirect('/admin/documents')",
"def create( self, message, manual = False ):\n\n version = self.domain_model()\n context = self.__parent__\n trusted = removeSecurityProxy(context)\n \n # set values on version from context\n self._copyFields(trusted, version)\n \n # content domain ids are typically not in the interfaces\n # manually inspect and look for one, by hand to save on the new version\n mapper = orm.object_mapper(trusted)\n version.content_id = mapper.primary_key_from_instance(trusted)[0]\n version.status = None\n version.manual = manual\n \n # we rely on change handler to attach the change object to the version\n event.notify(\n interfaces.VersionCreated(context, self, version, message))\n \n session = Session()\n session.add(version)\n \n version.context = context \n event.notify(ObjectCreatedEvent(version))\n\n return version",
"async def create_doc(self, *args, **kwargs):\n pass",
"def add(self, document):\n return self.db.update({document['id']: document})",
"def create(self, request, *args, **kwargs):\n logger.debug(u'DocumentDefinition.create ...')\n logger.debug(u'DocumentDefinition.create :: REQUEST: {}'.format(request.REQUEST))\n version = request.version\n if '@' in version:\n branch_name, tag_name = version.split('@')\n else:\n tag_name = version\n branch_name = None\n logger.debug(u'DocumentDefinition.create :: tag: {}'.format(tag_name))\n now_es = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n if len(kwargs) == 0:\n raise exceptions.XimpiaAPIException(_(u'No document type sent'))\n doc_type = kwargs['doc_type']\n logger.debug(u'DocumentDefinition.create :: doc_type: {}'.format(doc_type))\n # resolve index based on request host for site\n site_slug = get_site(request)\n index = '{}__base'.format(site_slug)\n logger.debug(u'DocumentDefinition.create :: index: {}'.format(index))\n ###############\n # validations\n ###############\n # check user request and user is admin\n if not request.user or (request.user and not request.user.id):\n raise exceptions.XimpiaAPIException(_(u'User needs to be authenticated'))\n user = request.user\n logger.debug(u'DocumentDefinition.create :: request.user: {}'.format(user))\n groups = user.document['groups']\n logger.debug(u'DocumentDefinition.create :: groups: {}'.format(groups))\n admin_groups = filter(lambda x: x['name'] == 'admin', groups)\n if not admin_groups:\n raise exceptions.XimpiaAPIException(_(u'User needs to be admin'))\n # generate mappings\n doc_def = DocumentDefinition(json.loads(request.body), doc_type, user, tag_name=tag_name,\n branch_name=branch_name)\n document_definition_input = doc_def.logical\n logger.info(u'DocumentDefinition.create :: document_definition_input: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(document_definition_input)))\n bulk_queries = list()\n # Check db validations: tag exists, document definition not exists, no fields\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'document-definition'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'document-definition__doc_type__v1.raw__v1': doc_type\n }\n }\n }\n )\n )\n )\n # meta_data = document_definition_input['_meta']\n # Check mapping does not exist\n es_response_raw = requests.get(\n '{host}/{index}/_mapping/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=index,\n doc_type=doc_type\n )\n )\n existing_mapping = es_response_raw.json()\n if existing_mapping:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists :: {}'.format(\n existing_mapping\n )))\n # Check no fields for doc type\n logger.debug(u'DocumentDefinition.create :: mapping in ES: {}'.format(es_response_raw.content))\n\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'field-version'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'field-version__doc_type__v1.raw__v1': doc_type\n }\n }\n }\n )\n )\n )\n # Validate tag exists\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'tag'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'tag__slug__v1.raw__v1': slugify(tag_name)\n }\n }\n }\n )\n )\n )\n # print ''.join(map(lambda x: '{}\\n'.format(x[0]) + '{}\\n'.format(x[1]), bulk_queries))\n es_response_raw = requests.get(\n '{host}/_msearch'.format(\n host=settings.ELASTIC_SEARCH_HOST\n ),\n data=''.join(map(lambda x: '{}\\n'.format(x[0]) + '{}\\n'.format(x[1]), bulk_queries))\n )\n es_response = es_response_raw.json()\n 
logger.info(u'DocumentDefinition.create :: response validations: {}'.format(\n es_response\n ))\n responses = es_response.get('responses', [])\n if responses[0]['hits']['total'] > 0:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists'))\n if responses[1]['hits']['total'] > 0:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists'))\n if responses[2]['hits']['total'] == 0:\n raise exceptions.XimpiaAPIException(_(u'Tag does not exist'))\n ##################\n # End validations\n ##################\n\n # Build data\n doc_mapping = doc_def.get_mappings()\n fields_version_str = doc_def.get_field_versions(index, user)\n # Create document definition document\n physical = doc_def.get_physical()\n logger.debug(u'_create_index :: document definition: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(physical))\n )\n es_response_raw = requests.post(\n '{host}/{index}/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=u'{}__document-definition'.format(index),\n doc_type='document-definition'\n ),\n data=json.dumps(\n physical\n )\n )\n es_response = es_response_raw.json()\n document_created = es_response\n logger.info(u'DocumentDefinition.create :: response create document definition: {}'.format(\n es_response\n ))\n if 'error' in es_response and es_response['error']:\n raise exceptions.XimpiaAPIException(u'Error creating document definition')\n # Bulk insert for all fields\n # print fields_version_str\n es_response_raw = requests.post(\n '{host}/_bulk'.format(host=settings.ELASTIC_SEARCH_HOST),\n data=fields_version_str,\n headers={'Content-Type': 'application/octet-stream'},\n )\n es_response = es_response_raw.json()\n logger.info(u'DocumentDefinition.create :: response create field versions: {}'.format(\n es_response\n ))\n if 'errors' in es_response and es_response['errors']:\n raise exceptions.XimpiaAPIException(u'Error creating fields')\n # Create mapping\n logger.debug(u'DocumentDefinition.create :: mappings: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(doc_mapping)\n ))\n es_response_raw = requests.put(\n '{host}/{index}/_mapping/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=index,\n doc_type=doc_type\n ),\n data=json.dumps(doc_mapping)\n )\n es_response = es_response_raw.json()\n logger.info(u'DocumentDefinition.create :: response put mapping: {}'.format(es_response))\n if 'error' in es_response and es_response['error']:\n raise exceptions.XimpiaAPIException(u'Error in saving mappings')\n # output document\n output_document = json.loads(request.body)\n output_document['_id'] = document_created['_id']\n output_document['_version'] = document_created['_version']\n return Response(output_document)",
"async def _save(self, document, alias=None):\n doc = document.to_son()\n\n if document._id is not None:\n try:\n await self.coll(alias).update({\"_id\": document._id}, doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n else:\n try:\n doc_id = await self.coll(alias).insert(doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n document._id = doc_id\n\n return document",
"def _put(self, name, document):\n raise NotImplementedError",
"async def create(self, alias=None, **kwargs):\n document = self.__klass__(**kwargs)\n return await self.save(document=document, alias=alias)",
"def save_doc(self, doc, encode_attachments=True, force_update=False,\n **params):\n if doc is None:\n doc1 = {}\n else:\n doc1, schema = _maybe_serialize(doc)\n\n if '_attachments' in doc1 and encode_attachments:\n doc1['_attachments'] = resource.encode_attachments(doc['_attachments'])\n\n if '_id' in doc1:\n docid = doc1['_id'] if six.PY3 else doc1['_id'].encode('utf-8')\n couch_doc = Document(self.cloudant_database, docid)\n couch_doc.update(doc1)\n try:\n # Copied from Document.save to ensure that a deleted doc cannot be saved.\n headers = {}\n headers.setdefault('Content-Type', 'application/json')\n put_resp = couch_doc.r_session.put(\n couch_doc.document_url,\n data=couch_doc.json(),\n headers=headers\n )\n put_resp.raise_for_status()\n data = put_resp.json()\n super(Document, couch_doc).__setitem__('_rev', data['rev'])\n except HTTPError as e:\n if e.response.status_code != 409:\n raise\n\n if force_update:\n couch_doc['_rev'] = self.get_rev(docid)\n couch_doc.save()\n else:\n raise ResourceConflict\n res = couch_doc\n else:\n res = self.cloudant_database.create_document(doc1)\n\n if 'batch' in params and ('id' in res or '_id' in res):\n doc1.update({ '_id': res.get('_id')})\n else:\n doc1.update({'_id': res.get('_id'), '_rev': res.get('_rev')})\n\n if schema:\n for key, value in six.iteritems(doc.__class__.wrap(doc1)):\n doc[key] = value\n else:\n doc.update(doc1)\n return {\n 'id': res['_id'],\n 'rev': res['_rev'],\n 'ok': True,\n }",
"def add_document(self, index: str, doc_id: str, document: Dict[str, Any]):\n self.__client__.index(index=index, body=document, id=doc_id, refresh=\"wait_for\")",
"def CreateVersion(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def put(self):\n if 'file' not in self.request.POST:\n self.request.errors.add('body', 'file', 'Not Found')\n self.request.errors.status = 404\n return\n tender = TenderDocument.load(self.db, self.tender_id)\n if not tender:\n self.request.errors.add('url', 'tender_id', 'Not Found')\n self.request.errors.status = 404\n return\n data = self.request.POST['file']\n bids = [i for i in tender.bids if i.id == self.bid_id]\n if not bids:\n self.request.errors.add('url', 'bid_id', 'Not Found')\n self.request.errors.status = 404\n return\n bid = bids[0]\n documents = [i for i in bid.documents if i.id == self.request.matchdict['id']]\n if not documents:\n self.request.errors.add('url', 'id', 'Not Found')\n self.request.errors.status = 404\n return\n src = tender.serialize(\"plain\")\n document = Document()\n document.id = self.request.matchdict['id']\n document.title = data.filename\n document.format = data.type\n document.datePublished = documents[0].datePublished\n key = uuid4().hex\n document.url = self.request.route_url('Tender Bid Documents', tender_id=self.tender_id, bid_id=self.bid_id, id=document.id, _query={'download': key})\n bid.documents.append(document)\n filename = \"{}_{}\".format(document.id, key)\n tender['_attachments'][filename] = {\n \"content_type\": data.type,\n \"data\": b64encode(data.file.read())\n }\n patch = make_patch(tender.serialize(\"plain\"), src).patch\n tender.revisions.append(revision({'changes': patch}))\n try:\n tender.store(self.db)\n except Exception, e:\n return self.request.errors.add('body', 'data', str(e))\n return {'data': document.serialize(\"view\")}",
"def create_from_document(\n doc: Dict[str, Any],\n doc_location: str,\n filename: str,\n batch_timestamp: dt.datetime,\n pub: Publication) -> 'VersionedDoc':\n return VersionedDoc(\n publication=pub,\n name=doc['doc_name'],\n type=doc['doc_type'],\n number=doc['doc_num'],\n # TODO: Pass actual filename using ProcessedDoc instead of Doc\n # TODO: Tweak for clones?\n filename=filename,\n doc_location=doc_location,\n batch_timestamp=batch_timestamp,\n publication_date=parse_timestamp(doc['publication_date']),\n json_metadata=doc,\n version_hash=doc['version_hash'],\n md5_hash=\"\",\n is_ignored=False\n )",
"def create_version(instance,\n operation,\n versional_comment=None):\n if not versional_comment:\n versional_comment = \"Instance %sd\" % operation\n\n return {\n #'_id': str(ObjectId()),\n #'_id': ObjectId(),\n 'document_id': instance['_id'],\n 'document_model': instance['_model'],\n 'document': instance,\n 'comment': versional_comment,\n 'operation': operation}",
"def upsert(cls, title, version=None):\n if version is None:\n # Check for existing page object\n obj = cls.query.get(app_config_id=c.app.config._id, title=title)\n if obj is None:\n obj = cls(title=title, app_config_id=c.app.config._id)\n Thread(discussion_id=obj.app_config.discussion_id,\n ref_id=obj.index_id())\n return obj\n else:\n pg = cls.upsert(title)\n history_cls = cls.__mongometa__.history_class\n ss = history_cls.query.find({\n 'artifact_id': pg._id,\n 'version': int(version)\n }).one()\n return ss",
"def index_document(self):\n index_url = self.indexd.rstrip(\"/\") + \"/index/blank/\"\n params = {\"uploader\": self.uploader, \"file_name\": self.file_name}\n\n # if attempting to set record's authz field, need to pass token\n # through\n if self.authz:\n params[\"authz\"] = self.authz\n token = get_jwt()\n\n auth = None\n headers = {\"Authorization\": f\"bearer {token}\"}\n logger.info(\"passing users authorization header to create blank record\")\n else:\n logger.info(\"using indexd basic auth to create blank record\")\n auth = (config[\"INDEXD_USERNAME\"], config[\"INDEXD_PASSWORD\"])\n headers = {}\n\n indexd_response = requests.post(\n index_url, json=params, headers=headers, auth=auth\n )\n if indexd_response.status_code not in [200, 201]:\n try:\n data = indexd_response.json()\n except ValueError:\n data = indexd_response.text\n self.logger.error(\n \"could not create new record in indexd; got response: {}\".format(data)\n )\n raise InternalError(\n \"received error from indexd trying to create blank record\"\n )\n document = indexd_response.json()\n guid = document[\"did\"]\n self.logger.info(\n \"created blank index record with GUID {} for upload\".format(guid)\n )\n return document",
"async def add_document(\n self,\n doc_id,\n nosave=False,\n score=1.0,\n payload=None,\n replace=False,\n partial=False,\n no_create=False,\n **fields,\n ):\n self.client._add_document(\n doc_id,\n conn=self._pipeline,\n nosave=nosave,\n score=score,\n payload=payload,\n replace=replace,\n partial=partial,\n no_create=no_create,\n **fields,\n )\n self.current_chunk += 1\n self.total += 1\n if self.current_chunk >= self.chunk_size:\n await self.commit()"
] |
[
"0.6806192",
"0.6367714",
"0.63407457",
"0.63301355",
"0.6271407",
"0.62206775",
"0.6092243",
"0.6000473",
"0.5980312",
"0.5973449",
"0.59679925",
"0.5838267",
"0.5769989",
"0.57600176",
"0.5757804",
"0.57221675",
"0.5720036",
"0.57080275",
"0.56902295",
"0.56881624",
"0.56689644",
"0.5661652",
"0.56483895",
"0.5638236",
"0.55896",
"0.5570743",
"0.55574936",
"0.55574816",
"0.551431",
"0.5497673"
] |
0.6962159
|
0
|
Puts a lock on the document. You should never need to call this method directly, as it is called by the methods that need to lock content. This method returns the HTTP response of the lock request.
|
def __createLock(self):
lockUrl = self.metaData.getLink("lock")
assert lockUrl is not None
lockBody = json.dumps({"lockIntent" : "lockedForEdit"})
header = self._baseHeader.copy()
header['Content-type'] = "application/vnd.huddle.data+json"
lockResponse = self._adapter.postRequest(lockUrl, header, lockBody)
return lockResponse
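For reference, a rough requests-based equivalent of the lock call shown above; the URL and authorization header are placeholders, not the library's real configuration.
# Hypothetical raw HTTP sketch of the lock request (URL and token are placeholders).
import json
import requests

lock_url = "https://api.example.com/files/documents/123/lock"
headers = {
    "Authorization": "Bearer <token>",
    "Content-type": "application/vnd.huddle.data+json",
}
lock_response = requests.post(lock_url, headers=headers, data=json.dumps({"lockIntent": "lockedForEdit"}))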
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def lock(self):\n print(\"DEPRECATED lock\")\n return self._operations.lock()",
"def response_lock(self) -> asyncio.Lock:\n return self._response_lock",
"def lock_renew(self) -> None:\n self.__logger.debug('Eva.lock_renew called')\n return self.__http_client.lock_renew()",
"def lock(self):\n from .services import locking\n\n return locking.lock_quota(self)",
"def lock(self, writelock=False, nattempts=0):\n return _image.image_lock(self, writelock, nattempts)",
"def _lock(req): # static method\n tran = req.db.transaction(req.log_info)\n c = tran.cursor()\n c.execute('BEGIN EXCLUSIVE TRANSACTION')\n return c",
"def get_release_lock_response(cls):\n return cls.create_simple_success_response()",
"def lock_for_update(self):\n return self.lock(True)",
"def lock(self):\n raise NotImplementedError",
"def lock(self, nReserved):\n\t\treturn Job(SDK.PrlVm_Lock(self.handle, nReserved)[0])",
"def lock(self):\r\n out = self._authsvn('lock').strip()\r\n if not out:\r\n # warning or error, raise exception\r\n raise Exception(out[4:])",
"def lock_status(self) -> Dict[str, str]:\n self.__logger.debug('Eva.lock_status called')\n return self.__http_client.lock_status()",
"def lock(self):\r\n return self._lock",
"def write_acquire(self):\n self.is_locked = True\n self.rwlock = RWLock().write_acquire()",
"def _lock_key_and_execute_operation(self, raise_if_locked, operation, *args):\n lock = None\n response = None\n if raise_if_locked:\n lock = Job._create_and_add_lock(self, args[0], args[1], None,\n datetime.utcnow())\n try:\n response = operation(*args)\n except Exception:\n if raise_if_locked:\n self._remove_lock(lock)\n raise\n if raise_if_locked:\n self._remove_lock(lock)\n return response",
"def lock(self):\n return self._lock",
"def set_lock(self, value):\n act = LockAction(self, value)\n return act.invoke()",
"def lock(self, value=True):\n self.lock_ = value\n\n return self",
"def create_lock(self, lock_name):\n path = '/locks/create/%s' % lock_name\n response = self.rest.request(method='post',\n content_type='text/plain', path=path)\n return response.text",
"def request_write_access(self):\n self.access_ = False\n\n # double-check that the server is running\n if not self.running():\n self.start()\n\n # submit request via touching a file\n request_file_name = os.path.join(self.cache_dir_, str(self.pid_) + MGR_REQUEST_EXT)\n f = open(request_file_name, 'w')\n f.write('%d\\n' %(self.pid_))\n f.close()\n\n # wait for signal\n while not self.access_:\n time.sleep(MGR_SLEEP_TIME)\n return self.access_",
"def lock_object(self):\n return gevent.thread.allocate_lock()",
"async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:\n log = request.app[\"Log\"]\n log.info(\"Call for locking down the project.\")\n\n session = await aiohttp_session.get_session(request)\n\n project = request.match_info[\"project\"]\n\n # Ditch all projects that aren't the one specified if project is defined\n if project in session[\"projects\"]:\n session[\"projects\"] = dict(\n filter(\n lambda val: val[0] == project,\n session[\"projects\"].items(),\n )\n )\n # If the project doesn't exist, allow all untainted projects\n else:\n session[\"projects\"] = dict(\n filter(lambda val: not val[1][\"tainted\"], session[\"projects\"].items())\n )\n\n if not session[\"projects\"]:\n session.invalidate()\n raise aiohttp.web.HTTPForbidden(reason=\"No untainted projects available.\")\n\n # The session is no longer tainted if it's been locked\n session[\"taint\"] = False\n\n session.changed()\n return aiohttp.web.Response(\n status=303,\n body=None,\n headers={\n \"Location\": \"/browse\",\n },\n )",
"def lock(self) -> TokenEKeyLock:\n return self._lock",
"def lock(self, wait: bool = True, timeout: int = None) -> 'Eva':\n self.__logger.debug('Eva.lock called')\n if wait:\n self.__http_client.lock_wait_for(timeout=timeout)\n else:\n self.__http_client.lock_lock()\n return self",
"def processLock(self):\r\n self.controller.executionLock()",
"def test_locked_thread_belongs_to_document(self):\n r = post(\n self.client, \"wiki.discuss.lock_thread\", {}, args=[self.doc_2.slug, self.thread.id]\n )\n self.assertEqual(404, r.status_code)",
"def lock (self):\n self._createLockFileIfNeeded()\n self._lockHandle=open(self._lockFileName, \"r\")\n fcntl.flock(self._lockHandle, fcntl.LOCK_EX)",
"def svn_fs_lock(*args):\r\n return _fs.svn_fs_lock(*args)",
"def lock(self):\n\n\t\t# Acquire the threading lock first\n\t\tself.s.acquire()\n\t\t# Acquire the file lock\n\t\tfcntl.lockf(self.lockfile, fcntl.LOCK_EX)",
"async def async_lock(hass, cluster, entity_id):\n from zigpy.zcl.foundation import Status\n with patch(\n 'zigpy.zcl.Cluster.request',\n return_value=mock_coro([Status.SUCCESS, ])):\n # lock via UI\n await hass.services.async_call(DOMAIN, 'lock', {\n 'entity_id': entity_id\n }, blocking=True)\n assert cluster.request.call_count == 1\n assert cluster.request.call_args[0][0] is False\n assert cluster.request.call_args[0][1] == LOCK_DOOR"
] |
[
"0.6321234",
"0.6303788",
"0.61984277",
"0.6192926",
"0.6147174",
"0.61304164",
"0.6030385",
"0.58965194",
"0.5814758",
"0.5760184",
"0.57189375",
"0.5718485",
"0.57042176",
"0.5690154",
"0.5661982",
"0.5631437",
"0.5613648",
"0.5563974",
"0.55422676",
"0.55245847",
"0.552153",
"0.549592",
"0.54907733",
"0.548275",
"0.5435248",
"0.5415431",
"0.538928",
"0.53793544",
"0.53628993",
"0.53547007"
] |
0.7071806
|
0
|
Removes a lock from the document. To unlock the document you need to specify the lock's delete link. You should never need to call this method directly, as it is called by the methods that need to unlock content. This method returns the HTTP response of the unlock request.
|
def __deleteLock(self, url):
response = self._adapter.deleteRequest(url, self._baseHeader)
return response
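A rough requests-based equivalent of the unlock call; the lock's delete link and the authorization header are placeholders.
# Hypothetical raw HTTP sketch: DELETE the lock's delete link to release it.
import requests

delete_link = "https://api.example.com/files/documents/123/locks/1"   # placeholder URL
response = requests.delete(delete_link, headers={"Authorization": "Bearer <token>"})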
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unlock(self) -> None:\n self.__logger.debug('Eva.unlock called')\n return self.__http_client.lock_unlock()",
"def unlock(self):\n print(\"DEPRECATED unlock\")\n return self._operations.unlock()",
"def delete_lock(self, lock_name):\n path = '/locks/delete/%s' % lock_name\n response = self.rest.request(content_type='text/plain',\n method='delete', path=path)\n return response.ok",
"def _unlock(self):\n self._lockFile.close()\n os.unlink(self._lockFilename)",
"def __createLock(self):\n lockUrl = self.metaData.getLink(\"lock\")\n assert lockUrl is not None\n\n lockBody = json.dumps({\"lockIntent\" : \"lockedForEdit\"})\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n lockResponse = self._adapter.postRequest(lockUrl, header, lockBody)\n\n return lockResponse",
"def release_lock(self):\n senlin_lock.node_lock_release(self.entity.id, self.id)\n\n # only release cluster lock if it was locked as part of this\n # action (i.e. it's a user intiated action aka CAUSE_RPC from\n # senlin API and a not a CAUSED_DERIVED)\n if self.cause == consts.CAUSE_RPC:\n senlin_lock.cluster_lock_release(self.entity.cluster_id, self.id,\n senlin_lock.NODE_SCOPE)\n return self.RES_OK",
"def _unlock(self):\n if self.is_locked():\n self._unlink(self.lockfile)\n self._remove_unique_file()\n self._p(\"Lock removed.\")\n else:\n self._remove_unique_file()",
"def get_release_lock_response(cls):\n return cls.create_simple_success_response()",
"def svn_client_unlock(apr_array_header_t_targets, svn_boolean_t_break_lock, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def svn_fs_unlock(*args):\r\n return _fs.svn_fs_unlock(*args)",
"def lock_renew(self) -> None:\n self.__logger.debug('Eva.lock_renew called')\n return self.__http_client.lock_renew()",
"def unlock(self, nReserved):\n\t\treturn Job(SDK.PrlVm_Unlock(self.handle, nReserved)[0])",
"def release_lock():\r\n get_lock.n_lock -= 1\r\n assert get_lock.n_lock >= 0\r\n # Only really release lock once all lock requests have ended.\r\n if get_lock.lock_is_enabled and get_lock.n_lock == 0:\r\n get_lock.start_time = None\r\n get_lock.unlocker.unlock()",
"async def async_unlock(hass, cluster, entity_id):\n from zigpy.zcl.foundation import Status\n with patch(\n 'zigpy.zcl.Cluster.request',\n return_value=mock_coro([Status.SUCCESS, ])):\n # lock via UI\n await hass.services.async_call(DOMAIN, 'unlock', {\n 'entity_id': entity_id\n }, blocking=True)\n assert cluster.request.call_count == 1\n assert cluster.request.call_args[0][0] is False\n assert cluster.request.call_args[0][1] == UNLOCK_DOOR",
"def close_file(self):\n SpooledTemporaryFile.flush(self)\n response = requests.post(LOCK_SERVER_ADDR, json={'file_path': self.filename, 'lock_file': False, 'user_id': self.user_id})\n print response",
"def unlock(lock):\n lock.release()",
"def release_lock():\n lock_file = get_lock_file()\n if exists(lock_file):\n LOG.info('Removing lock file %r' % lock_file)\n os.unlink(lock_file)\n else:\n LOG.warning('Lock file %r did not exist.' % lock_file)",
"def unlock (self):\n fcntl.flock(self._lockHandle, fcntl.LOCK_UN)\n self._lockHandle.close()",
"def rpc_unlock(self, session, rpc, target):\n del rpc, session, target # avoid unused errors from pylint\n return",
"def revoke(self):\r\n return http.Request('DELETE', self.get_url()), parsers.parse_empty",
"def locked_delete(self):\n self._multistore._delete_credential(self._key)",
"def f_unlock(self):\n self._locked = False",
"def release(self):\n self.is_locked = False\n os.unlink(self.lockfile)",
"def _release_listgen_lock(self):\n with self._conn as conn, conn.cursor() as cursor:\n cursor.execute('SELECT pg_advisory_unlock(%s::BIGINT)', [self._lock_key])",
"def lock_delete(self):\n self.lock_stop()\n # print(self.lock_obj.file_path)\n self.lock_obj.delete_instance()\n # print(self.lock_obj.file_path)\n # print(\"self.lock_obj.lock_delete()\")",
"def unlock(self):\n raise NotImplementedError",
"def _release(self):\n try:\n os.unlink(self.lockfile)\n\n # Log success.\n logging.info(\"Released lock at \" + self.lockfile + \"...\")\n except:\n # Ignore all errors.\n pass",
"def _unlock(self):\n self._file.unlock_and_close()\n self._thread_lock.release()",
"def _unlock(self):\n from os import remove\n remove(self.db_path + \".lock\")",
"def testUnlockWait(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n res = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res, msg=\"Locking file %s\" % t.code)\n sleeptime = c.getLeaseTime() * 2\n env.sleep(sleeptime)\n ops = c.use_obj(fh)\n ops += [c.locku_op(READ_LT, 1, res.lockid, 0, 0xffffffffffffffff)]\n _replay(c, ops, [NFS4_OK, NFS4ERR_EXPIRED])"
] |
[
"0.68083674",
"0.6345917",
"0.6094885",
"0.5934181",
"0.5823708",
"0.58127487",
"0.5714637",
"0.5680282",
"0.5665088",
"0.56600356",
"0.5629937",
"0.5591542",
"0.5526212",
"0.55148894",
"0.55102915",
"0.54996765",
"0.54857737",
"0.546973",
"0.54582894",
"0.5447916",
"0.5415437",
"0.54038084",
"0.5383953",
"0.5356744",
"0.5353559",
"0.534669",
"0.5338218",
"0.5337916",
"0.53332263",
"0.5309976"
] |
0.68893224
|
0
|
Moves this document to another folder. This method returns a Document object for the newly moved document. >>> moveTo(Folder) -> api.document.Document object. Raises an AssertionError if the document does not have a move link, if the folder object you specify does not act like a folder, or if we cannot find a parentLink.
|
def moveTo(self, folder):
    parent = self.metaData.getLinkIndex('parent')
    moveUri = self.metaData.getLink("move")

    assert parent != -1
    assert moveUri is not None
    if not hasattr(folder, "metaData"): raise TypeError("Your newFolder does not have a metaData property")
    assert hasattr(folder, "selfLink")

    header = self._baseHeader.copy()
    header['Content-type'] = "application/vnd.huddle.data+json"
    jsonData = self.metaData.jsonObj
    jsonData['links'][parent] = {'href': folder.selfLink, 'rel': 'parent'}
    response = self._adapter.putRequest(moveUri, header, json.dumps(jsonData))

    return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))
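
A minimal usage sketch (added for illustration, not part of the original row): it assumes an authenticated `client` object and the Document/Folder constructors shown in this dump; both URLs are hypothetical.

# Hedged usage sketch -- `client` and both URLs are assumptions.
doc = Document(client, "https://api.example.com/documents/123")   # hypothetical document URL
target = Folder(client, "https://api.example.com/folders/456")    # hypothetical folder URL
moved = doc.moveTo(target)   # returns a new Document pointing at the moved resource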
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def move_to_folder(self):\n if \"moveToFolder\" in self._prop_dict:\n return self._prop_dict[\"moveToFolder\"]\n else:\n return None",
"def moveTo(self, newFolder):\n moveURI = self.metaData.getLink(\"move\")\n parent = self.metaData.getLinkIndex('parent')\n\n assert parent != -1\n assert moveURI is not None\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your newFolder does not have a self link\")\n\n self.metaData.jsonObj['links'][parent] = {'href' : newFolder.selfLink, 'rel' : 'parent'}\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n response = self._adapter.putRequest(moveURI,header, json.dumps(self.metaData.jsonObj))\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n return Folder(self._client, newLink)",
"def test_migrate_folder_to_document(self):\n folder = self.portal['folder-1']\n folder.invokeFactory('Document',\n 'my-page-test',\n title=\"My page test\",\n text='spam spam')\n output = migrateContents(self.portal, \"Folder\", \"Document\")\n self.assertEqual(output.get('counter', 0), 2)\n self.assertNotEqual(output.get('error', []), [])\n self.assertEqual(output['error'][0]['msg'], 'Failed migration for object /plone/folder-1 (Folder -> Document)')\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Document\").actual_result_count == 12)\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Folder\").actual_result_count == 1)\n self.assertEqual(self.portal['folder-2'].portal_type, \"Document\")\n self.assertEqual(self.portal['folder-1'].portal_type, \"Folder\")",
"def _move(self, id: str, parent_id: str) -> MoveFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.move\n request_obj: MoveFolderRequestModel = endpoint.load_request(parent_id=parent_id)\n response: MoveFolderResponseModel = endpoint.perform_request(\n http=self.auth.http,\n request_obj=request_obj,\n id=id,\n )\n return response",
"def move_to(self, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/move\" % self.project_folder_id, params=params)",
"def copyTo(self, folder):\n copyUrl = self.metaData.getLink(\"copy\")\n\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert getattr(folder, \"selfLink\")\n assert copyUrl is not None\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n body = '{ \"targetFolder\":{ \"link\":{ \"rel\": \"self\", \"href\": \"' + folder.selfLink + '\" } } }'\n\n response = self._adapter.postRequest(copyUrl, header, body)\n\n return Document(self._client, response['Headers']['location'])",
"def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")",
"def test_migrate_document_to_folder(self):\n output = migrateContents(self.portal, \"Document\", \"Folder\")\n self.assertEqual(output.get('counter', 0), 10)\n self.assertEqual(output.get('error', []), [])\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Document\").actual_result_count == 0)\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Folder\").actual_result_count == 12)\n folder_titles = ['Folder 1', 'Folder 2', 'My page 0', 'My page 1', 'My page 2', 'My page 3', 'My page 4', 'My page 5', 'My page 6', 'My page 7', 'My page 8', 'My page 9']\n self.assertEqual([x.Title for x in self.portal.portal_catalog(portal_type=\"Folder\", sort_on=\"sortable_title\")], folder_titles)",
"async def move_folder(\n self,\n request: Optional[Union[folders.MoveFolderRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n destination_parent: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation_async.AsyncOperation:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name, destination_parent])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = folders.MoveFolderRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n if destination_parent is not None:\n request.destination_parent = destination_parent\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.move_folder,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation_async.from_gapic(\n response,\n self._client._transport.operations_client,\n folders.Folder,\n metadata_type=folders.MoveFolderMetadata,\n )\n\n # Done; return the response.\n return response",
"def move_to_by_path(self, new_relative_path, retain_editor_and_modified=False):\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativePath\", SPResPath(new_relative_path))\n\n def _move_folder():\n MoveCopyUtil.move_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),\n self._build_full_url(new_relative_path),\n MoveCopyOptions(\n retain_editor_and_modified_on_move=retain_editor_and_modified))\n\n self.ensure_property(\"ServerRelativePath\", _move_folder)\n return target_folder",
"def move_to(self, new_relative_url, retain_editor_and_modified=False):\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativeUrl\", new_relative_url)\n\n def _move_folder():\n MoveCopyUtil.move_folder(self.context, self._build_full_url(self.serverRelativeUrl),\n self._build_full_url(new_relative_url),\n MoveCopyOptions(retain_editor_and_modified_on_move=retain_editor_and_modified))\n\n self.ensure_property(\"ServerRelativeUrl\", _move_folder)\n return target_folder",
"def copyTo(self, newFolder):\n copyUri = self.metaData.getLink(\"copy\")\n\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your folder object is missing a selfLink\")\n assert copyUri is not None\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n body = '{ \"targetFolder\":{ \"link\":{ \"rel\": \"self\", \"href\": \"' + newFolder.selfLink + '\" } } }'\n response = self._adapter.postRequest(copyUri, header, body)\n\n return Folder(self._client, response['Headers']['location'])",
"def req_note_list_manipulate_foldermove(self):\n\n if self.helper_action_get_request_is_wrong(\"req_note_list_manipulate_foldermove\"):\n self.error_msg_queue_list.append(\"Note manipulation not performed.\")\n return\n\n if self.helper_sessactionauth_is_wrong():\n self.error_msg_queue_list.append(\"Note manipulation not performed - wrong session?\")\n return\n\n try:\n task_id_list = self.last_request_post_data_dict[\"taskid\"]\n foldermove = self.last_request_post_data_dict[\"foldermove\"][0]\n except:\n self.error_msg_queue_list.append(\"Note manipulation not performed - cannot access required POST data.\")\n else:\n self.ui_backend.notes_foldermove(task_id_list, foldermove)",
"def mv(path_file_folder, new_path):\n if not is_folder(new_path):\n raise DegooError(f\"mv: The target path is not a folder\")\n\n source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')]\n\n if source_path == new_path:\n raise DegooError(f\"mv: The target path cannot be the same as the source path\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n if isinstance(new_path, int):\n new_parent_id = new_path\n elif isinstance(new_path, str):\n new_parent_id = path_id(new_path)\n else:\n raise DegooError(f\"rm: Illegal destination folder: {new_path}\")\n\n return api.mv(file_id, new_parent_id)",
"def folder_voicemail_message(self, mailbox, folder, message_num, new_folder):\n method = \"moveFolderVoicemailMessage\"\n\n if not isinstance(mailbox, int):\n raise ValueError(\"ID for a specific Mailbox needs to be an int (Example: 1001)\")\n\n if not isinstance(folder, str):\n raise ValueError(\"Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)\")\n\n if not isinstance(message_num, int):\n raise ValueError(\"ID for specific Voicemail Message needs to be an int (Required if folder is passed, Example: 1)\")\n\n if not isinstance(new_folder, str):\n raise ValueError(\"Destination Folder needs to be a str (Example: 'Urgent', values from: voicemail.get_voicemail_folders)\")\n\n parameters = {\n \"mailbox\": mailbox,\n \"folder\": folder,\n \"message_num\": message_num,\n \"new_folder\": new_folder,\n }\n\n return self._voipms_client._get(method, parameters)",
"def search_objects_move(\n self,\n folder: t.Union[str, Folder],\n searches: t.List[str],\n target: t.Union[str, Folder],\n create: bool = FolderDefaults.create_action,\n pattern_prefix: t.Optional[str] = FolderDefaults.pattern_prefix,\n ignore_case: bool = FolderDefaults.ignore_case,\n error_unmatched: bool = FolderDefaults.error_unmatched,\n error_no_matches: bool = FolderDefaults.error_no_matches,\n error_no_objects: bool = FolderDefaults.error_no_objects,\n recursive: bool = FolderDefaults.recursive,\n all_objects: bool = FolderDefaults.all_objects,\n full_objects: bool = FolderDefaults.full_objects_search,\n echo: bool = FolderDefaults.echo_action,\n ) -> t.Tuple[\"Folder\", t.List[BaseModel]]:\n folder: Folder = self.find(folder=folder, create=False, echo=echo)\n return folder.search_objects_move(\n searches=searches,\n folder=target,\n create=create,\n pattern_prefix=pattern_prefix,\n ignore_case=ignore_case,\n error_unmatched=error_unmatched,\n error_no_matches=error_no_matches,\n error_no_objects=error_no_objects,\n recursive=recursive,\n all_objects=all_objects,\n full_objects=full_objects,\n echo=echo,\n )",
"def move(self, to_folder, page_size=1000, chunk_size=100):\n ids = self._id_only_copy_self()\n ids.page_size = page_size\n return self.folder_collection.account.bulk_move(\n ids=ids,\n to_folder=to_folder,\n chunk_size=chunk_size,\n )",
"def __newFolder(self):\n from .BookmarkNode import BookmarkNode\n \n currentIndex = self.bookmarksTree.currentIndex()\n idx = QModelIndex(currentIndex)\n sourceIndex = self.__proxyModel.mapToSource(idx)\n sourceNode = self.__bookmarksModel.node(sourceIndex)\n row = -1 # append new folder as the last item per default\n \n if (\n sourceNode is not None and\n sourceNode.type() != BookmarkNode.Folder\n ):\n # If the selected item is not a folder, add a new folder to the\n # parent folder, but directly below the selected item.\n idx = idx.parent()\n row = currentIndex.row() + 1\n \n if not idx.isValid():\n # Select bookmarks menu as default.\n idx = self.__proxyModel.index(1, 0)\n \n idx = self.__proxyModel.mapToSource(idx)\n parent = self.__bookmarksModel.node(idx)\n node = BookmarkNode(BookmarkNode.Folder)\n node.title = self.tr(\"New Folder\")\n self.__bookmarksManager.addBookmark(parent, node, row)",
"def copy_to_folder(self):\n if \"copyToFolder\" in self._prop_dict:\n return self._prop_dict[\"copyToFolder\"]\n else:\n return None",
"def copy_to_by_path(self, new_relative_path, keep_both=False, reset_author_and_created=False):\n\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativePath\", SPResPath(new_relative_path))\n\n def _copy_folder():\n opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)\n MoveCopyUtil.copy_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),\n self._build_full_url(new_relative_path), opts)\n\n self.ensure_property(\"ServerRelativePath\", _copy_folder)\n return target_folder",
"def test_6e_move_data_btw_folders(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists) or (not GST.dir2_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dirs\")\n elif not GST.moving_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare moving data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"move_file\"] % (GST.gs_file_paths[\"file_to_move_to_folder_source_path\"], GST.gs_file_paths[\"move_to_folder_target_path\"])\n try:\n self.send_request(function, \"move_file()\")\n except Exception as e:\n raise MoveException(\"Failed to move the data between folders. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise MoveException(\"Failed to move the data between folders. \\n\" + response)",
"def move_project_to(self, project_key, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/projects/%s/move\" % (self.project_folder_id, project_key), params=params)",
"def copy_to(self, new_relative_url, keep_both=False, reset_author_and_created=False):\n\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativeUrl\", new_relative_url)\n\n def _copy_folder():\n opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)\n MoveCopyUtil.copy_folder(self.context, self._build_full_url(self.serverRelativeUrl),\n self._build_full_url(new_relative_url), opts)\n\n self.ensure_property(\"ServerRelativeUrl\", _copy_folder)\n return target_folder",
"def test_get_object_link_folder(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n url = reverse(\n 'filesfolders:list', kwargs={'folder': self.folder.sodar_uuid}\n )\n ret = plugin.get_object_link('Folder', self.folder.sodar_uuid)\n self.assertEqual(ret['url'], url)\n self.assertEqual(ret['label'], self.folder.name)",
"def MoveFileToFolder(self, file_id, folder_id):\n f = self.service.files().update(fileId=file_id, body={\"parents\":[{\"id\":folder_id}]}).execute()\n return f[\"id\"]",
"def mv(self, item, destination, execute=False):\n file = self.drive.files().update(\n fileId=item[\"id\"],\n addParents=destination[\"id\"],\n removeParents=\",\".join(item[\"parents\"]),\n fields=\"id, name, parents\",\n supportsAllDrives=self.shared_drive[0],\n )\n if execute:\n file = file.execute()\n return file",
"def move(self): #py:UR.move\n RUR._UR.move_(self.body)",
"def postFolder(self, parent, name, check=True):\n\n folder = vsdModels.Folder()\n if parent is None:\n parent = self.getFolderByName('MyProjects', mode='exact')\n folder.parentFolder = vsdModels.APIBase(selfUrl=parent.selfUrl)\n folder.name = name\n\n exists = False\n\n if check:\n if parent.childFolders:\n for child in parent.childFolders:\n fold = self.getFolder(child.selfUrl)\n if fold is not None:\n if fold.name == name:\n print('folder {0} already exists, id: {1}'.format(name, fold.id))\n exists = True\n return fold\n else:\n print('unexpected error, folder exists but cannot be retrieved')\n exists = True\n\n # print(self.postRequest('folders', data = data))\n if not exists:\n data = folder.to_struct()\n # for name, field in folder:\n # if name not in data:\n # data[name] = None\n # print(data)\n res = self.postRequest('folders', data=data)\n folder.populate(**res)\n print('folder {0} created, has id {1}'.format(name, folder.id))\n assert folder.name == name\n return folder",
"def addObjectToFolder(self, target, obj):\n\n objSelfUrl = vsdModels.APIBase(**obj.to_struct())\n\n if not objSelfUrl in target.containedObjects:\n target.containedObjects.append(objSelfUrl)\n res = self.putRequest('folders', data=target.to_struct())\n\n target = vsdModels.Folder(**res)\n return target\n\n else:\n return target",
"def find(\n self,\n folder: t.Union[str, Folder],\n create: bool = FolderDefaults.create,\n echo: bool = FolderDefaults.echo,\n ) -> Folder:\n root: FoldersModel = self.get()\n return root.find(folder=folder, create=create, refresh=False, echo=echo)"
] |
[
"0.7297154",
"0.7124918",
"0.6340775",
"0.6333984",
"0.6298626",
"0.6161408",
"0.609878",
"0.595133",
"0.590833",
"0.5853341",
"0.57983345",
"0.56845987",
"0.5643568",
"0.55118966",
"0.5488545",
"0.5386517",
"0.53304493",
"0.5285402",
"0.5278725",
"0.5239656",
"0.5220192",
"0.5208847",
"0.5162179",
"0.515844",
"0.51392734",
"0.5093513",
"0.5067373",
"0.5049124",
"0.50032294",
"0.49646407"
] |
0.7776914
|
0
|
Grabs the version history of the content. This is returned as a dictionary. >>> versionHistory(Folder) -> dict
|
def versionHistory(self):
    url = self.metaData().getLink("version-history")
    assert url is not None
    header = self._baseHeader.copy()
    response = self._adapter.getRequest(url, header)
    return json.loads(response['Body'])
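
A minimal usage sketch (added for illustration): it assumes `folder` was built as Folder(client, url) elsewhere; the shape of the returned dictionary depends on the service.

# Hedged usage sketch -- `folder` is assumed to be a Folder built as Folder(client, url).
history = folder.versionHistory()   # parsed JSON body as a dict
print(history)                      # inspect whatever version entries the service returns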
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _grab_history(self):\n self.data['history_lines'] = []\n self.data['history_file'] = None\n self.data['history_encoding'] = None\n self.data['headings'] = []\n self.data['history_last_release'] = ''\n self.data['history_insert_line_here'] = 0\n default_location = None\n config = self.setup_cfg.config\n if config and config.has_option('zest.releaser', 'history_file'):\n default_location = config.get('zest.releaser', 'history_file')\n history_file = self.vcs.history_file(location=default_location)\n self.data['history_file'] = history_file\n if not history_file:\n logger.warn(\"No history file found\")\n return\n logger.debug(\"Checking %s\", history_file)\n history_lines, history_encoding = read_text_file(history_file)\n history_lines = history_lines.split('\\n')\n headings = utils.extract_headings_from_history(history_lines)\n if not headings:\n logger.warn(\"No detectable version heading in the history \"\n \"file %s\", history_file)\n return\n self.data['history_lines'] = history_lines\n self.data['history_encoding'] = history_encoding\n self.data['headings'] = headings\n\n # Grab last header.\n start = headings[0]['line']\n if len(headings) > 1:\n # Include the next header plus underline, as this is nice\n # to show in the history_last_release.\n end = headings[1]['line'] + 2\n else:\n end = len(history_lines)\n history_last_release = '\\n'.join(history_lines[start:end])\n self.data['history_last_release'] = history_last_release\n\n # Add line number where an extra changelog entry can be inserted. Can\n # be useful for entry points. 'start' is the header, +1 is the\n # underline, +2 is probably an empty line, so then we should take +3.\n # Or rather: the first non-empty line.\n insert = start + 2\n while insert < end:\n if history_lines[insert].strip():\n break\n insert += 1\n self.data['history_insert_line_here'] = insert",
"def revision_history(self, uuid):\n return self.write.revision_history(rid=uuid)",
"def get_history(cls, api, history):\n api_base = api.split('/')[-1]\n cursor = cls.history_index.cursor()\n cursor.execute(\n \"select filename from history where api=? and ymdh=?;\",\n (api_base, history))\n files = [r[0] for r in cursor]\n cls.history_index.commit()\n if not files:\n return {}\n results = {}\n for fn in files:\n ts = re.split('[?@]', fn)[-1].replace('.gz', '')\n fn_full = os.path.join(config.base_store_dir, fn)\n fd = (gzip.open if fn.endswith('.gz') else open)(fn_full)\n results[ts] = json.load(fd, encoding='utf8')\n fd.close()\n return results",
"def history(self):\n return self.info['history']",
"def get_revisions(self, key):\n c = pysvn.Client()\n revs = c.log(settings.SVN_WC_PATH, discover_changed_paths=True)\n crevs = []\n for r in revs:\n if '/'+key in [p.path for p in r.changed_paths]:\n crevs.append(r.revision.number)\n crevs.sort(reverse=True)\n return crevs[1:] # cut of the head revision-number",
"def History(self):\n return self.historydict.get('history', [])",
"def getCurrentVersions(self):\r\n if path.exists('../versions.pckl'):\r\n f = open('../versions.pckl', 'rb')\r\n versions = pickle.load(f)\r\n f.close()\r\n else:\r\n versions = {\"subsystems\": {}, \"grafana\": {}}\r\n return versions",
"def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))",
"def history(self):\n return self._history",
"def history(self):\n return self._history",
"def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()",
"def history():",
"def svn_fs_node_history(*args):\r\n return _fs.svn_fs_node_history(*args)",
"def all(self):\r\n if self._versions is None or \\\r\n len(self._versions) == 0:\r\n url = \"%s/versions\" % self._url\r\n params = {'f':'json'}\r\n res = self._con.get(url, params)\r\n self._versions = []\r\n if 'versions' in res:\r\n for v in res['versions']:\r\n guid = v['versionGuid'][1:-1]\r\n vurl = \"%s/versions/%s\" % (self._url, guid)\r\n self._versions.append(Version(url=vurl,\r\n flc=self._flc,\r\n gis=self._gis))\r\n return self._versions\r\n return self._versions",
"def get_history(hdr):\n return hdr['HISTORY']",
"def get_history(self):\n return self.history",
"def history(self, maxresults=None, mindate=None):\n server = self._server._server.resource(self._server.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate,\n accountID=self._server.accountID, librarySectionID=self.sectionKey)",
"def versions(self):\n raise Exception(\"mcapi.Datafile.versions is not implemented\")",
"def revisions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"revisions\")",
"def get_linked_versions(version='current'):\n version = check_version_str(version)\n chapters = [10, 9, 8]\n version_page = 'https://research.cs.wisc.edu/htcondor/manual/{ver}/{chapter}_Version_History.html'\n r = requests.get(version_page.format(ver=version, chapter=chapters[0]))\n if r.status_code == 404:\n # Try different chapter numbers, as it changes for different versions\n i = 1\n while r.status_code == 404 and i < len(chapters):\n r = requests.get(version_page.format(ver=version, chapter=chapters[i]))\n i += 1\n if r.status_code == 404:\n return []\n soup_vers = bs4.BeautifulSoup(r.text, 'lxml')\n versions = [x.text.replace('Version ', '')\n for x in soup_vers.find_all('a')\n if x.text.startswith('Version')]\n return versions",
"def get_cache_history_items(self):\n #gdb.execute(\"p cache->history_items\")\n history_items = ZabbixHashset(gdb.parse_and_eval ('cache->history_items'))\n self.data = history_items.parse()",
"def download_revisions(self, filename, output_folder=None):\n print self.c.files_list_revisions\n revs = sorted(self.c.files_list_revisions(filename, limit=10000).entries,\n key=lambda entry: entry.server_modified)\n print revs\n #revs = self.c.files_list_revisions(filename)\n #revs = self.c.revisions(filename)\n\n for rev in revs:\n print(rev)\n revision_id = rev.rev\n mod_time = rev['client_mtime'].replace(\" \", \"_\").replace(\":\", \"\").replace(\"+\", \"\").replace(',', '')\n\n\n if output_folder is None:\n output_folder = self.output_folder\n\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n\n folder = os.path.join(output_folder, os.path.splitext(os.path.basename(filename))[0])\n\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n out_filename = os.path.join(folder, '%s.tex' % (mod_time))\n\n if not os.path.exists(out_filename):\n outfile = open(out_filename, 'wb')\n with self.c.get_file(filename, rev=revision_id) as f:\n outfile.write(f.read())\n\n outfile.close()\n else:\n print(\"Already done, skipping\")",
"def getVersions(self):\n logger.debug(\"Func: getVersions\")\n\n try:\n return self._currentSceneInfo[\"Versions\"]\n except:\n return []",
"def get_versions(self):\n raise NotImplementedError",
"def versions():\n result = timeline.versions()\n if result:\n click.echo('\\n'.join(result))",
"def get_history(page):\n headings = page.filter_headings()\n idx = [i for i, head in enumerate(headings) \n if 'History' in head or 'history' in head]\n if not idx:\n return \"\"\n sections = page.get_sections(include_headings=True)\n history = str(sections[idx[0]+1].strip_code())\n return history",
"def history(self, key, _from='-', _to='+', _desc=True):\n return [self.klass.from_json(_object)\n for _object in self.storage.history(key, _from, _to, _desc)]",
"def history(self, id):\n lm, previous_versions = h.get_model_and_previous_versions('MorphemeLanguageModel', id)\n if lm or previous_versions:\n return {'morpheme_language_model': lm,\n 'previous_versions': previous_versions}\n else:\n response.status_int = 404\n return {'error': 'No morpheme language models or morpheme language model backups match %s' % id}",
"def _get_history_data(self) -> List[Dict[str, Any]]:\n try:\n with open(self._path.as_posix(), \"r\", encoding=\"utf-8\") as history_file:\n data = json.load(history_file)\n data.append(History._get_empty_session_object())\n return data\n except FileNotFoundError:\n self._path.touch()\n return History._get_empty_json_object()\n except json.decoder.JSONDecodeError:\n return History._get_empty_json_object()",
"def history(name):\n from pybel.resources.arty import get_knowledge_history\n from pybel.resources.document import get_bel_knowledge_hash\n\n for path in get_knowledge_history(name):\n h = get_bel_knowledge_hash(path.as_posix())\n click.echo('{}\\t{}'.format(path, h))"
] |
[
"0.65825874",
"0.6521536",
"0.6386225",
"0.638059",
"0.62766725",
"0.6217639",
"0.61316204",
"0.5951934",
"0.59497416",
"0.59497416",
"0.59479266",
"0.5925264",
"0.59054226",
"0.59028655",
"0.5882962",
"0.58716905",
"0.5824519",
"0.5821543",
"0.58190054",
"0.5817504",
"0.58124316",
"0.5809492",
"0.58066356",
"0.58015746",
"0.57961774",
"0.57939476",
"0.57512456",
"0.57327044",
"0.57160586",
"0.57126987"
] |
0.752361
|
0
|
Creates a sub folder in the specified directory. The index parameter is optional; if you do not specify index it will create the sub folder in the parent directory. If everything goes right this method will return a Folder object of the new folder.
|
def createFolder(self, title, description="", index=None):
    assert isinstance(index, int) or index is None
    try:
        if index is None:
            url = self.metaData.getLink("create-folder")
        else:
            url = self.getFolders()[index].getLink("create-folder")

        header = self._baseHeader.copy()
        header['Content-Type'] = "application/vnd.huddle.data+json"

        skeletonFolder = {"title": title, "description": description}
        jsonString = json.dumps(skeletonFolder)
        response = self._adapter.postRequest(url, header, jsonString)

        return Folder(self._client, response['Headers']['location'])
    except IndexError:
        print("the index: " + str(index) + " does not exist in the list of folder numbers we have")
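
A minimal usage sketch (added for illustration): it assumes `client` is an authenticated client and that the folder URL is hypothetical; the call signatures follow the method above.

# Hedged usage sketch -- `client` and the folder URL are assumptions.
workspace = Folder(client, "https://api.example.com/folders/456")        # hypothetical URL
reports = workspace.createFolder("Reports", description="Quarterly reports")
nested = workspace.createFolder("2014", index=0)   # create inside the first child folder instead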
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_folder(id_parent, name):\n id_folder = incr_key_store('folders:counter')\n rpush_key_store('folders:list', {'id': id_folder, 'parent': id_parent, 'name': name})\n return id_folder",
"def create_folder(client, parent_folder_id, folder_name):\n\n try:\n subfolder = client.folder(parent_folder_id).create_subfolder(folder_name)\n print(f'Created subfolder with ID {subfolder.id}')\n\n except Exception as e:\n print(f\"An error occurred: {e}\")",
"def mk_index_dir(self):\n try:\n os.makedirs(self.params[\"index_path\"])\n except FileExistsError:\n pass",
"async def new_folder(name):\n res = await joplin.create_folder(folder=name)\n return res.json()['id']",
"def create_folder(self, foldername, parents=''):\r\n formatted_parents = (parents + '/').replace('/', '%2F')\r\n\r\n return self.yandex_requests.create_folder(\r\n foldername, formatted_parents)",
"def create_folder(self):\n cur_dir=os.getcwd()\n unique=False\n dirlist= [item for item in os.listdir(cur_dir) if os.path.isdir(os.path.join(cur_dir,item))]\n folder_name='taxonomy_{}_{}'.format(self.place,self.year)\n j=1\n while not unique:\n if folder_name in dirlist:\n folder_name='taxonomy_{}_{}({})'.format(self.place,self.year,str(j))\n j+=1\n else:\n unique=True\n new_folder=os.path.join(cur_dir,folder_name)\n os.mkdir(new_folder)\n os.chdir(new_folder)\n return folder_name",
"def createFolder(self):\n raise NotImplementedError",
"def create_folder(path_folder, name_subfolder=None):\n if not name_subfolder:\n if not os.path.exists(path_folder):\n os.makedirs(path_folder)\n else:\n path_result_subolder = os.path.join(path_folder, name_subfolder)\n if not os.path.exists(path_result_subolder):\n os.makedirs(path_result_subolder)",
"def create_path_and_index(subdir: str) -> None:\n if not os.path.exists(WEBOUT_PATH + subdir):\n os.makedirs(WEBOUT_PATH + subdir)\n create_blank_index(WEBOUT_PATH + subdir + \"index.html\")",
"def deleteFolder(self, index=None):\n assert isinstance(index, int) or index is None\n\n try:\n if index is None:\n url = self.metaData.getLink(\"delete\")\n else:\n url = self.getFolders()[index].metaData().getLink(\"delete\")\n\n assert url is not None\n\n response = self._adapter.deleteRequest(url, self._baseHeader)\n\n self.metaData.graveyard.append(self.metaData)\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n\n return Folder(self._client, newLink)\n\n except IndexError:\n print(\"the index: \" + str(index) + \" does not exist in the list of folder numbers we have\")",
"def create_folder(self, foldername: str) -> int:\n raise NotImplementedError",
"def new_dir(folder):\n os.makedirs(folder, exist_ok=True)\n return folder",
"def create_folder(self, req, folder_path, new_folder_name):\n\t\tdirectory_path = os.path.join(self.get_selected_root(req), folder_path)\n\t\t\n\t\t#prevent shenanigans\n\t\tnew_folder_name = new_folder_name.split('/').pop()\n\t\t\n\t\tnew_path = os.path.join(directory_path, new_folder_name)\n\t\tif(os.access(new_path, os.F_OK)):\n\t\t\tcontent = tags.Tag('Error')(number=FLD_EXISTS)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tos.mkdir(new_path)\n\t\t\t\tcontent = tags.Tag('Error')(number=SUCCESS)\n\t\t\texcept:\n\t\t\t\tcontent = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)\n\t\t\n\t\treturn content",
"def dirCreate(newFoldername):\r\n current_directory = os.getcwd()\r\n new_directory = os.path.join(current_directory,newFoldername)\r\n \r\n if not os.path.exists(new_directory):\r\n os.makedirs(new_directory)\r\n return new_directory",
"def create_folder(v_sphere, vmw_parent_folder, new_folder_name):\n try:\n vmw_parent_folder.CreateFolder(new_folder_name)\n return\n\n except vim.fault.DuplicateName:\n raise VMWareCreateDuplicateException(f'Folder name {new_folder_name} already in use')\n\n except vim.fault.InvalidName:\n raise VMWareInvalidInputException(f'Folder name {new_folder_name} is invalid')",
"def add(self, name):\n new_folder = Folder(self.context)\n\n def _add_sub_folder():\n new_folder_url = \"/\".join([self.serverRelativeUrl, name])\n new_folder.set_property(\"ServerRelativeUrl\", new_folder_url)\n qry = CreateEntityQuery(self.folders, new_folder, new_folder)\n self.context.add_query(qry)\n\n self.ensure_property(\"ServerRelativeUrl\", _add_sub_folder)\n return new_folder",
"def create_folder(folder_path: List[str]) -> str:\n drive = _drive_gen()\n return _create_or_find_folder(folder_path, drive)",
"def createFolderStructure(self, rootfolder, filepath, parents):\n\n fp = filepath.resolve()\n folders = list(fp.parts)\n folders.reverse()\n\n ##remove file from list\n if fp.is_file():\n folders.remove(folders[0])\n\n for i in range(parents, len(folders)):\n folders.remove(folders[-1])\n folders.reverse()\n\n fparent = rootfolder\n\n if fparent:\n # iterate over file path and create the directory\n for fname in folders:\n f = vsdModels.Folder(\n name=fname,\n parentFolder=vsdModels.Folder(selfUrl=fparent.selfUrl)\n )\n fparent = f.create(self)\n return fparent\n else:\n print('Root folder does not exist', rootfolder)\n return None",
"def create_new(self, root, name_length):\n self.name = create_random_string(name_length)\n self.ctime = datetime.datetime.now()\n date_time = datetime.datetime.strftime(self.ctime, \"%Y%m%d_%H%M%S\")\n self.folder = f\"{date_time}_{self.name}\"\n self.path = os.path.join(root, self.folder)\n try:\n os.makedirs(self.path)\n print(f\"Created folder {self.folder}\")\n except OSError:\n print(f\"Directory {self.folder} already exists\")\n except:\n print(f\"Cannot create folder: {self.folder}\")\n raise",
"def create_directory_structure(path_main):\n\n if not path_main.exists():\n path_main.mkdir(parents=True)",
"def get_or_create_folder(self, folder_names):\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(\n name=folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print(\"folder_created #%s folder : %s -- created : %s\" % (self.folder_created,\n current_parent, created))\n return current_parent",
"def createFolder(self):\n folderName, ok = QtWidgets.QInputDialog.getText(self, 'Folder Name', 'Enter the folder name :',\n QtWidgets.QLineEdit.Normal)\n\n if ok:\n parent = self.fileDir\n currentPath = self.dataDir\n if self.fileDir.selectedItems():\n parent = self.fileDir.selectedItems()[-1]\n currentPath = str(parent.toolTip(0))\n\n if not os.path.isdir('%s/%s' % (currentPath, str(folderName))):\n item = QtWidgets.QTreeWidgetItem(parent)\n\n item.setText(0, str(folderName))\n item.setToolTip(0, '%s/%s' % (currentPath, str(folderName)))\n\n # connect icon\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap('%s/folder.png' % (self.iconsDir)), QtGui.QIcon.Normal,\n QtGui.QIcon.Off)\n item.setIcon(0, icon)\n\n # be careful about shiboken2, you can use 'is' and 'is not' instead of using operator '==' and '!='\n if parent is not self.fileDir:\n self.fileDir.setItemExpanded(parent, True)\n self.fileDir.setItemSelected(parent, False)\n\n self.fileDir.setItemSelected(item, True)\n\n os.makedirs('%s/%s' % (currentPath, str(folderName)))",
"def create_folder_structure(self):\n # create the parent folder holding the project\n self.proj_folder.mkdir(exist_ok=False)\n # once we have setup the parent folder we can create the subfolder\n # structure\n create_subfolder = [self.aiida_subfolder, self.env_subfolder]\n if self.has_source():\n create_subfolder += [self.src_subfolder]\n for subfolder in create_subfolder:\n project_subfolder = self.proj_folder / subfolder\n project_subfolder.mkdir(exist_ok=False)",
"def create_folder(self):\n Path(self.root_name).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/\").mkdir(parents=True, exist_ok=True)\n Path(self.image_folder_path).mkdir(parents=True, exist_ok=True)\n Path(self.annot_path).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/ImageSets/\").mkdir(parents=True, exist_ok=True)\n Path(self.txt_path).mkdir(parents=True, exist_ok=True)",
"def makeFolder(pathToLocation, newFolderName):\n newFolder = os.path.join(pathToLocation, newFolderName)\n if not os.path.exists(newFolder):\n os.mkdir(newFolder)\n return newFolder",
"def postFolder(self, parent, name, check=True):\n\n folder = vsdModels.Folder()\n if parent is None:\n parent = self.getFolderByName('MyProjects', mode='exact')\n folder.parentFolder = vsdModels.APIBase(selfUrl=parent.selfUrl)\n folder.name = name\n\n exists = False\n\n if check:\n if parent.childFolders:\n for child in parent.childFolders:\n fold = self.getFolder(child.selfUrl)\n if fold is not None:\n if fold.name == name:\n print('folder {0} already exists, id: {1}'.format(name, fold.id))\n exists = True\n return fold\n else:\n print('unexpected error, folder exists but cannot be retrieved')\n exists = True\n\n # print(self.postRequest('folders', data = data))\n if not exists:\n data = folder.to_struct()\n # for name, field in folder:\n # if name not in data:\n # data[name] = None\n # print(data)\n res = self.postRequest('folders', data=data)\n folder.populate(**res)\n print('folder {0} created, has id {1}'.format(name, folder.id))\n assert folder.name == name\n return folder",
"def create_sub_folder(self, name):\n params = {\n \"name\": name\n }\n pf = self.client._perform_json(\"POST\", \"/project-folders/%s/children\" % self.project_folder_id, params=params)\n return DSSProjectFolder(self.client, pf[\"id\"])",
"def new_folder(name, bucket):\n if not name.endswith(\"/\"):\n name += \"/\"\n # if the folder already exists, just return it.\n folder = bucket.get_key(name)\n if folder:\n return folder\n\n progress = \"\"\n for part in name.split(\"/\"):\n progress += part + \"/\"\n if not bucket.get_key(progress):\n key = bucket.new_key(progress)\n key.set_contents_from_string('')\n key.set_canned_acl('private')\n\n return bucket.get_key(name)",
"def create_folder(self, c_path):\n raise NotImplementedError",
"def create_directory(parent_path, new_folder):\n newdir = os.path.join(parent_path, new_folder)\n if os.path.isdir(newdir):\n return False\n else:\n os.mkdir(newdir)\n return True"
] |
[
"0.6715626",
"0.64188874",
"0.632356",
"0.6228173",
"0.619247",
"0.6182481",
"0.6165542",
"0.6152539",
"0.6138185",
"0.6093231",
"0.60769814",
"0.60543686",
"0.60450804",
"0.60377955",
"0.60241455",
"0.60187376",
"0.59500784",
"0.5923687",
"0.58826816",
"0.5864929",
"0.58612967",
"0.5851",
"0.5839383",
"0.5825178",
"0.5820582",
"0.58184",
"0.58175427",
"0.5797069",
"0.5791134",
"0.5783412"
] |
0.6823123
|
0
|
Updates the metadata of the specified folder. If you do not specify index it will update the parent directory. This method will return a Folder object of the updated folder.
|
def updateFolder(self, title=None, description=None, index=None):
    url = self.metaData.getLink("edit")
    assert url is not None
    assert isinstance(index, int) or index is None

    header = self._baseHeader.copy()
    header['Content-Type'] = "application/vnd.huddle.data+json"
    try:
        if index is None:
            jsonData = self.metaData.jsonObj
        else:
            jsonData = self.getFolders()[index].metaData().jsonObj

        if title is not None: jsonData['title'] = title
        if description is not None: jsonData['description'] = description

        response = self._adapter.putRequest(url, header, json.dumps(jsonData))
        newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])
        return Folder(self._client, newLink)
    except IndexError:
        print("the index: " + str(index) + " does not exist in the list of folder numbers we have")
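
A minimal usage sketch (added for illustration): it assumes `workspace` is a Folder built as in the earlier sketch; only the keyword arguments shown in the method above are used.

# Hedged usage sketch -- `workspace` is assumed to be a Folder built as Folder(client, url).
renamed = workspace.updateFolder(title="Archive")                     # update this folder's title
child = workspace.updateFolder(description="Old material", index=0)   # update the first child folder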
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def moveTo(self, folder):\n parent = self.metaData.getLinkIndex('parent')\n moveUri = self.metaData.getLink(\"move\")\n\n assert parent != -1\n assert moveUri is not None\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert hasattr(folder, \"selfLink\")\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n jsonData = self.metaData.jsonObj\n jsonData['links'][parent] = {'href' : folder.selfLink, 'rel' : 'parent'}\n response = self._adapter.putRequest(moveUri, header, json.dumps(jsonData))\n\n return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))",
"def moveTo(self, newFolder):\n moveURI = self.metaData.getLink(\"move\")\n parent = self.metaData.getLinkIndex('parent')\n\n assert parent != -1\n assert moveURI is not None\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your newFolder does not have a self link\")\n\n self.metaData.jsonObj['links'][parent] = {'href' : newFolder.selfLink, 'rel' : 'parent'}\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n response = self._adapter.putRequest(moveURI,header, json.dumps(self.metaData.jsonObj))\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n return Folder(self._client, newLink)",
"def restoreFolder(self, index):\n assert isinstance(index, int)\n\n try:\n url = self.metaData.graveyard[index].selfLink + \"/restore\"\n response = self._adapter.putRequest(url, self._baseHeader, \"{}\")\n\n self.metaData.graveyard.pop(index)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + str(index) + \" does not exist in the graveyard\")",
"def put(self, *args, **kwargs):\n return super(APIFolderView, self).put(*args, **kwargs)",
"def deleteFolder(self, index=None):\n assert isinstance(index, int) or index is None\n\n try:\n if index is None:\n url = self.metaData.getLink(\"delete\")\n else:\n url = self.getFolders()[index].metaData().getLink(\"delete\")\n\n assert url is not None\n\n response = self._adapter.deleteRequest(url, self._baseHeader)\n\n self.metaData.graveyard.append(self.metaData)\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n\n return Folder(self._client, newLink)\n\n except IndexError:\n print(\"the index: \" + str(index) + \" does not exist in the list of folder numbers we have\")",
"def fusion_api_edit_directory(self, body, uri, api=None, headers=None):\n return self.logindomain.update(body, uri, api, headers)",
"def directoryModifiedHandler(ob, event):\n query = dict(object_provides=IEntry.__identifier__)\n for l in ob.restrictedTraverse('@@folderListing')(**query):\n l.getObject().reindexObject(idxs=[\"pdir_keywords\"])",
"def get_folder(self):\n name = \"%s_%s\" % (self.PREFIX, self.FOLDER_NAME)\n folders = self.mw.get_folders()\n for fldr in folders:\n if fldr[\"name\"] == name:\n self.folder_id = fldr[\"folder_id\"]\n return\n self.folder_id = self.mw.create_folder(name)",
"def postFolder(self, parent, name, check=True):\n\n folder = vsdModels.Folder()\n if parent is None:\n parent = self.getFolderByName('MyProjects', mode='exact')\n folder.parentFolder = vsdModels.APIBase(selfUrl=parent.selfUrl)\n folder.name = name\n\n exists = False\n\n if check:\n if parent.childFolders:\n for child in parent.childFolders:\n fold = self.getFolder(child.selfUrl)\n if fold is not None:\n if fold.name == name:\n print('folder {0} already exists, id: {1}'.format(name, fold.id))\n exists = True\n return fold\n else:\n print('unexpected error, folder exists but cannot be retrieved')\n exists = True\n\n # print(self.postRequest('folders', data = data))\n if not exists:\n data = folder.to_struct()\n # for name, field in folder:\n # if name not in data:\n # data[name] = None\n # print(data)\n res = self.postRequest('folders', data=data)\n folder.populate(**res)\n print('folder {0} created, has id {1}'.format(name, folder.id))\n assert folder.name == name\n return folder",
"def update(self):\n brains = self.query\n items_with_bodytext = ['Document', 'News Item']\n folderish_items = ['Folder', 'nva.flexfolder.flexfolder']\n counter = 1\n objectlist = []\n for i in brains:\n entry = {}\n if i.portal_type in items_with_bodytext:\n obj = i.getObject()\n entry['title'] = obj.Title()\n entry['desc'] = obj.Description()\n entry['text'] = obj.getText()\n entry['marker'] = 'collapse-%s' % counter\n if i.portal_type in folderish_items:\n info = self.createHtmlSnippet(i.getObject())\n if not info:\n info = u'<p>Für weitere Informationen klicken Sie bitte <a class=\"internal-link\" href=\"%s\">hier.</a></p>' %i.getURL() \n entry['title'] = i.Title\n entry['desc'] = i.Description\n entry['text'] = info\n entry['marker'] = 'collapse-%s' % counter\n else:\n info = u'<p>Für weitere Informationen klicken Sie bitte <a class=\"internal-link\" href=\"%s\">hier.</a></p>' %i.getURL() \n entry['title'] = i.Title\n entry['desc'] = i.Description\n entry['text'] = info\n entry['marker'] = 'collapse-%s' % counter\n objectlist.append(entry)\n counter += 1\n self.objectlist = objectlist",
"def patch(self, *args, **kwargs):\n return super(APIFolderView, self).patch(*args, **kwargs)",
"def test_directory_meta(self):\n with TemporaryDirectory() as td:\n store = tb.HDFFile(td + '/test', 'w', type='directory')\n store['AAPL'] = df\n store.handle.meta('testtest', 123)\n store.table.meta('testtest', 456)\n store.close()\n\n # reload\n store = tb.HDFFile(td + '/test')\n assert store.handle.meta('testtest') == 123\n assert store.table.meta('testtest') == 456",
"def updateFilePath(self, index):\n with Tracer(traceLogger):\n oldLocationSetting = self.topLevelOperator.Dataset[index].value.location\n\n # Get the directory by inspecting the original operator path\n oldTotalPath = self.topLevelOperator.Dataset[index].value.filePath.replace('\\\\', '/')\n # Split into directory, filename, extension, and internal path\n lastDotIndex = oldTotalPath.rfind('.')\n extensionAndInternal = oldTotalPath[lastDotIndex:]\n extension = extensionAndInternal.split('/')[0]\n oldFilePath = oldTotalPath[:lastDotIndex] + extension\n\n fileNameText = str(self.fileInfoTableWidget.item(index, Column.Name).text())\n\n internalPathCombo = self.fileInfoTableWidget.cellWidget(index, Column.InternalID)\n #internalPath = str(self.fileInfoTableWidget.item(index, Column.InternalID).text())\n internalPath = str(internalPathCombo.currentText())\n\n directory = os.path.split(oldFilePath)[0]\n newFileNamePath = fileNameText\n if directory != '':\n newFileNamePath = directory + '/' + fileNameText\n\n newTotalPath = newFileNamePath\n if internalPath != '':\n if internalPath[0] != '/':\n newTotalPath += '/'\n newTotalPath += internalPath\n\n cwd = self.topLevelOperator.WorkingDirectory.value\n absTotalPath, relTotalPath = getPathVariants( newTotalPath, cwd )\n absTotalPath = absTotalPath.replace('\\\\','/')\n relTotalPath = relTotalPath.replace('\\\\','/')\n\n # Check the location setting\n locationCombo = self.fileInfoTableWidget.cellWidget(index, Column.Location)\n comboIndex = locationCombo.currentIndex()\n newLocationSelection = locationCombo.itemData(comboIndex).toInt()[0] # In PyQt, toInt() returns a tuple\n\n if newLocationSelection == LocationOptions.Project:\n newLocationSetting = DatasetInfo.Location.ProjectInternal\n elif newLocationSelection == LocationOptions.AbsolutePath:\n newLocationSetting = DatasetInfo.Location.FileSystem\n newTotalPath = absTotalPath\n elif newLocationSelection == LocationOptions.RelativePath:\n newLocationSetting = DatasetInfo.Location.FileSystem\n newTotalPath = relTotalPath\n\n if newTotalPath != oldTotalPath or newLocationSetting != oldLocationSetting:\n # Be sure to copy so the slot notices the change when we setValue()\n datasetInfo = copy.copy(self.topLevelOperator.Dataset[index].value)\n datasetInfo.filePath = newTotalPath\n datasetInfo.location = newLocationSetting\n\n # TODO: First check to make sure this file exists!\n self.topLevelOperator.Dataset[index].setValue( datasetInfo )\n\n # Update the storage option combo to show the new path\n self.updateStorageOptionComboBox(index, newFileNamePath)",
"def updateIndex(self):\n for root, dirs, files in os.walk(self.serverdir):\n for d in dirs:\n if not d.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, d), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,d)), os.path.getmtime(os.path.join(root, d)))\n for f in files:\n if not f.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, f), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,f)), os.path.getmtime(os.path.join(root, f)))",
"def syncfolder():",
"def update_metadata(self, metadata):\n return self.parent.update_metadata_for_node(self, metadata)",
"def prepopulate_memo(self):\n existing = self.gi.libraries.show_library(self.library_id, contents=True)\n\n uploading_to = [x for x in existing if x['id'] == self.folder_id]\n if len(uploading_to) == 0:\n raise Exception(\"Unknown folder [%s] in library [%s]\" %\n (self.folder_id, self.library_id))\n else:\n uploading_to = uploading_to[0]\n\n for x in existing:\n # We only care if it's a subdirectory of where we're uploading to\n if not x['name'].startswith(uploading_to['name']):\n continue\n\n name_part = x['name'].split(uploading_to['name'], 1)[-1]\n if name_part.startswith('/'):\n name_part = name_part[1:]\n self.memo_path[name_part] = x['id']",
"def test_migrate_folder_to_document(self):\n folder = self.portal['folder-1']\n folder.invokeFactory('Document',\n 'my-page-test',\n title=\"My page test\",\n text='spam spam')\n output = migrateContents(self.portal, \"Folder\", \"Document\")\n self.assertEqual(output.get('counter', 0), 2)\n self.assertNotEqual(output.get('error', []), [])\n self.assertEqual(output['error'][0]['msg'], 'Failed migration for object /plone/folder-1 (Folder -> Document)')\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Document\").actual_result_count == 12)\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Folder\").actual_result_count == 1)\n self.assertEqual(self.portal['folder-2'].portal_type, \"Document\")\n self.assertEqual(self.portal['folder-1'].portal_type, \"Folder\")",
"def take_action(self, parsed_args):\n folder_content = dict()\n parent = utils.key_len(parsed_args.parent)\n folder_content = self.app.metagen.directory_list(parent)\n content_type_map = {\n '1': 'Folder',\n '2': 'Sample',\n '3': 'MRSA Sample',\n '4': 'Listeria Sample'\n }\n header = ['type', 'name', 'id', 'status', 'size', 'created']\n if folder_content:\n if not folder_content['items']:\n self.logger.info('\\nFolder {} (id: {}) is empty'.format(folder_content['name'], parent))\n for_output = [[' ', ' ', ' ', ' ', ' ', ' ']]\n return (header, for_output)\n else:\n raise Exception(\"Exception uccured.\")\n\n def _set_date(inp):\n return dt.fromtimestamp((inp[1]/1000)).strftime('%Y-%m-%d %H:%M:%S')\n\n def _del_none(inp):\n out = [inp[1]]\n if not out[0]:\n out = [0 if v[1] == 'int' else '-' for k, v in field_maps.items() if inp[0] == v[0]]\n return out[0]\n\n def _set_dim(inp):\n out = inp if inp else 0\n out = utils.convert_size(out)\n return out if out is not '0B' else '-'\n\n def _set_type(inp):\n ctype = content_type_map[str(inp[1])] if content_type_map.get(str(inp[1])) else inp[1]\n return ctype\n\n def _convert(inp):\n for item in inp.items():\n for k, v in field_maps.items():\n if item[0] == v[0]:\n inp[item[0]] = field_maps[k][2](item)\n break\n return inp\n\n field_maps = {\n 'type': ['content_type', 'str', _set_type],\n 'id': ['id', 'str', _del_none],\n 'name': ['name', 'str', _del_none],\n 'status': ['status', 'str', _del_none],\n 'size': ['size', 'int', _del_none],\n 'created': ['created', 'int', _set_date]\n }\n\n \"\"\"we need just items for output\"\"\"\n items_data = [_convert(item) for item in folder_content['items']]\n\n \"\"\"order regarding order parameters\"\"\"\n if parsed_args.order:\n if parsed_args.order.lower() in header:\n items_data = sorted(items_data,\n key=itemgetter(field_maps[parsed_args.order.lower()][0]),\n reverse=(not parsed_args.up)\n )\n for_output = [[item[field_maps[f][0]] if f is not 'size'\n else _set_dim(item[field_maps[f][0]])\n for f in header]\n for item in items_data\n ]\n self.logger.info('\\nContent of the Folder {} (id: {})'.format(folder_content['name'], parent))\n return (header, for_output)",
"def _update_index(self):\n start_time = datetime.datetime.now()\n sys.stdout.write(\"Updating index. Depending on the size of your music \"\n \"collection this may take some time, so please be patient. \"\n \"(Update started at %s)\\n\" % start_time)\n new_index_file = \"%s/music_index_%s.txt\" % (self.index_dir,\n start_time.strftime(\"%Y%m%d_%H%M%S\"))\n files = (os.path.join(tup[0], f) for d in self.music_dirs \n for tup in os.walk(d) \n for f in tup[2] )\n \n with open(new_index_file, \"w\") as fh:\n for filename in files:\n fh.write(\"%s\\n\" % filename)\n \n end_time = datetime.datetime.now()\n sys.stdout.write(\"Music index updated (created index file '%s')\\n\" \n \"Update duration:%s\\n\" % \n (new_index_file, end_time - start_time))",
"def reindex(self):\n self.index.drop_db()\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n blob_uuid = name\n self.index.update_from_metadata(self.load_blob_metadata(blob_uuid))",
"def mv(self, item, destination, execute=False):\n file = self.drive.files().update(\n fileId=item[\"id\"],\n addParents=destination[\"id\"],\n removeParents=\",\".join(item[\"parents\"]),\n fields=\"id, name, parents\",\n supportsAllDrives=self.shared_drive[0],\n )\n if execute:\n file = file.execute()\n return file",
"def update(self, index: NoteIndex, force_checksums: bool = False):\n raw_witnessed: List[FileInfo] = self.provider.get_all(index.path, self._markdown_filter)\n witnessed: Dict[str, FileInfo] = {w.full_path: w for w in raw_witnessed}\n\n if force_checksums:\n for w in witnessed.values():\n w.check_sum = self.provider.checksum(w.full_path)\n\n # Determine items in the index that are no longer present in the witnessed information\n to_remove = []\n for k in index.files.keys():\n if k not in witnessed:\n to_remove.append(k)\n\n for k in to_remove:\n del index.files[k]\n del index.notes[k]\n if k in index.exceptions:\n del index.exceptions[k]\n\n # Add new witnessed items to the index, loading the notes from the file system\n for w in witnessed.values():\n key = w.full_path\n # Really relying on lazy evaluation here...\n if key not in index.files or w.has_changed_from(index.files[key], force_checksums):\n if key in index.exceptions:\n del index.exceptions[key]\n\n index.files[key] = w\n try:\n index.files[key].check_sum = self.provider.checksum(key)\n index.notes[key] = self.note_builder.load_info(key)\n except Exception as e:\n index.exceptions[key] = IndexOperationResult(w, e)",
"def update_item_md(dataobj_id, new_content):\n\n from archivy.models import DataObj\n\n filename = get_by_id(dataobj_id)\n dataobj = frontmatter.load(filename)\n dataobj[\"modified_at\"] = datetime.now().strftime(\"%x %H:%M\")\n dataobj.content = new_content\n md = frontmatter.dumps(dataobj)\n with open(filename, \"w\", encoding=\"utf-8\") as f:\n f.write(md)\n\n converted_dataobj = DataObj.from_md(md)\n converted_dataobj.fullpath = str(\n filename.relative_to(current_app.config[\"USER_DIR\"])\n )\n converted_dataobj.index()\n current_app.config[\"HOOKS\"].on_edit(converted_dataobj)",
"def copyTo(self, newFolder):\n copyUri = self.metaData.getLink(\"copy\")\n\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your folder object is missing a selfLink\")\n assert copyUri is not None\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n body = '{ \"targetFolder\":{ \"link\":{ \"rel\": \"self\", \"href\": \"' + newFolder.selfLink + '\" } } }'\n response = self._adapter.postRequest(copyUri, header, body)\n\n return Folder(self._client, response['Headers']['location'])",
"def test_directory_meta(self):\n with TemporaryDirectory() as td:\n store = tb.OBTFile(td + '/test', 'w', 'symbol', type='directory')\n store['AAPL'] = df\n store.handle.meta('testtest', 123)\n store.obt.meta('testtest', 456)\n store.close()\n\n # reload\n store = tb.OBTFile(td + '/test')\n assert store.handle.meta('testtest') == 123\n assert store.obt.meta('testtest') == 456",
"def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))",
"def editParentIndex(src, des, tipe, Xrc):\n old_title = src.split(\"/\")[-1].replace(\".html\", \"\")\n new_title = des.split(\"/\")[-1].replace(\".html\", \"\")\n index = des.replace(os.path.basename(des), \"index.html\")\n with open(index, 'r') as f:\n soup = BeautifulSoup(f, \"html.parser\")\n f.close()\n tag = soup.select(\"#\"+old_title)[0]\n old_tstamp = tag.td.string.lstrip().rstrip()\n new_tstamp = datetime.datetime.fromtimestamp(time.time()).strftime(\"%H:%M.%S|$MONTH$ %d %Y by Xbooks[bot]\").replace(\"$MONTH$\", chooseMonth(datetime.datetime.fromtimestamp(time.time()).strftime(\"%m\")))\n old_src = tag[\"onclick\"].split(\";\")[0].split('(')[1].split(')')[0]\n if tipe == 'Xbook':\n new_src = '\\'\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + \"/\".join(des.split(\"/\")[2:]) + \"/index.html\\'\"\n if tipe == 'Xpage':\n new_src = '\\'\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + \"/\".join(des.split(\"/\")[2:]) + \"\\'\"\n tag.td.string = tag.td.string.replace(old_tstamp, new_tstamp)\n tag.th.string = tag.th.string.replace(old_title, new_title)\n tag[\"onclick\"] = tag[\"onclick\"].replace(old_src, new_src)\n tag[\"id\"] = new_title\n with open(index, 'w') as f:\n f.write(soup.prettify(formatter=\"html\"))\n f.close()\n ccc.success(\"updating \" + des + \" from parent index\")",
"def test_migrate_document_to_folder(self):\n output = migrateContents(self.portal, \"Document\", \"Folder\")\n self.assertEqual(output.get('counter', 0), 10)\n self.assertEqual(output.get('error', []), [])\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Document\").actual_result_count == 0)\n self.assertTrue(self.portal.portal_catalog(portal_type=\"Folder\").actual_result_count == 12)\n folder_titles = ['Folder 1', 'Folder 2', 'My page 0', 'My page 1', 'My page 2', 'My page 3', 'My page 4', 'My page 5', 'My page 6', 'My page 7', 'My page 8', 'My page 9']\n self.assertEqual([x.Title for x in self.portal.portal_catalog(portal_type=\"Folder\", sort_on=\"sortable_title\")], folder_titles)",
"def identify_folder(self, folder):"
] |
[
"0.6175569",
"0.5814018",
"0.55603045",
"0.5426907",
"0.54050916",
"0.53955954",
"0.53315574",
"0.5192204",
"0.51585466",
"0.50854665",
"0.50382555",
"0.50282925",
"0.49623883",
"0.49554533",
"0.494669",
"0.49134162",
"0.48834294",
"0.48825207",
"0.4874468",
"0.48549354",
"0.4822626",
"0.4813859",
"0.47895598",
"0.4764743",
"0.47332639",
"0.47115543",
"0.47114393",
"0.47026098",
"0.46867487",
"0.46863332"
] |
0.7469257
|
0
|
Deletes the parent folder; if an index is specified, the sub folder at that index is deleted instead. This method returns a folder object for the deleted folder's parent, e.g. with foo > bar, deleting bar returns a folder for foo. Deleting a folder also adds it to this object's graveyard, which is used to restore folders you delete; once a folder is in the graveyard, restore is the only method you should call on it.
|
def deleteFolder(self, index=None):
assert isinstance(index, int) or index is None
try:
if index is None:
url = self.metaData.getLink("delete")
else:
url = self.getFolders()[index].metaData().getLink("delete")
assert url is not None
response = self._adapter.deleteRequest(url, self._baseHeader)
self.metaData.graveyard.append(self.metaData)
newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])
return Folder(self._client, newLink)
except IndexError:
print("the index: " + str(index) + " does not exist in the list of folder numbers we have")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def restoreFolder(self, index):\n assert isinstance(index, int)\n\n try:\n url = self.metaData.graveyard[index].selfLink + \"/restore\"\n response = self._adapter.putRequest(url, self._baseHeader, \"{}\")\n\n self.metaData.graveyard.pop(index)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + str(index) + \" does not exist in the graveyard\")",
"def check_delete_parent_folder(cls, parent):\n folder = os.path.join(\n settings.MEDIA_ROOT, cls.parent_base_upload_to(parent))\n\n def rmdir(target):\n items = os.listdir(target)\n if len(items) == 0:\n os.rmdir(target)\n else:\n for item in items:\n path = os.path.join(target, item)\n if not os.path.isdir(path):\n msg = 'The folder %s contains some file' % path\n raise FolderNotEmptyException(msg)\n for item in items:\n path = os.path.join(target, item)\n rmdir(path)\n os.rmdir(target)\n\n try:\n rmdir(folder)\n except FileNotFoundError as e:\n logger.debug(e)\n except Exception as e:\n logger.warning(e)",
"def delete(self):\n documentUrl = self.metaData.getLink(\"delete\")\n assert documentUrl is not None\n\n response = self._adapter.deleteRequest(documentUrl, self._baseHeader)\n self.metaData.graveyard.append(self.metaData)\n\n return Folder(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))",
"def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)",
"def delete_folder(self, name):\n return self.DeleteFolder(name, 0)",
"def _delete_root_dir(self):\n\n staf_request = ('DELETE ENTRY \"{0}\" RECURSE '\n 'CONFIRM '.format(unix_style_path(self._sut.bespoke_root)))\n\n result = self._staf_handle.submit(self._sut.network_address, 'fs', staf_request)\n\n if result.rc not in [result.Ok, result.DoesNotExist]:\n raise CoreError(result.result)",
"def create_folder(id_parent, name):\n id_folder = incr_key_store('folders:counter')\n rpush_key_store('folders:list', {'id': id_folder, 'parent': id_parent, 'name': name})\n return id_folder",
"def postFolder(self, parent, name, check=True):\n\n folder = vsdModels.Folder()\n if parent is None:\n parent = self.getFolderByName('MyProjects', mode='exact')\n folder.parentFolder = vsdModels.APIBase(selfUrl=parent.selfUrl)\n folder.name = name\n\n exists = False\n\n if check:\n if parent.childFolders:\n for child in parent.childFolders:\n fold = self.getFolder(child.selfUrl)\n if fold is not None:\n if fold.name == name:\n print('folder {0} already exists, id: {1}'.format(name, fold.id))\n exists = True\n return fold\n else:\n print('unexpected error, folder exists but cannot be retrieved')\n exists = True\n\n # print(self.postRequest('folders', data = data))\n if not exists:\n data = folder.to_struct()\n # for name, field in folder:\n # if name not in data:\n # data[name] = None\n # print(data)\n res = self.postRequest('folders', data=data)\n folder.populate(**res)\n print('folder {0} created, has id {1}'.format(name, folder.id))\n assert folder.name == name\n return folder",
"def get_parent(self):\n parent_id = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"parentId\", None)\n if parent_id is None:\n return None\n else:\n return DSSProjectFolder(self.client, parent_id)",
"def parent_folder(self):\n return self.properties.get(\"ParentFolder\",\n Folder(self.context, ResourcePath(\"ParentFolder\", self.resource_path)))",
"def test_create_parentless_folder_from_json(self):\n my_folder = folder.Folder.from_json(None, _FOLDER_JSON)\n self.assertEqual('987', my_folder.id)\n self.assertEqual('folder', my_folder.type)\n self.assertEqual('folders/987', my_folder.name)\n self.assertEqual('My folder', my_folder.display_name)\n self.assertEqual('folder/987/', my_folder.full_name)\n self.assertEqual(folder.FolderLifecycleState.ACTIVE,\n my_folder.lifecycle_state)",
"def get_or_create_folder(self, folder_names):\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(\n name=folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print(\"folder_created #%s folder : %s -- created : %s\" % (self.folder_created,\n current_parent, created))\n return current_parent",
"def ensure_directory(explorer, parent_id, dirname):\n cache_key = (parent_id, dirname)\n if cache_key in DIR_CACHE:\n return DIR_CACHE[cache_key]\n\n for folder in explorer.list_folder(parent_id):\n if folder['name'] == dirname:\n folder_id = folder['id']\n break\n else:\n print(\"Creating folder {!r} in parent {}\".format(dirname, parent_id))\n folder_id = explorer.create_folder(dirname, parent_id)\n DIR_CACHE[cache_key] = folder_id\n return folder_id",
"def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))",
"def delete_folder(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n rmdir(path)",
"def remove_temp_folder(context):\n\n app = context.unrestrictedTraverse(\"/\")\n broken_id = \"temp_folder\"\n if broken_id in app.objectIds():\n temp_folder = app.unrestrictedTraverse(broken_id, None)\n if not isinstance(temp_folder, Broken):\n logger.info(\"%s is not broken, so we keep it.\", broken_id)\n return\n app._delObject(broken_id)\n logger.info(\"Removed broken %s from Zope root.\", broken_id)\n\n # The root Zope object has a dictionary '_mount_points.\n # >>> app._mount_points\n # {'temp_folder': MountedObject(id='temp_folder')}\n if not hasattr(app, \"_mount_points\"):\n return\n if broken_id in app._mount_points:\n del app._mount_points[broken_id]\n app._p_changed = True\n logger.info(\"Removed %s from Zope root _mount_points.\", broken_id)",
"def createFolderStructure(self, rootfolder, filepath, parents):\n\n fp = filepath.resolve()\n folders = list(fp.parts)\n folders.reverse()\n\n ##remove file from list\n if fp.is_file():\n folders.remove(folders[0])\n\n for i in range(parents, len(folders)):\n folders.remove(folders[-1])\n folders.reverse()\n\n fparent = rootfolder\n\n if fparent:\n # iterate over file path and create the directory\n for fname in folders:\n f = vsdModels.Folder(\n name=fname,\n parentFolder=vsdModels.Folder(selfUrl=fparent.selfUrl)\n )\n fparent = f.create(self)\n return fparent\n else:\n print('Root folder does not exist', rootfolder)\n return None",
"def delete(self):\n self._api.delete(f'elasticubes/localhost/{self._elasticube}/hierarchies/{self._id}')",
"def delete_folder(folder_path):\r\n if os.path.exists(folder_path):\r\n shutil.rmtree(folder_path)",
"def delete(self):\n if self.left is None or self.right is None:\n if self is self.parent.left:\n self.parent.left = self.left or self.right\n if self.parent.left is not None:\n self.parent.left.parent = self.parent\n else:\n self.parent.right = self.left or self.right\n if self.parent.right is not None:\n self.parent.right.parent = self.parent\n else:\n s = self.successor()\n self.key, s.key = s.key, self.key\n return s.delete()",
"def drop_parent_of(self, eid):\n return self \\\n .parent_of(eid) \\\n .sideEffect(__.drop()) \\\n .count()",
"def delete_folder(folder_path):\n shutil.rmtree(folder_path)",
"def folder(\n folder_name: str,\n *,\n parent_folder_id: drive_api.ResourceID,\n drive_service: Optional[discovery.Resource] = None,\n) -> drive_api.ResourceID:\n return resource(\n name=folder_name,\n mime_type=mime_types.folder,\n parent_folder_id=parent_folder_id,\n drive_service=drive_service,\n )",
"def delete(self, *args, **kwargs):\n return super(APIFolderView, self).delete(*args, **kwargs)",
"def _delete_folder(folder_path, warn=False):\n try:\n if os.path.exists(folder_path):\n # This can fail under windows,\n # but will succeed when called by atexit\n shutil.rmtree(folder_path)\n except OSError:\n if warn:\n warnings.warn(\"Could not delete temporary folder %s\" % folder_path)",
"def delete_folder(self, instance, folder, where):\n\n instance = self.get_instance(instance)\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n subprocess.check_output([\"ssh\", key, username, 'rm', '-r', self.default_path_aws + where + folder])\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n subprocess.check_output(\n [\"ssh\", \"-i\", key, username, 'rm', '-r', self.default_path_aws + where + folder])\n return \"Success to delete the folder \" + folder + \" from \" + self.default_path_aws + where\n except:\n return \"Fail to access the instance\"",
"def create_folder(v_sphere, vmw_parent_folder, new_folder_name):\n try:\n vmw_parent_folder.CreateFolder(new_folder_name)\n return\n\n except vim.fault.DuplicateName:\n raise VMWareCreateDuplicateException(f'Folder name {new_folder_name} already in use')\n\n except vim.fault.InvalidName:\n raise VMWareInvalidInputException(f'Folder name {new_folder_name} is invalid')",
"def find_by_parent_id(self, parent_id: str) -> 'StorageObjectFolderCollection':\n result = StorageObjectFolderCollection()\n for server_folder in self:\n if server_folder.parent_id == parent_id:\n result.add(server_folder)\n return result",
"def delete_folder_from_datastore(content, datacenter_name, folder):\n datacenter = get_obj(content, [vim.Datacenter], datacenter_name)\n task = vim.FileManager.DeleteDatastoreFile_Task(\n content.fileManager,\n folder,\n datacenter\n )\n wait_for_task(task)",
"def resolve_backup_target(self):\n\n response = self.http_client.get(\n self.metadata_url + 'nodes?filters=kind:FOLDER AND isRoot:true')\n parent_node_id = response.json()['data'][0]['id']\n\n for component in [x for x in self.backup_target.split('/') if x]:\n # There doesn't seem to be escaping support, so cut off filter\n # after first unsupported character\n query = re.search('^[A-Za-z0-9_-]*', component).group(0)\n if component != query:\n query = query + '*'\n\n matches = self.read_all_pages(\n self.metadata_url + 'nodes?filters=kind:FOLDER AND name:%s '\n 'AND parents:%s' % (query, parent_node_id))\n candidates = [f for f in matches if f.get('name') == component]\n\n if len(candidates) >= 2:\n log.FatalError('There are multiple folders with the same name '\n 'below one parent.\\nParentID: %s\\nFolderName: '\n '%s' % (parent_node_id, component))\n elif len(candidates) == 1:\n parent_node_id = candidates[0]['id']\n else:\n log.Debug('Folder %s does not exist yet. Creating.' % component)\n parent_node_id = self.mkdir(parent_node_id, component)\n\n log.Debug(\"Backup target folder has id: %s\" % parent_node_id)\n self.backup_target_id = parent_node_id"
] |
[
"0.6515443",
"0.59471726",
"0.569328",
"0.557232",
"0.5469304",
"0.5413422",
"0.5399414",
"0.5327829",
"0.5294971",
"0.5158216",
"0.50951225",
"0.5028694",
"0.50104535",
"0.4989633",
"0.495497",
"0.49530485",
"0.4880121",
"0.4831308",
"0.48299935",
"0.48282167",
"0.48012197",
"0.48005286",
"0.47980052",
"0.4787924",
"0.4757713",
"0.47399852",
"0.47357503",
"0.4727869",
"0.4719213",
"0.47033226"
] |
0.6842206
|
0
|
Restores a folder from this object's graveyard; all resources in the deleted folder are restored with it. If a folder has been deleted but is not in the graveyard, there is no way to restore it without knowing the full URI of the deleted folder's restore link. Once the folder has been restored it is removed from the graveyard. This method returns a folder object for the restored folder.
|
def restoreFolder(self, index):
assert isinstance(index, int)
try:
url = self.metaData.graveyard[index].selfLink + "/restore"
response = self._adapter.putRequest(url, self._baseHeader, "{}")
self.metaData.graveyard.pop(index)
return Folder(self._client, response['Headers']['location'])
except IndexError:
print("the index: " + str(index) + " does not exist in the graveyard")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _restore(self, restore_folder):\n tf.reset_default_graph()\n self.init_session()\n ckpt = tf.train.get_checkpoint_state(restore_folder)\n self.saver = tf.train.import_meta_graph('{}.meta'.format(ckpt.model_checkpoint_path))\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\n print(\"Model restored from {}\".format(restore_folder))",
"def restore(self):\n documentUrl = self.metaData.graveyard[0].selfLink + \"/restore\"\n response = self._adapter.putRequest(documentUrl, self._baseHeader, \"{}\")\n self.metaData.graveyard.pop()\n\n return Document(self._client, response['Headers']['location'])",
"def getFolder(self, resource):\n res = self.getRequest(self.parseUrl(resource, 'folders'))\n return vsdModels.Folder(**res)",
"def move_to_folder(self):\n if \"moveToFolder\" in self._prop_dict:\n return self._prop_dict[\"moveToFolder\"]\n else:\n return None",
"def reset_backup_folder(self):\n pass",
"def get_folder(self):\n name = \"%s_%s\" % (self.PREFIX, self.FOLDER_NAME)\n folders = self.mw.get_folders()\n for fldr in folders:\n if fldr[\"name\"] == name:\n self.folder_id = fldr[\"folder_id\"]\n return\n self.folder_id = self.mw.create_folder(name)",
"def resolve_backup_target(self):\n\n response = self.http_client.get(\n self.metadata_url + 'nodes?filters=kind:FOLDER AND isRoot:true')\n parent_node_id = response.json()['data'][0]['id']\n\n for component in [x for x in self.backup_target.split('/') if x]:\n # There doesn't seem to be escaping support, so cut off filter\n # after first unsupported character\n query = re.search('^[A-Za-z0-9_-]*', component).group(0)\n if component != query:\n query = query + '*'\n\n matches = self.read_all_pages(\n self.metadata_url + 'nodes?filters=kind:FOLDER AND name:%s '\n 'AND parents:%s' % (query, parent_node_id))\n candidates = [f for f in matches if f.get('name') == component]\n\n if len(candidates) >= 2:\n log.FatalError('There are multiple folders with the same name '\n 'below one parent.\\nParentID: %s\\nFolderName: '\n '%s' % (parent_node_id, component))\n elif len(candidates) == 1:\n parent_node_id = candidates[0]['id']\n else:\n log.Debug('Folder %s does not exist yet. Creating.' % component)\n parent_node_id = self.mkdir(parent_node_id, component)\n\n log.Debug(\"Backup target folder has id: %s\" % parent_node_id)\n self.backup_target_id = parent_node_id",
"def restore(self):\n return self._restore",
"def restore_location(self, id_, uri):\n with self._db_connection() as connection:\n was_restored = connection.restore_location(id_, uri)\n return was_restored",
"def folder(\n folder_name: str,\n *,\n parent_folder_id: drive_api.ResourceID,\n drive_service: Optional[discovery.Resource] = None,\n) -> drive_api.ResourceID:\n return resource(\n name=folder_name,\n mime_type=mime_types.folder,\n parent_folder_id=parent_folder_id,\n drive_service=drive_service,\n )",
"def _get_folder(self):\n # type: () -> str\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id, \"folderpath\"], headers=headers\n )\n\n return response.json().get(\"path\")",
"def folder(self):\n return self._folder",
"def restore_deleted_site(self, site_url):\n result = SpoOperation(self.context)\n qry = ServiceOperationQuery(self, \"RestoreDeletedSite\", [site_url], None, None, result)\n self.context.add_query(qry)\n return result",
"def restore(self, dest: str, remove_existing: bool = False):\n if os.path.isdir(dest):\n dest = os.path.join(dest, \"lightningd.sqlite3\")\n if os.path.exists(dest):\n if not remove_existing:\n raise ValueError(\n \"Destination for backup restore exists: {dest}\".format(\n dest=dest\n )\n )\n os.unlink(dest)\n\n self.db = self._db_open(dest)\n for c in tqdm(self.stream_changes(), total=self.version_count):\n if c.snapshot is not None:\n self._restore_snapshot(c.snapshot, dest)\n if c.transaction is not None:\n self._restore_transaction(c.transaction)\n self.db.commit()",
"def delete_folder(self, name):\n return self.DeleteFolder(name, 0)",
"def restore(self):\n if self._restored_model:\n return\n with self.eval_graph.graph.as_default():\n last_checkpoint = self._find_last_checkpoint()\n # TODO(rbharath): Is setting train=False right here?\n saver = tf.train.Saver()\n saver.restore(self._get_shared_session(train=False), last_checkpoint)\n self._restored_model = True",
"def Restore(self):\n\n return self._persistentHandler.Restore()",
"def get_folder_by_id(self, folder_id):\n endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + folder_id\n\n r = requests.get(endpoint, headers=self._headers)\n\n check_response(r)\n return_folder = r.json()\n return Folder._json_to_folder(self, return_folder)",
"def restore(self, obj):\n return obj",
"def overwrite_folder(folder_path, overwrite):\n if os.path.exists(folder_path):\n if overwrite:\n shutil.rmtree(folder_path)",
"def restore_backup(self):\n print \"Restoring backup for database: %s\" % self.database['NAME']\n # Fetch the latest backup if filepath not specified\n if not self.filepath:\n print \" Finding latest backup\"\n filepaths = self.storage.list_directory()\n filepaths = self.dbcommands.filter_filepaths(filepaths, self.servername)\n if not filepaths:\n raise CommandError(\"No backup files found in: %s\" % self.storage.backup_dir())\n self.filepath = filepaths[-1]\n # Restore the specified filepath backup\n print \" Restoring: %s\" % self.filepath\n backupfile = self.storage.read_file(self.filepath)\n print \" Restore tempfile created: %s\" % utils.handle_size(backupfile)\n self.dbcommands.run_restore_commands(backupfile)",
"def delete(self):\n documentUrl = self.metaData.getLink(\"delete\")\n assert documentUrl is not None\n\n response = self._adapter.deleteRequest(documentUrl, self._baseHeader)\n self.metaData.graveyard.append(self.metaData)\n\n return Folder(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))",
"def __restoreBackup(self):\n pass #FIXME!!!",
"def get(self) -> FoldersModel:\n root: FoldersModel = self._get()\n return root",
"def moveTo(self, folder):\n parent = self.metaData.getLinkIndex('parent')\n moveUri = self.metaData.getLink(\"move\")\n\n assert parent != -1\n assert moveUri is not None\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert hasattr(folder, \"selfLink\")\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n jsonData = self.metaData.jsonObj\n jsonData['links'][parent] = {'href' : folder.selfLink, 'rel' : 'parent'}\n response = self._adapter.putRequest(moveUri, header, json.dumps(jsonData))\n\n return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))",
"def subFolder(self):\r\n return self.__folder",
"def _check_folder_in_browse(\r\n self,\r\n _vm_id,\r\n _folder_to_restore,\r\n from_date,\r\n to_date,\r\n copy_precedence,\r\n media_agent):\r\n\r\n source_item = None\r\n\r\n _folder_to_restore = _folder_to_restore.replace(\":\", \"\")\r\n _restore_folder_name = _folder_to_restore.split(\"\\\\\")[-1]\r\n _folder_to_restore = _folder_to_restore.replace(\"\\\\\" + _restore_folder_name, \"\")\r\n _source_path = r'\\\\'.join([_vm_id, _folder_to_restore])\r\n\r\n _browse_files, _browse_files_dict = self.guest_files_browse(\r\n _source_path, from_date=from_date, to_date=to_date,\r\n copy_precedence=copy_precedence, media_agent=media_agent)\r\n\r\n for _path in _browse_files_dict:\r\n _browse_folder_name = _path.split(\"\\\\\")[-1]\r\n if _browse_folder_name == _restore_folder_name:\r\n source_item = r'\\\\'.join([_source_path, _restore_folder_name])\r\n break\r\n\r\n if source_item is None:\r\n raise SDKException('Subclient', '102', 'Browse failure: Folder not found in browse')\r\n\r\n return source_item",
"def find(\n self,\n folder: t.Union[str, Folder],\n create: bool = FolderDefaults.create,\n echo: bool = FolderDefaults.echo,\n ) -> Folder:\n root: FoldersModel = self.get()\n return root.find(folder=folder, create=create, refresh=False, echo=echo)",
"def deleteFolderContent(self, folder):\n\n state = False\n\n folder.populate(containedObjects=None)\n\n res = self.putRequest('folders', data=folder.to_struct())",
"def restore(self, backup_id):\n request = Request(\n method='post',\n endpoint='/_admin/backup/restore',\n data={'id': backup_id}\n )\n\n def response_handler(resp):\n if resp.is_success:\n return format_backup_restore(resp.body['result'])\n raise BackupRestoreError(resp, request)\n\n return self._execute(request, response_handler)"
] |
[
"0.5945062",
"0.5876237",
"0.5666672",
"0.54734826",
"0.5426759",
"0.5377757",
"0.53391206",
"0.53048646",
"0.52903396",
"0.52628475",
"0.5223572",
"0.51942337",
"0.5185043",
"0.5169018",
"0.5158355",
"0.51475716",
"0.5130876",
"0.50951403",
"0.50185645",
"0.50087166",
"0.5007639",
"0.49532157",
"0.49522656",
"0.49510106",
"0.49507645",
"0.49467176",
"0.4922258",
"0.49221283",
"0.4912666",
"0.4899578"
] |
0.72095025
|
0
|
Moves this folder into the folder passed as the parameter. The method then returns a folder object for the parent of the updated folder, e.g. >> moveTo(FolderB) means this folder is now inside folder B. Raises an AssertionError if the folder being moved has no move link or if its parent link cannot be found, and a TypeError if the newFolder parameter doesn't behave like a folder object.
|
def moveTo(self, newFolder):
moveURI = self.metaData.getLink("move")
parent = self.metaData.getLinkIndex('parent')
assert parent != -1
assert moveURI is not None
if not hasattr(newFolder, "metaData"): raise TypeError("Your newFolder does not have a metaData property")
if not hasattr(newFolder, "selfLink"): raise TypeError("Your newFolder does not have a self link")
self.metaData.jsonObj['links'][parent] = {'href' : newFolder.selfLink, 'rel' : 'parent'}
header = self._baseHeader.copy()
header['Content-Type'] = "application/vnd.huddle.data+json"
response = self._adapter.putRequest(moveURI,header, json.dumps(self.metaData.jsonObj))
newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])
return Folder(self._client, newLink)
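A minimal usage sketch, assuming both arguments are Folder instances from this wrapper (client construction not shown); the helper name move_into is illustrative:

def move_into(folder, destination_folder):
    # Hypothetical helper. destination_folder must expose metaData and selfLink,
    # otherwise moveTo() raises a TypeError. The return value is a Folder built from
    # the Link header of the PUT response, i.e. the parent of the updated folder
    # as described above.
    return folder.moveTo(destination_folder)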
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def moveTo(self, folder):\n parent = self.metaData.getLinkIndex('parent')\n moveUri = self.metaData.getLink(\"move\")\n\n assert parent != -1\n assert moveUri is not None\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert hasattr(folder, \"selfLink\")\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n jsonData = self.metaData.jsonObj\n jsonData['links'][parent] = {'href' : folder.selfLink, 'rel' : 'parent'}\n response = self._adapter.putRequest(moveUri, header, json.dumps(jsonData))\n\n return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))",
"def move_to_folder(self):\n if \"moveToFolder\" in self._prop_dict:\n return self._prop_dict[\"moveToFolder\"]\n else:\n return None",
"def move_to_by_path(self, new_relative_path, retain_editor_and_modified=False):\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativePath\", SPResPath(new_relative_path))\n\n def _move_folder():\n MoveCopyUtil.move_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),\n self._build_full_url(new_relative_path),\n MoveCopyOptions(\n retain_editor_and_modified_on_move=retain_editor_and_modified))\n\n self.ensure_property(\"ServerRelativePath\", _move_folder)\n return target_folder",
"def copyTo(self, newFolder):\n copyUri = self.metaData.getLink(\"copy\")\n\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your folder object is missing a selfLink\")\n assert copyUri is not None\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n body = '{ \"targetFolder\":{ \"link\":{ \"rel\": \"self\", \"href\": \"' + newFolder.selfLink + '\" } } }'\n response = self._adapter.postRequest(copyUri, header, body)\n\n return Folder(self._client, response['Headers']['location'])",
"def move_to(self, new_relative_url, retain_editor_and_modified=False):\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativeUrl\", new_relative_url)\n\n def _move_folder():\n MoveCopyUtil.move_folder(self.context, self._build_full_url(self.serverRelativeUrl),\n self._build_full_url(new_relative_url),\n MoveCopyOptions(retain_editor_and_modified_on_move=retain_editor_and_modified))\n\n self.ensure_property(\"ServerRelativeUrl\", _move_folder)\n return target_folder",
"def mv(path_file_folder, new_path):\n if not is_folder(new_path):\n raise DegooError(f\"mv: The target path is not a folder\")\n\n source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')]\n\n if source_path == new_path:\n raise DegooError(f\"mv: The target path cannot be the same as the source path\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n if isinstance(new_path, int):\n new_parent_id = new_path\n elif isinstance(new_path, str):\n new_parent_id = path_id(new_path)\n else:\n raise DegooError(f\"rm: Illegal destination folder: {new_path}\")\n\n return api.mv(file_id, new_parent_id)",
"def move_to(self, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/move\" % self.project_folder_id, params=params)",
"def _move(self, id: str, parent_id: str) -> MoveFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.move\n request_obj: MoveFolderRequestModel = endpoint.load_request(parent_id=parent_id)\n response: MoveFolderResponseModel = endpoint.perform_request(\n http=self.auth.http,\n request_obj=request_obj,\n id=id,\n )\n return response",
"def folder_voicemail_message(self, mailbox, folder, message_num, new_folder):\n method = \"moveFolderVoicemailMessage\"\n\n if not isinstance(mailbox, int):\n raise ValueError(\"ID for a specific Mailbox needs to be an int (Example: 1001)\")\n\n if not isinstance(folder, str):\n raise ValueError(\"Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)\")\n\n if not isinstance(message_num, int):\n raise ValueError(\"ID for specific Voicemail Message needs to be an int (Required if folder is passed, Example: 1)\")\n\n if not isinstance(new_folder, str):\n raise ValueError(\"Destination Folder needs to be a str (Example: 'Urgent', values from: voicemail.get_voicemail_folders)\")\n\n parameters = {\n \"mailbox\": mailbox,\n \"folder\": folder,\n \"message_num\": message_num,\n \"new_folder\": new_folder,\n }\n\n return self._voipms_client._get(method, parameters)",
"async def move_folder(\n self,\n request: Optional[Union[folders.MoveFolderRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n destination_parent: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation_async.AsyncOperation:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name, destination_parent])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = folders.MoveFolderRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n if destination_parent is not None:\n request.destination_parent = destination_parent\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.move_folder,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation_async.from_gapic(\n response,\n self._client._transport.operations_client,\n folders.Folder,\n metadata_type=folders.MoveFolderMetadata,\n )\n\n # Done; return the response.\n return response",
"def copy_to_by_path(self, new_relative_path, keep_both=False, reset_author_and_created=False):\n\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativePath\", SPResPath(new_relative_path))\n\n def _copy_folder():\n opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)\n MoveCopyUtil.copy_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),\n self._build_full_url(new_relative_path), opts)\n\n self.ensure_property(\"ServerRelativePath\", _copy_folder)\n return target_folder",
"def copy_to(self, new_relative_url, keep_both=False, reset_author_and_created=False):\n\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativeUrl\", new_relative_url)\n\n def _copy_folder():\n opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)\n MoveCopyUtil.copy_folder(self.context, self._build_full_url(self.serverRelativeUrl),\n self._build_full_url(new_relative_url), opts)\n\n self.ensure_property(\"ServerRelativeUrl\", _copy_folder)\n return target_folder",
"def __newFolder(self):\n from .BookmarkNode import BookmarkNode\n \n currentIndex = self.bookmarksTree.currentIndex()\n idx = QModelIndex(currentIndex)\n sourceIndex = self.__proxyModel.mapToSource(idx)\n sourceNode = self.__bookmarksModel.node(sourceIndex)\n row = -1 # append new folder as the last item per default\n \n if (\n sourceNode is not None and\n sourceNode.type() != BookmarkNode.Folder\n ):\n # If the selected item is not a folder, add a new folder to the\n # parent folder, but directly below the selected item.\n idx = idx.parent()\n row = currentIndex.row() + 1\n \n if not idx.isValid():\n # Select bookmarks menu as default.\n idx = self.__proxyModel.index(1, 0)\n \n idx = self.__proxyModel.mapToSource(idx)\n parent = self.__bookmarksModel.node(idx)\n node = BookmarkNode(BookmarkNode.Folder)\n node.title = self.tr(\"New Folder\")\n self.__bookmarksManager.addBookmark(parent, node, row)",
"def create_folder(v_sphere, vmw_parent_folder, new_folder_name):\n try:\n vmw_parent_folder.CreateFolder(new_folder_name)\n return\n\n except vim.fault.DuplicateName:\n raise VMWareCreateDuplicateException(f'Folder name {new_folder_name} already in use')\n\n except vim.fault.InvalidName:\n raise VMWareInvalidInputException(f'Folder name {new_folder_name} is invalid')",
"def copyTo(self, folder):\n copyUrl = self.metaData.getLink(\"copy\")\n\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert getattr(folder, \"selfLink\")\n assert copyUrl is not None\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n body = '{ \"targetFolder\":{ \"link\":{ \"rel\": \"self\", \"href\": \"' + folder.selfLink + '\" } } }'\n\n response = self._adapter.postRequest(copyUrl, header, body)\n\n return Document(self._client, response['Headers']['location'])",
"def test_6e_move_data_btw_folders(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists) or (not GST.dir2_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dirs\")\n elif not GST.moving_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare moving data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"move_file\"] % (GST.gs_file_paths[\"file_to_move_to_folder_source_path\"], GST.gs_file_paths[\"move_to_folder_target_path\"])\n try:\n self.send_request(function, \"move_file()\")\n except Exception as e:\n raise MoveException(\"Failed to move the data between folders. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise MoveException(\"Failed to move the data between folders. \\n\" + response)",
"def req_note_list_manipulate_foldermove(self):\n\n if self.helper_action_get_request_is_wrong(\"req_note_list_manipulate_foldermove\"):\n self.error_msg_queue_list.append(\"Note manipulation not performed.\")\n return\n\n if self.helper_sessactionauth_is_wrong():\n self.error_msg_queue_list.append(\"Note manipulation not performed - wrong session?\")\n return\n\n try:\n task_id_list = self.last_request_post_data_dict[\"taskid\"]\n foldermove = self.last_request_post_data_dict[\"foldermove\"][0]\n except:\n self.error_msg_queue_list.append(\"Note manipulation not performed - cannot access required POST data.\")\n else:\n self.ui_backend.notes_foldermove(task_id_list, foldermove)",
"def mv(cur_path, new_path):\n cur_abs = navigate.get_abs_path(cur_path)\n new_abs = navigate.get_abs_path(new_path)\n cur_parent, cur_name = navigate.split_path(cur_abs)\n new_parent, new_name = navigate.split_path(new_abs)\n up_parent, up_name = navigate.split_path(new_parent)\n if not db.file_exists(cur_parent, cur_name):\n print \"Error: '\" + cur_name + \"' does not exist.\"\n elif up_parent is not None and not db.directory_exists(up_parent, up_name):\n print \"Error: '\" + new_parent + \"' is not a valid directory.\"\n elif db.file_exists(new_parent, new_name):\n print \"Error: '\" + new_name + \"' already exists at that location.\"\n else:\n cur_dbox_path = '/' + cur_name\n new_dbox_path = '/' + new_name\n access_token = db.get_access_to_file(cur_parent, cur_name)\n client = dropbox.client.DropboxClient(access_token)\n client.file_move(cur_dbox_path, new_dbox_path)\n db.move_file(cur_parent, cur_name, new_parent, new_name)",
"def MoveFileToFolder(self, file_id, folder_id):\n f = self.service.files().update(fileId=file_id, body={\"parents\":[{\"id\":folder_id}]}).execute()\n return f[\"id\"]",
"def move_project_to(self, project_key, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/projects/%s/move\" % (self.project_folder_id, project_key), params=params)",
"def move(self, new_path):\n assert isinstance(new_path, str)\n if not new_path.startswith('/'):\n new_path = '/' + new_path\n if new_path.endswith('/'):\n self.filename = new_path + self.name\n else:\n try:\n self.items.get(filepath=new_path, is_dir=True)\n self.filename = new_path + '/' + self.name\n except exceptions.NotFound:\n self.filename = new_path\n\n return self.update(system_metadata=True)",
"def _move_self_to(self, new_dir=None, new_name=None):\n if self.is_downloaded:\n if new_dir and not new_name:\n shutil.move(self._download_path, os.path.join(new_dir, self.download_filename))\n elif new_name and not new_dir:\n shutil.move(self._download_path, os.path.join(self.download_dir, new_name))\n elif new_name and new_dir:\n shutil.move(self._download_path, os.path.join(new_dir, new_name))",
"def mv(self, file_id, new_parent_id):\n func = f\"setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs)\"\n query = f\"mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) {{ {func} }}\"\n\n request = {\"operationName\": \"SetMoveFile\",\n \"variables\": {\n \"Token\": self.KEYS[\"Token\"],\n \"NewParentID\": new_parent_id,\n \"FileIDs\": [\n file_id\n ]\n },\n \"query\": query\n }\n\n header = {\"x-api-key\": self.KEYS[\"x-api-key\"]}\n\n response = requests.post(URL_API, headers=header, data=json.dumps(request))\n\n if response.ok:\n rd = json.loads(response.text)\n\n if \"errors\" in rd:\n messages = []\n for error in rd[\"errors\"]:\n messages.append(error[\"message\"])\n message = '\\n'.join(messages)\n raise DegooError(f\"getUserInfo failed with: {message}\")\n else:\n return rd[\"data\"]['setMoveFile']\n else:\n raise DegooError(f\"renameFile failed with: {response}\")",
"def makeFolder(pathToLocation, newFolderName):\n newFolder = os.path.join(pathToLocation, newFolderName)\n if not os.path.exists(newFolder):\n os.mkdir(newFolder)\n return newFolder",
"def move_item(dataobj_id, new_path):\n file = get_by_id(dataobj_id)\n data_dir = get_data_dir()\n out_dir = (data_dir / new_path).resolve()\n if not file:\n raise FileNotFoundError\n if (out_dir / file.parts[-1]).exists():\n raise FileExistsError\n elif is_relative_to(out_dir, data_dir) and out_dir.exists(): # check file isn't\n return shutil.move(str(file), f\"{get_data_dir()}/{new_path}/\")\n return False",
"def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)",
"def create_folder(self, req, folder_path, new_folder_name):\n\t\tdirectory_path = os.path.join(self.get_selected_root(req), folder_path)\n\t\t\n\t\t#prevent shenanigans\n\t\tnew_folder_name = new_folder_name.split('/').pop()\n\t\t\n\t\tnew_path = os.path.join(directory_path, new_folder_name)\n\t\tif(os.access(new_path, os.F_OK)):\n\t\t\tcontent = tags.Tag('Error')(number=FLD_EXISTS)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tos.mkdir(new_path)\n\t\t\t\tcontent = tags.Tag('Error')(number=SUCCESS)\n\t\t\texcept:\n\t\t\t\tcontent = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)\n\t\t\n\t\treturn content",
"def move(self, destination, **kwargs):\n assert _os.path.exists(self.__str__()) == True\n _shutil.move(self.__str__(), destination, **kwargs)",
"def postFolder(self, parent, name, check=True):\n\n folder = vsdModels.Folder()\n if parent is None:\n parent = self.getFolderByName('MyProjects', mode='exact')\n folder.parentFolder = vsdModels.APIBase(selfUrl=parent.selfUrl)\n folder.name = name\n\n exists = False\n\n if check:\n if parent.childFolders:\n for child in parent.childFolders:\n fold = self.getFolder(child.selfUrl)\n if fold is not None:\n if fold.name == name:\n print('folder {0} already exists, id: {1}'.format(name, fold.id))\n exists = True\n return fold\n else:\n print('unexpected error, folder exists but cannot be retrieved')\n exists = True\n\n # print(self.postRequest('folders', data = data))\n if not exists:\n data = folder.to_struct()\n # for name, field in folder:\n # if name not in data:\n # data[name] = None\n # print(data)\n res = self.postRequest('folders', data=data)\n folder.populate(**res)\n print('folder {0} created, has id {1}'.format(name, folder.id))\n assert folder.name == name\n return folder",
"def move_node(self, node_id, new_parent_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n self.detach_node(node_id=node_id, connection=connection)\n self.attach_node(node_id=node_id, new_parent_id=new_parent_id, connection=connection)"
] |
[
"0.7261138",
"0.6705902",
"0.6575451",
"0.6526594",
"0.65095097",
"0.64146525",
"0.6387781",
"0.6270765",
"0.6242283",
"0.6054274",
"0.58678836",
"0.5814525",
"0.5769776",
"0.5745311",
"0.55582905",
"0.5547848",
"0.54846185",
"0.5406132",
"0.5359096",
"0.5345634",
"0.5344492",
"0.5328795",
"0.5317656",
"0.5316117",
"0.530607",
"0.52739257",
"0.52567",
"0.52565545",
"0.5207401",
"0.5182266"
] |
0.7869363
|
0
|
Copies this folder into the folder passed as the parameter. The method then returns a folder object for the new copied folder, e.g. >> copyTo(FolderB) means this folder has now been copied into folder B. Raises an AssertionError if the folder being copied has no copy link and a TypeError if the newFolder parameter doesn't behave like a folder object.
|
def copyTo(self, newFolder):
copyUri = self.metaData.getLink("copy")
if not hasattr(newFolder, "metaData"): raise TypeError("Your newFolder does not have a metaData property")
if not hasattr(newFolder, "selfLink"): raise TypeError("Your folder object is missing a selfLink")
assert copyUri is not None
header = self._baseHeader.copy()
header['Content-Type'] = "application/vnd.huddle.data+json"
body = '{ "targetFolder":{ "link":{ "rel": "self", "href": "' + newFolder.selfLink + '" } } }'
response = self._adapter.postRequest(copyUri, header, body)
return Folder(self._client, response['Headers']['location'])
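A minimal usage sketch mirroring the one above, with the illustrative helper name duplicate_into; the Folder instances are assumed to come from an existing client session:

def duplicate_into(folder, destination_folder):
    # Hypothetical helper. destination_folder must expose metaData and selfLink,
    # otherwise copyTo() raises a TypeError. The returned Folder points at the new
    # copy via the Location header of the POST response.
    return folder.copyTo(destination_folder)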
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def moveTo(self, newFolder):\n moveURI = self.metaData.getLink(\"move\")\n parent = self.metaData.getLinkIndex('parent')\n\n assert parent != -1\n assert moveURI is not None\n if not hasattr(newFolder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n if not hasattr(newFolder, \"selfLink\"): raise TypeError(\"Your newFolder does not have a self link\")\n\n self.metaData.jsonObj['links'][parent] = {'href' : newFolder.selfLink, 'rel' : 'parent'}\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n response = self._adapter.putRequest(moveURI,header, json.dumps(self.metaData.jsonObj))\n\n newLink = self._client.getUrlFromHeaderLink(response['Headers']['link'])\n return Folder(self._client, newLink)",
"def copyTo(self, folder):\n copyUrl = self.metaData.getLink(\"copy\")\n\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert getattr(folder, \"selfLink\")\n assert copyUrl is not None\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n body = '{ \"targetFolder\":{ \"link\":{ \"rel\": \"self\", \"href\": \"' + folder.selfLink + '\" } } }'\n\n response = self._adapter.postRequest(copyUrl, header, body)\n\n return Document(self._client, response['Headers']['location'])",
"def copy_to(self, new_relative_url, keep_both=False, reset_author_and_created=False):\n\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativeUrl\", new_relative_url)\n\n def _copy_folder():\n opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)\n MoveCopyUtil.copy_folder(self.context, self._build_full_url(self.serverRelativeUrl),\n self._build_full_url(new_relative_url), opts)\n\n self.ensure_property(\"ServerRelativeUrl\", _copy_folder)\n return target_folder",
"def copy_to_by_path(self, new_relative_path, keep_both=False, reset_author_and_created=False):\n\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativePath\", SPResPath(new_relative_path))\n\n def _copy_folder():\n opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)\n MoveCopyUtil.copy_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),\n self._build_full_url(new_relative_path), opts)\n\n self.ensure_property(\"ServerRelativePath\", _copy_folder)\n return target_folder",
"def copy_folder(source, destination):\n\n try:\n shutil.copytree(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True",
"def copy_to_folder(self):\n if \"copyToFolder\" in self._prop_dict:\n return self._prop_dict[\"copyToFolder\"]\n else:\n return None",
"def move_to(self, new_relative_url, retain_editor_and_modified=False):\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativeUrl\", new_relative_url)\n\n def _move_folder():\n MoveCopyUtil.move_folder(self.context, self._build_full_url(self.serverRelativeUrl),\n self._build_full_url(new_relative_url),\n MoveCopyOptions(retain_editor_and_modified_on_move=retain_editor_and_modified))\n\n self.ensure_property(\"ServerRelativeUrl\", _move_folder)\n return target_folder",
"def moveTo(self, folder):\n parent = self.metaData.getLinkIndex('parent')\n moveUri = self.metaData.getLink(\"move\")\n\n assert parent != -1\n assert moveUri is not None\n if not hasattr(folder, \"metaData\"): raise TypeError(\"Your newFolder does not have a metaData property\")\n assert hasattr(folder, \"selfLink\")\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n jsonData = self.metaData.jsonObj\n jsonData['links'][parent] = {'href' : folder.selfLink, 'rel' : 'parent'}\n response = self._adapter.putRequest(moveUri, header, json.dumps(jsonData))\n\n return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))",
"def move_to_by_path(self, new_relative_path, retain_editor_and_modified=False):\n target_folder = Folder(self.context)\n target_folder.set_property(\"ServerRelativePath\", SPResPath(new_relative_path))\n\n def _move_folder():\n MoveCopyUtil.move_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),\n self._build_full_url(new_relative_path),\n MoveCopyOptions(\n retain_editor_and_modified_on_move=retain_editor_and_modified))\n\n self.ensure_property(\"ServerRelativePath\", _move_folder)\n return target_folder",
"def move_to(self, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/move\" % self.project_folder_id, params=params)",
"def folder_voicemail_message(self, mailbox, folder, message_num, new_folder):\n method = \"moveFolderVoicemailMessage\"\n\n if not isinstance(mailbox, int):\n raise ValueError(\"ID for a specific Mailbox needs to be an int (Example: 1001)\")\n\n if not isinstance(folder, str):\n raise ValueError(\"Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)\")\n\n if not isinstance(message_num, int):\n raise ValueError(\"ID for specific Voicemail Message needs to be an int (Required if folder is passed, Example: 1)\")\n\n if not isinstance(new_folder, str):\n raise ValueError(\"Destination Folder needs to be a str (Example: 'Urgent', values from: voicemail.get_voicemail_folders)\")\n\n parameters = {\n \"mailbox\": mailbox,\n \"folder\": folder,\n \"message_num\": message_num,\n \"new_folder\": new_folder,\n }\n\n return self._voipms_client._get(method, parameters)",
"def copydir(self, destination, **kwargs):\n assert _os.path.isdir(self.__str__()) == True\n _shutil.copy(self.__str__(), destination, **kwargs)",
"def move_to_folder(self):\n if \"moveToFolder\" in self._prop_dict:\n return self._prop_dict[\"moveToFolder\"]\n else:\n return None",
"async def copy_folders_from_project(body_item: FoldersBody, user_id: UserID):",
"def test_6b_copy_data_btw_folders(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists) or (not GST.dir2_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dirs\")\n elif not GST.copying_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare copying data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"copy_file\"] % (GST.gs_file_paths[\"copy_to_folder_target_path\"], GST.gs_file_paths[\"file_to_copy_source_path\"])\n try:\n self.send_request(function, \"copy_file()\")\n except Exception as e:\n raise CopyException(\"Failed to copy the file between folders. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise CopyException(\"Failed to copy the file between folders. \\n\" + response)",
"def __newFolder(self):\n from .BookmarkNode import BookmarkNode\n \n currentIndex = self.bookmarksTree.currentIndex()\n idx = QModelIndex(currentIndex)\n sourceIndex = self.__proxyModel.mapToSource(idx)\n sourceNode = self.__bookmarksModel.node(sourceIndex)\n row = -1 # append new folder as the last item per default\n \n if (\n sourceNode is not None and\n sourceNode.type() != BookmarkNode.Folder\n ):\n # If the selected item is not a folder, add a new folder to the\n # parent folder, but directly below the selected item.\n idx = idx.parent()\n row = currentIndex.row() + 1\n \n if not idx.isValid():\n # Select bookmarks menu as default.\n idx = self.__proxyModel.index(1, 0)\n \n idx = self.__proxyModel.mapToSource(idx)\n parent = self.__bookmarksModel.node(idx)\n node = BookmarkNode(BookmarkNode.Folder)\n node.title = self.tr(\"New Folder\")\n self.__bookmarksManager.addBookmark(parent, node, row)",
"def mv(path_file_folder, new_path):\n if not is_folder(new_path):\n raise DegooError(f\"mv: The target path is not a folder\")\n\n source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')]\n\n if source_path == new_path:\n raise DegooError(f\"mv: The target path cannot be the same as the source path\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n if isinstance(new_path, int):\n new_parent_id = new_path\n elif isinstance(new_path, str):\n new_parent_id = path_id(new_path)\n else:\n raise DegooError(f\"rm: Illegal destination folder: {new_path}\")\n\n return api.mv(file_id, new_parent_id)",
"def makeFolder(pathToLocation, newFolderName):\n newFolder = os.path.join(pathToLocation, newFolderName)\n if not os.path.exists(newFolder):\n os.mkdir(newFolder)\n return newFolder",
"def copy_folder(src: str, dest: str) -> None:\n\tuux.show_info(\"Copying folder \" + src + \" => \" + dest)\n\n\tif not os.path.exists(src):\n\t\tuux.show_error(\"Unable to copy, '\" + src + \"' does not exist.\")\n\t\treturn\n\n\tmkdir(dest)\n\n\tfor fn in os.listdir(src):\n\t\tif os.path.isfile(src + fn):\n\t\t\ttry:\n\t\t\t\tcopy_file(src + fn, dest)\n\t\t\texcept IOError as ex:\n\t\t\t\tuux.show_error(\"Failed to copy file, \" + os.strerror(ex.errno))",
"def copy_one(self, src, dest):\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n if src.is_dir():\n shutil.copytree(src, dest)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)",
"def copy(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n remote = None\n else: # Something exists here.\n if not overwrite:\n raise ValueError(\"Something exists at %s\" % remote.uri)\n try:\n if self.hash == remote.hash: # Nothing to update.\n pdbox.info(\n \"%s and %s are identical\" % (self.uri, remote.uri),\n )\n return\n except AttributeError: # RemoteFolder doesn't have a hash.\n pass\n\n if not pdbox._args.get(\"dryrun\"):\n if overwrite and remote:\n # There's no way to copy and overwrite at the same time,\n # so delete the existing file first.\n remote.delete()\n\n result = execute(pdbox.dbx.files_copy_v2, self.path, dest)\n pdbox.debug(\"Metadata respones: %s\" % result.metadata)\n\n pdbox.info(\"Copied %s to %s\" % (self.uri, dbx_uri(dest)))\n if not pdbox._args.get(\"dryrun\"): # Return the newly created object.\n return get_remote(None, meta=result.metadata)",
"def copy_folder(self, instance, folder, where):\n # runable for aws\n\n instance = self.get_instance(instance)\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n subprocess.check_output([\"scp\", key, \"-r\", folder, username + \":\" + self.default_path_aws + where])\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n subprocess.check_output(\n [\"scp\", \"-i\", key, \"-r\", folder, username + \":\" + self.default_path_aws + where])\n return \"Success to copy the folder \" + folder + \" to \" + self.default_path_aws + where\n except:\n return \"Fail to access the instance\"",
"def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest",
"def create_folder(v_sphere, vmw_parent_folder, new_folder_name):\n try:\n vmw_parent_folder.CreateFolder(new_folder_name)\n return\n\n except vim.fault.DuplicateName:\n raise VMWareCreateDuplicateException(f'Folder name {new_folder_name} already in use')\n\n except vim.fault.InvalidName:\n raise VMWareInvalidInputException(f'Folder name {new_folder_name} is invalid')",
"def create_folder(self, req, folder_path, new_folder_name):\n\t\tdirectory_path = os.path.join(self.get_selected_root(req), folder_path)\n\t\t\n\t\t#prevent shenanigans\n\t\tnew_folder_name = new_folder_name.split('/').pop()\n\t\t\n\t\tnew_path = os.path.join(directory_path, new_folder_name)\n\t\tif(os.access(new_path, os.F_OK)):\n\t\t\tcontent = tags.Tag('Error')(number=FLD_EXISTS)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tos.mkdir(new_path)\n\t\t\t\tcontent = tags.Tag('Error')(number=SUCCESS)\n\t\t\texcept:\n\t\t\t\tcontent = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)\n\t\t\n\t\treturn content",
"def from_folder(cls, *args, **kwargs):\n return cls().add_folder(*args, **kwargs)",
"async def move_folder(\n self,\n request: Optional[Union[folders.MoveFolderRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n destination_parent: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> operation_async.AsyncOperation:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name, destination_parent])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = folders.MoveFolderRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n if destination_parent is not None:\n request.destination_parent = destination_parent\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.move_folder,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Wrap the response in an operation future.\n response = operation_async.from_gapic(\n response,\n self._client._transport.operations_client,\n folders.Folder,\n metadata_type=folders.MoveFolderMetadata,\n )\n\n # Done; return the response.\n return response",
"def copy(self, to_folder, page_size=1000, chunk_size=100, **copy_kwargs):\n ids = self._id_only_copy_self()\n ids.page_size = page_size\n return self.folder_collection.account.bulk_copy(\n ids=ids, to_folder=to_folder, chunk_size=chunk_size, **copy_kwargs\n )",
"def move_project_to(self, project_key, destination):\n params = {\n \"destination\": destination.project_folder_id\n }\n self.client._perform_empty(\"POST\", \"/project-folders/%s/projects/%s/move\" % (self.project_folder_id, project_key), params=params)",
"def copy(source, destination):\n if os.path.isdir(source):\n return __copytree(source, destination)\n else:\n return __copyfile2(source, destination)"
] |
[
"0.6888628",
"0.6768166",
"0.65492064",
"0.6453956",
"0.6346155",
"0.61940354",
"0.61880684",
"0.61434925",
"0.6127224",
"0.6077558",
"0.5964605",
"0.59633493",
"0.5782848",
"0.5767946",
"0.5682942",
"0.5664407",
"0.56458795",
"0.5645485",
"0.55684936",
"0.5515525",
"0.5506051",
"0.5469371",
"0.5406148",
"0.5360965",
"0.5334664",
"0.5272928",
"0.5252701",
"0.52448136",
"0.51714784",
"0.5147514"
] |
0.7699019
|
0
|
Use gradient descent to find an image that minimizes the loss-functions of the content-layers and style-layers. This should result in a mixed-image that resembles the contours of the content-image, and resembles the colours and textures of the style-image.
|
def style_transfer(content_image,
                   content_layer_ids,
                   num_iterations=120, step_size=10.0):
    # Create an instance of the VGG16-model. This is done
    # in each call of this function, because we will add
    # operations to the graph so it can grow very large
    # and run out of RAM if we keep using the same instance.
    model = vgg16.VGG16()
    # Create a TensorFlow-session.
    session = tf.InteractiveSession(graph=model.graph)
    # Print the names of the content-layers.
    print("Content layers:")
    print(model.get_layer_names(content_layer_ids))
    print()
    # Create the loss-function for the content-layers and -image.
    loss_content = create_content_loss(session=session,
                                       model=model,
                                       content_image=content_image,
                                       layer_ids=content_layer_ids)
    # This is the loss-function that we will minimize below in
    # order to generate the mixed-image. In this content-only
    # version it consists of just the content-loss, so there are
    # no style- or denoise-losses (and no adjustment weights) to
    # combine with it.
    loss_combined = loss_content
    # Use TensorFlow to get the mathematical function for the
    # gradient of the combined loss-function with regard to
    # the input image.
    gradient = tf.gradients(loss_combined, model.input)
    # List of tensors that we will run in each optimization iteration.
    run_list = [gradient]
    # The mixed-image is initialized with random noise.
    # It is the same size as the content-image.
    mixed_image = np.random.rand(*content_image.shape) + 128
    for i in range(num_iterations):
        # Create a feed-dict with the mixed-image.
        feed_dict = model.create_feed_dict(image=mixed_image)
        # Use TensorFlow to calculate the value of the gradient
        # for the current mixed-image.
        grad = session.run(run_list, feed_dict=feed_dict)
        # Reduce the dimensionality of the gradient.
        grad = np.squeeze(grad)
        # Scale the step-size according to the gradient-values.
        step_size_scaled = step_size / (np.std(grad) + 1e-8)
        # Update the image by following the gradient.
        mixed_image -= grad * step_size_scaled
        # Ensure the image has valid pixel-values between 0 and 255.
        mixed_image = np.clip(mixed_image, 0.0, 255.0)
        # Print a little progress-indicator.
        print(". ", end="")
        # Display status once every 10 iterations, and the last.
        if (i % 10 == 0) or (i == num_iterations - 1):
            print()
            print("Iteration:", i)
            # Plot the content- and mixed-images.
            plot_images(content_image=content_image,
                        mixed_image=mixed_image)
    print()
    print("Final image:")
    plot_image_big(mixed_image)
    # Close the TensorFlow session to release its resources.
    session.close()
    # Return the mixed-image.
    return mixed_image
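A minimal, hypothetical usage sketch for the function above. It assumes the rest of the tutorial code (the vgg16 module plus the create_content_loss, plot_images and plot_image_big helpers) is available in the same scope; the load_image helper, the image path and the layer id below are illustrative choices, not part of the original.
import numpy as np
import PIL.Image

def load_image(filename):
    # Load an image file as a float32 numpy array of shape (height, width, 3).
    return np.float32(PIL.Image.open(filename))

# Example inputs (both the path and the layer id are placeholders).
content_image = load_image('images/content_example.jpg')
content_layer_ids = [4]  # a mid-level VGG16 layer is a common choice for content

img = style_transfer(content_image=content_image,
                     content_layer_ids=content_layer_ids,
                     num_iterations=60, step_size=10.0)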
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def stylization(stretched_image, style_image,\r\n\t\talpha = 1.0, style_size = 512, crop_size = 0):\r\n\ttf.reset_default_graph()\r\n\r\n\tassert stretched_image.ndim == 3\r\n\t\r\n\tcp = [\"./models/relu5_1\",\r\n\t\t\t \"./models/relu4_1\",\r\n\t\t \"./models/relu3_1\",\r\n\t\t \"./models/relu2_1\",\r\n\t\t \"./models/relu1_1\"]\r\n\trelu_targets = [\"relu5_1\", \"relu4_1\", \"relu3_1\", \"relu2_1\", \"relu1_1\"]\r\n\t\t#*****************\r\n\t\t## need to modify checkpoints, relu_targets, and vgg_path\r\n\twct_model = WCT(checkpoints=cp,\r\n\t\t relu_targets=relu_targets,\r\n\t\t vgg_path='./models/vgg_normalised.t7'\r\n\r\n\t\t )\r\n\r\n\r\n\tfor style_fullpath in style_image:\r\n\t\tstyle_prefix, style_ext = os.path.splitext(style_fullpath)\r\n\t\tstyle_prefix = os.path.basename(style_prefix) # Extract filename prefix without ext\r\n\r\n\t\tstyle_img = skimage.io.imread(style_fullpath)\r\n\r\n\t\tif style_size > 0:\r\n\t\t\tstyle_img = resize_to(style_img, style_size)\r\n\t\tif crop_size > 0:\r\n\t\t\tstyle_img = center_crop(style_img, crop_size)\r\n\r\n\t\t\"\"\"\r\n\t if keep_colors:\r\n\t style_img = preserve_colors_np(style_img, content_img)\r\n\t \"\"\"\r\n\t # Run the frame through the style network\r\n\r\n\t\tstylized_rgb = wct_model.predict(stretched_image, style_img, alpha).astype(\"uint8\")\r\n\r\n\r\n\t ## the stylized_rgb size may not be equal to the original content image size\r\n\t\tstylized_rgb = image_align(stretched_image, stylized_rgb)\r\n\r\n\r\n\treturn stylized_rgb",
"def style_transfer(content_image, style_image,\n content_layer_ids, style_layer_ids,\n weight_content=1.5, weight_style=10.0,\n weight_denoise=0.3,\n num_iterations=120, step_size=10.0):\n\n # Create an instance of the VGG16-model. This is done\n # in each call of this function, because we will add\n # operations to the graph so it can grow very large\n # and run out of RAM if we keep using the same instance.\n model = vgg16.VGG16()\n\n # Create a TensorFlow-session.\n session = tf.InteractiveSession(graph=model.graph)\n\n # Print the names of the content-layers.\n print(\"Content layers:\")\n print(model.get_layer_names(content_layer_ids))\n print()\n\n # Print the names of the style-layers.\n print(\"Style layers:\")\n print(model.get_layer_names(style_layer_ids))\n print()\n\n # Create the loss-function for the content-layers and -image.\n loss_content = create_content_loss(session=session,\n model=model,\n content_image=content_image,\n layer_ids=content_layer_ids)\n\n # Create the loss-function for the style-layers and -image.\n loss_style = create_style_loss(session=session,\n model=model,\n style_image=style_image,\n layer_ids=style_layer_ids)\n\n # Create the loss-function for the denoising of the mixed-image.\n loss_denoise = create_denoise_loss(model)\n\n # Create TensorFlow variables for adjusting the values of\n # the loss-functions. This is explained below.\n adj_content = tf.Variable(1e-10, name='adj_content')\n adj_style = tf.Variable(1e-10, name='adj_style')\n adj_denoise = tf.Variable(1e-10, name='adj_denoise')\n\n # Initialize the adjustment values for the loss-functions.\n session.run([adj_content.initializer,\n adj_style.initializer,\n adj_denoise.initializer])\n\n # Create TensorFlow operations for updating the adjustment values.\n # These are basically just the reciprocal values of the\n # loss-functions, with a small value 1e-10 added to avoid the\n # possibility of division by zero.\n update_adj_content = adj_content.assign(1.0 / (loss_content + 1e-10))\n update_adj_style = adj_style.assign(1.0 / (loss_style + 1e-10))\n update_adj_denoise = adj_denoise.assign(1.0 / (loss_denoise + 1e-10))\n\n # This is the weighted loss-function that we will minimize\n # below in order to generate the mixed-image.\n # Because we multiply the loss-values with their reciprocal\n # adjustment values, we can use relative weights for the\n # loss-functions that are easier to select, as they are\n # independent of the exact choice of style- and content-layers.\n loss_combined = weight_content * adj_content * loss_content + \\\n weight_style * adj_style * loss_style + \\\n weight_denoise * adj_denoise * loss_denoise\n\n # Use TensorFlow to get the mathematical function for the\n # gradient of the combined loss-function with regard to\n # the input image.\n gradient = tf.gradients(loss_combined, model.input)\n\n # List of tensors that we will run in each optimization iteration.\n run_list = [gradient, update_adj_content, update_adj_style, \\\n update_adj_denoise]\n\n # The mixed-image is initialized with random noise.\n # It is the same size as the content-image.\n mixed_image = np.random.rand(*content_image.shape) + 128\n\n for i in range(num_iterations):\n # Create a feed-dict with the mixed-image.\n feed_dict = model.create_feed_dict(image=mixed_image)\n\n # Use TensorFlow to calculate the value of the\n # gradient, as well as updating the adjustment values.\n grad, adj_content_val, adj_style_val, adj_denoise_val \\\n = session.run(run_list, feed_dict=feed_dict)\n\n # Reduce the 
dimensionality of the gradient.\n grad = np.squeeze(grad)\n\n # Scale the step-size according to the gradient-values.\n step_size_scaled = step_size / (np.std(grad) + 1e-8)\n\n # Update the image by following the gradient.\n mixed_image -= grad * step_size_scaled\n\n # Ensure the image has valid pixel-values between 0 and 255.\n mixed_image = np.clip(mixed_image, 0.0, 255.0)\n\n # Print a little progress-indicator.\n print(\". \", end=\"\")\n\n # Display status once every 10 iterations, and the last.\n if (i % 10 == 0) or (i == num_iterations - 1):\n print()\n print(\"Iteration:\", i)\n\n # Print adjustment weights for loss-functions.\n msg = \"Weight Adj. for Content: {0:.2e}, Style: {1:.2e}, Denoise: {2:.2e}\"\n print(msg.format(adj_content_val, adj_style_val, adj_denoise_val))\n\n # Plot the content-, style- and mixed-images.\n # plot_images(content_image=content_image,\n # style_image=style_image,\n # mixed_image=mixed_image)\n\n print()\n # print(\"Final image:\")\n # plot_image_big(mixed_image)\n\n # Close the TensorFlow session to release its resources.\n session.close()\n\n # Return the mixed-image.\n return mixed_image",
"def calculate_gradients(image, style_targets, content_targets, \n style_weight, content_weight):\n\n ### START CODE HERE ###\n with tf.GradientTape() as tape:\n \n # get the style image features\n style_features = get_style_image_features(image)\n \n # get the content image features\n content_features = get_content_image_features(image)\n \n # get the style and content loss\n loss = get_style_content_loss(style_targets,style_features,content_targets,content_features,style_weight,content_weight)\n\n # calculate gradients of loss with respect to the image\n gradients = tape.gradient(loss, image)\n\n ### END CODE HERE ###\n\n return gradients",
"def run_style_transfer(cnn, normalization, content_img, style_img, input_img, mask_img, num_steps = 3000,\n style_weight = 100, content_weight = 5):\n print('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn, normalization, style_img, content_img, mask_img)\n optimizer = LBFGS([input_img.requires_grad_()], max_iter=num_steps,lr = 1)\n\n print('Optimizing..')\n run = [0]\n def closure():\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n loss.backward()\n\n if run[0] % 100 == 0:\n print(\"run {}:\".format(run))\n print('Style Loss : {} Content Loss: {}'.format(style_score.item(), content_score.item()))\n # print()\n # plt.figure(figsize = (8, 8))\n #imshow(input_img.clone())\n run[0] += 1\n\n return style_score + content_score\n\n optimizer.step(closure)\n\n # a last correction...\n input_img.data.clamp_(0, 1)\n\n return input_img",
"def compute_loss(model, loss_weights, init_img, gram_style_features, content_features):\n style_weight, content_weight = loss_weights\n\n # Feed our init image through our model. This will give us the content and\n # style representations at our desired layers. Since we're using eager\n # our model is callable just like any other function!\n model_outputs = model(init_img)\n\n style_output_features = model_outputs[:num_style_layers]\n content_output_features = model_outputs[num_style_layers:]\n\n style_loss = 0\n content_loss = 0\n\n # Accumulate style losses from all layers\n # Here, we equally weight each contribution of each loss layer\n weight_per_style_layer = 1.0 / float(num_style_layers)\n for target_style, comb_style in zip(gram_style_features, style_output_features):\n style_loss += weight_per_style_layer * get_style_loss(comb_style[0], target_style)\n\n # Accumulate content losses from all layers \n weight_per_content_layer = 1.0 / float(num_content_layers)\n for target_content, comb_content in zip(content_features, content_output_features):\n content_loss += weight_per_content_layer* get_content_loss(comb_content[0], target_content)\n\n style_loss *= style_weight\n content_loss *= content_weight\n\n # Get total loss\n loss = style_loss + content_loss\n return loss, style_loss, content_loss",
"def stylize(content_image, mask, style_image, content_image_name, content_size = 0,\r\n\t\t\tsave_output = False, visualize = True, alpha = 1.0, style_size = 512,\r\n\t\t\tcrop_size = 0):\r\n\r\n\tassert content_image.ndim == 3 and mask.ndim <= 3\r\n\r\n\tcp = [\"./models/relu5_1\",\r\n\t\t \"./models/relu4_1\",\r\n\t \"./models/relu3_1\",\r\n\t \"./models/relu2_1\",\r\n\t \"./models/relu1_1\"]\r\n\trelu_targets = [\"relu5_1\", \"relu4_1\", \"relu3_1\", \"relu2_1\", \"relu1_1\"]\r\n\t#*****************\r\n\t## need to modify checkpoints, relu_targets, and vgg_path\r\n\twct_model = WCT(checkpoints=cp,\r\n\t relu_targets=relu_targets,\r\n\t vgg_path='./models/vgg_normalised.t7'\r\n\r\n\t )\r\n\r\n\r\n\r\n\t\"\"\"\r\n\tif content_size > 0:\r\n\t content_img = resize_to(content_imgage, content_size)\r\n\t\"\"\"\r\n\r\n\tobject_image = extract_object(content_image, mask)\r\n\r\n\tfor style_fullpath in style_image:\r\n\t\tstyle_prefix, style_ext = os.path.splitext(style_fullpath)\r\n\t\tstyle_prefix = os.path.basename(style_prefix) # Extract filename prefix without ext\r\n\r\n\t\tstyle_img = skimage.io.imread(style_fullpath)\r\n\r\n\t\tif style_size > 0:\r\n\t\t\tstyle_img = resize_to(style_img, style_size)\r\n\t\tif crop_size > 0:\r\n\t\t\tstyle_img = center_crop(style_img, crop_size)\r\n\r\n\t\t\"\"\"\r\n\t\tif keep_colors:\r\n\t\t\tstyle_img = preserve_colors_np(style_img, content_img)\r\n\t\t\"\"\"\r\n\t\t# Run the frame through the style network\r\n\r\n\t\tstylized_rgb = wct_model.predict(object_image, style_img, alpha).astype(\"uint8\")\r\n\r\n\r\n\t\t\"\"\"\r\n\t\tif passes > 1:\r\n\t\t for _ in range(passes-1):\r\n\t\t stylized_rgb = wct_model.predict(stylized_rgb, style_img, alpha, swap5, ss_alpha, adain)\r\n\r\n\t\t# Stitch the style + stylized output together, but only if there's one style image\r\n\t\tif concat:\r\n\t\t # Resize style img to same height as frame\r\n\t\t style_img_resized = scipy.misc.imresize(style_img, (stylized_rgb.shape[0], stylized_rgb.shape[0]))\r\n\t\t # margin = np.ones((style_img_resized.shape[0], 10, 3)) * 255\r\n\t\t stylized_rgb = np.hstack([style_img_resized, stylized_rgb])\r\n\t\t\"\"\"\r\n\r\n\t\tif mask.ndim == 2:\r\n\t\t\t## increase the dimension of the mask to 3\r\n\t\t\tmask = np.stack([mask, mask, mask], axis = -1)\r\n\r\n\t\t## the stylized_rgb size may not be equal to the original content image size\r\n\t\tstylized_rgb = image_align(content_image, stylized_rgb)\r\n\r\n\r\n\r\n\t\tstylized_rgb = np.where(mask == 1, stylized_rgb, 0)\r\n\r\n\r\n\t\thollowed_content_image = np.where(mask == 1, 0, content_image)\r\n\t\t## put the stylized object back to the original content image\r\n\t\tstylized_rgb += hollowed_content_image\r\n\r\n\t\t# the final output will be saved a folder called stylized_output under current working directory\r\n\t\tif save_output:\r\n\t\t\tif not os.path.isdir(\"stylized_output\"):\r\n\t\t\t\tos.mkdir(\"stylized_output\")\r\n\r\n\t\t\toutpath = os.path.join(os.getcwd(), \"stylized_output\")\r\n\r\n\t\t\toutpath = os.path.join(outpath, '{}_{}{}'.format(content_image_name, style_prefix, style_ext))\r\n\r\n\t\t\tsave_img(outpath, stylized_rgb)\r\n\r\n\t\tif visualize:\r\n\t\t\t## visualize the stylized output\r\n\t\t\t_, ax = plt.subplots(1, figsize = (12, 12))\r\n\t\t\tax.imshow(stylized_rgb)\r\n\r\n\treturn stylized_rgb",
"def run_style_transfer(cnn, normalization_mean, normalization_std,\n args, content_layers_default, style_layers_default, num_steps,\n style_weight, content_weight): # default: style_weight = 1e6, content_weight = 1\n content_img = image_loader(args.content, args.img_size)\n style_img = image_loader(args.style, args.img_size)\n input_img = content_img.clone()\n assert style_img.size() == content_img.size(), \\\n \"we need to import style and content images of the same size\"\n \n logprint('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n normalization_mean, normalization_std, style_img, content_img, \n args, content_layers_default, style_layers_default)\n \n if args.fft:\n input_img = fft_image(input_img.shape).to(device, torch.float) # convert to fft parameterization\n optimizer = get_input_optimizer(input_img)\n \n logprint('Optimizing..')\n run = [0]\n while run[0] <= num_steps:\n def closure():\n input_img.data.clamp_(0, 1) # correct the values of updated input image\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for layer_name, sl in style_losses.items():\n style_score += sl.loss\n if args.plot_feature and run[0] == num_steps: # visualize feature maps at the last iter\n analyze_gram(sl.gram, layer_name) # analyze the gram matrix, like SVD analysis\n visualize_feature_map(sl.feat, layer_id=layer_name, save_dir=logger.gen_img_path, prefix=prefix, ext=args.ext)\n\n for layer_name, cl in style_losses.items():\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n loss = style_score + content_score\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n logprint(\"run {}:\".format(run))\n logprint('Style Loss : {:4f} Content Loss: {:4f}'.format(style_score.item(), content_score.item()))\n return style_score + content_score\n\n optimizer.step(closure)\n if run[0] % 100 == 0:\n input_img.data.clamp_(0, 1)\n content_name = os.path.split(args.content)[1].split('.')[0] \n style_name = os.path.split(args.style)[1].split('.')[0]\n out_path = \"%s/%s__%s__%s_iter%d.jpg\" % (logger.gen_img_path, content_name, style_name, args.net, run[0])\n vutils.save_image(input_img, out_path)",
"def fit_style_transfer(style_image, content_image, style_weight=1e-2, content_weight=1e-4, \n optimizer='adam', epochs=1, steps_per_epoch=1):\n\n images = []\n step = 0\n\n # get the style image features \n style_targets = get_style_image_features(style_image)\n \n # get the content image features\n content_targets = get_content_image_features(content_image)\n\n # initialize the generated image for updates\n generated_image = tf.cast(content_image, dtype=tf.float32)\n generated_image = tf.Variable(generated_image) \n \n # collect the image updates starting from the content image\n images.append(content_image)\n \n for n in range(epochs):\n for m in range(steps_per_epoch):\n step += 1\n \n ### START CODE HERE ###\n # Update the image with the style using the function that you defined\n \n update_image_with_style(image=generated_image,\n style_targets=style_targets,\n content_targets=content_targets,\n style_weight=style_weight,\n content_weight=content_weight,\n optimizer=optimizer)\n\n ### END CODE HERE\n\n print(\".\", end='')\n if (m + 1) % 10 == 0:\n images.append(generated_image)\n \n # display the current stylized image\n clear_output(wait=True)\n display_image = tensor_to_image(generated_image)\n display_fn(display_image)\n\n # append to the image collection for visualization later\n images.append(generated_image)\n print(\"Train step: {}\".format(step))\n \n # convert to uint8 (expected dtype for images with pixels in the range [0,255])\n generated_image = tf.cast(generated_image, dtype=tf.uint8)\n \n return generated_image, images",
"def _blend_layers(self, imagecontent, (z, x, y)):\n result = self._tile_image(imagecontent)\n # Paste each layer\n for (layer, opacity) in self._layers:\n try:\n # Prepare tile of overlay, if available\n overlay = self._tile_image(layer.tile((z, x, y)))\n except (DownloadError, ExtractionError), e:\n logger.warn(e)\n continue\n # Extract alpha mask\n overlay = overlay.convert(\"RGBA\")\n r, g, b, a = overlay.split()\n overlay = Image.merge(\"RGB\", (r, g, b))\n a = ImageEnhance.Brightness(a).enhance(opacity)\n overlay.putalpha(a)\n mask = Image.merge(\"L\", (a,))\n result.paste(overlay, (0, 0), mask)\n # Read result\n return self._image_tile(result)",
"def DCLoss(img, patch_size):\r\n maxpool = nn.MaxPool3d((3, patch_size, patch_size), stride=1, padding=(0, patch_size//2, patch_size//2))\r\n dc = maxpool(0-img[:, None, :, :, :])\r\n \r\n target = Variable(torch.FloatTensor(dc.shape).zero_().cuda()) \r\n \r\n loss = L1Loss(size_average=True)(-dc, target)\r\n \r\n return loss",
"def _blend_layers(self, imagecontent, (z, x, y)):\n result = self._tile_image(imagecontent)\n # Paste each layer\n for (layer, opacity) in self._layers:\n try:\n # Prepare tile of overlay, if available\n overlay = self._tile_image(layer.tile((z, x, y)))\n except (DownloadError, ExtractionError), e:\n logger.warn(e)\n continue\n # Extract alpha mask\n overlay = overlay.convert(\"RGBA\")\n r, g, b, a = overlay.split()\n overlay = Image.merge(\"RGB\", (r, g, b))\n a = ImageEnhance.Brightness(a).enhance(opacity)\n overlay.putalpha(a)\n mask = Image.merge(\"L\", (a,))\n result.paste(overlay, (0, 0), mask)\n # Read result\n return self._image_tile(result)",
"def loss_vgg(style_images, content_image, output_images, vggfile):\n c_layers = C_WEIGHTS.keys()\n s_layers = S_WEIGHTS.keys()\n vgg16_filters = load_vgg(vggfile)\n vgg16 = nn_build.Network(\n VGG16DESC, 'vgg16', initial=vgg16_filters, process=True)\n c_net = vgg16.layers(content_image, c_layers)\n\n c_loss = 0.\n s_loss = 0.\n tv_loss = 0.\n for style in style_images:\n s_net = vgg16.layers(style_images[style], s_layers)\n o_net = vgg16.layers(output_images[style], set(c_layers+s_layers))\n for layer in c_layers:\n _, h, w, c = c_net[layer].get_shape().as_list()\n c_loss += C_WEIGHTS[layer]*tf.nn.l2_loss(\n o_net[layer]-c_net[layer])/(h*w*c)\n for layer in s_layers:\n bs, _, _, c = o_net[layer].get_shape().as_list()\n s_loss += S_WEIGHTS[layer]*tf.nn.l2_loss(\n Gram(o_net[layer], bs) - Gram(s_net[layer], bs))\n tv_loss += TV_WEIGHTS*(\n tf.nn.l2_loss(output_images[style][:,1:,:,:]\n - output_images[style][:,:-1,:,:])\n + tf.nn.l2_loss(output_images[style][:,:,1:,:]\n - output_images[style][:,:,:-1,:]))\n style_num = len(style_images)\n return c_loss/style_num, s_loss/style_num, tv_loss/style_num",
"def contrastive_loss(tparams, options, im, sents):\n margin = options['margin']\n attention_type = options['attention_type'] if 'attention_type' in options else 'dot'\n n_langs = len(sents)\n\n final_cost = 0.\n # compute cost for each language and aggregate on final cost\n for i in range(n_langs):\n s_lang = sents[i]\n # compute image-sentence score matrix\n if attention_type == 'dot':\n scores_lang = tensor.dot(im, s_lang.T)\n\n elif attention_type == 'general':\n if attention_type == 'general':\n sents_img = tparams['image_sentence_%i_mapping'%i]\n scores_lang = im.dot( sents_img ).dot( s_lang.T )\n\n else:\n raise Exception(\"Attention type not supported: %s\"%attention_type)\n\n diagonal_lang = scores_lang.diagonal()\n # cost over sentence\n # compare every diagonal score to scores in its column (i.e, all contrastive images for each sentence)\n cost_sent_lang = tensor.maximum(0, margin - diagonal_lang + scores_lang)\n # compare every diagonal score to scores in its row (i.e, all contrastive sentences for each image)\n cost_im_lang = tensor.maximum(0, margin - diagonal_lang.reshape((-1, 1)) + scores_lang)\n # clear diagonals\n cost_sent_lang = fill_diagonal(cost_sent_lang, 0)\n cost_im_lang = fill_diagonal(cost_im_lang, 0)\n\n # aggregate\n final_cost += cost_sent_lang.sum() + cost_im_lang.sum()\n\n return final_cost",
"def contrastive_loss_all(tparams, options, im, sents, lambda_img_sent=0.5, lambda_sent_sent=0.5):\n margin = options['margin']\n attention_type = options['attention_type'] if 'attention_type' in options else 'dot'\n\n n_langs = len(sents)\n\n final_cost = 0.\n # compute costs for each language-image pair and aggregate final cost\n for i in range(n_langs):\n # compute image-sentence subcost\n s_lang = sents[i]\n\n # compute image-sentence score matrix\n #scores_lang = tensor.dot(im, s_lang.T)\n if attention_type == 'dot':\n scores_lang = tensor.dot(im, s_lang.T)\n\n elif attention_type == 'general':\n if attention_type == 'general':\n # get matrix to map sentences and images\n sents_img = tparams['image_sentence_%i_mapping'%i]\n scores_lang = im.dot( sents_img ).dot( s_lang.T )\n\n else:\n raise Exception(\"Attention type not supported: %s\"%attention_type)\n\n diagonal_lang = scores_lang.diagonal()\n # cost over sentence\n # compare every diagonal score to scores in its column (i.e, all contrastive images for each sentence)\n cost_sent_lang = tensor.maximum(0, margin - diagonal_lang + scores_lang)\n # compare every diagonal score to scores in its row (i.e, all contrastive sentences for each image)\n cost_im_lang = tensor.maximum(0, margin - diagonal_lang.reshape((-1, 1)) + scores_lang)\n # clear diagonals\n cost_sent_lang = fill_diagonal(cost_sent_lang, 0)\n cost_im_lang = fill_diagonal(cost_im_lang, 0)\n\n # aggregate\n final_cost += lambda_img_sent * (cost_sent_lang.sum() + cost_im_lang.sum())\n\n # compute costs for each-language-language pair and aggregate final cost\n for i in range(n_langs):\n for j in range(n_langs):\n if i==j or j<=i:\n continue\n # compute sentence-sentence subcost\n s_lang1 = sents[i]\n s_lang2 = sents[j]\n\n # compute sent1-sent2 score matrix\n #scores_lang = tensor.dot(s_lang1, s_lang2.T)\n if attention_type == 'dot':\n scores_lang = tensor.dot(s_lang1, s_lang2.T)\n\n elif attention_type == 'general':\n # get matrices to map sentences in different languages\n sents_sents = tparams['sentence_%i_sentence_%i_mapping'%(i,j)]\n scores_lang = s_lang1.dot( sents_sents ).dot( s_lang2.T )\n\n else:\n raise Exception(\"Attention type not supported: %s\"%attention_type)\n\n diagonal_lang = scores_lang.diagonal()\n # cost over sent1\n # compare every diagonal score to scores in its column (i.e, all contrastive sent2 for each sent1)\n cost_sent1 = tensor.maximum(0, margin - diagonal_lang + scores_lang)\n # compare every diagonal score to scores in its row (i.e, all contrastive sent1 for each sent2)\n cost_sent2 = tensor.maximum(0, margin - diagonal_lang.reshape((-1, 1)) + scores_lang)\n # clear diagonals\n cost_sent1 = fill_diagonal(cost_sent1, 0)\n cost_sent2 = fill_diagonal(cost_sent2, 0)\n\n # aggregate\n final_cost += lambda_sent_sent * (cost_sent1.sum() + cost_sent2.sum())\n\n return final_cost",
"def _compute_loss(self, loss_weights, init_image, gram_style_features,\n content_features):\n style_weight, content_weight, ta_weight = loss_weights\n\n # Feed our init image through our model. This will give us the content and\n # style representations at our desired layers.\n model_outputs = self.model(init_image)\n\n style_output_features = model_outputs[:self.num_style_layers]\n content_output_features = model_outputs[self.num_style_layers:]\n\n total_style_score = 0\n total_content_score = 0\n total_ta_score = 0\n # Accumulate style losses from all layers\n # Here, we equally weight each contribution of each loss layer\n averge_style_weight = 1.0 / float(self.num_style_layers)\n for target_style, comb_style in zip(gram_style_features,\n style_output_features):\n total_style_score += averge_style_weight * \\\n self._get_style_loss(comb_style[0], target_style)\n\n # Accumulate content losses from all layers\n average_content_weight = 1.0 / float(self.num_content_layers)\n for target_content, comb_content in zip(content_features,\n content_output_features):\n total_content_score += average_content_weight * \\\n self._get_content_loss(comb_content[0], target_content)\n # Get Variation loss of the image\n total_ta_score = self._get_total_variational_loss(\n init_image) * ta_weight\n total_style_score *= style_weight\n total_content_score *= content_weight\n\n # Get total loss\n total_loss = total_style_score + total_content_score + total_ta_score\n return total_loss, total_style_score, total_content_score",
"def optimize_image(layer_tensor, image,\n num_iterations=10, step_size=3.0, tile_size=400,\n show_gradient=False):\n\n # Copy the image so we don't overwrite the original image.\n img = image.copy()\n\n print(\"Image before:\")\n plot_image(img)\n\n print(\"Processing image: \", end=\"\")\n\n # Use TensorFlow to get the mathematical function for the\n # gradient of the given layer-tensor with regard to the\n # input image. This may cause TensorFlow to add the same\n # math-expressions to the graph each time this function is called.\n # It may use a lot of RAM and could be moved outside the function.\n gradient = model.get_gradient(layer_tensor)\n\n for i in range(num_iterations):\n # Calculate the value of the gradient.\n # This tells us how to change the image so as to\n # maximize the mean of the given layer-tensor.\n grad = tiled_gradient(gradient=gradient, image=img,\n tile_size=tile_size)\n\n # Blur the gradient with different amounts and add\n # them together. The blur amount is also increased\n # during the optimization. This was found to give\n # nice, smooth images. You can try and change the formulas.\n # The blur-amount is called sigma (0=no blur, 1=low blur, etc.)\n # We could call gaussian_filter(grad, sigma=(sigma, sigma, 0.0))\n # which would not blur the colour-channel. This tends to\n # give psychadelic / pastel colours in the resulting images.\n # When the colour-channel is also blurred the colours of the\n # input image are mostly retained in the output image.\n sigma = (i * 4.0) / num_iterations + 0.5\n grad_smooth1 = gaussian_filter(grad, sigma=sigma)\n grad_smooth2 = gaussian_filter(grad, sigma=sigma*2)\n grad_smooth3 = gaussian_filter(grad, sigma=sigma*0.5)\n grad = (grad_smooth1 + grad_smooth2 + grad_smooth3)\n\n # Scale the step-size according to the gradient-values.\n # This may not be necessary because the tiled-gradient\n # is already normalized.\n step_size_scaled = step_size / (np.std(grad) + 1e-8)\n\n # Update the image by following the gradient.\n img += grad * step_size_scaled\n\n if show_gradient:\n # Print statistics for the gradient.\n msg = \"Gradient min: {0:>9.6f}, max: {1:>9.6f}, stepsize: {2:>9.2f}\"\n print(msg.format(grad.min(), grad.max(), step_size_scaled))\n\n # Plot the gradient.\n plot_gradient(grad)\n else:\n # Otherwise show a little progress-indicator.\n print(\". \", end=\"\")\n\n print()\n print(\"Image after:\")\n plot_image(img)\n\n return img",
"def get_style_content_loss(style_targets, style_outputs, content_targets, \n content_outputs, style_weight, content_weight):\n \n # Sum of the style losses\n style_loss = tf.add_n([ get_style_loss(style_output, style_target)\n for style_output, style_target in zip(style_outputs, style_targets)])\n \n # Sum up the content losses\n content_loss = tf.add_n([get_content_loss(content_output, content_target)\n for content_output, content_target in zip(content_outputs, content_targets)])\n\n ### START CODE HERE ###\n # scale the style loss by multiplying by the style weight and dividing by the number of style layers\n style_loss = style_loss * style_weight / NUM_STYLE_LAYERS\n\n # scale the content loss by multiplying by the content weight and dividing by the number of content layers\n content_loss = content_loss * content_weight / NUM_CONTENT_LAYERS\n \n # sum up the style and content losses\n total_loss = style_loss + content_loss\n ### END CODE HERE ###\n # return the total loss\n return total_loss",
"def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) formula\n local_loss = term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ",
"def classic_transfer(content_img, style_img, args, ctx = None, verbose = True):\n\n assert isinstance(args, ClassicTransferArgs), 'Args should be instance of ClassicTransferArgs'\n\n content_img = PreprocessImage(content_img).copyto(ctx)\n style_img = PreprocessImage(style_img).copyto(ctx)\n\n # load pretrained vgg19\n vgg19avg = get_vgg19_avg(pretrained = True)\n # style = [relu1_1, relu2_1, relu3_1, relu4_1, relu5_1]\n # content = [relu4_2]\n input = mx.sym.var('data')\n style_content_symbols = vgg19avg.get_output_symbols(input, args.style_feature, args.content_feature),\n\n style_content_net = mx.gluon.SymbolBlock(inputs = input, outputs = style_content_symbols, params = vgg19avg.collect_params())\n style_content_net.collect_params().reset_ctx(ctx)\n\n # extract target content and style\n target = style_content_net(content_img)[0]\n content_targets = target[len(args.style_feature):]\n target = style_content_net(style_img)[0]\n style_targets = target[:len(args.style_feature)]\n\n # compute target gram matrix\n target_gram_list, gram_scale_list = __get_style_gram(style_targets)\n\n # Generate random image to do style transfer\n random_img = mx.nd.random_uniform(-0.1, 0.1, content_img.shape, ctx = ctx)\n clip_norm = np.prod(random_img.shape)\n\n # optimizer\n lr = mx.lr_scheduler.FactorScheduler(step = args.lr_sched_delay, factor = args.lr_sched_factor)\n optimizer = mx.optimizer.NAG(learning_rate = args.learning_rate, wd = 0.0001,\n momentum = 0.95, lr_scheduler = lr)\n\n # This is needed for momentum\n optim_state = optimizer.create_state(0, random_img)\n\n # Training and transfer\n random_img.attach_grad() # attach grad for update\n for epoch in tqdm(range(args.epochs)):\n with mx.autograd.record():\n style_content = style_content_net(random_img)[0]\n contents = style_content[len(args.style_feature):]\n styles = style_content[:len(args.style_feature)]\n\n gram_list, _ = __get_style_gram(styles)\n total_loss = 0\n for content, target_content in zip(contents, content_targets):\n loss = mx.nd.sum(mx.nd.square(content - target_content))\n total_loss = total_loss + loss * args.content_weight\n\n for gram, target_gram, gscale in zip(gram_list, target_gram_list, gram_scale_list):\n loss = mx.nd.sum(mx.nd.square(gram - target_gram))\n total_loss = total_loss + loss * args.style_weight / gscale\n\n total_loss.backward()\n\n gnorm = mx.nd.norm(random_img.grad).asscalar()\n if gnorm > clip_norm:\n random_img.grad[:] *= clip_norm / gnorm\n\n if verbose:\n print('Training: epoch %d, loss: %f' % (epoch, total_loss.asscalar()))\n\n old_img = random_img.copy()\n tv_grad = __get_tv_grad(random_img, ctx, args.tv_weight)\n optimizer.update(0, random_img, random_img.grad + tv_grad, optim_state)\n\n eps = (mx.nd.norm(old_img - random_img) / mx.nd.norm(random_img)).asscalar()\n if eps < args.stop_eps:\n print('eps (%f) < args.stop_eps (%f), training finished' % (eps, args.stop_eps))\n break\n\n yield PostprocessImage(random_img)\n yield PostprocessImage(random_img)",
"def style_loss(feats, style_layers, style_targets, style_weights):\n # Hint: you can do this with one for loop over the style layers, and should\n # not be short code (~5 lines). You will need to use your gram_matrix function.\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****",
"def get_style_image_features(image):\n ### START CODE HERE ###\n # preprocess the image using the given preprocessing function\n preprocessed_style_image = preprocess_image(image)\n\n # get the outputs from the inception model that you created using inception_model()\n outputs = inception(preprocessed_style_image)\n\n # Get just the style feature layers (exclude the content layer)\n style_outputs = outputs[:NUM_STYLE_LAYERS]\n\n # for each style layer, calculate the gram matrix for that layer and store these results in a list\n gram_style_features = [gram_matrix(style_layer) for style_layer in style_outputs]\n ### END CODE HERE ###\n return gram_style_features",
"def DCLoss(img, opt):\n maxpool = nn.MaxPool3d((3, opt.patch_size, opt.patch_size), stride=1, padding=(0, opt.patch_size//2, opt.patch_size//2))\n dc = maxpool(1-img[:, None, :, :, :])\n \n target = torch.FloatTensor(dc.shape).zero_().cuda(opt.gpu_ids[0])\n \n loss = L1Loss(reduction='sum')(dc, target)\n return -loss",
"def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ",
"def evaluation(pre_model, img_1, img_2,\n default_mean_std = True,\n style_layers=default_style_layers,\n weight = 1000000):\n # load the image\n imsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu\n img_1 = image_loader(img_1)\n img_2 = image_loader(img_2)\n\n cnn = copy.deepcopy(pre_model)\n\n # normalization module\n normalization = Normalization(default_mean_std = default_mean_std)\n\n style_losses = 0\n\n # create our model\n model = nn.Sequential(normalization)\n\n # increment every time we see a conv\n i = 0 \n # go through all the layers\n for layer in cnn.children():\n if isinstance(layer, nn.Conv2d):\n i += 1\n name = 'conv_{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu_{}'.format(i)\n # According to Alexis Jacq, the in-place version doesn't play \n # very nicely with the ContentLoss with the ContentLoss and StyleLoss \n # we insert below. So we replace with out-of-place ones here.\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'maxpool_{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn_{}'.format(i)\n\n model.add_module(name, layer)\n\n if name in style_layers:\n # add style loss:\n # calculate target style\n style_1 = model(img_1).detach()\n style_1 = gram_matrix(style_1)\n style_2 = model(img_2).detach()\n style_2 = gram_matrix(style_2)\n # save the loss\n style_losses += F.mse_loss(style_1, style_2) / len(style_layers)\n \n style_losses *= weight\n return float(style_losses)",
"def __call__(self, prediction, fg_mask, image):\n # prediction = self.pooling(prediction)\n # fg_mask = self.pooling(fg_mask)\n N, C, H, W = prediction.size()\n bg = prediction*(1-fg_mask)\n fg = prediction*fg_mask\n\n\n fg_patch = fg.view(N,C,-1).permute(0,2,1)\n bg_patch = bg.view(N,C,-1)\n\n fg_patch_mu = torch.mean(fg_patch, dim=2, keepdim=True)\n bg_patch_mu = torch.mean(bg_patch, dim=1, keepdim=True)\n fg_bg_local_conv = torch.matmul((fg_patch-fg_patch_mu), (bg_patch-bg_patch_mu))/(C-1)\n\n bg_distribution_std = (torch.var(bg_patch, dim=1, keepdim=True) + 1e-8).sqrt()\n fg_distribution_std = (torch.var(fg_patch, dim=2, keepdim=True) + 1e-8).sqrt()\n fg_bg_r = fg_bg_local_conv.div(torch.matmul(fg_distribution_std,bg_distribution_std)+1e-8)\n fg_bg_r = fg_bg_r.abs()\n # fg_bg_r[fg_bg_r<0.7] = 0\n\n pixel_count = H*W\n # # bg_patch_one = bg.unsqueeze(1).repeat(1, pixel_count, 1, 1, 1)\n # # fg_patch_one = fg.view(N,C,-1).permute(0,2,1).unsqueeze(-1).unsqueeze(-1).expand_as(bg_patch_one)\n # bg_patch_one = bg.permute(0,2,1,3).permute(0,1,3,2).unsqueeze(1)\n # fg_patch_one = fg.view(N,C,-1).permute(0,2,1).unsqueeze(-2).unsqueeze(-2)\n # fg_bg_L1 = (fg_patch_one-bg_patch_one).pow(2).mean(dim=-1)\n # fg_bg_L1_drop_fg = fg_bg_L1*(1-fg_mask)\n\n # fg_mask_channel = fg_mask.view(N, -1, 1, 1).expand_as(fg_bg_L1)\n # fg_bg_L1_only_fg = fg_bg_L1_drop_fg*fg_mask_channel\n\n # # fg_bg_local_conv[fg_bg_local_conv<0] = 0\n # # fg_bg_local_conv = torch.softmax(fg_bg_local_conv, dim=2)\n # # local_loss = fg_bg_L1_only_fg.view(N, pixel_count, pixel_count)*fg_bg_local_conv.permute(0,2,1).detach()\n # local_loss = fg_bg_L1_only_fg.view(N, pixel_count, -1)*fg_bg_r\n # fg_mask_sum = fg_mask.view(N, -1).sum(dim=1)\n\n C1 = 0.01**2\n image = self.adaptivepooling(image)\n # image = F.adaptive_avg_pool2d(image, 32)\n # print(image.size())\n image_fg = image*fg_mask\n image_bg = image*(1-fg_mask)\n image_fg_mu = image_fg.mean(dim=1)\n image_bg_mu = image_bg.mean(dim=1)\n image_fg_patch_one = image_fg_mu.view(N, -1,1)\n image_bg_patch_one = image_bg_mu.view(N, -1,1)\n image_fg_patch_one_sq = image_fg_patch_one.pow(2)\n image_bg_patch_one_sq = image_bg_patch_one.pow(2)\n\n luminance = torch.matmul(image_fg_patch_one, image_bg_patch_one.permute(0,2,1)+C1).div(image_fg_patch_one_sq+image_bg_patch_one_sq+C1)\n # image_bg_patch_one = image_bg.permute(0,2,1,3).permute(0,1,3,2).unsqueeze(1)\n # image_fg_patch_one = image_fg.view(N,image_fg.size(1),-1).permute(0,2,1).unsqueeze(-2).unsqueeze(-2)\n # fg_bg_L1 = (image_fg_patch_one-image_bg_patch_one).pow(2).mean(dim=-1)\n fg_bg_loss = luminance\n \n fg_bg_loss_drop_fg = fg_bg_loss*(1-fg_mask.view(N,1, -1))\n fg_mask_channel = fg_mask.view(N, -1, 1).expand_as(fg_bg_loss)\n fg_bg_loss_only_fg = fg_bg_loss_drop_fg*fg_mask_channel\n local_loss = fg_bg_loss_only_fg*fg_bg_r.detach()\n\n local_loss = local_loss.mean()\n loss = local_loss\n # if target_is_real:\n # loss = local_loss # self.relu(1-prediction.mean())\n # else:\n # loss = -local_loss # self.relu(1+prediction.mean())\n return loss",
"def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img",
"def style_transfer(vgg_model, content_tensor, style_tensor,\n prop, device, n_iterations, learning_rate):\n # creating a random image and set requires_grad to True\n target_image = torch.randn_like(content_tensor).requires_grad_(True).to(device)\n # extract content features\n content_features = __get_features(vgg_model, content_tensor)\n # create optimizer to optimize the target image\n optimizer = torch.optim.Adam([target_image], lr=learning_rate)\n for i in range(n_iterations):\n optimizer.zero_grad()\n\n target_features = __get_features(vgg_model, target_image)\n content_loss = __calculate_content_loss(content_features, target_features, \"10\")\n style_loss = __new_style_loss(target_image, style_tensor, prop, device)\n total_loss = content_loss + style_loss\n\n total_loss.backward()\n optimizer.step()\n\n if i % 50 == 0:\n print(\n f\"Iteration {i}, Total Loss: {total_loss.item():.2f}, Content Loss: {content_loss.item():.2f}\"\n f\", Style Loss {style_loss.item():.2f}\")\n\n return target_image",
"def get_within_scene_loss(pixelwise_contrastive_loss, image_a_pred, image_b_pred,\n matches_a, matches_b,\n masked_non_matches_a, masked_non_matches_b,\n background_non_matches_a, background_non_matches_b,\n blind_non_matches_a, blind_non_matches_b):\n pcl = pixelwise_contrastive_loss\n\n match_loss, masked_non_match_loss, num_masked_hard_negatives =\\\n pixelwise_contrastive_loss.get_loss_matched_and_non_matched_with_l2(image_a_pred, image_b_pred,\n matches_a, matches_b,\n masked_non_matches_a, masked_non_matches_b,\n M_descriptor=pcl._config[\"M_masked\"])\n\n if pcl._config[\"use_l2_pixel_loss_on_background_non_matches\"]:\n background_non_match_loss, num_background_hard_negatives =\\\n pixelwise_contrastive_loss.non_match_loss_with_l2_pixel_norm(image_a_pred, image_b_pred, matches_b, \n background_non_matches_a, background_non_matches_b, M_descriptor=pcl._config[\"M_background\"]) \n \n else:\n background_non_match_loss, num_background_hard_negatives =\\\n pixelwise_contrastive_loss.non_match_loss_descriptor_only(image_a_pred, image_b_pred,\n background_non_matches_a, background_non_matches_b,\n M_descriptor=pcl._config[\"M_background\"])\n \n \n\n blind_non_match_loss = zero_loss()\n num_blind_hard_negatives = 1\n if not (SpartanDataset.is_empty(blind_non_matches_a.data)):\n blind_non_match_loss, num_blind_hard_negatives =\\\n pixelwise_contrastive_loss.non_match_loss_descriptor_only(image_a_pred, image_b_pred,\n blind_non_matches_a, blind_non_matches_b,\n M_descriptor=pcl._config[\"M_masked\"])\n \n\n\n total_num_hard_negatives = num_masked_hard_negatives + num_background_hard_negatives\n total_num_hard_negatives = max(total_num_hard_negatives, 1)\n\n if pcl._config[\"scale_by_hard_negatives\"]:\n scale_factor = total_num_hard_negatives\n\n masked_non_match_loss_scaled = masked_non_match_loss*1.0/max(num_masked_hard_negatives, 1)\n\n background_non_match_loss_scaled = background_non_match_loss*1.0/max(num_background_hard_negatives, 1)\n\n blind_non_match_loss_scaled = blind_non_match_loss*1.0/max(num_blind_hard_negatives, 1)\n else:\n # we are not currently using blind non-matches\n num_masked_non_matches = max(len(masked_non_matches_a),1)\n num_background_non_matches = max(len(background_non_matches_a),1)\n num_blind_non_matches = max(len(blind_non_matches_a),1)\n scale_factor = num_masked_non_matches + num_background_non_matches\n\n\n masked_non_match_loss_scaled = masked_non_match_loss*1.0/num_masked_non_matches\n\n background_non_match_loss_scaled = background_non_match_loss*1.0/num_background_non_matches\n\n blind_non_match_loss_scaled = blind_non_match_loss*1.0/num_blind_non_matches\n\n\n\n non_match_loss = 1.0/scale_factor * (masked_non_match_loss + background_non_match_loss)\n\n loss = pcl._config[\"match_loss_weight\"] * match_loss + \\\n pcl._config[\"non_match_loss_weight\"] * non_match_loss\n\n \n\n return loss, match_loss, masked_non_match_loss_scaled, background_non_match_loss_scaled, blind_non_match_loss_scaled",
"def deep_dream_of_extreme_control(FLAGS,model,input_images=[],num_iterations=10,step_size=0.1):\n if len(input_images) == 0:\n # use predefined images\n img_dir='/esat/opal/kkelchte/docker_home/pilot_data/visualization_images'\n input_images=sorted([img_dir+'/'+f for f in os.listdir(img_dir)])\n\n print(\"[tools.py]: extracting deep dream maps of {0} in {1}\".format([os.path.basename(i) for i in input_images], os.path.dirname(input_images[0])))\n \n # experts=np.asarray([[k]*(FLAGS.action_quantity if FLAGS.discrete else 1) for v in sorted(model.factor_offsets.values()) for k in model.factor_offsets.keys() if model.factor_offsets[k]==v]).flatten()\n\n inputs = load_images(input_images, model.input_size[1:])\n \n # collect gradients for output endpoint of evaluation model\n grads={}\n with tf.device('/cpu:0'):\n output_tensor = model.endpoints['eval']['outputs']\n for i in range(output_tensor.shape[1].value):\n layer_loss = output_tensor[:,i]\n gradients = tf.gradients(layer_loss, model.inputs)[0]\n gradients /= (tf.sqrt(tf.reduce_mean(tf.square(gradients))) + 1e-5)\n grads[output_tensor.name+'_'+str(i)]=gradients\n\n\n # apply gradient ascent for all outputs and each input image\n # if number of outputs ==1 apply gradient descent for contrast\n if len(grads.keys())== 1:\n opposite_results={}\n else:\n opposite_results=None\n\n import copy\n results = {}\n for gk in grads.keys(): \n results[gk]=copy.deepcopy(inputs)\n if isinstance(opposite_results,dict): opposite_results[gk]=copy.deepcopy(inputs)\n\n for step in range(num_iterations):\n if step%10==0: print \"{0} step: {1}\".format(time.ctime(), step)\n for i,gk in enumerate(sorted(grads.keys())):\n results[gk] += step_size * model.sess.run(grads[gk], {model.inputs: results[gk]})\n if isinstance(opposite_results,dict):\n opposite_results[gk] -= step_size * model.sess.run(grads[gk], {model.inputs: opposite_results[gk]})\n\n # Normalize results within 0:1 range\n clean_results={}\n for gk in results.keys():\n clean_results[gk]=[]\n for i in range(results[gk].shape[0]):\n clean_results[gk].append(deprocess_image(results[gk][i], one_channel=True))\n # results[gk][i]=deprocess_image(results[gk][i], one_channel=True)\n if isinstance(opposite_results,dict):\n opposite_results[gk][i]=deprocess_image(opposite_results[gk][i])\n\n # combine adjust input images in one overview image\n # one column for each input image\n # one row with each extreme control for separate and difference images\n num_rows=1+len(results.keys())\n fig, axes = plt.subplots(num_rows ,min(len(input_images),5),figsize=(23, 4*(len(grads.keys())+1)))\n # fig, axes = plt.subplots(num_rows ,min(len(input_images),5),figsize=(23, 4*(len(grads.keys())+1)))\n # add original images in first row\n for i in range(axes.shape[1]):\n axes[0, i].set_title(os.path.basename(input_images[i]).split('.')[0])\n axes[0, i].imshow(matplotlibprove(inputs[i]), cmap='inferno')\n axes[0, i].axis('off')\n\n # add for each filter the modified input\n row_index=1\n for gk in sorted(results.keys()):\n for i in range(axes.shape[1]):\n # print gk\n # axes[row_index, i].set_title('Grad Asc: '+gk.split('/')[1]+'/'+gk[-1]) \n axes[row_index, i].set_title('Grad Asc: '+gk)\n # axes[row_index, i].set_title(experts[row_index-1])\n\n axes[row_index, i].imshow(np.concatenate((inputs[i],np.expand_dims(clean_results[gk][i],axis=2)), axis=2), cmap='inferno')\n # axes[row_index, i].imshow(matplotlibprove(results[gk][i]), cmap='inferno')\n axes[row_index, i].axis('off')\n row_index+=1\n # In cas of continouos controls: 
visualize the gradient descent and difference\n # if isinstance(opposite_results,dict):\n # for gk in opposite_results.keys():\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Grad Desc: '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Grad Desc: '+gk) \n # axes[row_index, i].imshow(matplotlibprove(opposite_results[gk][i]), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n \n # # add difference\n # for gk in opposite_results.keys():\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Diff: '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Diff: '+gk) \n # axes[row_index, i].imshow(matplotlibprove(deprocess_image((opposite_results[gk][i]-results[gk][i])**2)), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n # else:\n # # add difference between 2 exteme actions\n # gk_left=sorted(results.keys())[0]\n # gk_right=sorted(results.keys())[-1]\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Diff : '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Diff : '+gk) \n # axes[row_index, i].imshow(matplotlibprove(deprocess_image((results[gk_left][i]-results[gk_right][i])**2)), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n \n \n plt.savefig(FLAGS.summary_dir+FLAGS.log_tag+'/control_dream_maps.jpg',bbox_inches='tight')\n # plt.show()",
"def stylize(network, initial, content, styles, iterations,\n content_weight, style_weight, style_blend_weights, tv_weight,\n learning_rate, print_iterations=None, checkpoint_iterations=None):\n shape = (1,) + content.shape\n style_shapes = [(1,) + style.shape for style in styles]\n content_features = {}\n style_features = [{} for _ in styles]\n\n # compute content features in feedforward mode\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=shape)\n net, mean_pixel = vgg.net(network, image)\n content_pre = np.array([vgg.preprocess(content, mean_pixel)])\n content_features[CONTENT_LAYER] = net[CONTENT_LAYER].eval(\n feed_dict={image: content_pre})\n\n # compute style features in feedforward mode\n for i in range(len(styles)):\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=style_shapes[i])\n net, _ = vgg.net(network, image)\n style_pre = np.array([vgg.preprocess(styles[i], mean_pixel)])\n for layer in STYLE_LAYERS:\n features = net[layer].eval(feed_dict={image: style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n style_features[i][layer] = gram\n\n # make stylized image using backpropogation\n with tf.Graph().as_default():\n if initial is None:\n noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)\n initial = tf.random_normal(shape) * 0.256\n else:\n initial = np.array([vgg.preprocess(initial, mean_pixel)])\n initial = initial.astype('float32')\n image = tf.Variable(initial)\n net, _ = vgg.net(network, image)\n\n # content loss\n content_loss = content_weight * (2 * tf.nn.l2_loss(\n net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) /\n content_features[CONTENT_LAYER].size)\n # style loss\n style_loss = 0\n for i in range(len(styles)):\n style_losses = []\n for style_layer in STYLE_LAYERS:\n layer = net[style_layer]\n _, height, width, number = map(lambda i: i.value, layer.get_shape())\n size = height * width * number\n feats = tf.reshape(layer, (-1, number))\n gram = tf.matmul(tf.transpose(feats), feats) / size\n style_gram = style_features[i][style_layer]\n style_losses.append(2 * tf.nn.l2_loss(gram - style_gram) / style_gram.size)\n style_loss += style_weight * style_blend_weights[i] * reduce(tf.add, style_losses)\n # total variation denoising\n tv_y_size = _tensor_size(image[:,1:,:,:])\n tv_x_size = _tensor_size(image[:,:,1:,:])\n tv_loss = tv_weight * 2 * (\n (tf.nn.l2_loss(image[:,1:,:,:] - image[:,:shape[1]-1,:,:]) /\n tv_y_size) +\n (tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:shape[2]-1,:]) /\n tv_x_size))\n # overall loss\n loss = content_loss + style_loss + tv_loss\n\n # optimizer setup\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n def print_progress(i, last=False):\n stderr.write('Iteration %d/%d\\n' % (i + 1, iterations))\n if last or (print_iterations and i % print_iterations == 0):\n stderr.write(' content loss: %g\\n' % content_loss.eval())\n stderr.write(' style loss: %g\\n' % style_loss.eval())\n stderr.write(' tv loss: %g\\n' % tv_loss.eval())\n stderr.write(' total loss: %g\\n' % loss.eval())\n\n # optimization\n best_loss = float('inf')\n best = None\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for i in range(iterations):\n last_step = (i == iterations - 1)\n print_progress(i, last=last_step)\n train_step.run()\n\n if (checkpoint_iterations and i % checkpoint_iterations == 0) or 
last_step:\n this_loss = loss.eval()\n if this_loss < best_loss:\n best_loss = this_loss\n best = image.eval()\n yield (\n (None if last_step else i),\n vgg.unprocess(best.reshape(shape[1:]), mean_pixel)\n )"
] |
[
"0.61786896",
"0.6164723",
"0.61576635",
"0.6105687",
"0.6076067",
"0.6060993",
"0.5963049",
"0.59628075",
"0.5925563",
"0.5875337",
"0.58690226",
"0.576405",
"0.57587606",
"0.5723707",
"0.5684236",
"0.5642646",
"0.56415737",
"0.5629251",
"0.562675",
"0.5611985",
"0.55835485",
"0.55765",
"0.5557703",
"0.5537705",
"0.55249435",
"0.55139214",
"0.5508946",
"0.5472382",
"0.5465368",
"0.54580295"
] |
0.63845575
|
0
|
Constructor that initializes the search to extract the data
|
def __init__(self, driver, output_folder, search_parameters):
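# Store the shared Selenium driver and prepare the container that accumulates the scraped results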
self.driver = driver
self.search_results = SearchResults(output_folder, search_parameters)
self.version = search_parameters["version"]
self.region = search_parameters["community"]
self.province = search_parameters["province"]
self.entity_type = search_parameters["entity_type"]
self.name = search_parameters["name"]
self.cif = search_parameters["cif"]
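# Launch the search right away with the parameters gathered above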
self.do_search()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def build_search_data(self):\n # Must be overriden by subclass.\n pass",
"def construct(self):\n return self.as_search().construct()",
"def __init__(self, args, parsers):\n self.parsers = parsers\n self.search_fields = args.search_field if args.search_field else []",
"def __init__(self, q):\n\n self.inverted_lists = {}\n self.q = q\n self.records = []\n self.original_records = []\n self.city_names = []",
"def __init__(self, query, title, link, subtext, searchterms, scripts):\n self.search_query = query\n self.title = title\n self.link = link\n self.subtext = subtext\n self.searchterms = searchterms\n self.link_scripts = scripts",
"def __init__(self, search_type):\n self.search = search_type\n self.items = None\n self.lang = None\n self.limit = None\n self.limit_type = None\n self.database = None\n self.table = None\n self.access_token = None\n self.access_token_secret = None\n self.consumer_key = None\n self.consumer_secret = None",
"def __init__(self, data, **kwargs):\n super(HillClimbSearchITBN, self).__init__(data, **kwargs)",
"def __init__(self, config={}, html='', query=''):\n self.config = config\n self.searchtype = self.config.get('search_type', 'normal')\n assert self.searchtype in self.search_types, 'search type \"{}\" is not supported in {}'.format(\n self.searchtype,\n self.__class__.__name__\n )\n\n self.query = query\n self.html = html\n self.dom = None\n self.search_results = {}\n self.num_results_for_query = ''\n self.num_results = 0\n self.effective_query = ''\n self.page_number = -1\n self.no_results = False\n self.related_keywords = {}\n\n # to be set by the implementing sub classes\n self.search_engine = ''\n\n # short alias because we use it so extensively\n self.css_to_xpath = HTMLTranslator().css_to_xpath\n\n if self.html:\n self.parse()",
"def __init__(self):\n\n data_extract=DataExtracter()\n self.data = tuple()",
"def __init__(self):\n self._results = {}\n self._logs = {}",
"def get_data(self):\n def _clean_search_hit(search_hit):\n \"\"\"\n Takes in a search result hit as a BeautifySoup tag and pulls out all the data to match the desired schema.\n\n :param search_hit:\n :return Dictionary: A dictionary with the cleaned data\n \"\"\"\n\n hit_name = search_hit.find(class_='hit-name')\n hit_url = hit_name.get('href')\n hit_id = hit_url.split('/')[-1]\n name = hit_name.get_text().split(',')[0].title().split()\n\n current_city = search_hit.find(class_='hit-location').get_text().upper()\n\n # Find all Addresses for search result.\n try:\n address = search_hit.find(class_='hit-pastAddresses').find_all(class_='hit-values')\n address = list({a.text.upper().replace('.', '') for a in address})\n except AttributeError:\n address = list()\n\n # find the address that is most likely the current main address.\n try:\n address.insert(0, address.pop(address.index(current_city)))\n except ValueError:\n address.insert(0, current_city)\n\n address = [\n {\n '@type': 'PostalAddress',\n 'addressLocality': locality.title(),\n 'addressRegion': region\n } for locality, region in [a.split(', ') for a in address]]\n\n work_location = {'@type': 'Place'}\n try:\n work_location['name'] = search_hit\\\n .find(class_='hit-work')\\\n .find(class_='hit-values')\\\n .get_text()\\\n .title()\n except AttributeError:\n work_location['name'] = ''\n\n alumni_of = {'@type': 'EducationalOrganization'}\n try:\n alumni_of['name'] = search_hit\\\n .find(class_='hit-high-school')\\\n .find(class_='hit-values')\\\n .get_text().title()\n except AttributeError:\n pass\n\n return {\n '@id': hit_id,\n '@type': 'Person',\n 'name': ' '.join(name),\n 'givenName': name[0],\n 'middleName': ' '.join(name[1:-1]),\n 'familyName': name[-1],\n 'url': hit_url,\n 'address': address,\n 'workLocation': work_location,\n 'alumniOf': alumni_of,\n }\n\n def _refine_search(search_str, options):\n \"\"\"\n Takes a list of WebElements and a search string, looks for string in the text of each WebElement, and\n press the option if found. Returns Boolean for found status\n\n :param search_str: str of the desired option.\n :param options: list of WebElements from Beautify Soup that represents all of the available options.\n :return:\n \"\"\"\n search_str = search_str.upper()\n logging.info(f'Looking for \\'{search_str}\\'')\n try:\n for option in options:\n option_text = option.text.upper()\n logging.info(f'Option Checked: {option_text}')\n if search_str in option_text:\n option.click()\n time.sleep(2)\n logging.info(f'Option Selected: {option_text}')\n return True\n else:\n return False\n except AttributeError:\n return True\n except StaleElementReferenceException as e:\n ChromeCrash(e)\n\n with self.driver(executable_path=self.DRIVER_DIR) as driver:\n driver.get(self.url)\n\n \"\"\"\n The CSS for the page doesn't show the State nor the City selector options if the page is too narrow,\n so we need to make sure the browser is open wide enough for the CSS to make those options visible. 
\n \"\"\"\n driver.fullscreen_window()\n\n # Refine the search by State\n address_region = self.person.get('addressRegion', '')\n address_region = STATES.get(address_region.upper(), address_region.upper())\n region_options = driver\\\n .find_element_by_class_name(\"STATE\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_region, region_options):\n return False\n\n # Narrow the search by pressing a City option\n address_locality = self.person.get('addressLocality').title()\n locality_options = driver\\\n .find_element_by_class_name(\"CITY\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_locality, locality_options):\n return False\n\n \"\"\"\n The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to\n be done in steps with a pause between movements to allow for loading. \n Here it will first get the current location on the page, attempt to move down the page, and then check to\n see if the location changed.\n \"\"\"\n\n if self.auto_scroll and len(driver.find_elements_by_class_name(\"ais-InfiniteHits-item\")) > 15:\n current_height, new_height = 0, driver.execute_script(\"return document.body.scrollHeight\")\n\n while new_height != current_height:\n # Scroll down to the bottom of the page\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n current_height, new_height = new_height, driver.execute_script(\"return document.body.scrollHeight\")\n\n page_source = driver.page_source\n page_soup = bs(page_source, 'html.parser')\n search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))\n for i, search_result in enumerate(search_results):\n search_results[i] = _clean_search_hit(search_result)\n\n self.data_from_website = pd.DataFrame(search_results)\n self.data_from_website.set_index('@id', inplace=True)\n return True",
"def __init__(self, name=\"\"):\n super().__init__(\"search\", name)",
"def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None",
"def __init__(self, searchPhrase='', intent_actions=None):\n self.id = ''\n self.searchPhrase = searchPhrase\n self.intent_actions = intent_actions\n self.es = es()",
"def __init__(self, *args, **kwargs):\n self.es_conn = Elasticsearch(ELASTICSEARCH_CONN)\n self.size = kwargs.get(\"size\", 10)\n self.from_ = int(kwargs.get(\"from\", 0))\n to_limit = kwargs.get(\"to\")\n if to_limit:\n self.size = int(to_limit) - self.from_\n self.q_dict = kwargs.get(\"query\", {})\n self.fields = kwargs.get(\"fields\", None)\n #configuration to list all keys allowed for package model\n self.es_query_keys = kwargs.get(\"ES_QUERY_KEYS\", list())\n #configuration to list date type keys in package model\n self.es_date_keys = kwargs.get(\"ES_DATE_KEYS\", list())\n self.sort = kwargs.get('sort', \"_score:desc\")",
"def do_search(self, *args, **kwargs):\n return [{}]",
"def __init__(self):\r\n\t\t\r\n\t\tself.redis = redis.Redis()\r\n\t\tself.info_to_get = ['text', 'created_at', 'user']\r\n\t\tself.search_results = {}\r\n\t\tself.raw_data_directory_name = \"raw_mining_data\"\r\n\t\tself.filtered_data_directory_name = \"filtered_mining_data\"\r\n\t\tenglish_file = pjoin( sys.path[0], \"sentiment_word_files\", \"Nielsen2010Responsible_english.csv\")\r\n\t\tself.analyzeEnglish = dict(map(lambda (w,e): (w, int(e)), \\\r\n\t\t\t\t\t\t\t\t\t[ line.strip().lower().split('\\t') for line in open(english_file) ]))\r\n\t\tself.tweets_count = 0",
"def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n \"\"\" Use _query_names_ to store a single query name \"\"\"\n self._query_names_ = 'report_donor_dollar_breakdown' # embed the query name in the class itself\n self._query_type_ = kwargs['query_type']",
"def search(self, query):",
"def __init__(self):\n\t\tself.relevances = None",
"def __init__(self,\r\n found=None,\r\n displaying=None,\r\n more_available=None,\r\n created_date=None,\r\n institutions=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.found = found\r\n self.displaying = displaying\r\n self.more_available = more_available\r\n self.created_date = created_date\r\n self.institutions = institutions\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties",
"def __init__(self, *args, **kwargs):\n cls = TestUninformedWordGameSearch\n super(cls, self).__init__(*args, **kwargs) # pylint:disable=W0142\n self.search_function = search_function",
"def __init__(self):\n self._sections = {}\n self._filters = []\n self._id = 0",
"def __init__(self, k=None, data=None, query=None):\n self.k = k\n self.data = data\n self.query = query",
"def request_constructor(self, url, search_term, tag):\n if search_term != \"NULL\":\n url = url.format(search_term)\n r = requests.get(url)\n response = r.json()\n self.search_response = response[tag]",
"def __init__(self):\n self.data = []\n self.idx = {}",
"def createSearch(self, authenticationToken, search):\r\n pass",
"def __init__(self, content):\n # Collect results extracted as JSON\n self._json_cache = [\n match.groups()\n for match in RE_JSON_KEY.finditer(content)\n ]",
"def __init__(self, name='google'):\n self.engine_info = filter(lambda x: 'NAME' in x.keys() and x['NAME'] is name, SMARTSEARCH_AVAILABLE_ENGINES)[0]\n self.connection = build('customsearch', 'v1', developerKey=self.engine_info['GOOGLE_SITE_SEARCH_API_KEY'])",
"def __init__ (self, id, finder, matches):\n\t\tself.id = id\n\t\t# self.inCitesName = fullname\n\t\tfor attr in ['fullname', 'firstName', 'middleName', 'lastName', 'note']:\n\t\t\tsetattr (self, attr, getattr (finder, attr))\n\t\tself.matches = matches\n\t\tself.numMatches = len(matches)"
] |
[
"0.74298084",
"0.72582036",
"0.69904244",
"0.687046",
"0.6810866",
"0.6688437",
"0.66254735",
"0.6623682",
"0.6594633",
"0.65885544",
"0.65650845",
"0.65181285",
"0.65010107",
"0.6488581",
"0.64617634",
"0.64591503",
"0.642912",
"0.64052725",
"0.6401393",
"0.6376879",
"0.6369297",
"0.63547844",
"0.6331652",
"0.6308675",
"0.6287423",
"0.6276054",
"0.62530816",
"0.6250773",
"0.6244571",
"0.6215766"
] |
0.74217045
|
1
|
Method that selects the desired historical data version for this search
|
def select_proper_version(self):
try:
# See if the submit button is displayed
submit_button = self.driver.find_element_by_xpath(submit_versions)
except NoSuchElementException:
# In case it is not shown, make it appear
self.driver.find_element_by_xpath(show_versions).click()
# Get the reference to the submit button
submit_button = self.driver.find_element_by_xpath(submit_versions)
# Find all options
select = Select(self.driver.find_element_by_xpath(options_versions))
found = False
for option in select.options:
# In case one matches the desired search, select it
if self.version in option.text:
select.select_by_value(option.text)
self.version = option.text
self.search_results.update_search_parameter("version", self.version)
found = True
break
# If the specified version didn't match an available historical version
# select by default the newest one
if not found:
select.select_by_index(0)
self.version = select.options[0].text
print("The desired historical publication was not found. "
"Using latest publication: {0}".format(self.version))
# Send the form
submit_button.submit()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_latest_version(self):\n study = self.source_study_version.study\n current_study_version = self.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same dataset associated with the current study version.\n try:\n current_dataset = SourceDataset.objects.get(\n source_study_version=current_study_version,\n i_accession=self.i_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_dataset",
"def available_versions(self, **kwargs):\n return self.raw_version_data(**kwargs)",
"def select_versions(self):\n return []",
"def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version",
"def get_df(self, version=None):\n pass",
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None",
"def test_model_by_version_get(self):\n\n # Firstly, find existing version - latest\n response = self.client().get('/model')\n latest_model = Model.from_json(response.data.decode())\n latest_version = latest_model.version\n\n # Accesses latest model\n response = self.client().get('/models/'+str(latest_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, latest_version)\n self.assertEqual(loaded_model, latest_model)\n\n # Accesses random model version\n random_version = random.choice(list(self.data_manipulation.versions.keys()))\n random_model = self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, random_version)\n self.assertEqual(loaded_model, random_model)\n\n # Random version is removed\n del self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 404)",
"def select_versions(self):\n super(ChannelBackend, self).select_versions()\n return [('1.1', '1.1')]",
"def compare_with_old_data_query(self):\n raise NotImplementedError",
"def choose_version(self):\n if len(self.unused_versions) == 0:\n self.unused_versions = list(range(len(self.versions)))\n idx = np.random.choice(self.unused_versions)\n self.unused_versions.remove(idx)\n version = self.versions[idx]\n return version",
"def get_data_history(\n self, \n rec_id=None, \n ver_id=None, \n output='df'\n ):\n if rec_id is None:\n raise Exception('Please enter a valid data clone RecId.')\n else:\n if ver_id is None:\n suffix = f\"data/{rec_id}/versions\"\n data = self.session.api_call(suffix=suffix)\n if output == 'df':\n df = pd.DataFrame.from_dict(data.json().get('versions'))\n df[\"data_rec_id\"] = rec_id\n return df\n elif output == 'dict':\n data_dict = data.json().get('versions')\n for version in data_dict:\n version.update({\"data_rec_id\": rec_id})\n return data_dict\n else:\n suffix = f\"data/{rec_id}/v/{ver_id}\"\n data = self.retrieve_paginated_data(suffix=suffix)\n if output == 'df':\n return pd.DataFrame.from_dict(data)\n elif output == 'dict':\n return data",
"def test_above_24_latest_version(self):\n self.data['version'] = ''\n self.data['appVersion'] = '28.0'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1",
"def historical():\n\n return {\n 'page': 'historical',\n }",
"def switch_to_latest_version(self):\n self.current_version = Version.objects.filter(is_published=True).latest()\n self.save()",
"def version(self, version: Optional[str]) -> Optional[ChartVersionInfo]:\n if version is None or version == \"\":\n return self.latest\n\n versionspec = semantic_version.SimpleSpec(version)\n\n for r in self.versions:\n if versionspec.match(r.version_info):\n return r\n return None",
"def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )",
"def currentVersionIndex(self, indexData):\n logger.debug(\"Func: currentVersionIndex/setter\")\n\n if indexData <= 0:\n self._currentVersionIndex = -1\n self._currentThumbFile = \"\"\n self._currentNotes = \"\"\n self._currentPreviewCamera = \"\"\n return\n if not self._currentSceneInfo:\n # logger.warning((\"BaseScene not Selected\"))\n return\n if not 1 <= indexData <= len(self._currentSceneInfo[\"Versions\"]):\n msg = \"out of range! %s\" %indexData\n # logger.error(msg)\n # raise Exception([101, msg])\n self._exception(101, msg)\n return\n # if self._currentVersionIndex == indexData:\n # logger.warning(\"Cursor is already at %s\" % indexData)\n # return\n self._currentVersionIndex = indexData\n self._currentNotes = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"Note\"]\n self._currentPreviewsDict = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"Preview\"]\n # print self._currentPreviewsDict\n if not self._currentPreviewsDict.keys():\n self._currentPreviewCamera = \"\"\n else:\n self._currentPreviewCamera = sorted(self._currentPreviewsDict.keys())[0]\n\n self._currentThumbFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"Thumb\"]\n self.cursorInfo()",
"def get_versions(self):\n raise NotImplementedError",
"def ask_show_version(show_data_json, show_title: str) -> str:\n versions = []\n for item in show_data_json:\n versions.append(item['name'])\n\n chosen_version = versions[0]\n if len(versions) > 1:\n chosen_version = list_options(f'Quale versione di {show_title} vuoi scaricare?', versions)\n \n return chosen_version",
"def update_which_sde_data(\n current_sde_df,\n latest_esi_df,\n index_key\n):\n pass",
"def select(self):\n last_results = self.database.query('''SELECT *\n FROM History\n ORDER BY request_date DESC\n LIMIT 10''')\n return last_results",
"def history(self, t_minus=0):\n data = self.ohlcv_df[self.ohlcv_df.index <= utc_to_epoch(\n self.prior_time)]\n return OHLCVData(data[-t_minus:])",
"def update_historical_graph(dropdown_historical_symbol, dateselector_historical_start, dateselector_historical_end,\n dropdown_historical_interval):\n\n # Variables to update\n ticker = dropdown_historical_symbol\n start = app_obj.utils.parse_date(dateselector_historical_start).date()\n end = app_obj.utils.parse_date(dateselector_historical_end).date()\n interval = dropdown_historical_interval\n\n df = dl.equities.get_historical(tickers=ticker, start_date=start, end_date=end, interval=interval)\n\n return app_obj.figures.build_ohlcv(df, title=f'{ticker} - Historical OHLCV ({start} to {end})')",
"def version(self, newVersion=None):\n pass",
"def get_new_sourcedatasets(self):\n previous_study_version = self.get_previous_version()\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n if previous_study_version is not None:\n qs = SourceDataset.objects.filter(source_study_version=self)\n # We can probably write this with a join to be more efficient.\n previous_dataset_accessions = SourceDataset.objects.filter(\n source_study_version=previous_study_version\n ).values_list('i_accession', flat=True)\n qs = qs.exclude(i_accession__in=previous_dataset_accessions)\n return qs\n else:\n return SourceDataset.objects.none()",
"def view_specific_paper_version():\n paper = db.paper(request.args(0))\n if paper is None:\n session.flash = T('No such paper')\n redirect(URL('default', 'index'))\n form = SQLFORM(db.paper, record=paper, readonly=True)\n all_versions_link = A('All versions', _href=URL('default', 'view_paper_versions', args=[paper.paper_id]))\n return dict(form=form,\n all_versions_link=all_versions_link)",
"def versions(self):\n raise Exception(\"mcapi.Datafile.versions is not implemented\")",
"def get_revision_heaviest_tenant_one(database_file_path):\n # remove the line below in case you have implemented the query.\n raise NotImplementedError\n\n query = \"\"\"\n \"\"\"\n\n return _fetch_result_from_database(query, database_file_path)",
"def version_link(self):\n release_link = url_for('data.data', selected_release=self.DATASET_RELEASE)\n return Markup(f\"<a href='{release_link}'>{self.DATASET_RELEASE}</a>\")",
"def get_version(self, version_number):\n version = TextVersion.objects.filter(text__exact=self).order_by('created')[version_number - 1:version_number][0]\n return version"
] |
[
"0.5997586",
"0.58068544",
"0.5563834",
"0.5411222",
"0.53838325",
"0.53473306",
"0.532596",
"0.5323683",
"0.52684224",
"0.52212214",
"0.52131855",
"0.521049",
"0.5187213",
"0.5186738",
"0.5155839",
"0.51361215",
"0.51353586",
"0.51331025",
"0.5111388",
"0.5096219",
"0.509455",
"0.509175",
"0.5088294",
"0.5086895",
"0.5077619",
"0.5074775",
"0.5071383",
"0.50618273",
"0.5046119",
"0.50448763"
] |
0.6540754
|
0
|
Method that selects the desired region for this search from the landing page
|
def select_proper_region(self):
# Click the desired region
region = self.driver.find_element_by_css_selector(web_map[self.region][button_path])
region.click()
time.sleep(1)
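# Try to move on to the search page of the selected region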
try:
search = self.driver.find_element_by_css_selector(go_to_search)
search.click()
time.sleep(1)
return True
except NoSuchElementException:
# This means that the information for this autonomous community is not available
# in this version
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def select_regions(data, region_col, regions, combine_subregions=True):",
"def set_region(sender, instance, *args, **kwargs):\n if instance.geocity and not instance.georegion:\n instance.georegion = instance.geocity.region",
"def select_region_of_interest():\r\n image = np.array(ImageGrab.grab(bbox=None))\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n r = cv2.selectROI(windowName='grab roi', img=image, showCrosshair=True, fromCenter=False)\r\n cv2.destroyAllWindows()\r\n return r[0], r[1], r[0] + r[2], r[1] + r[3]",
"def get_current_region(cls, request):\n # if rendered url is edit_region, the region slug originates from the region form.\n if (\n not hasattr(request, \"resolver_match\")\n or request.resolver_match.url_name == \"edit_region\"\n ):\n return None\n region_slug = request.resolver_match.kwargs.get(\"region_slug\")\n if not region_slug:\n return None\n region = cls.objects.filter(slug=region_slug)\n if not region.exists():\n raise Http404\n return region.first()",
"def region(self):\n return self._get(\"region\")",
"def which_region(self, g):\n raise NotImplementedError",
"def _choose_regions(self, display_regions=False):\n dstl = Load_DSTL()\n if self.class_type == 1:\n # Select regions where there are buildings (with red roofs)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n train_image, train_mask = dstl.extract_region_pos(1900, 3100, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(950, 1450, cutout_size=[200, 200], object_class=self.class_type)\n elif self.class_type == 5:\n train_image, train_mask = dstl.extract_region_pos(1150, 2150, cutout_size=[400, 400], object_class=self.class_type)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(1900, 1950, cutout_size=[400, 400], object_class=self.class_type)\n else:\n pass\n self.images = {'train': train_image, 'cv': cv_image, 'test': test_image}\n self.masks = {'train': train_mask, 'cv': cv_mask, 'test': test_mask}\n if display_regions:\n for key in self.images.keys():\n display_three_band(self.images[key], self.masks[key], colors='green', title='{:} region'.format(key))",
"def search_using_magento_region(cls, region, country):\n subdivisions = cls.search([\n ('name', 'ilike', region),\n ('country', '=', country.id),\n ])\n\n # TODO: Exception need be created if subdivison does not exist.\n\n return subdivisions and subdivisions[0] or None",
"def SetRegion(self,stateAbbrev):\n if not stateAbbrev in self.VectorData:\n print \"Error - No Data for %s available\" % stateAbbrev\n print \"Valid state abbreviations are:\", self.StateAbbrevList\n else:\n self.SelectedRegion = stateAbbrev",
"def __init__(self, region):\r\n self.region = region",
"def which_region(self, g):\n return NotImplementedError",
"def testSelectSiteSearchFunctionality(self):\n auth.checkIfUserIsLoggedIn(self.driver, 0, 'CRUDO')\n auth.login(self.driver, config.users['CRUDO']['username'], config.users['CRUDO']['password'])\n\n try:\n WebDriverWait(self.driver, 50).until(\n EC.presence_of_element_located((By.ID, \"com.view.viewglass:id/search_image_view\")))\n except TimeoutException:\n raiseExceptions(\"Search field is missing\")\n search = self.driver.find_element_by_id(\"com.view.viewglass:id/search_image_view\")\n search.click()\n search_text = self.driver.find_element_by_id(\"com.view.viewglass:id/search_site_edit_text\")\n # search for the site and press ENTER\n search_text.send_keys(config.sites['Default'])\n # self.self.driver.press_keycode(66)\n size = self.driver.find_element_by_id(\"com.view.viewglass:id/viewLogoLL\").size\n location = self.driver.find_element_by_id(\"com.view.viewglass:id/viewLogoLL\").location\n x = size['width'] / 2\n y = location['y'] + size['height'] * 2\n self.driver.tap([(x, y)])\n if len(self.driver.find_elements(By.ID, \"com.view.viewglass:id/viewLogoLL\")) > 0:\n y = location['y'] + size['height'] * 2.5\n self.driver.tap([(x, y)])\n else:\n raiseExceptions(\"Search function did not return any results.\")",
"def region(self, box):\n is_indexbox(box, errors=\"raise\") # Validate the box definition\n self.fetcher = self.Fetchers[\"region\"](box=box, **self.fetcher_options)\n self._AccessPoint = \"region\" # Register the requested access point\n return self",
"def river_region(rr_id):\n r = RiverRegionRenderer(request, rr_id, None)\n return r.render()",
"def selectregion(self, group=None):\n points = pylab.ginput(n=2, timeout=0)\n bounds = [int(point[not self.waveaxis]) for point in points]\n bounds = self._validateregion(bounds)\n try:\n self.regions.append({'min': bounds[0], 'max': bounds[1],\n 'group': group})\n except TypeError:\n pass",
"def select_bucket_region(\n custom_bucket: Optional[str],\n hook_region: Optional[str],\n cfngin_bucket_region: Optional[str],\n provider_region: str,\n) -> str:\n region = None\n region = hook_region if custom_bucket else cfngin_bucket_region\n return region or provider_region",
"def _get_region_value(self):\n return self.region_option.get()",
"def select_region(image):\n # Define the polygon by vertices\n rows, cols = image.shape[:2]\n bottom_left = [cols*0.05, rows*0.95]\n top_left = [cols*0.3, rows*0.55]\n bottom_right = [cols*0.95, rows*0.95]\n top_right = [cols*0.7, rows*0.55]\n # Vertices are an array of polygons (i.e array of arrays) and the data type must be integer.\n vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)\n return filter_region(image, vertices)",
"def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")",
"def region(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"region\")",
"def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)",
"def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")",
"def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")"
] |
[
"0.6173443",
"0.60131407",
"0.59935087",
"0.5924314",
"0.57906854",
"0.5778641",
"0.57748395",
"0.57520103",
"0.57211655",
"0.57080865",
"0.57039094",
"0.5672578",
"0.56141526",
"0.5597352",
"0.55812305",
"0.5569898",
"0.5565366",
"0.55647045",
"0.55633926",
"0.55633926",
"0.55633926",
"0.55633926",
"0.5556858",
"0.55319387",
"0.55046463",
"0.55046463",
"0.55046463",
"0.55046463",
"0.55046463",
"0.55046463"
] |
0.7525899
|
0
|
Method that fills the search form with all the parameters from the search
|
def fill_search_parameters(self):
time.sleep(1)
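# Fill in only the fields that were actually provided as search parameters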
if self.province is not None:
self.driver.find_element_by_xpath(provincia_path).send_keys(self.province)
if self.entity_type is not None:
self.driver.find_element_by_xpath(tipo_ente_1_path).send_keys(self.entity_type[0])
self.driver.find_element_by_xpath(tipo_ente_2_path).send_keys(self.entity_type[1])
if self.name is not None:
self.driver.find_element_by_xpath(nombre_path).send_keys(self.name)
self.driver.find_element_by_xpath(buscar_en_historico_nombres).click()
if self.cif is not None:
self.driver.find_element_by_xpath(cif_path).send_keys(self.cif)
# Submit the search
search_button = self.driver.find_element_by_xpath(submit_search)
search_button.click()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def perform_search(self):\n\n self.implicitly_wait(5)\n html_element = self.find_element_by_xpath(\n '/html/body').get_attribute('outerHTML')\n soup = Scraper(html_element)\n target = soup.find_search_field()\n\n for elem in target:\n for attr, value in elem.items():\n placeholder = self.find_elements_by_css_selector(\n f'input[{attr}=\"{value}\"]'\n )\n for element in placeholder:\n try:\n element.send_keys(self.keywords)\n element.send_keys(Keys.RETURN)\n print(colored(':: Placeholder fullfilled ::', 'green'))\n return\n except:\n print(\n colored('Can\\'t type inside the search input', 'yellow'))",
"def search_form(self, name_filter):\n from sagas.ofbiz.forms import print_form_list\n print_form_list(name_filter=name_filter)",
"def search():\n import booksearch as bs\n\n opt = var.get()\n term = searchBox.get()\n term2 = dateBox.get()\n\n # Case statement (substitute) for different search areas\n # Each key is an option in the OptionMenu\n searchBy = {\n \"Title & Author\" : bs.search(term),\n \"ID\" : bs.bookID(term),\n \"Date\" : bs.dateRange(term, term2),\n }\n query = searchBy[opt] # Make & stores a query (2D list)\n\n # Repopulates table\n if term != \"\":\n populate(query)",
"def search(self, *args, **kwargs):",
"def search_resources(self,searchtext):\n\n self.search.value = searchtext\n self.submit.click()",
"def searchForm(self, search=None, replace=None):\n if not search or not replace:\n raise cherrypy.HTTPError(400, \"Bad request\")\n redirurl = \"/{}/{}/\".format(search, replace)\n raise cherrypy.HTTPRedirect(redirurl)",
"def search_form(request): \n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\ttitle= \"Please search by:\"\n\t# opening files for plotting stat\n\torganismName=overallSumresult['organism']\n\tspeciesName=overallSumresult['species']\n\tspeciesstat=overallSumresult['speciesstat'][0:10]\n\tspeciesName=list(set(speciesName))\n\tspeciesName=sorted(speciesName)\n\tspeciesstat.insert(0,['Species','Unique protein','Unique peptide'])\n\tgostat=overallSumresult['gostat'][:10]\n\tgostat.insert(0,['Go Term','Unique proteins in various species'])\n\tkeggstat=overallSumresult['keggstat'][:10]\n\tkeggstat.insert(0,['Pathway Name', 'Unique proteins in various species', 'PeptideTracker', 'CPTAC', 'PASSEL', 'SRMAtlas', 'PanoramaWeb'])\n\tpepseqdic=finalresult['pepseqdic']\n\tprodic=finalresult['prodic']\n\tpepdatavalues=finalresult['pepdatavalues']\n\tprodatavalues=finalresult['prodatavalues']\n\tmrmdatabase=finalresult['mrmdatabase']\n\tallpepassay=totalpepassay['totalassayNonValid']\n\tallvalidpepassay=totalpepassay['totalassayValid']\n\tallunqStripPep=totalpepassay['totalstripPep']\n\tuqpep=len(pepseqdic)\n\tuqpro=len(prodic)\n\tkeggstat=[i[:2] for i in keggstat]\n\tspeciesstat=[i[:2] for i in speciesstat]\n\tcontextindex ={\"title\": title,\"uqpro\":uqpro, \"uqpep\":uqpep,\\\n\t\t\t\t\t\"speciesName\":speciesName,\"speciesnumber\":len(speciesName)-1,\\\n\t\t\t\t\t\"speciesstat\":json.dumps(speciesstat),\\\n\t\t\t\t\t\"gostat\":json.dumps(gostat),\"keggstat\":json.dumps(keggstat),\\\n\t\t\t\t\t'allpepassay':allpepassay,\\\n\t\t\t\t\t'allvalidpepassay':allvalidpepassay,\\\n\t\t\t\t\t'allunqStripPep':len(allunqStripPep),\\\n\t\t\t\t\t'jvennpep':json.dumps(pepdatavalues),\\\n\t\t\t\t\t'jvennprot':json.dumps(prodatavalues),\\\n\t\t\t\t\t'jvennmrmdb':json.dumps(mrmdatabase)\\\n\t\t\t\t\t}\n\treturn render(request, 'index.html', contextindex)",
"def add_search_form():\n g.form = forms.SearchPlaces(formdata=None)\n g.action = url_for(\"page.search_query\")",
"def search_form_servee(context, cl):\r\n return {\r\n \"request\": context[\"request\"],\r\n \"cl\": cl,\r\n \"show_result_count\": cl.result_count != cl.full_result_count,\r\n \"search_var\": \"q\"\r\n }",
"def search(self):\n premium = self.config.get('premium', False)\n\n self.params[self.opts['keyword']['query_key']] = self.config[self.opts['keyword']['config_key']] # keyword\n # Selection params\n self.append_param('tag_mode', 'selection')\n if premium:\n self.append_param('order_premium', 'selection')\n else:\n self.append_param('order_not_premium', 'selection')\n\n self.append_param('type', 'selection')\n self.append_param('tool', 'selection')\n self.append_param('ratio', 'selection')\n self.append_param('mode', 'selection')\n\n # Number params\n self.append_param('min_width', 'number')\n self.append_param('max_width', 'number')\n self.append_param('min_height', 'number')\n self.append_param('max_height', 'number')\n if premium:\n self.append_param('min_bookmark', 'number')\n self.append_param('max_bookmark', 'number')\n else:\n self.set_bookmark_filter()\n\n # Date params\n self.append_param('start_time', 'date')\n self.append_param('end_time', 'date')\n\n # multi work filter\n self.filters['multi'] = self.config.get('download_multi', False)\n\n for i in range(self.config['start_page'], self.config['end_page'] + 1):\n self.params['p'] = i\n self.headers['Referer'] = 'https://www.pixiv.net/'\n url ='https://www.pixiv.net/search.php'\n html = self.session.get(url, headers = self.headers, params = self.params, timeout = 10, proxies = self.proxies)\n\n soup = BeautifulSoup(html.text, 'lxml')\n data_items = json.loads(soup.find('input', id = 'js-mount-point-search-result-list')['data-items'])\n\n return self.extract_work_info(data_items)",
"def search(self, query):",
"def build_search_data(self):\n # Must be overriden by subclass.\n pass",
"def search():\n # Check for database tables\n check_db()\n # Check for GET data\n search_query = request.args.get(\"q\", None)\n # Format search results as HTML\n search_results = get_search_results_html(search_query)\n # Format recent searches as HTML\n recent_searches = get_recent_searches_html()\n\n return html_wrapper('<h1>' + SITE_NAME + '''</h1>\n <form action=\"/\" method=\"GET\">\n <input type=\"text\" name=\"q\">\n <input type=\"submit\" value=\"search\">\n </form>''' + search_results + recent_searches)",
"def on_searchButton_clicked(self):\n self.__search()",
"def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return render_template(\"search.html\")",
"def __search(self):\n self.resultList.clear()\n self.infoLabel.clear()\n \n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.searchButton.setEnabled(False)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__canceled = False\n \n self.__query = [term for term in self.searchEdit.text().strip().split()\n if term not in PipSearchDialog.Stopwords]\n self.__client.call(\n \"search\",\n ({\"name\": self.__query, \"summary\": self.__query}, \"or\"),\n self.__processSearchResult,\n self.__searchError\n )",
"def search(request):\n raise NotImplementedError",
"def search():\n pass",
"def new_search(self):\n return {'search_parameters': h.get_search_parameters(self.query_builder)}",
"def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? 
Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)",
"def searchFields(self):\n\n keyword = self.lineEdit.text().strip()\n self.options = []\n for field in self.all_fields:\n if keyword.lower() in field.lower(): # to make search case insensitive\n self.options.append(field)\n # Error dialog for invalid entry\n if len(self.options) == 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"No field found containing keyword!\")\n msg.setInformativeText(\"Enter valid attribute\")\n msg.setWindowTitle(\"Error\")\n msg.show()\n msg.exec_()\n else:\n self.populateList()",
"def set_search_params(self, **kwargs):\n self._search_params = kwargs",
"def genSearch(request):\n \n assert isinstance(request, HttpRequest)\n booklist=[]\n form = request.GET.copy();\n searchvalue =form['query']\n for k,v in get_valid_Books().items():\n if searchvalue.lower() in v.title.lower() or searchvalue.lower() in v.desc.lower() or searchvalue.lower() in v.a_id.name.lower():\n booklist.append(v)\n if booklist is None:\n clearfilter=\"False\"\n else:\n clearfilter=\"True\"\n\n return render(\n request,\n 'app/about.html',\n {\n 'title':'Books',\n 'books':booklist,\n 'clearfilter':clearfilter,\n 'year':datetime.now().year,\n }\n )",
"def __init__(self, args, parsers):\n self.parsers = parsers\n self.search_fields = args.search_field if args.search_field else []",
"def search_builder(searchForm):\n \n assert searchForm['listingType'] in ['Sale', 'Rent', 'Share', 'Sold', 'NewHomes'], \\\n 'listingType must be one of [Sale, Rent, Share, Sold, NewHomes]'\n \n # Build the search parameters with the locations\n locations = searchForm['locations']\n SearchParameters = []\n SearchQueue = queue.Queue()\n for suburb in locations.keys():\n searchForm['locations'] = [locations[suburb]]\n SearchParameters.append(searchForm.copy())\n SearchQueue.put(searchForm.copy())\n\n \n # The price range can be adjusted later, to reduce the number of listings returned (max 1000 per search)\n ''' \n The choice to make the price adjustments later is because, when there is a list of locations, \n the price ranges neccessary will depend on the number of locations included. If only one location \n is included in the search, this limits the number of ranges that will be required to search through.\n '''\n\n return SearchParameters, SearchQueue",
"def search(self, *args, **kwargs): # real signature unknown\n pass",
"def update_search_parameters(self, selected_gender, selected_category, selected_subcategory):\r\n self.model.set_gender(selected_gender)\r\n self.model.set_category(selected_category)\r\n self.model.set_subcategory(selected_subcategory)\r\n self.model.fetch_results()",
"def search():\n form = SearchForm()\n if form.validate_on_submit():\n return render_template('reports/search_results.html', reports=form.reports)\n else:\n flash_form_errors(form)\n return render_template('reports/search.html', form=form)",
"def do_search(self, *args, **kwargs):\n return [{}]",
"def search_venues_form():\n # seach for Hop should return \"The Musical Hop\".\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n return render_template(\n 'pages/search_venues.html'\n )"
] |
[
"0.66910803",
"0.66623443",
"0.65476847",
"0.6458294",
"0.6422288",
"0.6403961",
"0.6375824",
"0.6361148",
"0.6290763",
"0.62626666",
"0.6255805",
"0.6205426",
"0.6199787",
"0.6189706",
"0.61679465",
"0.61610675",
"0.61527574",
"0.6134219",
"0.6087507",
"0.6081296",
"0.6064715",
"0.6059424",
"0.60421634",
"0.60405153",
"0.60331666",
"0.6002625",
"0.5999529",
"0.59605616",
"0.5908458",
"0.5900737"
] |
0.79306954
|
0
|
Method called after the search results are shown. It obtains the links for all the listed entities and extracts the data behind each link. Finally, it updates the search_results variable with the scraped data.
|
def scrap_results(self):
# Find the table
table = self.driver.find_element_by_xpath(results_table_path)
found_links = []
# For each row that the table has
for row in table.find_elements_by_xpath(".//tr"):
elements = row.find_elements_by_xpath(".//td")
# If this row is not empty
if len(elements) != 0:
# Extract the link
entity_link = elements[0].find_element_by_xpath(".//a").get_attribute("href")
found_links.append(entity_link)
# Randomize the list of links so each time the order is different.
shuffle(found_links)
generic_data_found = []
activity_data_found = []
components_data_found = []
components_alt_data_found = []
historical_name_data_found = []
historical_social_capital_data_found = []
count = 0
# For each link found
for link in found_links:
# Scrape the data from this entity
gd, act, comp, hist_name, hist_c_s = self._scrap_single_entity(link)
# Update the found data variables with the new data
generic_data_found.append(gd)
activity_data_found += act
if len(comp) > 0 and "total_miembros_patronado" in comp[0]:
components_alt_data_found += comp
else:
components_data_found += comp
historical_name_data_found += hist_name
historical_social_capital_data_found += hist_c_s
# TODO: Remove this
if count == 2:
pass
count += 1
# Add the data to the centralized search_results variable
self.search_results.add_generic_data(generic_data_found)
self.search_results.add_activity_data(activity_data_found)
self.search_results.add_components_data(components_data_found)
self.search_results.add_components_alt_data(components_alt_data_found)
self.search_results.add_historical_names_data(historical_name_data_found)
self.search_results.add_historical_social_capital_data(historical_social_capital_data_found)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def navigate_search_results(self):\n driver = self.driver\n search_results_exhausted = False\n results_page = self.results_page\n delay = 60\n date = get_date_time()\n # css elements to view job pages\n list_element_tag = '/descendant::a[@class=\"job-title-link\"]['\n print_num_search_results(driver, self.keyword, self.location)\n # go to a specific results page number if one is specified\n go_to_specific_results_page(driver, delay, results_page)\n results_page = results_page if results_page > 1 else 1\n\n while not search_results_exhausted:\n for i in range(1,26): # 25 results per page\n # define the css selector for the blue 'View' button for job i\n job_selector = list_element_tag + str(i) + ']'\n if search_suggestion_box_is_present(driver, \n job_selector, i, results_page):\n continue\n # wait for the selector for the next job posting to load.\n # if on last results page, then throw exception as job_selector \n # will not be detected on the page\n if not link_is_present(driver, delay, \n job_selector, i, results_page):\n continue\n robust_wait_for_clickable_element(driver, delay, job_selector)\n extract_transform_load(driver,\n delay,\n job_selector,\n date,\n self.keyword,\n self.location,\n self.filename)\n # attempt to navigate to the next page of search results\n # if the link is not present, then the search results have been \n # exhausted\n try:\n next_results_page(driver, delay)\n print(\"\\n**************************************************\")\n print(\"\\n\\n\\nNavigating to results page {}\" \\\n \"\\n\\n\\n\".format(results_page + 1))\n except ValueError:\n search_results_exhausted = True\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearch results exhausted\\n\\n\\n\\n\\n\")\n else:\n results_page += 1",
"def get_search_results(self):\n sleep(10)\n try:\n addresses = self.driver.find_elements_by_class_name('details-title')\n for p in range(len(addresses)):\n address.append(addresses[p].text)\n prices = self.driver.find_elements_by_class_name('price-info')\n for p in range(len(prices)):\n price.append(prices[p].text)\n links = self.driver.find_element_by_tag_name('a.details-titleLink jsCardLinkGA')\n for p in range(len(links)):\n link.append(links[p].text)\n except NoSuchElementException:\n sleep(3)\n self.pop_up()",
"async def get_article_links(self):\n urls = []\n for page in range(self._start, self._end+1):\n urls.append(self._searchURL + str(page))\n result_list = await self._connect(urls)\n\n self._urls = []\n hares_links = []\n for result in result_list:\n soup = result[1]\n search_links = soup.find_all(class_='search-title')\n article_links = re.findall(r'url=(.*?)\\\"', str(search_links))\n for l in article_links:\n l = unquote(l)\n if 'hare48.pixnet.net' in l:\n hares_links.append(l)\n else:\n self._urls.append(l)\n self._urls.extend(await self._transform_hares(hares_links))",
"def processSearchResult(self):",
"def extractSearchResults(self, html):\n results = list()\n soup = BeautifulSoup(html, 'html.parser')\n div = soup.find('div', id='main')\n if (type(div) == types.NoneType):\n div = soup.find('div', id='center_col')\n if (type(div) == types.NoneType):\n div = soup.find('body')\n if (type(div) != types.NoneType):\n lis = div.findAll('a')\n if(len(lis) > 0):\n for link in lis:\n if (type(link) == types.NoneType):\n continue\n \n url = link['href']\n if url.find(\".google\") > 6:\n continue\n \n url = self.extractUrl(url)\n if(cmp(url, '') == 0):\n continue\n title = link.renderContents()\n title = re.sub(r'<.+?>', '', title)\n result = SearchResult()\n result.setURL(url)\n print '### URL: ' + url\n result.setTitle(title)\n span = link.find('div')\n if (type(span) != types.NoneType):\n content = span.renderContents()\n content = re.sub(r'<.+?>', '', content)\n result.setContent(content)\n results.append(result)\n return results",
"def crawl(self) -> None:\n result = self.__exec_request(self.url)\n if result == \"failed\":\n raise InterruptedError(\"The server responded with status code: {}\".format(self._status_code))\n self.__save_relevants_in_results(result, total=True)\n self.total_nums = self.results[\"total_results\"]\n pbar = tqdm(total=self.total_nums / 100) if self.to_be_num > self.total_nums else tqdm(total=self.to_be_num/100)\n pbar.update(1)\n if len(self.results[\"documents\"]) != self.to_be_num:\n while self.num_res < self.total_nums:\n # print(\"Is: {} | To be: {}\".format(self.num_res, self.total_nums))\n for el in result['search-results']['link']:\n if el['@ref'] == 'next':\n next_url = el['@href']\n result = self.__exec_request(next_url)\n if result == \"failed\":\n print(\"Invalid request. Server responded with Statuscode 400 while crawling. \"\n \"The found articles will be saved further on...\")\n break\n self.__save_relevants_in_results(result)\n pbar.update(1)\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n pbar.close()",
"def _handle_search_results(self, response: TextResponse) -> ScrapyYelpItem:\n\n # get yConfig\n pattern = re.compile(r\"\"\"\\n\\s+yConfig\\s+=\\s+\"\"\", re.MULTILINE | re.DOTALL)\n soup = BeautifulSoup(response.text, \"html.parser\")\n script = soup.find(\"script\", text=pattern)\n myjson = script.get_text()\n # remove start pattern (js assignment)\n s = re.sub(pattern, '', myjson)\n # remove html (parser problems)\n s = re.sub('<[^<]+?>', '', s)\n # remove last semi colon (end-of-data)\n s = s[0:s.rfind(';')]\n json_object = json.loads(s,strict=False)\n\n keys = [x for x in json_object[\"js_display\"][\"hovercard_data\"] if x.isnumeric()]\n # first part is the hovercard data - which contains most of the aggregate biz informative\n # such as total_reviews and summary_score\n df_hovercard_data = pd.DataFrame()\n for x in keys:\n tmpdf = json_normalize(json_object[\"js_display\"][\"hovercard_data\"][x])\n df_hovercard_data = df_hovercard_data.append(tmpdf,ignore_index=True)\n\n df_hovercard_data = df_hovercard_data.set_index(\"result_number\")\n df_hovercard_data.index = df_hovercard_data.index.astype(int)\n # second part is the resourceid which might be useful later on, not sure if this is used at all, but\n # it serves as a good example of how to join to other \"parts\" of the nested json structure and flatten it\n df_markers = json_normalize(json_object[\"js_display\"][\"map_state\"][\"markers\"])\n df_markers = df_markers[df_markers['resourceType'] == 'business'].loc[:, [\"url\",\"resourceId\",\"hovercardId\",\"label\",\"location.latitude\",\"location.longitude\",]]\n df_markers = df_markers.set_index('label')\n df_markers.index = df_markers.index.astype(int)\n\n # combine data into a single dataframe which will eventually be written out by our pipeline\n df = df_hovercard_data.join(df_markers)\n\n # at this point we want to also scrape the indvidual biz listing for the menu, syntax is verbose here\n\n\n ## deubg write to file\n #json_formatted = json.dumps(json_object, indent=2)\n # print(json_formatted)\n # with open(\"files/\"+'blah.json', 'wb') as file:\n # file.write(str.encode(json_formatted))\n\n \"\"\"\n\n Here is a smample of what the yConfig object looks like:\n\n json_object.keys() ====>\n ['cookies', 'gaConfig', 'adjustAndroidPaidTrafficUrl', 'webviewFlow', 'enabledSitRepChannels',\n isWebviewRequest', 'js_display', 'isLoggedIn', 'uaInfo', 'isSitRepEnabled', 'comscore', 'isBugsnagEnabled',\n 'support', 'deprecatedEncryptedYUV', 'vendorExternalURLs', 'smartBannerFallbackActive', 'version',\n 'recaptchaV3PublicKey', 'googlePlacesUrl', 'redesignActive', 'currentBaseLang', 'isClientErrorsEnabled',\n 'uniqueRequestId', 'yelpcodeTemplateVersion', 'appInstallDialogEnabled', 'smartBannerPersistent',\n 'imageUrls', 'siteUrl', 'referrer', 'webviewInfo', 'cookieDomain', 'recaptchaPublicKey',\n 'send_user_agent_to_ga', 'pGifUrl']\n\n\n json_object[\"js_display\"].keys() ===>\n ['polyglot_translations', 'raq_links', 'locale', 'hovercard_data', 'is_first_ad_hovercard_opened',\n 'zoom', 'centerLng', 'map_state', 'advertising_business_id_list', 'centerLat', 'pager']\n\n json_object[\"js_display\"][\"hovercard_data\"] ==>\n '1': {'resource_id': None,\n 'result_number': 1,\n 'biz': {'alias': 'lou-malnatis-pizzeria-chicago',\n 'review_count': 5998,\n 'name': \"Lou Malnati's Pizzeria\",\n 'rating': 4.07785928642881,\n 'url': 'https://m.yelp.com/biz/lou-malnatis-pizzeria-chicago',\n 'price': '$$',\n 'categories': 'Pizza, Italian, Sandwiches',\n 'distance': '2.5 mi'},\n 'lat': 41.890357,\n 
'lng': -87.633704,\n 'type': 'natural'},\n '2': {'resource_id': None,\n ....\n\n\n json_object[\"js_display\"][\"map_state\"][\"markers\"] ===>\n [{'resourceType': 'business',\n 'url': '/biz/lou-malnatis-pizzeria-chicago',\n 'resourceId': '8vFJH_paXsMocmEO_KAa3w',\n 'label': '1',\n 'shouldOpenInNewTab': False,\n 'location': {'latitude': 41.890357, 'longitude': -87.633704},\n 'key': 1,\n 'hovercardId': 'Q6nXAEw3UuAVFSztE4lPnA',\n 'icon': {'name': 'business',\n 'anchorOffset': [12, 32],\n 'activeOrigin': [24, 0],\n 'scaledSize': [48, 320],\n 'regularUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'size': [24, 32],\n 'activeUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'regularOrigin': [0, 0]}},\n {'resourceType': 'business',\n 'url': '/biz/pequods-pizzeria-chicago',\n 'resourceId': 'DXwSYgiXqIVNdO9dazel6w',\n 'label': '2',\n 'shouldOpenInNew\n ...\n\n \"\"\"\n #print(json_object[\"js_display\"][\"hovercard_data\"])\n\n\n\n return df",
"def link_scraping(final_links, driver):\n\n for final_link in final_links:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n print('Extracting(classname): ', final_link)\n scrape_data(final_link, final_tags, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)",
"def query_and_fetch(query, top_n=12):\n global url_details, url_text\n print('Query: ' + query + '; Top N: ' + str(top_n))\n url_details = []\n url_text = []\n driver = None\n bad_request = False\n try:\n driver = Fetcher.get_selenium_driver()\n driver.get('https://api.duckduckgo.com/?q=' + query + '&kl=wt-wt')\n except:\n print('An error occurred while searching query: ' + query)\n Fetcher.close_selenium_driver(driver)\n Fetcher.search_driver = None\n bad_request = True\n finally:\n try:\n if not bad_request:\n results = driver.find_elements_by_class_name('result__a')\n result_size = len(results)\n print('Result Size: ' + str(result_size))\n while result_size > 0 and len(url_details) < top_n:\n urls = []\n for element in results:\n new_url = element.get_attribute('href')\n # TODO: Filter URLs if required\n print(new_url)\n urls.append(new_url)\n\n fetched_result = Fetcher.fetch_multiple(urls, top_n)\n\n for fetched_data in fetched_result:\n if not fetched_data[1] or len(fetched_data[1].strip()) == 0:\n continue\n details = dict()\n details['url'] = fetched_data[0]\n details['html'] = fetched_data[1]\n details['title'] = fetched_data[2]\n details['label'] = predict(fetched_data[3])\n url_details.append(details)\n url_text.append(fetched_data[3])\n if len(url_details) == top_n:\n break\n\n # Infinite Scroll\n if len(url_details) < top_n:\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n results = driver.find_elements_by_class_name('result__a')\n results = results[result_size:]\n result_size = len(results)\n print('Moved to Next Page. Result Size: ' + str(result_size))\n except:\n print('An error occurred while searching query: '+ query + ' and fetching results')\n #finally:\n # if driver is not None:\n # Fetcher.close_selenium_driver(driver)\n setattr(flask.current_app, 'url_text', url_text)\n print('Search Completed')\n return url_details",
"def mor_prepare_data():\n prices, locations, areas, links = [], [], [], []\n for i in range(START_PAGE, SEARCHING_DEPTH+1):\n handler = requests.get(main_url, params={\"page\": str(i)})\n soup = bs4.BeautifulSoup(handler.text, 'lxml')\n heads = soup.find_all(\"header\")\n once = True\n for head in heads:\n if head.find(\"meta\", {\"itemprop\": \"category\"}) and once:\n\n raw_price = head.find(\"meta\", {\"itemprop\": \"price\"})\n price = int(float(raw_price[\"content\"]) if raw_price else \"\")\n\n raw_loc_list = head.find(\"h2\",\n {\"class\": \"single-result__title\"}).getText().strip().split(\n \", \")\n found = False\n for loc in raw_loc_list:\n if location_mapper[CITY].get(loc.lower(), 0):\n location = location_mapper[CITY][loc.lower()]\n\n found = True\n break\n if not found:\n location = \"\"\n if DEBUG_MODE:\n print(raw_loc_list)\n\n raw_area = head.find(\"p\", {\n \"class\": \"single-result__price single-result__price--currency\"}).getText().strip().split()\n if price and location:\n square_price = raw_area[0] if len(raw_area) == 2 else \"\".join(\n (raw_area[0], raw_area[1]))\n\n area = int(price / float(square_price.replace(\",\", \".\")))\n link_url = head.find('a')['href']\n\n if location and area and link_url:\n prices.append(price) if price < PRICE_UPPER_LIMIT else prices.append(\n PRICE_UPPER_LIMIT)\n locations.append(location)\n areas.append(area) if area < AREA_UPPER_LIMIT else areas.append(\n AREA_UPPER_LIMIT)\n links.append(link_url)\n\n return prices, locations, areas, links",
"def do_search(self):\n # Call the website\n self.driver.get(self.BASE_URL)\n\n # Request the proper historical data\n self.select_proper_version()\n self.save_image()\n\n # If the entity exists in this historical version, extract the data\n if self.select_proper_region() is True:\n # Do the search\n self.fill_search_parameters()\n # Scrap the results page\n self.scrap_results()\n # Export the data to .csv\n self.search_results.export()",
"def __update_page_results(self):\n \n pages = []\n\n # Request id for pages associated to search term \n page_fields='page&fields=id,name,username,link'\n term = self.track[self.track_index]\n self.track_index += 1\n \n # Define url for http request to get pages id associated to search term \n page_request_url = 'https://graph.facebook.com/search?q=%s&type=%s&limit=%d&access_token=%s'%(term,page_fields,self.page_lim,self.access_token)\n \n while(True):\n # Try 100 times\n for i in range(100):\n \n page_response = requests.get(page_request_url)\n \n if 'error' in page_response.json() or page_response.status_code <> 200:\n print \"\\n !---- ERROR IN SEARCH REQUEST ----!\"\n print time.ctime()\n print \"Status Code: \", page_response.status_code\n print page_response.json()\n #raise StopIteration()\n time.sleep(1800) # Wait 30 minutes\n else:\n break\n \n page_json = page_response.json()\n pages = pages + page_json['data']\n time.sleep(5)\n \n if 'next' in page_json['paging']:\n page_request_url = page_json['paging']['next']\n else:\n break\n \n print \"Term: %s, Pages: %d\"%(term, len(pages))\n return pages",
"def grab_mApe_results (searchType) :\n\n mape_main_url = 'https://www.mightyape.co.nz/'\n #Defining the url paths for search types\n mape_mv_category_url = 'movies-tv/movies?q='\n mape_mv_format_search_url = 'movieformat~blu-ray'\n\n #This is the final url string\n searchUrl = ''\n\n #Checking search type\n if searchType is SEARCH_BD_MV_TYPE :\n searchUrl = mape_main_url+mape_mv_category_url+mape_mv_format_search_url\n elif searchType is 'Title' :\n searchUrl = 'https://www.mightyape.co.nz/movies-tv/movies/all?sort=2&q=movieformat~blu-ray'\n\n\n #Using a dictionary to store data, as contains list with objects\n mape_list = {}\n\n page = requests.get(searchUrl)\n tree = html.fromstring(page.content)\n\n data = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]/div[@class=\"title\"]/a') #<--- WORKS\n\n data_alt = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]')\n\n print('Getting results from url:',searchUrl)\n print('Number of objects=',len(data_alt))\n count = 1\n\n for item in data_alt :\n simple_item = item.xpath('div[@class=\"title\"]/a')\n title = simple_item[0].text\n link = simple_item[0].get('href')\n format = item.xpath('div[@class=\"format\"]/text()')\n rating = item.xpath('div[@class=\"customer-rating\"]/span/span[@class=\"average\"]/text()')\n base_price = item.xpath('div[@class=\"price\"]/s/text()')\n hot_price = item.xpath('div[@class=\"price\"]/span[@class=\"price hot\"]/text()')\n normal_price = item.xpath('div[@class=\"price\"]/span[@class=\"price\"]/text()')\n if len(rating) > 0 :\n #temp_mv = Movie_object(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n print(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n #mape_list[title] = temp_mv\n else :\n print(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n #temp_mv = Movie_object(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n #mape_list[title] = temp_mv\n\n\n count += 1\n\n return mape_list",
"def _scrape(self):",
"def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())",
"def parseSearchHtml(self):\n pass",
"def parseSearchHtml(self):\n pass",
"def get_search_results(text, out_file=None, num_res=3):\n # specify the source website\n text += ' site:tableau.com'\n text = urllib.parse.quote_plus(text)\n\n url = 'https://google.com/search?q=' + text\n USER_AGENT = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\n \n # TODO: add delay here?\n response = requests.get(url,headers=USER_AGENT)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n result_block = soup.find_all('div', attrs={'class': 'g'})\n\n final_result = []\n for rb_ind in range(len(result_block)):\n if len(final_result)==num_res:\n # done sraping\n break\n \n rb = result_block[rb_ind]\n # print(rb_ind)\n if rb.find('h3'):\n title = rb.find('h3').text\n link = rb.find('a', href=True)['href']\n\n desc = rb.find(class_='IsZvec').text\n \n if not desc:\n # print(rb_ind)\n # print(\"got here\")\n desc = rb.find(class_='ILfuVd')\n if desc:\n desc = desc.text\n else:\n desc = ''\n final_result.append([title,link,desc])\n print('\\n'.join([title,link,desc]))\n\n if out_file is not None:\n with open(out_file,\"a+\",encoding='utf8') as f:\n f.writelines([r + '\\n' for r in final_result])\n \n return final_result",
"async def crawl(self):\n fetch_urls = [self.start_url]\n results = []\n while len(fetch_urls):\n \"\"\"\n slicing array urls with max_async_call arg and then run extract_data_urls\n extract_data_urls return a object that contains url, data, found_urls, and all_urls\n url is a url that we crawled\n data is Html content of the url\n found_urls are new urls that we have to crawl that\n all_urls are all links in the html page\n \"\"\"\n urls = await self.extract_data_urls(fetch_urls[0:self.max_async_call])\n del fetch_urls[0:self.max_async_call]\n for url, data, found_urls, all_urls in urls:\n fetch_urls.extend(found_urls)\n result = self.parse_html_content(data)\n result['urls'] = all_urls\n results.append((url, result))\n return results",
"def cache_results(self):\n self.cache_manager.cache_results(\n self.parser,\n self.query,\n self.search_engine_name,\n self.scrape_method,\n self.page_number,\n db_lock=self.db_lock\n )",
"def __init__(self, lookup_result, scraper):\n self.scraper = scraper\n self.title = \"\"\n self.id = None\n self.links = []\n\n self.title = get_child_data(lookup_result, \"title\", \"\")\n self.id = get_child_data(lookup_result, \"id\", None)\n\n link = first_child(lookup_result, \"url\")\n while link:\n self.links.append(ScrapeURL(link, cache = scraper.cache))\n link = next_sibling(link, \"url\")\n return",
"def parse_listing(keyword,place):\n\turl = \"https://www.yellowpages.com/search?search_terms={0}&geo_location_terms={1}\".format(keyword,place)\n\tprint(\"retrieving \",url)\n\n\theaders = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n\t\t\t\t'Accept-Encoding':'gzip, deflate, br',\n\t\t\t\t'Accept-Language':'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n\t\t\t\t'Cache-Control':'max-age=0',\n\t\t\t\t'Connection':'keep-alive',\n\t\t\t\t'Host':'www.yellowpages.com',\n\t\t\t\t'Upgrade-Insecure-Requests':'1',\n\t\t\t\t'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n\t\t\t}\n\t# Adding retries\n\tfor retry in range(10):\n\t\ttry:\n\t\t\tresponse = requests.get(url,verify=False, headers = headers )\n\t\t\tprint(\"parsing page\")\n\t\t\tif response.status_code==200:\n\t\t\t\tparser = html.fromstring(response.text)\n\t\t\t\t#making links absolute\n\t\t\t\tbase_url = \"https://www.yellowpages.com\"\n\t\t\t\tparser.make_links_absolute(base_url)\n\n\t\t\t\tXPATH_LISTINGS = \"//div[@class='search-results organic']//div[@class='v-card']\"\n\t\t\t\tlistings = parser.xpath(XPATH_LISTINGS)\n\t\t\t\tscraped_results = []\n\n\t\t\t\tfor results in listings:\n\t\t\t\t\tXPATH_BUSINESS_NAME = \".//a[@class='business-name']//text()\"\n\n\t\t\t\t\tXPATH_WEBSITE = \".//div[@class='info']//div[contains(@class,'info-section')]//div[@class='links']//a[contains(@class,'website')]/@href\"\n\n\t\t\t\t\traw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n\n\t\t\t\t\traw_website = results.xpath(XPATH_WEBSITE)\n\n\n\t\t\t\t\tbusiness_name = ''.join(raw_business_name).strip() if raw_business_name else None\n\n\t\t\t\t\twebsite = ''.join(raw_website).strip() if raw_website else None\n\n\n\n\n\n\t\t\t\t\tbusiness_details = {\n\t\t\t\t\t\t\t\t\t\t'business_name':business_name,\n\n\t\t\t\t\t\t\t\t\t\t'website':website\n\n\t\t\t\t\t}\n\t\t\t\t\tscraped_results.append(business_details)\n\t\t\t\t\tprint(scraped_results)\n\t\t\t\treturn scraped_results\n\n\t\t\telif response.status_code==404:\n\t\t\t\tprint(\"Could not find a location matching\",place)\n\t\t\t\t#no need to retry for non existing page\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Failed to process page\")\n\t\t\t\treturn []\n\n\t\texcept:\n\t\t\tprint(\"Failed to process page\")\n\t\t\treturn []",
"def scrape(self):\n pass",
"def _gather_deep_data(self):\n\n cleaned_data_from_website = list()\n\n for i, search_result in self.data_from_website.iterrows():\n cleaned_data_from_website.append(self._deep_data(search_result.url))\n\n cleaned_data_from_website = pd.DataFrame(cleaned_data_from_website)\n if len(cleaned_data_from_website) == 0:\n cleaned_data_from_website['@id'] = '0'\n cleaned_data_from_website.set_index('@id', inplace=True)\n self.data_from_website = cleaned_data_from_website",
"def get_final_result(self, spider):\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n if self.screenshots_ids:\n self.result['__screenshots_ids__'] = self.screenshots_ids\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id, '{renavam}-data_collected.json'.format(\n renavam=self.renavam))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url, callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)",
"def process_search_pages(self):\r\n features_list = []\r\n for page in self.url_list:\r\n listings = extract_listings(page)\r\n for listing in listings:\r\n features = extract_listing_features(listing, RULES_SEARCH_PAGE)\r\n features['sp_url'] = page\r\n features_list.append(features)\r\n\r\n self.base_features_list = features_list",
"def _scrape_product_links(self, response):\n lis = response.xpath(\n \"//div[@id='resultsCol']/./ul/li |\"\n \"//div[@id='mainResults']/.//ul/li [contains(@id, 'result')] |\"\n \"//div[@id='atfResults']/.//ul/li[contains(@id, 'result')] |\"\n \"//div[@id='mainResults']/.//div[contains(@id, 'result')] |\"\n \"//div[@id='btfResults']//ul/li[contains(@id, 'result')]\")\n links = []\n last_idx = -1\n\n for li in lis:\n is_prime = li.xpath(\n \"*/descendant::i[contains(concat(' ', @class, ' '),\"\n \"' a-icon-prime ')] |\"\n \".//span[contains(@class, 'sprPrime')]\"\n )\n is_prime_pantry = li.xpath(\n \"*/descendant::i[contains(concat(' ',@class,' '),'\"\n \"a-icon-prime-pantry ')]\"\n )\n data_asin = self._is_empty(\n li.xpath('@id').extract()\n )\n\n is_sponsored = bool(li.xpath('.//h5[contains(text(), \"ponsored\")]').extract())\n\n try:\n idx = int(self._is_empty(\n re.findall(r'\\d+', data_asin)\n ))\n except ValueError:\n continue\n\n if idx > last_idx:\n link = self._is_empty(\n li.xpath(\n \".//a[contains(@class,'s-access-detail-page')]/@href |\"\n \".//h3[@class='newaps']/a/@href\"\n ).extract()\n )\n if not link:\n continue\n\n if 'slredirect' in link:\n link = 'http://' + self.allowed_domains[0] + '/' + link\n\n links.append((link, is_prime, is_prime_pantry, is_sponsored))\n else:\n break\n\n last_idx = idx\n\n if not links:\n self.log(\"Found no product links.\", WARNING)\n\n if links:\n for link, is_prime, is_prime_pantry, is_sponsored in links:\n prime = None\n if is_prime:\n prime = 'Prime'\n if is_prime_pantry:\n prime = 'PrimePantry'\n prod = SiteProductItem(prime=prime, is_sponsored_product=is_sponsored)\n yield Request(link, callback=self.parse_product,\n headers={'Referer': None},\n meta={'product': prod}), prod",
"def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)",
"def deep_link_scraping(final_links, driver):\n\n import re\n second_links = [] \n for website2 in final_links:\n links2 = extract_all_links(website2, driver)\n final_links1 = find_usefull_links(links2, classmodel, class_count_vect)\n final_links2 = list(set(final_links1) - set(final_links))\n second_links += final_links2\n\n \n second_links = list(dict.fromkeys(second_links))\n second_links1 = find_usefull_links(second_links, classmodel, class_count_vect)\n second_links2 = []\n for link in second_links1:\n if re.search('#', link):\n x = re.search('#', link)\n link = link[:int(x.span()[0])]\n second_links2.append(link)\n else:\n second_links2.append(link)\n\n second_links2 = list(dict.fromkeys(second_links2))\n for final_link in second_links2:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n scrape_data(final_link, final_tags, driver)\n else:\n scrape_data_tag(final_link, driver)\n else:\n scrape_data_tag(final_link, driver)\n return second_links2",
"def _fetch_items(self):\n url = self._api.router.publication['search'].format(\n project_id=self.project_id\n )\n res_data = self._api.post(url, data=self.search_param)\n self.total = res_data['total']\n self._items = (\n Publication(item, self.project_id)\n for item in res_data['hits']\n )\n div = self.total // self.search_param['limit']\n reste = self.total % self.search_param['limit']\n self.total_page = div\n if reste != 0: self.total_page += 1\n self.search_param = self.search_param.next_page()"
] |
[
"0.661581",
"0.65875775",
"0.6578253",
"0.65035903",
"0.64849716",
"0.6185542",
"0.611211",
"0.6109967",
"0.6050844",
"0.60115045",
"0.598589",
"0.595911",
"0.592672",
"0.59238786",
"0.59154433",
"0.5891052",
"0.5891052",
"0.5889387",
"0.5857786",
"0.5853461",
"0.5789102",
"0.5785204",
"0.5771823",
"0.5751375",
"0.57253003",
"0.57081044",
"0.5695546",
"0.5680947",
"0.5676633",
"0.5668928"
] |
0.7701433
|
0
|
Method that scrapes a single entity. It updates the browser with the URL of the desired entity, clicks the button to show all the entity data at once, and leaves everything prepared for EntityScraper to obtain the desired information.
|
def _scrap_single_entity(self, link):
# Click to show the info of a specific entity
self.driver.get(link)
# Show all data from this entity
all_data_link = self.driver.find_element_by_xpath(show_all_data)
all_data_link.click()
entity = EntityScraper(self.driver, self.version, self.region)
return entity.generic_data, entity.activity_data, entity.component_data, \
entity.historical_name_data, entity.historical_social_capital_data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def on_clicked_update(self):\n process = crawler.CrawlerProcess(\n {\n \"USER_AGENT\": \"currency scraper\",\n \"SCRAPY_SETTINGS_MODULE\": \"currency_scraper.currency_scraper.settings\",\n \"ITEM_PIPELINES\": {\n \"currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline\": 300,\n }\n }\n )\n process.crawl(InvestorSpider)\n try:\n process.start()\n gui_warnings.update_notification()\n except error.ReactorNotRestartable:\n gui_warnings.warning_already_updated()",
"def do_search(self):\n # Call the website\n self.driver.get(self.BASE_URL)\n\n # Request the proper historical data\n self.select_proper_version()\n self.save_image()\n\n # If the entity exists in this historical version, extract the data\n if self.select_proper_region() is True:\n # Do the search\n self.fill_search_parameters()\n # Scrap the results page\n self.scrap_results()\n # Export the data to .csv\n self.search_results.export()",
"def go(self, url):\n self.driver.get(url)",
"def get_page(self):\n self.browser.get(self.url)",
"def fetchContent(self):\n print 'fetching page by its path: '+ self.path\n uri = '%s?path=%s' % (self.client.MakeContentFeedUri(), self.path)\n # get the content feed\n feed = self.client.GetContentFeed(uri=uri)\n # take out the content\n self.entry = feed.get_webpages()[0]",
"def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)",
"def scrap_results(self):\n # Find the table\n table = self.driver.find_element_by_xpath(results_table_path)\n\n found_links = []\n # For each row the table hase\n for row in table.find_elements_by_xpath(\".//tr\"):\n elements = row.find_elements_by_xpath(\".//td\")\n # If this row is not empty\n if len(elements) != 0:\n # Extract the link\n entity_link = elements[0].find_element_by_xpath(\".//a\").get_attribute(\"href\")\n found_links.append(entity_link)\n\n # Randomize the list of links so each time the order is different.\n shuffle(found_links)\n\n generic_data_found = []\n activity_data_found = []\n components_data_found = []\n components_alt_data_found = []\n historical_name_data_found = []\n historical_social_capital_data_found = []\n count = 0\n # For each link found\n for link in found_links:\n # Scrap the data from this entity\n gd, act, comp, hist_name, hist_c_s = self._scrap_single_entity(link)\n\n # Update the found data variables with the new data\n generic_data_found.append(gd)\n activity_data_found += act\n if len(comp) > 0 and \"total_miembros_patronado\" in comp[0]:\n components_alt_data_found += comp\n else:\n components_data_found += comp\n historical_name_data_found += hist_name\n historical_social_capital_data_found += hist_c_s\n\n # TODO: Remove this\n if count == 2:\n pass\n\n\n count += 1\n\n # Add data to the centralized search_result variable\n self.search_results.add_generic_data(generic_data_found)\n self.search_results.add_activity_data(activity_data_found)\n self.search_results.add_components_data(components_data_found)\n self.search_results.add_components_alt_data(components_alt_data_found)\n self.search_results.add_historical_names_data(historical_name_data_found)\n self.search_results.add_historical_social_capital_data(historical_social_capital_data_found)",
"def get_info(self):\r\n\r\n self.driver.get(WEBSITE)\r\n time.sleep(3)\r\n self.driver.find_element_by_xpath(\"\"\"//*[@id=\"modalContent\"]/div/button/i\"\"\").click()\r\n time.sleep(3)\r\n #gets prices and appends to list\r\n all_prices = self.driver.find_elements_by_class_name(\"firstPrice\")\r\n for price in all_prices:\r\n text = price.text\r\n new_p = text.replace(\".\", \"\")\r\n price_int = int(new_p.split(\" \")[1])\r\n self.price_list.append(price_int)\r\n #gets addresses\r\n all_addresses = self.driver.find_elements_by_class_name(\"postingCardLocationTitle\")\r\n for address in all_addresses:\r\n self.address_list.append(address.text)\r\n print(self.address_list)\r\n # gets info\r\n ad_info = self.driver.find_elements_by_css_selector(\"a.go-to-posting\")\r\n for info in ad_info:\r\n links = info.get_attribute('href') #gets href link inside the css\r\n self.all_links.append(links)\r\n self.all_info.append(info.text)\r\n\r\n # Just for tests\r\n print(self.price_list)\r\n print(self.all_info)\r\n print(self.all_links)",
"def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()",
"def old_get_details(self, entity):\n self.logger.debug(\"get_details: entered\")\n\n # For every <url> tag that this entity has, we fetch the details it\n # provides.\n #\n link = first_child(entity, \"url\")\n i = 0\n while link:\n i += 1\n src_url = ScrapeURL(link, cache = self.cache)\n url_data = src_url.get()\n\n # If we get back an object with an iterator then we loop over the\n # elements in our src data, putting successive one in successive\n # buffers.\n #\n if hasattr(url_data, '__iter__'):\n for j,data in enumerate(url_data):\n self.parser.set_buffer(i+j, data)\n i += j\n else:\n self.parser.set_buffer(i, url_data)\n # XXX for debugging purposes again we write out the details\n # we get in uniquely named files that correspond to the\n # param buffer we use for the url data.\n #\n with open(\"details.%d.html\" % i, \"w\") as f:\n f.write(url_data)\n\n\n link = next_sibling(link, \"url\")\n\n # Now we get the url based id used to identify this entity, if we\n # have one. This is passed in to the parser as the next free\n # parameter buffer.\n #\n # XXX NOTE: the xml scraper seems to always expect the id in\n # buffer 2 (and then details html in buffer 1.)\n #\n entity_id = first_child(entity, \"id\")\n if entity_id is not None:\n entity_id = entity_id.firstChild.data\n self.parser.set_buffer(i+1, entity_id)\n self.logger.debug(\"get_details: buffer: %d entity id: %s\" % \\\n (i+1,entity_id))\n\n details = self.parser.parse(FN_GET_DETAILS, self.settings)\n\n # XXX I think we only need this file for debugging. Eventually\n # we will just remove this output statement.\n #\n with open(\"details.%s.xml\" % entity_id, \"w\") as f:\n f.write(details)\n\n self.logger.debug(\"get_details: leaving\")\n return details",
"def request_html_page(self):\n try:\n response = requests.get('http://www.indeed.com/jobs?', params=self.payload)\n except:\n print \"got error for \", self.payload\n self.page = response.content",
"def do_get(self, url):\n self.driver.get(url)",
"def scrape(self):\n pass",
"def fetch(self):\n self.genre = \"Review\"\n try:\n if not self.__setSoup():\n log.info(self.log_msg(\"Soup not set,returning false\"))\n return False\n #if not self._getParentPage():\n # log.info(self.log_msg(\"Parent page not found\"))\n while True:\n parent_page_soup = copy.copy(self.soup)\n # log.info(self.log_msg('current uri%s'%parent_page_soup))\n if not self.__addReviews():\n log.info(self.log_msg('fetched all reviews for the url %s'\\\n %self.task.instance_data['uri']))\n \n log.info(self.log_msg('Next page%s'%self.currenturi))\n try:\n \n # self.currenturi = self.task.instance_data['uri'].rsplit\\\n # ('/', 1)[0] + '/' + self.soup.find('a', \\\n # title='Go to the next page')['href']\n self.currenturi = 'http://www.phonedog.com' + parent_page_soup.find('a',title='Go to the next page')['href']\n \n if not self.__setSoup():\n log.info(self.log_msg('soup not set for the uri %s'%\\\n self.currenturi))\n break\n except:\n log.info(self.log_msg('Next page not found for the uri %s'%\\\n self.currenturi))\n break\n return True\n except:\n log.exception(self.log_msg(\"Exception in fetch\"))\n return False",
"def Reload(self):\n self._inspector_backend.Navigate(self.url, None, 10)",
"def step():\n \n step = models.Step(action=u\"goto\", target=u\"http://www.joesfunerals.com\")",
"def fetch(self):\n try:\n self.genre = 'Review'\n log.debug(self.log_msg(\"Fetching the prouct page url %s\"%self.currenturi))\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n self.parent_page_title = stripHtml(self.soup.find('h1',{'id':'pgTitleDetail'}).renderContents())\n except:\n self.parent_page_title =''\n try:\n self.__product_price = self.soup.find('tbody',{'class':'prices'}).td.renderContents().replace('$','')\n except:\n log.exception(\"Error in fetching product_price\")\n self.__product_price = None\n\n parent_page_url = self.task.instance_data['uri']\n review_first_page_url = self.soup.find('a',text=\"Show All Customer Reviews » \").parent['href']\n review_url_order = \"&sortReviewsBy=DateDescending\"\n self.currenturi = self.base_url + review_first_page_url + review_url_order\n log.info(self.log_msg('current_uri :: %s'%(self.currenturi)))\n self._getParentPage()\n self.next_url_links=[]\n self.fetch_next_link = True\n while self.fetch_next_link:\n self._iterateReviewPages(parent_page_url)\n return True\n except Exception,e:\n log.exception(self.log_msg(\"Exception occured in fetch()\"))\n return False",
"def start(self):\n self.get(self.url)",
"def _scrape(self):",
"def step_impl(context):\r\n context.browser.get('https://opensource-demo.orangehrmlive.com/')\r\n time.sleep(10)",
"def show(self):\n webopen(str(self))",
"def request_page(self, url, action=None):\n if url.startswith(self.url):\n self.driver.get(url)\n else:\n self.driver.get(self.url + url)\n self.default_wait.until(EC.invisibility_of_element_located((By.XPATH, \"//div[@class='loading-bar']\")))\n if action:\n action(self.driver)\n return self.driver.page_source",
"def submit(self):\n url = self.__moss.send()\n\n self.home_url = url\n self.moss_results = self.__extract_info()",
"def fetch(self):\n try:\n self.__genre = 'review'\n self.__baseuri = 'http://ehealthforum.com'\n self.__setSoupForCurrentUri()\n \n if not re.search('\\d+\\.html$', self.currenturi):\n return self.__createTasksForThreads()\n else:\n return self.__addThreadAndPosts()\n except:\n log.exception(self.log_msg('Exception in fetch for the url %s'%self.currenturi))\n return False",
"async def goto(self, url: str, *args: Any, **kwargs: Any) -> Any:",
"def fetch_page(classnum=0): # ! This will generate web requests\r\n r = requests.post('http://deshalit.iscool.co.il/default.aspx',\r\n data={'__EVENTTARGET': 'dnn$ctr11396$TimeTableView$btnChangesTable',\r\n '__VIEWSTATE': post_viewstate,\r\n 'dnn$ctr11396$TimeTableView$ClassesList': str(classnum)})\r\n return r.text",
"def open(self):\n self.driver.get('{}/submit'.format(self.config.get('Test', 'url')))\n return self",
"async def fetch_page(self, url: str) -> PageRaw:\n\n raise NotImplementedError()",
"def get(self, what):\n\n\t\t# ChromeDriver is 2.38 available at https://chromedriver.storage.googleapis.com/2.38/chromedriver_mac64.zip\n\t\tself.DRIVER = webdriver.Chrome('webdriver/chromedriver')\n\n\t\tself.DRIVER.get(Billboard.URL[what])\n\n\t\tWebDriverWait(self.DRIVER, 20).until(EC.element_to_be_clickable((By.CLASS_NAME, \"copyright__paragraph\")))\n\n\t\tfor _ in self.DRIVER.find_elements_by_class_name('chart-row__artist'):\n\t\t\tself.ARTISTS.add(_.text.lower().strip())\n\n\t\tself.DRIVER.close()\n\n\t\treturn self",
"def go_to(self):\r\n return UnitPage(self.browser, self.locator).visit()"
] |
[
"0.5507844",
"0.5499927",
"0.54691416",
"0.5409359",
"0.5368381",
"0.53061575",
"0.53056824",
"0.5304008",
"0.53017473",
"0.5299134",
"0.5276429",
"0.5218978",
"0.518513",
"0.51674116",
"0.5150308",
"0.514106",
"0.5113388",
"0.50975657",
"0.5088687",
"0.50573105",
"0.50335795",
"0.5024065",
"0.49863338",
"0.49602082",
"0.4955338",
"0.4954233",
"0.49541003",
"0.4924622",
"0.49194625",
"0.48939714"
] |
0.6079428
|
0
|
Return the next word from user_word and computer_word.
|
def get_next_word(self, user_word, computer_word):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_next_word(model: Dict[str, Set[str]], word: str) -> str:\n possible_words = [pos_word for pos_word in model[word]]\n return random.choice(possible_words)",
"def __findNextWord(self):\n self.activeWindow().searchCurrentWordForward()",
"def gen_user_word_2(word, user_word, user_char, sep=\"_\"):\n moving_index = 0\n while True:\n try:\n char_index = word.index(user_char, moving_index)\n user_word[char_index] = user_char\n moving_index = char_index + 1\n print(moving_index)\n except ValueError:\n break\n return user_word",
"def get_word():\n valid_word = False\n while (not valid_word):\n index = random.randint(0,len(words)-1)\n word = words[index]\n if (word[0]):\n valid_word = True\n return word",
"def findNextWordForSpellcheck(text, startPos, wikiPage):\r\n return (None, None, None)",
"def next_word(word, string, start=0, sensitive=True):\n if start in range(0, len(string)):\n ls = SE.full_words(word, string[start:], sensitive)\n if ls:\n return [ls[0][0] + start, ls[0][1] + start]\n return []",
"def getWordKey(word):\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return len(word), word\n # END_YOUR_ANSWER",
"def choose_word():\n pass",
"def next_word(self):\n self.append = self.add_new_word",
"def valid_word(self, word):\n\n word = ''.join(word)\n return next((w for w in self.words if w == word), None)",
"def next_word(self, start):\n end_idx = self.idx + len(start)\n if self.match(start):\n return self.line[end_idx] if end_idx < len(self.line) else END",
"def next_word(self) -> None:\n if not self.buffer:\n return\n\n if self.index == self.buffer.end:\n return\n\n while True:\n character = self.buffer[self.index]\n if character in WHITESPACE:\n break\n self.index += 1\n\n if self.index == self.buffer.end:\n return\n\n while True:\n character = self.buffer[self.index]\n if not character in WHITESPACE:\n break\n self.index += 1\n\n if self.index == self.buffer.end:\n return",
"def get_next_word(self, peek=False, accept_eol=False):\n if self.idx == len(self.words):\n if accept_eol:\n return None\n else:\n raise ParseError('Unexpected end of file')\n\n if self.length == 0:\n if accept_eol:\n return None\n else:\n raise ParseError('Incorrect instruction length')\n\n word = self.words[self.idx]\n\n if not peek:\n self.idx += 1\n self.length -= 1\n\n return word",
"def get_word(wordlist, args): #{{{\n iters = 0\n while iters < 500:\n if args.lowercase == True:\n word = random.choice(wordlist).strip().lower()\n return word\n elif args.lowercase == False:\n word = random.choice(wordlist).strip().lower().capitalize()\n return word\n\n if args.punctuation == False:\n if len(word) < args.max_length and word.isalpha() == True:\n return word\n iters += 1\n elif args.punctuation == True:\n if len(word) < args.max_length:\n return word\n iters += 1 #}}}",
"def getWord(self,):\n\t\treturn self.word;",
"def __next__(self) -> HiddenWord:\n return HiddenWord(self._board, next(self._words))",
"def getSpecWord(words):\n logger.info(\"getSpecWord started\")\n index = random.randrange(2) # randomly select 0 or 1\n logger.info(\"getSpecWord ended\")\n return index, words[index]",
"def word(word_time):\n return word_time[0]",
"def check_word(self, word):\n\n if not self.words:\n return None\n word = ''.join(word)\n return next((True for w in self.words if w == word), False)",
"def next_word_proba(self, word, seq):\n context = tuple(seq[-2:]) # last two words\n return self.probas[context].get(word, 0.0)",
"def next_word(self, key_id):\n for i in range(len(key_id)):\n try:\n if len(self.grams[len(key_id)][key_id]) >= self.min_length:\n return random.choice(self.grams[len(key_id)][key_id])\n except KeyError:\n pass\n # if the length requirement isn't met, we shrink the key_id\n if len(key_id) > 1:\n key_id = key_id[1:]\n # when we're down to only a one-word sequence,\n #ignore the requirement\n try:\n return random.choice(self.grams[len(key_id)][key_id])\n except KeyError:\n # key does not exist: should only happen when user enters\n # a sequence whose last word was not in the corpus\n # choose next word at random\n return random.choice(' '.join(self.corpus).split())",
"def return_word():\n wordlist = load_words()\n word = random.choice(wordlist)\n return word",
"def gen_user_word(word, user_word, user_char, sep=\"_\"):\n indexes = [i for i, letter in enumerate(word) if letter == user_char]\n for i in indexes:\n user_word[i] = user_char\n return user_word",
"def word_midpoint(user_word):\n\t\n\tword_length = len(user_word) \t\t# Length of word.\n\tmid_point = word_length / 2 \t\t# Mid-Point of word.\n\tmid_letter = user_word[int(mid_point)]\t# Middle letter that will track.\n\t\n\treturn word_length, mid_letter",
"def get_word(self):\r\n\r\n # Get a unique word anyway\r\n if not self.word_count or random.random() > self.new_word_chance:\r\n self.word_count += 1\r\n return self.create_word() \r\n else:\r\n word_choice = random.randrange(0, self.word_count)\r\n try:\r\n return self.words[word_choice]\r\n except IndexError:\r\n return self.create_word()",
"def get_word(self) -> str: \n #return str(choice(word_list).upper())\n return \"ANONYMOUS\"",
"def get_next_spell(next_spell, spell_itr):\n current_spell = next_spell\n try:\n next_spell = next(spell_itr)\n except StopIteration:\n next_spell = None\n return current_spell, next_spell",
"def find_next_word(first, ngram_freq, random=False):\n # get only ngrams whose first word matches first\n filtered_ngrams = filter(lambda (ngram, v): ngram.split()[0] == first,\n ngram_freq)\n if len(filtered_ngrams) == 0:\n return \"\"\n\n # if we use randomness, we use the ngram's score\n # as part of a multinomial distribution so that we\n # are more likely to sample from ngrams with higher score\n if random:\n probs = [score for (word,score) in filtered_ngrams]\n sum_score = float(sum(probs))\n probs = [score / sum_score for score in probs]\n result = np.random.multinomial(n=1, pvals=probs, size=1)\n index = result[0].tolist().index(1)\n best_ngram = filtered_ngrams[index][0]\n return best_ngram.split()[1]\n\n # return second word of ngram\n best_ngram = filtered_ngrams[0][0]\n next = best_ngram.split()[1]\n\n # to avoid infinite recursion, make sure the next word\n # is different from the current word\n if next != first or len(filtered_ngrams) == 1:\n return next\n best_ngram = filtered_ngrams[1][0]\n return best_ngram.split()[1]",
"def get_word():\n word=words_dict[randrange(0,len(words_dict))]\n return word",
"def get_word():\n word=words_dict[randrange(0,len(words_dict))]\n return word"
] |
[
"0.64305073",
"0.64022636",
"0.6212819",
"0.61267036",
"0.61255014",
"0.60444844",
"0.59781766",
"0.59431016",
"0.5898044",
"0.5867311",
"0.58612424",
"0.5834356",
"0.582984",
"0.58241343",
"0.57892656",
"0.57833505",
"0.5781561",
"0.5773234",
"0.5772917",
"0.5763064",
"0.57629186",
"0.57564414",
"0.5727541",
"0.5726229",
"0.5697232",
"0.5682817",
"0.56761026",
"0.56655073",
"0.56601286",
"0.56601286"
] |
0.9207571
|
0
|
Create a new Scratchpad and associated ScratchpadRevision. The POST data should be a JSON-encoded dict, which is passed verbatim to Scratchpad.create as keyword arguments.
|
def create_scratchpad():
if not gandalf.bridge.gandalf("scratchpads"):
return api_forbidden_response(
"Forbidden: You don't have permission to do this")
if not request.json:
return api_invalid_param_response("Bad data supplied: Not JSON")
# TODO(jlfwong): Support phantom users
user = UserData.current()
if not (user and user.developer):
# Certain fields are only modifiable by developers
for field in scratchpad_models.Scratchpad._developer_only_fields:
if request.json.get(field):
return api_forbidden_response(
"Forbidden: Only developers can change the %s" % field)
try:
# Convert unicode encoded JSON keys to strings
create_args = dict_keys_to_strings(request.json)
if user:
create_args['user_id'] = user.user_id
return scratchpad_models.Scratchpad.create(**create_args)
except (db.BadValueError, db.BadKeyError), e:
return api_invalid_param_response("Bad data supplied: " + e.message)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def post(self):\n data = request.json\n create_entry(data)\n return None, 201",
"def _create(self, request, *args, **kwargs):\n app = kwargs['app']\n\n data_form = PreviewJSONForm(request.data)\n if not data_form.is_valid():\n return Response(data_form.errors, status=HTTP_400_BAD_REQUEST)\n\n form = PreviewForm(data_form.cleaned_data)\n if not form.is_valid():\n return Response(data_form.errors, status=HTTP_400_BAD_REQUEST)\n\n form.save(app)\n log.info('Preview created: %s' % form.instance)\n serializer = self.get_serializer(form.instance)\n return Response(serializer.data, status=HTTP_201_CREATED)",
"def post(self):\n data = request.json\n return create_new_blog(data=data)",
"def post(self):\n created = post_tool(request.json)\n return created, 201",
"def post(self):\n data = request.json\n return save_new_post(data=data)",
"def post(self):\n body = request.get_json(force=True)\n try:\n st = StudySchema(strict=True).load(body).data\n except ValidationError as err:\n abort(400, 'could not create study: {}'.format(err.messages))\n\n db.session.add(st)\n db.session.commit()\n return StudySchema(\n 201, 'study {} created'.format(st.kf_id)\n ).jsonify(st), 201",
"def post(self):\n data = request.json\n create_testing_scenario(data)\n return None, 201",
"def post_review(recipe_id=None):\n\n if not storage.get(Recipe, recipe_id):\n abort(404)\n data = request.get_json()\n if not data:\n abort(400, 'Not a JSON')\n if 'user_id' not in data.keys():\n abort(400, 'Missing user_id')\n if not storage.get(User, data['user_id']):\n abort(404)\n if 'text' not in data.keys():\n abort(400, 'Missing text')\n data['recipe_id'] = recipe_id\n new_review = Review(**data)\n storage.new(new_review)\n storage.save()\n return make_response(jsonify(new_review.to_dict()), 201)",
"def TestRecordCreate(request, format=None):\n\tdata = request.data\n\n\tjson_data = json.dumps(data[0], ensure_ascii=False)\n\tjson_data = json.loads(json_data)\n\t# obj = data[0].pgbench\n\t# jsLoads = json.loads(data[0])\n\n\tfrom django.db import transaction\n\n\ttry:\n\t\tsecret = request.META.get(\"HTTP_AUTHORIZATION\")\n\t\tret = Machine.objects.filter(machine_secret=secret, state='A').get()\n\t\ttest_machine = ret.id\n\t\tif test_machine <= 0:\n\t\t\traise TestDataUploadError(\"The machine is unavailable.\")\n\n\t\trecord_hash = make_password(str(json_data), 'pg_perf_farm')\n\t\tr = TestRecord.objects.filter(hash=record_hash).count()\n\t\tif r != 0:\n\t\t\traise TestDataUploadError('The same record already exists, please do not submit it twice.')\n\n\t\twith transaction.atomic():\n\n\t\t\tif 'linux' not in json_data:\n\t\t\t\tprint('linuxInfo not found')\n\t\t\t\tlinuxInfo = LinuxInfoSerializer(data={'mounts': 'none', 'cpuinfo': 'none', 'sysctl': 'none', 'meminfo': 'none'})\n\n\t\t\telse:\n\t\t\t\tlinux_data = json_data['linux']\n\t\t\t\tlinuxInfo = LinuxInfoSerializer(data=linux_data)\n\t\t\t\tlinuxInfoRet = None\n\n\t\t\tif linuxInfo.is_valid():\n\t\t\t\tlinuxInfoRet = linuxInfo.save()\n\n\t\t\telse:\n\t\t\t\tmsg = 'linuxInfo invalid'\n\t\t\t\traise TestDataUploadError(msg)\n\n\t\t\tmeta_data = json_data['meta']\n\t\t\tmetaInfo = MetaInfoSerializer(data=meta_data)\n\t\t\tmetaInfoRet = None\n\t\t\tif metaInfo.is_valid():\n\t\t\t\tmetaInfoRet = metaInfo.save()\n\t\t\telse:\n\t\t\t\tmsg = 'metaInfo invalid'\n\t\t\t\traise TestDataUploadError(msg)\n\n\t\t\tpg_data = json_data['postgres']\n\t\t\tbranch_str = pg_data['branch']\n\n\t\t\tif (branch_str == 'master'):\n\t\t\t\tbranch_str = 'HEAD'\n\n\t\t\tbranch = TestBranch.objects.filter(branch_name__iexact=branch_str, is_accept=True).get()\n\n\t\t\tif not branch:\n\t\t\t\traise TestDataUploadError('The branch name is unavailable.')\n\n\t\t\tcommit = pg_data['commit']\n\t\t\tpg_settings = pg_data['settings']\n\n\t\t\tfiltered = ['checkpoint_timeout','work_mem','shared_buffers','maintenance_work_mem','max_wal_size','min_wal_size']\n\n\t\t\tfor item in filtered:\n\t\t\t\tif item.isdigit():\n\t\t\t\t\tpg_settings[item] = int(pg_settings[item])\n\t\t\t\t# pg_settings[item] = pg_settings[item].encode('utf-8')\n\t\t\t\t# pg_settings[item] = filter(str.isdigit, pg_settings[item])\n\n\t\t\tpg_settings['log_checkpoints'] = DB_ENUM['general_switch'][pg_settings['log_checkpoints']]\n\t\t\tpgInfo = CreatePGInfoSerializer(data=pg_settings)\n\t\t\tpgInfoRet = None\n\n\t\t\tif pgInfo.is_valid():\n\t\t\t\tpgInfoRet = pgInfo.save()\n\n\t\t\telse:\n\t\t\t\tmsg = pgInfo.errors\n\t\t\t\traise TestDataUploadError(msg)\n\n\t\t\ttest_record_data = {\n\t\t\t\t'pg_info': pgInfoRet.id,\n\t\t\t\t'linux_info': linuxInfoRet.id,\n\t\t\t\t'meta_info': metaInfoRet.id,\n\t\t\t\t'test_machine': test_machine,\n\t\t\t\t'test_desc': 'here is desc',\n\t\t\t\t'meta_time': metaInfoRet.date,\n\t\t\t\t'hash': record_hash,\n\t\t\t\t'commit': commit,\n\t\t\t\t'branch': branch.id,\n\t\t\t\t'uuid': shortuuid.uuid()\n\t\t\t}\n\n\t\t\ttestRecord = CreateTestRecordSerializer(data=test_record_data)\n\t\t\ttestRecordRet = None\n\n\t\t\tif testRecord.is_valid():\n\t\t\t\ttestRecordRet = testRecord.save()\n\n\t\t\telse:\n\t\t\t\tmsg = 'testRecord invalid'\n\t\t\t\tprint(testRecord.errors)\n\t\t\t\traise TestDataUploadError(msg)\n\n\t\t\tpgbench = json_data['pgbench']\n\t\t\t# print(type(ro))\n\t\t\tro = pgbench['ro']\n\n\t\t\t#for tag, tag_list in pgbench.iteritems():\n\t\t\tfor tag, 
tag_list in pgbench.items():\n\t\t\t\t#print(tag)\n\t\t\t\t#print(tag_list)\n\n\t\t\t\ttest_cate = TestCategory.objects.get(cate_sn=tag)\n\n\t\t\t\tif not test_cate:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint(test_cate.cate_name)\n\n\t\t\t\tfor scale, dataset_list in tag_list.items():\n\n\t\t\t\t\tfor client_num, dataset in dataset_list.items():\n\n\t\t\t\t\t\ttest_dataset_data = {\n\t\t\t\t\t\t\t'test_record': testRecordRet.id,\n\t\t\t\t\t\t\t'clients': client_num,\n\t\t\t\t\t\t\t'scale': scale,\n\t\t\t\t\t\t\t'std': dataset['std'],\n\t\t\t\t\t\t\t'metric': dataset['metric'],\n\t\t\t\t\t\t\t'median': dataset['median'],\n\t\t\t\t\t\t\t'test_cate': test_cate.id,\n\t\t\t\t\t\t\t# status, percentage will calc by receiver\n\t\t\t\t\t\t\t'status': -1,\n\t\t\t\t\t\t\t'percentage': 0.0,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttestDateSet = CreateTestDateSetSerializer(data=test_dataset_data)\n\t\t\t\t\t\ttestDateSetRet = None\n\n\t\t\t\t\t\tif testDateSet.is_valid():\n\t\t\t\t\t\t\ttestDateSetRet = testDateSet.save()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(testDateSet.errors)\n\t\t\t\t\t\t\tmsg = 'testDateSet invalid'\n\t\t\t\t\t\t\traise TestDataUploadError(msg)\n\n\t\t\t\t\t\ttest_result_list = dataset['results']\n\n\t\t\t\t\t\tfor test_result in test_result_list:\n\t\t\t\t\t\t\ttest_result_data = test_result\n\t\t\t\t\t\t\ttest_result_data['test_dataset'] = testDateSetRet.id\n\t\t\t\t\t\t\ttest_result_data['mode'] = DB_ENUM['mode'][test_result_data['mode']]\n\t\t\t\t\t\t\ttestResult = CreateTestResultSerializer(data=test_result_data)\n\n\t\t\t\t\t\t\ttestResultRet = None\n\n\t\t\t\t\t\t\tif testResult.is_valid():\n\t\t\t\t\t\t\t\ttestResultRet = testResult.save()\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint(testResult.errors)\n\t\t\t\t\t\t\t\tmsg = testResult.errors\n\t\t\t\t\t\t\t\traise TestDataUploadError(msg)\n\n\n\texcept Exception as e:\n\t\tmsg = 'Upload error: ' + e.__str__()\n\t\tprint(msg)\n\t\treturn Response(msg, status=status.HTTP_406_NOT_ACCEPTABLE)\n\n\tprint('Upload successful!')\n\treturn Response(status=status.HTTP_201_CREATED)",
"def post(self):\r\n data = request.form\r\n return create(data=data)",
"def post(self, **kwargs):\n data = request.json\n return save_new_writer(data=data)",
"def create_review(place_id):\n place = storage.get(\"Place\", place_id)\n if place is None:\n abort(404)\n if not request.get_json():\n return jsonify({'error': 'Not a JSON'}), 400\n if 'user_id' not in request.get_json():\n return jsonify({'error': 'Missing user_id'}), 400\n user = storage.get(\"User\", request.get_json().get('user_id'))\n if user is None:\n abort(404)\n user_id = request.get_json().get('user_id')\n if 'text' not in request.get_json():\n return jsonify({'error': 'Missing text'}), 400\n text = request.get_json().get('text')\n obj = Review(text=text, place_id=place_id, user_id=user_id)\n obj.save()\n return jsonify(obj.to_dict()), 201",
"def post(self):\n if not request.json:\n return None, 400\n\n created_git_repository: GitRepositoryModel = self.datastore.create(document=request.json)\n return created_git_repository, 201",
"def post():\n\n errors = check_petitions_keys2(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n details = request.get_json()\n createdBy = details['createdBy']\n office = details['office']\n body = details['body']\n\n if details['office'].isalpha() is False \\\n or details['createdBy'].isalpha() is False:\n return raise_error(400, \"input is in wrong format\")\n petition = PetitionsModel().save(createdBy, office, body)\n return make_response(jsonify({\n \"status\": \"201\",\n \"message\": \"petition filed successfully\",\n \"petition\": petition\n }), 201)",
"def post(self):\n\n try:\n\n controller = self.controller()\n kwargs = controller.date_time_parser(request.json)\n schema = self.schema(many=False)\n raw_data = controller.create(**kwargs)\n data = schema.dump(raw_data)\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())",
"def create():\n data = request.get_json()\n print(\"DATA: \", data)\n db_helper.insert_new_record(data['first_name'], data['last_name'], data['class_period'], data['current_belt'], data['student_teacher_id'])\n result = {'success': True, 'response': 'Done'}\n return jsonify(result)",
"def create(self, request, *args, **kwargs):\n subreddit_title = kwargs[\"sub_title\"]\n if subreddit_title.lower() in Sub.pseudo_subreddits.keys():\n message = _((\n \"You can't create a post to the \"\n \"'{}' subreddit\".format(subreddit_title)\n ))\n return Response(\n {\"detail\": message},\n status=status.HTTP_400_BAD_REQUEST\n )\n else:\n subreddit = Sub.objects.get(title=subreddit_title)\n user = self.request.user\n data = request.data.copy()\n data[\"subreddit\"] = subreddit.title\n data[\"authorsender\"] = user.username\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED,\n headers=headers\n )",
"def create(self, data=None, **options) -> Dict:\n return self._call(\"\", data=data, method=\"POST\", **options)",
"def post_game():\n body_json = flask.request.json\n if body_json is None:\n flask.abort(400)\n\n request = PostGameRequest(**body_json)\n\n try:\n response = minesweeper_service.create_game(request)\n return flask.jsonify(response)\n except ValueError: # todo: make errors types to allow for more specificity\n flask.abort(400)",
"def create(self, **kwargs):\n url_str = self.base_url\n newheaders = self.get_headers()\n payload = kwargs['definition']\n resp, body = self.client.json_request('POST', url_str,\n data=payload,\n headers=newheaders)\n return resp",
"def create(self, validated_data):\r\n return Snippet.objects.create(**validated_data)",
"def create_review(place_id=None):\n place = storage.get(Place, place_id)\n if place:\n review = request.get_json()\n if not review:\n abort(400, \"Not a JSON\")\n if \"user_id\" not in review:\n abort(400, \"Missing user_id\")\n if not storage.get(\"User\", review[\"user_id\"]):\n abort(404)\n if \"text\" not in review:\n abort(400, \"Missing text\")\n else:\n review['place_id'] = place.id\n new_review = Review(**review)\n storage.new(new_review)\n storage.save()\n return jsonify(new_review.to_dict()), 201\n abort(404)",
"def create(self, validated_data):\n return Snippet.objects.create(**validated_data)",
"def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)",
"def create(self, validated_data: dict) -> Snippet:\n return Snippet.objects.create(**validated_data)",
"def post(self):\n\n body = request.get_json(force=True)\n\n # Deserialize\n try:\n d = DiagnosisSchema(strict=True).load(body).data\n # Request body not valid\n except ValidationError as e:\n abort(400, 'could not create diagnosis: {}'.format(e.messages))\n\n # Add to and save in database\n db.session.add(d)\n db.session.commit()\n\n return DiagnosisSchema(201, 'diagnosis {} created'\n .format(d.kf_id)).jsonify(d), 201",
"def post(self, copy_id):\n checkCopyValidity(copy_id)\n note_body = request.get_json()\n new_note = models.Notes()\n new_note.parse_body(note_body)\n new_note.copy_id = copy_id\n db.session.add(new_note)\n db.session.commit()\n return 'A note \\\"{}\\\" has been added to book copy of {}'.format(new_note.note, copy_id), 201",
"def create(self, data, **kwargs):\n\n return self._post(self.class_url(), data, **kwargs)",
"def posts_post():\n data = request.json\n\n try:\n validate(data, post_schema)\n except ValidationError as error:\n data = {\"message\": error.message}\n return Response(json.dumps(data), 422, mimetype=\"application/json\")\n\n post = Post(title=data[\"title\"], body=data[\"body\"])\n session.add(post)\n session.commit()\n\n data = json.dumps(post.as_dictionary())\n headers = {\"Location\": url_for(\"post_get\", id=post.id)}\n\n return Response(data, 201, headers=headers, mimetype=\"application/json\")",
"def create(ctx, payload):\n payload = parse_payload(ctx, payload)\n r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).create(payload)\n click.echo(json_dumps(r, ctx.obj['PRETTY']))"
] |
[
"0.61571413",
"0.5872489",
"0.57040757",
"0.5469651",
"0.5447269",
"0.54139733",
"0.53953916",
"0.5317828",
"0.5221214",
"0.52156967",
"0.52054673",
"0.51913214",
"0.5187153",
"0.5175984",
"0.5161655",
"0.51363313",
"0.51328707",
"0.5120854",
"0.51141393",
"0.51068985",
"0.50715137",
"0.50708246",
"0.50672585",
"0.50616705",
"0.50593895",
"0.5055447",
"0.50487494",
"0.50437224",
"0.5037617",
"0.5014055"
] |
0.72182924
|
0
|
Update a preexisting Scratchpad and create a new ScratchpadRevision. The POST data should be a JSON-encoded dict, which is passed verbatim to Scratchpad.update as keyword arguments.
|
def update_scratchpad(scratchpad_id):
if not gandalf.bridge.gandalf("scratchpads"):
return api_forbidden_response(
"Forbidden: You don't have permission to do this")
if not request.json:
return api_invalid_param_response("Bad data supplied: Not JSON")
user = UserData.current()
scratchpad = scratchpad_models.Scratchpad.get_by_id(scratchpad_id)
if not scratchpad or scratchpad.deleted:
return api_not_found_response(
"No scratchpad with id %s" % scratchpad_id)
if not user.developer:
# Certain fields are only modifiable by developers
for field in scratchpad_models.Scratchpad._developer_only_fields:
if request.json.get(field):
return api_forbidden_response(
"Forbidden: Only developers can change the %s" % field)
# The user can update the scratchpad if any of the following are true:
# 1. The scratchpad is tutorial/official and the user is a developer
# 2. The scratchpad was created by the user
if scratchpad.category in ("tutorial", "official") and user.developer:
pass
elif scratchpad.user_id != user.user_id:
# Only the creator of a scratchpad can update it
return api_forbidden_response(
"Forbidden: Scratchpad owned by different user")
try:
# Convert unicode encoded JSON keys to strings
update_args = dict_keys_to_strings(request.json)
if 'id' in update_args:
# Backbone passes the id in update calls - ignore it
del update_args['id']
return scratchpad.update(**update_args)
except (db.BadValueError, db.BadKeyError), e:
return api_invalid_param_response("Bad data supplied: " + e.message)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_scratchpad():\n if not gandalf.bridge.gandalf(\"scratchpads\"):\n return api_forbidden_response(\n \"Forbidden: You don't have permission to do this\")\n\n if not request.json:\n return api_invalid_param_response(\"Bad data supplied: Not JSON\")\n\n # TODO(jlfwong): Support phantom users\n user = UserData.current()\n\n if not (user and user.developer):\n # Certain fields are only modifiable by developers\n for field in scratchpad_models.Scratchpad._developer_only_fields:\n if request.json.get(field):\n return api_forbidden_response(\n \"Forbidden: Only developers can change the %s\" % field)\n\n try:\n # Convert unicode encoded JSON keys to strings\n create_args = dict_keys_to_strings(request.json)\n if user:\n create_args['user_id'] = user.user_id\n return scratchpad_models.Scratchpad.create(**create_args)\n except (db.BadValueError, db.BadKeyError), e:\n return api_invalid_param_response(\"Bad data supplied: \" + e.message)",
"def post(self):\n data = request.json\n create_entry(data)\n return None, 201",
"def testUpdate(self):\n response = self.runPut(self.root, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])",
"def update_drink_in_db():\n data = request.data\n data_dict = json.loads(data)\n\n # function returns an array\n # index 0: list of flavors\n # index 1: is a list of ingredients\n ingredients_and_flavors = get_ingredient_and_flavor_list(data_dict)\n print(data_dict)\n print(ingredients_and_flavors)\n connection = mongo_connect()\n connection[\"cocktails\"].update_one(\n {\"_id\": ObjectId(data_dict[\"id\"])},\n {\"$set\":\n {\"name\": data_dict[\"name\"],\n \"description\": data_dict[\"description\"],\n \"flavor_tags\": ingredients_and_flavors[0],\n \"ingredients\": ingredients_and_flavors[1],\n \"method\": data_dict[\"instructions\"],\n \"glass\": data_dict[\"glass\"],\n \"equipment\": data_dict[\"equipment\"],\n \"creator\": ObjectId(session['_id']),\n \"updated_at\": str(datetime.now()),\n \"image_url\": data_dict[\"image_url\"]}\n }\n )\n resp = jsonify(success=True)\n return resp",
"async def put(self, _id: str, doc: dict, *,\n rev: Optional[str] = None,\n batch: Optional[bool] = None,\n new_edits: Optional[bool] = None) -> dict:\n\n params = dict(\n rev=rev,\n batch=\"ok\" if batch else None,\n new_edits=new_edits\n )\n\n return await self.__connection.query('PUT', self._get_path(_id), params=params, data=doc)",
"def update(ctx, saved_search_id, payload):\n payload = parse_payload(ctx, payload)\n r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).update(payload)\n click.echo(json_dumps(r, ctx.obj['PRETTY']))",
"def post(self):\n\n params = json.loads(self.request.body)\n if self.is_post_authorised(params):\n params, validation_errors = self.validate(params)\n if validation_errors:\n response = {'success': False, 'validate': False, 'errors': validation_errors}\n else:\n taskboard_object = TaskboardMethods.put_taskboard(params['title'], params['id'])\n response_object = TaskboardMethods.taskboard_to_dictionary(taskboard_object)\n response = {\n 'success': True,\n 'message': 'Successfully updated taskboard.' if params['id'] else 'Successfully added taskboard.',\n 'validate': True,\n 'data': response_object,\n 'errors': False\n }\n else:\n response = {'success': False, 'data': [], 'errors': {'unauthorised': True}}\n\n self.send_json_object(response)",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.inspection_tag = validated_data.get('inspection_tag', instance.code)\n instance.content = validated_data.get('content', instance.language)\n instance.status = validated_data.get('status', instance.style)\n instance.save()\n return instance",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.inspection_tag = validated_data.get('inspection_tag', instance.code)\n instance.content = validated_data.get('content', instance.language)\n instance.status = validated_data.get('status', instance.style)\n instance.save()\n return instance",
"def test_update_draft():\n with open(basedir + \"fixture/7149593_formatted.json\", \"r\") as f:\n data = f.read()\n storage.save_draft(user_id, \"bib\", \"7149593\", data, \"1362044230872\")\n json_data = json.loads(data)\n json_data['@context'] = \"yadda\"\n storage.update_draft(user_id, \"bib\", \"7149593\", json.dumps(json_data), \"1362044230872\")\n assert json.loads(open(basedir + \"some/path/\" + user_id + \"/bib/7149593\", \"r\").read())['document']['@context'] == \"yadda\"",
"def post_service_instance_update(self, resource_id, resource_dict):\n pass",
"def post(self):\n data = request.json\n return save_new_post(data=data)",
"def update_review(review_id):\n user_input = request.get_json()\n if user_input is None:\n abort(400, {'message': 'Not a JSON'})\n obj = storage.get(Review, review_id)\n if obj is None:\n abort(404)\n for k, v in user_input.items():\n if k not in ['id', 'user_id', 'place_id',\n 'created_at', 'updated_at']:\n setattr(obj, k, v)\n obj.save()\n return jsonify(obj.to_dict()), 200",
"def handle_patch_deployment(project_id, deployment_id):\n kwargs = request.get_json(force=True)\n kwargs = {to_snake_case(k): v for k, v in kwargs.items()}\n experiment = update_deployment(uuid=deployment_id,\n project_id=project_id,\n **kwargs)\n return jsonify(experiment)",
"def update(self, request, pk=None):\n exp = Experiment.objects.get(pk=pk)\n serializer = ExperimentSerializer(exp, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return send_response(request.method, serializer)",
"def post_project_update(self, resource_id, resource_dict):\n pass",
"def save():\n form_data = request.form.to_dict()\n if (form_data['release-at'] == 'Never'):\n form_data['release-at'] = None\n\n if not 'id' in form_data:\n r = requests.post(API_ROUTE, headers={'Auth': _auth()}, json=form_data)\n if r.status_code != requests.codes.created:\n return r.text, r.status_code\n else:\n r = requests.put(API_ROUTE + '/' + str(request.form['id']), headers={'Auth': _auth()}, json=form_data)\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return redirect(url_for('index'), code=278)",
"def update_review(review_id):\n review_obj = storage.get(Review, review_id)\n if review_obj:\n body_dic = request.get_json()\n if not body_dic:\n return jsonify({'error': 'Not a JSON'}), 400\n for key, value in body_dic.items():\n setattr(review_obj, key, value)\n review_obj.save()\n return jsonify(review_obj.to_dict()), 200\n else:\n abort(404)",
"def test_api_can_update_post(self):\n post = Post.objects.get()\n change_post = {'name': 'Something new'}\n res = self.client.put(\n reverse('details', kwargs={'pk': post.id}),\n change_post, format='json'\n )\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def post_update():\n\n\n user_id = session['user_id']\n post = request.form.get('post')\n\n Update.add_update(user_id, post)\n\n return \"Updated Post\"",
"def update_drink(jwt, drink_id):\n try:\n drink = Drink.query.filter(Drink.id == drink_id).one_or_none()\n\n if drink is None:\n abort(404)\n\n body = request.get_json()\n req_title = body.get('title', drink.title)\n req_recipe = json.dumps(body.get('recipe', drink.recipe))\n\n drink.title = req_title\n drink.recipe = req_recipe\n drink.update()\n\n return jsonify({\n 'success': True,\n 'drinks': [drink.long()]\n }), 200\n\n except Exception as e:\n abort(422)",
"def test_api_can_update_post(self):\n post = Post.objects.get()\n change_post = {'name': 'Something new'}\n response = self.client.put(\n reverse('details', kwargs={'pk': post.id}),\n change_post, format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def update_animal():\n\n animal_uuid = request.args.get(\"uuid\", default=None, type=str)\n animal = json.loads(rd.get(animal_uuid))\n\n new_animal_body = request.args.get(\"body\", default=None, type=str)\n if new_animal_body is not None:\n animal[\"body\"] = new_animal_body\n\n new_animal_arms = request.args.get(\"arms\", default=None, type=int)\n if new_animal_body is not None:\n animal[\"arms\"] = new_animal_arms\n\n new_animal_legs = request.args.get(\"legs\", default=None, type=int)\n if new_animal_legs is not None:\n animal[\"legs\"] = new_animal_legs\n\n new_animal_tails = request.args.get(\"tails\", default=None, type=int)\n if new_animal_tails is not None:\n animal[\"tails\"] = new_animal_tails\n\n rd.set(animal_uuid, json.dumps(animal))\n return animal",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.code = validated_data.get('code', instance.code)\n instance.linenos = validated_data.get('linenos', instance.linenos)\n instance.language = validated_data.get('language', instance.language)\n instance.style = validated_data.get('style', instance.style)\n instance.save()\n return instance",
"def patch_movie(payload: PayloadJSON, movie_id: int) -> ResourceJSON:\r\n movie = MovieModel.find_by_id(movie_id)\r\n\r\n if movie is None:\r\n abort(404)\r\n\r\n data = request.get_json()\r\n title = data.get(\"title\", None)\r\n release_date = data.get(\"release_date\", None)\r\n\r\n if title is None or release_date is None:\r\n abort(400)\r\n\r\n movie.title = title\r\n movie.release_date = release_date\r\n result = movie.save_to_db()\r\n\r\n if result[\"error\"]:\r\n abort(500)\r\n\r\n _id = result[\"id\"]\r\n\r\n return jsonify(\r\n {\"success\": True, \"movie\": MovieModel.find_by_id(_id).json()}\r\n )",
"def test_update_submission(self):\n sub_response_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n sub = [sub for sub in Submission.query(hash_key=self.new_user.username, range_key_condition=Submission.sort.startswith('SUBMISSION_'))][0]\n sub_response_update = self.client.put(\n '/submission/{}'.format(str(sub.public_id)),\n headers=dict(\n Authorization=\"Token {}\".format(self.token)\n ),\n data=json.dumps(dict(\n submitted_texts=['updated_text1']\n )),\n content_type='application/json'\n )\n update_data = json.loads(sub_response_update.data.decode())\n upd_sub = Submission.get(hash_key=sub.username, range_key=sub.sort)\n self.assertTrue(update_data['status']=='success')\n self.assertTrue(upd_sub.text_count == 1)",
"def update_cupcake(cupcake_id):\n\n cupcake = Cupcake.query.get_or_404(cupcake_id)\n cupcake.flavor = request.json.get('flavor', cupcake.flavor)\n cupcake.size = request.json.get('size', cupcake.size)\n cupcake.rating = request.json.get('rating', cupcake.rating)\n cupcake.image = request.json.get('image', cupcake.image)\n\n\n db.session.add(cupcake)\n db.session.commit()\n\n json_response = jsonify(cupcake=cupcake.serialize())\n return json_response",
"def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.xml_content = validated_data.get('xml_content', instance.xml_content)\n return data_api.upsert(instance, validated_data['user'])",
"def post(self, *args):\n logger.error('Got post: ' + str(args))\n params = self.parse_query_string(args[0])\n params.update(web.input())\n\n logger.error(\"Updated: \" + str(params))\n\n module = self.get_module(params)\n impl = module.ProductsBuilds(config=self.context)\n\n return impl.create(**params)",
"def post_update(self):\n\t\tlogging.info(\"Beginning\")\n\t\toptions=dict(\n\t\t\tapi_key = self.apiKey\n\t\t)\n\t\tcounter = 0\n\t\tfor key, value in self.field.items():\n\t\t\tif value != None:\n\t\t\t\tcounter += 1\n\t\t\t\toptions[key] = value\n\t\tif counter == 0:\n\t\t\tlogging.error(\"There was nothing to update. Check the field values\")\n\t\t\treturn\n\t\turl = '{ts}update'.format(\n\t\t\tts=self.tsRUL,\n\t\t)\n\t\tlogging.debug(\"Options = \" + str(options))\n\t\ttry:\n\t\t\tresults = requests.post(url, params=options)\n\t\t\tif results.ok != True:\n\t\t\t\tlogging.error(\"The update failed\")\n\t\t\t\treturn False\n\t\texcept:\n\t\t\tlogging.error(\"There was an error trying to update the values\")\n\t\t\treturn False\n\t\tself.clear_field_values()\n\t\treturn True"
] |
[
"0.5830076",
"0.54428226",
"0.54384106",
"0.5388444",
"0.5172423",
"0.5118314",
"0.50912094",
"0.50869083",
"0.50869083",
"0.5076901",
"0.5074334",
"0.5056127",
"0.5050681",
"0.5022906",
"0.5009896",
"0.5004409",
"0.49962482",
"0.4994732",
"0.4982116",
"0.49775523",
"0.4976961",
"0.49681756",
"0.4967143",
"0.49477676",
"0.49449676",
"0.49359092",
"0.48844287",
"0.4883223",
"0.48796958",
"0.48641354"
] |
0.64261657
|
0
|
Describes the query command.
|
def description(self):
return QueryCommand.desc_text
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def query(self, command: str) -> str:\n return self._dmm.query(command).rstrip()",
"def command_list(self, query):\n return query",
"def query(self):\n return self.details[KEY_QUERY]",
"def query(self):\n pass",
"def query(self):\n return self.msg",
"def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")",
"def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")",
"def query(self, message: str) -> str:\n raise NotImplementedError()",
"def generate_query(self):\n return",
"def pp_query(query):\n print(format_query(query))",
"async def help_query(message):\n query = message.regex_groups[0]\n help_text = _make_help_text(bot.help.list(query))\n await message.reply(help_text)",
"def make_query(self):",
"def query(self, query):",
"def help_explain(self):\n print(EXPLAIN)",
"def briefing_action(self, query):\n raise NotImplementedError()\n pass",
"def query(self):",
"def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")",
"def usage():\n print(\"Usage:\\n python NFLplayerSentiments.py 'QUERY' (remember to include the quotes).\")",
"def query(self):\r\n raise NotImplementedError",
"def human(self, query):\n result = self.query(query)\n width = max([len(x) for x in result.keys()])\n fmt = (\" %%-%ds\" % width) + \"\\t%s\\t%s\"\n for key, val in result.iteritems():\n self._log(fmt % (key, val[0], val[1]), deadline=self._deadline)",
"def __str__(self):\n \n return \"#%s %s %s [%s]\" % (self.dbid, self.title, self.query, self.group)",
"def showGqlQuery(query):\n proto = query._proto_query\n kind = query._model_class.kind()\n filters = proto.filters()\n boundfilters = proto._GQL__bound_filters\n orderings = proto.orderings()\n hint = proto.hint()\n limit = proto.limit()\n offset = proto._GQL__offset\n\n select = \"SELECT * FROM %s\" % kind\n where = []\n order = []\n\n for k in sorted(filters):\n for clause in filters[k]:\n name, op = clause\n if name==-1: name = 'ANCESTOR'\n where.append(\"%s %s :%s\" % (name, op.upper(), k))\n\n for k in sorted(boundfilters):\n if isinstance(k, tuple):\n op = ' '.join(k)\n else:\n op = k\n where.append(\"%s %r\" % (op, boundfilters[k]))\n\n for p, o in orderings:\n order.append(\"%s %s\" % (p, 'DESC' if o==datastore.Query.DESCENDING else 'ASC'))\n\n gql = select\n if where:\n gql += ' WHERE '+' AND '.join(where)\n if order:\n gql += ' ORDER BY ' + ', '.join(order)\n if limit != -1:\n if offset != -1:\n gql += ' LIMIT %s,%s' % (offset,limit)\n else:\n gql += ' LIMIT %s' % limit\n elif offset != -1:\n gql += ' OFFSET %s' % offset\n return gql",
"def query(self) -> None:\n raise NotImplementedError()",
"def query(output, query):\n gqlapi = gql.get_api()\n print_output(output, gqlapi.query(query))",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"def get_description(self):\n return self['command_name']",
"def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))",
"def print_query(query: Query) -> str:\n regex = re.compile(r\":(?P<name>\\w+)\")\n params = query.statement.compile().params\n sql = regex.sub(r\"'{\\g<name>}'\", str(query.statement)).format(**params)\n from flexmeasures.data.config import db\n\n print(f\"\\nPrinting SQLAlchemy query to database {db.engine.url.database}:\\n\\n\")\n print(sql)\n return sql",
"def query_text(self) -> str:\n return self._query_text",
"def command(self, bot, comm, groups):\n query = groups[0]\n\n # Generates list of flags and strips out said flags from input\n query, flag_list = self.flags(query)\n\n if 'help' in flag_list:\n return self.print_helptext()\n\n # Generates psuedo-slugified url\n url = 'https://en.wikipedia.org/wiki/' + query.replace(' ', '_')\n\n # Get the article summary\n summary = self.get_article_summary(query)\n if summary is None:\n bot.reply(comm, \"{user}: I couldn't find an article for {query}\",\n kwvars={'query': query})\n return\n\n if 'may refer to' in summary:\n bot.reply(comm, \"{user}: Your query '{query}' was too ambigious.\",\n kwvars={'query': query})\n return\n\n # Done!\n bot.reply(comm, '{user}: {summary} :: URL: {url}',\n kwvars={'summary': summary, 'url': url})"
] |
[
"0.6920159",
"0.66629356",
"0.65501624",
"0.6539018",
"0.65007603",
"0.6458777",
"0.6458777",
"0.64061743",
"0.63914585",
"0.63866216",
"0.6300175",
"0.6286667",
"0.62046415",
"0.6189607",
"0.6187114",
"0.6182993",
"0.6180869",
"0.61589646",
"0.6131976",
"0.6092591",
"0.6080677",
"0.60532445",
"0.6034062",
"0.60245025",
"0.60237545",
"0.60202783",
"0.6016977",
"0.6013676",
"0.6003635",
"0.5995781"
] |
0.7841744
|
0
|
required_values = array of necessary values for the endpoint to function; method = POST, PUT, GET, DELETE; request = request object from django; functor = function that actually does the specific logic for this endpoint; takes (student_query, request) as params
|
def standard_student_endpoint(
endpoint_name,
required_values,
method,
request,
functor):
try:
# verify method
if request.method != method:
logger.warn("non-"+method+" request landed in "+method+" logic at "+endpoint_name+" in the api")
return HttpResponseBadRequest("bummer. your non-"+method+" request landed in the "+method+" logic.")
        # verify if potentially authorized
# user_id is only placed in the session by us
# when the user logs into the application
if 'user_id' not in request.session:
logger.debug("unauthenticated request to "+endpoint_name)
return HttpResponseForbidden('not authenticated')
user_id = request.session.get('user_id')
# verify the user id is regular
if not id_is_valid(user_id):
logger.debug("invalid id in request to"+endpoint_name)
return HttpResponseBadRequest("Invalid ID")
# verify each of the desired parameters is present in the method body
if not all_required_values_present(required_values, request, method):
logger.info("bad request made to "+endpoint_name+", not enough params "+str(request))
return HttpResponseBadRequest("expected more values, expected these:"+str(required_values))
# get the query for the student
student_query = Student.objects.filter(id=user_id)
# verify that this requesting user actually exists
if not student_query.exists():
logger.info("user who was not a student made a request to "+endpoint_name+", id of user:"+str(request.session['user_id']))
return HttpResponseNotAllowed('not authorized')
# endpoint specific logic
return functor(student_query, request)
except Exception, error:
logger.info("error in api endpoint "+endpoint_name+":"+str(error))
return HttpResponseServerError("unhandled error")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def request(query):",
"def admin_search_student_query(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n if request.session['type'] == 'S' or request.session['type'] == 'R': return redirect(reverse(URL_FORBIDDEN))\n\n if request.method == \"POST\":\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n email = request.POST['email']\n type = request.POST['type']\n dict = {}\n\n for user in User.objects.all():\n user_type = _get_user_type(user)\n\n if user_type is None or user_type == 'A':\n continue # for user who are not S, G, F, D, R, A\n\n user_first_name = None\n user_last_name = None\n user_email = None\n \n votes = 0\n\n if user_type == type:\n votes += 2\n\n if user_type == 'S':\n user_first_name = user.student.first_name\n user_last_name = user.student.last_name\n user_email = user.student.email\n elif user_type == 'G' or user_type == 'D':\n user_first_name = user.faculty.first_name\n user_last_name = user.faculty.last_name\n user_email = user.faculty.email\n elif user_type == 'R':\n user_first_name = user.first_name\n user_last_name = user.last_name\n user_email = user.email\n\n if first_name.upper() in user_first_name.upper():\n votes += 1\n\n if last_name.upper() in user_last_name.upper():\n votes += 1\n\n if email.upper() in user_email.upper():\n votes += 1\n\n dict[user] = votes\n \n sorted_results = sorted(dict.items(), key = operator.itemgetter(1))\n sorted_results.reverse()\n result = _clean_user_info_results(sorted_results)\n\n return HttpResponse(json.dumps(result), content_type = 'application/json')\n else:\n return redirect(reverse(URL_BAD_REQUEST))",
"def process_request(self, req, resp, resource, params):",
"def actionhelper(self, request, query, obj_map):\n\n if request.method == 'POST':\n serializer = self.get_serializer(data=request.data)\n\n serializer.is_valid(raise_exception=True)\n serializer.save(**obj_map)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n if request.method == 'DELETE':\n query.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def standard_teacher_endpoint(\nendpoint_name,\nrequired_values,\nmethod,\nrequest,\nfunctor):\n try:\n # validate we are receiving expected method\n if request.method != method:\n logger.warn(\"non-\"+method+\" request landed in \"+method+\" logic at \"+endpoint_name+\" in the api:\"+str(request))\n return HttpResponseBadRequest(\"bummer. your non-\"+method+\" request landed in the \"+method+\" logic.\")\n\n # validate that the user may be authorized to \n # perform this action - we set the user_id in the\n # session at login\n if 'user_id' not in request.session:\n logger.debug(\"unauthenticated request to \"+endpoint_name)\n return HttpResponseForbidden('not authenticated')\n\n teacher_id = request.session.get('user_id')\n\n # check that the id is valid for usage\n if not id_is_valid(teacher_id):\n return HttpResponseBadRequest(\"invalid id\")\n\n # get the query for this teacher\n teacher_query = Teacher.objects.filter(id=teacher_id)\n\n # validate that there is some teacher with this id\n if not teacher_query.exists():\n logger.info(\"user who was not a teacher made a request to \"+endpoint_name+\", id of user:\"+str(teacher_id))\n return HttpResponseForbidden('not a teacher!')\n\n # validate that all desired parameters are present in the request body\n if not all_required_values_present(required_values, request, method):\n logger.info(\"bad request made to \"+endpoint_name+\", not enough params \")\n return HttpResponseBadRequest(\"expected more values, expected these:\"+str(required_values))\n \n # perform the endpoint specific logic\n return functor(teacher_query, request)\n except Exception, error:\n logger.info(\"error in api endpoint \"+endpoint_name+\":\"+str(error))\n return HttpResponseServerError(\"unhandled error?!\")",
"def actionhelper(self, request, query, obj_map):\n\n if request.method == 'POST':\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save(**obj_map)\n return Response(serializer.data)\n\n if request.method == 'DELETE':\n query.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def query(self, **kwargs):",
"def getUserQueries():\n if request.method == \"POST\":\n result = get_query_data(request)\n if result:\n status = 200\n else:\n status = 400\n return result, status # HTTP Status Created [201]",
"def student_detail(request, pk):\n try:\n students = student.objects.get(pk=pk)\n except students.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = studentSerializer(students)\n return JsonResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = studentSerializer(students, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n students.delete()\n return HttpResponse(status=204)",
"def _request(self, *args, **kwargs):\n raise NotImplementedError()",
"def basis(request: Any) -> Any:\n return request.param",
"def urlfor( request, *args, **kwargs ):",
"def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)",
"def getStudent(request, pk):\n try:\n student = Student.objects.get(nuid=pk)\n\n except Student.DoesNotExist:\n return Response(status=HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = StudentSerializer(student, context={'request': request})\n return Response({'data': serializer.data})\n\n elif request.method == 'PUT':\n serializer = StudentSerializer(student, data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n student.delete()\n return Response(status=HTTP_204_NO_CONTENT)",
"def get_request_args():\n args = {}\n args['user_id'] = request.args.get('user_id', default=None, type=int)\n args['is_examiner'] = request.args.get('is_examiner', default=None, type=int)\n if args['is_examiner'] is not None: args['is_examiner'] = args['is_examiner']==1\n args['first_name'] = request.args.get('first_name', default=None)\n args['last_name'] = request.args.get('last_name', default=None)\n\n args['exam_warning_id'] = request.args.get('exam_warning_id', default=None, type=int)\n args['exam_recording_id'] = request.args.get('exam_recording_id', default=None, type=int)\n args['in_progress'] = request.args.get('in_progress', default=None, type=int)\n if args['in_progress'] is not None: args['in_progress'] = args['in_progress']==1\n args['exam_id'] = request.args.get('exam_id', default=None, type=int)\n args['subject_id'] = request.args.get('subject_id', default=None, type=int)\n args['login_code'] = request.args.get('login_code', default=None)\n args['exam_name'] = request.args.get('exam_name', default=None)\n\n args['warning_count'] = request.args.get('warning_count', default=None, type=int)\n args['min_warnings'] = request.args.get('min_warnings', default=None, type=int)\n args['max_warnings'] = request.args.get('max_warnings', default=None, type=int)\n\n args['period_start'] = request.args.get('period_start', default=timedelta(days=10))\n args['period_end'] = request.args.get('period_end', default=timedelta(days=10))\n if args['period_start'] == timedelta(days=10): args['period_start'] = None\n if args['period_end'] == timedelta(days=10): args['period_end'] = None\n args['order_by'] = request.args.get('order_by', default='default').lower()\n args['order'] = request.args.get('order', default='desc').lower()\n \n args['page_number'] = request.args.get('page_number', default=1, type=int)\n args['results_length'] = request.args.get('results_length', default=25, type=int)\n if args['page_number'] < 1: args['page_number'] = 1\n if args['results_length'] < 1: args['results_length'] = 1\n\n return args",
"def _request(self, *args):\n raise NotImplementedError",
"def test_put_Student_missing_param(self):\n school_ids = self.create_School(2,20)\n\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n\n id = str(Student.objects.get().id)\n\n url = '/students/' + id\n\n \"\"\"Normal request\"\"\"\n data = {'first_name': 'Poompatai2', 'last_name': 'Puntitpong2','age': 22, 'nationality': 'Thailand2', 'school': school_ids[0]}\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n \"\"\"Missing params\"\"\"\n data = {}\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['first_name'][0].code, 'required')\n self.assertEqual(response.data['last_name'][0].code, 'required')\n self.assertEqual(response.data['age'][0].code, 'required')\n self.assertEqual(response.data['nationality'][0].code, 'required')\n self.assertEqual(response.data['school'][0].code, 'required')",
"def filter_by_login_or_role(login: bool, request: HttpRequest,\n sql_request: Q, teachers: list, students: list):\n\n if login:\n try:\n if request.GET[\"studentLogin\"].isnumeric():\n userId = int(request.GET[\"studentLogin\"])\n else:\n userId = int(request.GET[\"teacherLogin\"])\n sql_request &= Q(user_id=userId)\n \n except (ValueError, Profile.DoesNotExist) as err:\n raise err\n \n elif \"type\" in request.GET:\n if request.GET[\"type\"] == \"teacher\":\n tmp = list(map(lambda teacher: teacher[0], teachers))\n elif request.GET[\"type\"] == \"students\":\n tmp = list(map(lambda student: student[0], students))\n else:\n tmp = []\n sql_request &= Q(user_id__in=tmp)\n return sql_request",
"def __call__(self, request):",
"def track_request(params):\n\n if len(params) == 0:\n number_of_orders = \"10\"\n field = None\n from_date = None\n condition_1 = None\n to_date = None\n condition_2 = None\n status = None\n value = None\n condition_3 = None\n else:\n number_of_orders = params['number_of_orders'] if len(params['number_of_orders']) != 0 else \"20\"\n field = \"created_at\"\n from_date = params['from_date'] + \" 00:00:00\" if len(params['from_date']) != 0 else \"2000-01-01 00:00:00\" \n condition_1 = \"gteq\"\n to_date = params['to_date'] + \" 23:59:59\" if len(params['to_date']) != 0 else \"2100-12-31 23:59:59\"\n condition_2 = \"lteq\"\n status = None if params['status'] == \"None\" else \"status\"\n value = None if params['status'] == \"None\" else params['status']\n condition_3 = None if params['status'] == \"None\" else \"eq\"\n\n generate_request = oAuth_magento()\n\n payload = { \"searchCriteria[filter_groups][0][filters][0][field]\": field,\n \"searchCriteria[filter_groups][0][filters][0][value]\": from_date,\n \"searchCriteria[filter_groups][0][filters][0][condition_type]\": condition_1,\n\n \"searchCriteria[filter_groups][1][filters][0][field]\": field,\n \"searchCriteria[filter_groups][1][filters][0][value]\": to_date,\n \"searchCriteria[filter_groups][1][filters][0][condition_type]\": condition_2,\n\n \"searchCriteria[filter_groups][2][filters][0][field]\": status,\n \"searchCriteria[filter_groups][2][filters][0][value]\": value,\n \"searchCriteria[filter_groups][2][filters][0][condition_type]\": condition_3,\n \n \"searchCriteria[pageSize]\": number_of_orders,\n \"searchCriteria[sortOrders][0][field]\":\"created_at\",\n \"fields\": \"items[increment_id,base_currency_code,grand_total,created_at,status,billing_address[company,firstname,lastname]]\",\n }\n\n response = requests.request(\"GET\", url=generate_request[0], headers=generate_request[1], params=payload)\n # with open('temp_files/magento_orders_test.json','w') as f:\n # f.write(response.text)\n json_response = json.loads(response.text)\n for ele in json_response['items']:\n for key, val in ele.items():\n if key == 'billing_address':\n name_container = ele.pop(key)\n try:\n ele['purchasing_institute'] = name_container['company']\n except:\n ele['purchasing_institute'] = name_container['firstname'] + ' ' + name_container['lastname']\n else:\n pass\n col_headers = list((json_response['items'][0]).keys())\n return json_response['items'], col_headers",
"def rest_api_request_handler(self, request_type):\n result = {}\n success_code = 0\n with self.resource_lock:\n if request_type == self.RestRequest.REST_MUTS:\n result = self.muts # Returns MUTs\n elif request_type == self.RestRequest.REST_TEST_SPEC:\n result = self.test_spec # Returns Test Specification\n elif request_type == self.RestRequest.REST_TEST_RESULTS:\n pass # Returns test results\n else:\n success_code = -1\n return json.dumps(self.get_rest_result_template(result, 'request/' + request_type, success_code), indent=4)",
"def admin_search_student(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n if request.session['type'] == 'S' or request.session['type'] == 'R': return redirect(reverse(URL_FORBIDDEN))\n\n if request.method == \"GET\":\n return render(\n request,\n 'app/admin/admin_search_student.html',\n {\n 'title':'Student Info',\n 'layout_data' : get_layout_data(request),\n }\n )\n else:\n return redirect(reverse(URL_BAD_REQUEST))",
"def query(self, obj):\r\n self.require_collection()\r\n request = http.Request('POST', self.get_url(), self.wrap_object(obj))\r\n\r\n return request, parsers.parse_json",
"def handle_request(self, request, **resources):\r\n if not request.method in self._meta.callmap.keys():\r\n raise HttpError(\r\n 'Unknown or unsupported method \\'%s\\'' % request.method,\r\n status=status.HTTP_501_NOT_IMPLEMENTED)\r\n\r\n # Get the appropriate create/read/update/delete function\r\n view = getattr(self, self._meta.callmap[request.method])\r\n\r\n # Get function data\r\n return view(request, **resources)",
"def get_query_parameters(self):\n parameters = super().get_query_parameters()\n\n if self.method in ['GET', 'DELETE']:\n self.add_parameters(parameters, [\n self.get_company_id_parameter(),\n self.get_lookup_parameter(),\n ])\n\n return parameters",
"def ng_query(self, request, *args, **kwargs):\r\n return self.build_json_response(self.get_queryset())",
"def __call__(request):",
"def query_components(fn):\n @wraps(fn)\n def wrapper(self, request, context, *args, **kwargs):\n constraints = list(url_to_constraints(request.path))\n return fn(self, request=request, context=context, constraints=constraints, params={}, *args, **kwargs)\n\n return wrapper",
"def _get_query(request):\n query = request.GET.get(\"query\", \"\")\n date = request.GET.get(\"date\", \"\")\n timestamp = request.GET.get(\"timestamp\", None)\n sort = request.GET.get(\"sort\", \"top\").lower()\n filter = request.GET.get(\"filter\", \"following\").lower()\n\n if timestamp:\n t = parse(timestamp, ignoretz=True)\n timestamp = pytz.utc.localize(t)\n else:\n timestamp = timezone.now()\n\n start_time = ''\n end_time = ''\n\n if date:\n start_time, end_time = DateRangeParser().parse(date)\n\n get_dict = {\n \"query\": query,\n \"filter\": filter,\n \"sort\": sort,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"username\": request.GET.get(\"username\", \"\"),\n \"orderBy\": request.GET.get(\"orderBy\", \"start_time\"),\n \"direction\": request.GET.get(\"direction\", \"\"),\n \"template\": request.GET.get(\"template\", \"\"),\n \"type\": request.GET.get(\"type\", \"\"),\n \"page\": request.GET.get(\"page\", 1),\n 'timestamp': timestamp,\n }\n\n return get_dict, query, date, sort, filter",
"def get_query_parameters(self):\n natural_parameters = self.get_filter_parameters() + self.get_pagination_parameters()\n\n query_serializer = self.get_query_serializer()\n serializer_parameters = []\n if query_serializer is not None:\n serializer_parameters = self.serializer_to_parameters(query_serializer, in_=openapi.IN_QUERY)\n\n if len(set(param_list_to_odict(natural_parameters)) & set(param_list_to_odict(serializer_parameters))) != 0:\n raise SwaggerGenerationError(\n \"your query_serializer contains fields that conflict with the \"\n \"filter_backend or paginator_class on the view - %s %s\" % (self.method, self.path)\n )\n\n return natural_parameters + serializer_parameters"
] |
[
"0.60971296",
"0.5948114",
"0.5902992",
"0.58379984",
"0.5760867",
"0.56506467",
"0.56495714",
"0.56206256",
"0.555941",
"0.55475605",
"0.55047286",
"0.5470059",
"0.54542714",
"0.54467124",
"0.5445971",
"0.54176366",
"0.540915",
"0.53910285",
"0.53737706",
"0.53653616",
"0.53527194",
"0.5350743",
"0.53506947",
"0.53339845",
"0.5328799",
"0.5322329",
"0.5315036",
"0.53121775",
"0.5307629",
"0.5279848"
] |
0.697114
|
0
|
required_values = array of necessary values for the endpoint to function; method = POST, PUT, GET, DELETE; request = request object from django; functor = function that actually does the specific logic for this endpoint; takes (teacher_query, request) as params
|
def standard_teacher_endpoint(
endpoint_name,
required_values,
method,
request,
functor):
try:
# validate we are receiving expected method
if request.method != method:
logger.warn("non-"+method+" request landed in "+method+" logic at "+endpoint_name+" in the api:"+str(request))
return HttpResponseBadRequest("bummer. your non-"+method+" request landed in the "+method+" logic.")
# validate that the user may be authorized to
# perform this action - we set the user_id in the
# session at login
if 'user_id' not in request.session:
logger.debug("unauthenticated request to "+endpoint_name)
return HttpResponseForbidden('not authenticated')
teacher_id = request.session.get('user_id')
# check that the id is valid for usage
if not id_is_valid(teacher_id):
return HttpResponseBadRequest("invalid id")
# get the query for this teacher
teacher_query = Teacher.objects.filter(id=teacher_id)
# validate that there is some teacher with this id
if not teacher_query.exists():
logger.info("user who was not a teacher made a request to "+endpoint_name+", id of user:"+str(teacher_id))
return HttpResponseForbidden('not a teacher!')
# validate that all desired parameters are present in the request body
if not all_required_values_present(required_values, request, method):
logger.info("bad request made to "+endpoint_name+", not enough params ")
return HttpResponseBadRequest("expected more values, expected these:"+str(required_values))
# perform the endpoint specific logic
return functor(teacher_query, request)
except Exception, error:
logger.info("error in api endpoint "+endpoint_name+":"+str(error))
return HttpResponseServerError("unhandled error?!")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def request(query):",
"def urlfor( request, *args, **kwargs ):",
"def get_request_args():\n args = {}\n args['user_id'] = request.args.get('user_id', default=None, type=int)\n args['is_examiner'] = request.args.get('is_examiner', default=None, type=int)\n if args['is_examiner'] is not None: args['is_examiner'] = args['is_examiner']==1\n args['first_name'] = request.args.get('first_name', default=None)\n args['last_name'] = request.args.get('last_name', default=None)\n\n args['exam_warning_id'] = request.args.get('exam_warning_id', default=None, type=int)\n args['exam_recording_id'] = request.args.get('exam_recording_id', default=None, type=int)\n args['in_progress'] = request.args.get('in_progress', default=None, type=int)\n if args['in_progress'] is not None: args['in_progress'] = args['in_progress']==1\n args['exam_id'] = request.args.get('exam_id', default=None, type=int)\n args['subject_id'] = request.args.get('subject_id', default=None, type=int)\n args['login_code'] = request.args.get('login_code', default=None)\n args['exam_name'] = request.args.get('exam_name', default=None)\n\n args['warning_count'] = request.args.get('warning_count', default=None, type=int)\n args['min_warnings'] = request.args.get('min_warnings', default=None, type=int)\n args['max_warnings'] = request.args.get('max_warnings', default=None, type=int)\n\n args['period_start'] = request.args.get('period_start', default=timedelta(days=10))\n args['period_end'] = request.args.get('period_end', default=timedelta(days=10))\n if args['period_start'] == timedelta(days=10): args['period_start'] = None\n if args['period_end'] == timedelta(days=10): args['period_end'] = None\n args['order_by'] = request.args.get('order_by', default='default').lower()\n args['order'] = request.args.get('order', default='desc').lower()\n \n args['page_number'] = request.args.get('page_number', default=1, type=int)\n args['results_length'] = request.args.get('results_length', default=25, type=int)\n if args['page_number'] < 1: args['page_number'] = 1\n if args['results_length'] < 1: args['results_length'] = 1\n\n return args",
"def standard_student_endpoint(\nendpoint_name,\nrequired_values,\nmethod,\nrequest,\nfunctor): \n try:\n\n # verify method\n if request.method != method:\n logger.warn(\"non-\"+method+\" request landed in \"+method+\" logic at \"+endpoint_name+\" in the api\")\n return HttpResponseBadRequest(\"bummer. your non-\"+method+\" request landed in the \"+method+\" logic.\")\n \n # very if potentially authorized\n # user_id is only placed in the session by us\n # when the user logs into the application\n if 'user_id' not in request.session:\n logger.debug(\"unauthenticated request to \"+endpoint_name)\n return HttpResponseForbidden('not authenticated')\n \n user_id = request.session.get('user_id')\n\n # verify the user id is regular\n if not id_is_valid(user_id):\n logger.debug(\"invalid id in request to\"+endpoint_name)\n return HttpResponseBadRequest(\"Invalid ID\")\n \n # verify each of the desired parameters is present in the method body\n if not all_required_values_present(required_values, request, method):\n logger.info(\"bad request made to \"+endpoint_name+\", not enough params \"+str(request))\n return HttpResponseBadRequest(\"expected more values, expected these:\"+str(required_values))\n\n # get the query for the student\n student_query = Student.objects.filter(id=user_id)\n \n # verify that this requesting user actually exists\n if not student_query.exists():\n logger.info(\"user who was not a student made a request to \"+endpoint_name+\", id of user:\"+str(request.session['user_id']))\n return HttpResponseNotAllowed('not authorized')\n \n # endpoint specific logic\n return functor(student_query, request)\n\n except Exception, error:\n logger.info(\"error in api endpoint \"+endpoint_name+\":\"+str(error))\n return HttpResponseServerError(\"unhandled error\")",
"def process_request(self, req, resp, resource, params):",
"def get_query_parameters(self):\n parameters = super().get_query_parameters()\n\n if self.method in ['GET', 'DELETE']:\n self.add_parameters(parameters, [\n self.get_company_id_parameter(),\n self.get_lookup_parameter(),\n ])\n\n return parameters",
"def query(self, **kwargs):",
"def actionhelper(self, request, query, obj_map):\n\n if request.method == 'POST':\n serializer = self.get_serializer(data=request.data)\n\n serializer.is_valid(raise_exception=True)\n serializer.save(**obj_map)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n if request.method == 'DELETE':\n query.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def postQuery(self):\n pass",
"def _request(self, *args, **kwargs):\n raise NotImplementedError()",
"def query_params(self):\n return self.request._request.GET",
"def __call__(request):",
"def get(self, request):\n return Response(services.get_travel_functions(request.query_params, request.META['HTTP_JWT']))",
"def basis(request: Any) -> Any:\n return request.param",
"def __call__(self, request):",
"def actionhelper(self, request, query, obj_map):\n\n if request.method == 'POST':\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save(**obj_map)\n return Response(serializer.data)\n\n if request.method == 'DELETE':\n query.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def filter_by_login_or_role(login: bool, request: HttpRequest,\n sql_request: Q, teachers: list, students: list):\n\n if login:\n try:\n if request.GET[\"studentLogin\"].isnumeric():\n userId = int(request.GET[\"studentLogin\"])\n else:\n userId = int(request.GET[\"teacherLogin\"])\n sql_request &= Q(user_id=userId)\n \n except (ValueError, Profile.DoesNotExist) as err:\n raise err\n \n elif \"type\" in request.GET:\n if request.GET[\"type\"] == \"teacher\":\n tmp = list(map(lambda teacher: teacher[0], teachers))\n elif request.GET[\"type\"] == \"students\":\n tmp = list(map(lambda student: student[0], students))\n else:\n tmp = []\n sql_request &= Q(user_id__in=tmp)\n return sql_request",
"def _request(self, *args):\n raise NotImplementedError",
"def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)",
"def request_vars(self):",
"def _get_query(request):\n query = request.GET.get(\"query\", \"\")\n date = request.GET.get(\"date\", \"\")\n timestamp = request.GET.get(\"timestamp\", None)\n sort = request.GET.get(\"sort\", \"top\").lower()\n filter = request.GET.get(\"filter\", \"following\").lower()\n\n if timestamp:\n t = parse(timestamp, ignoretz=True)\n timestamp = pytz.utc.localize(t)\n else:\n timestamp = timezone.now()\n\n start_time = ''\n end_time = ''\n\n if date:\n start_time, end_time = DateRangeParser().parse(date)\n\n get_dict = {\n \"query\": query,\n \"filter\": filter,\n \"sort\": sort,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"username\": request.GET.get(\"username\", \"\"),\n \"orderBy\": request.GET.get(\"orderBy\", \"start_time\"),\n \"direction\": request.GET.get(\"direction\", \"\"),\n \"template\": request.GET.get(\"template\", \"\"),\n \"type\": request.GET.get(\"type\", \"\"),\n \"page\": request.GET.get(\"page\", 1),\n 'timestamp': timestamp,\n }\n\n return get_dict, query, date, sort, filter",
"def get_queryset(self):\n if 'person_id' in self.request.GET:\n queryset = (Answer.objects\n .filter(creator__id=self.request.GET['person_id']))\n elif 'question_id' in self.request.GET:\n queryset = (Answer.objects.filter(question__id=self.request\n .GET['question_id']))\n else:\n queryset = Answer.objects.all()\n return queryset",
"def do(request, **kwargs):\n\n if request.method == 'POST':\n method = request.POST.get('_method', 'POST')\n else:\n method = request.method\n\n return {\n 'get': _show,\n 'post': _create,\n 'put': _update,\n 'delete': _destroy,\n }[method.lower()](request, **kwargs)",
"def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s?method=%s&type=%s' % (self.endpoint, self.methodname, params.get('type', 'json'))\n else:\n return '%s?%s' % (self.endpoint, kwargs)",
"def __call__(self, request):\n if self.where == \"qs\":\n parts = urlparse(request.url)\n qs = parse_qs(parts.query)\n qs[self.qs_key] = self.token\n request.url = urlunparse(\n (\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(qs),\n parts.fragment,\n )\n )\n elif self.where == \"header\":\n request.headers[\"Authorization\"] = \"Bearer {}\".format(self.token)\n return request",
"def get_queryset(self):\n queryset = []\n if 'requester_id' in self.request.GET:\n queryset = (AdoptionRequest.objects\n .filter(requester__id=self.request.GET['requester_id'],\n was_deleted=False))\n elif 'all_requests' in self.request.GET:\n queryset = (AdoptionRequest.objects\n .filter(date__gt=datetime.now()-timedelta(days=15),\n was_deleted=False, status=2))\n elif 'proposal_id' in self.request.GET:\n queryset = (AdoptionRequest.objects\n .filter(date__gt=datetime.now()-timedelta(days=15),\n adoption_proposal__id=self.request\n .GET['proposal_id'], was_deleted=False))\n else:\n queryset = AdoptionRequest.objects.filter(was_deleted=False)\n return queryset",
"def getUserQueries():\n if request.method == \"POST\":\n result = get_query_data(request)\n if result:\n status = 200\n else:\n status = 400\n return result, status # HTTP Status Created [201]",
"def post(self):\n teacher = self.request.get(\"teacher\")\n temail = self.request.get(\"temail\")\n tphone = self.request.get(\"tphone\")\n specialty = self.request.get(\"specialty\")\n\n if teacher and temail and tphone and specialty:\n\n #create a new teacher object and store it in the database\n teacher = Teacher(\n teacher=teacher,\n temail=temail,\n tphone=tphone, \n specialty=specialty)\n teacher.put()\n\n id = teacher.key().id()\n self.redirect(\"/teacher/%s\" % id)\n else:\n error = \"Please include a teacher, an email, a phone number, and a specialty.\"\n self.render_form(teacher, temail, tphone, specialty, error)",
"def _params(self, request: Request) -> dict:\n params = {'forceAsync': True}\n\n subset = self._spatial_subset_params(request) + self._temporal_subset_params(request)\n if len(subset) > 0:\n params['subset'] = subset\n\n for p, val in request.parameter_values():\n if type(val) == str:\n params[p] = val\n elif type(val) == bool:\n params[p] = str(val).lower()\n elif type(val) == list and type(val[0]) != str:\n params[p] = ','.join([str(v) for v in val])\n else:\n params[p] = val\n\n return params",
"def get_teacher_career_results(self, teacher, career):\n data = []\n\n # Get the active exams of the career.\n exams = EvaluationsExam.objects.filter(\n type__exact=career.type, status=\"ACTIVE\")\n\n # Get the results for each exam.\n for exam in exams:\n\n # Get the signatures of the teacher for the career in the exam.\n signatures_dtl = EvaluationsTeacherSignature.objects.filter(\n fk_teacher__exact=teacher.id, fk_period__exact=exam.fk_period, status=\"ACTIVE\").select_related('fk_signature')\n\n signatures_results = []\n for signature_dtl in signatures_dtl:\n \n # If it raise an exception, it means that the signature isn't evaluated yet or other error.\n try:\n # Get the results of the signature.\n signature_results = EvaluationsSignatureResult.objects.get(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n status=\"ACTIVE\"\n )\n\n # Get the results for each question in the exam for the signature.\n questions_results = EvaluationsSignatureQuestionResult.objects.filter(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n fk_question__optional='NO',\n status=\"ACTIVE\"\n ).values_list('fk_question__description', 'result')\n\n # Get the comments of the signature/group.\n comments_result = EvaluationsSignatureQuestionResult.objects.get(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n fk_question__optional='YES',\n status=\"ACTIVE\"\n ).result\n\n # Split the comments and add them to a list, only the ones that are not empty.\n comments = list(filter(None, comments_result.split('|')))\n\n # Crate a dictionary with the results of the signature and the questions.\n signatures_results.append({\n 'teacher': teacher.name + ' ' + teacher.last_name + ' ' + teacher.last_name_2,\n 'signature': signature_dtl.fk_signature.description,\n 'group': signature_dtl.group,\n 'average': signature_results.average,\n 'comments': comments,\n 'total_evaluated': signature_results.total_evaluated,\n 'questions': questions_results\n })\n except Exception:\n pass\n\n # Add the results to the exam dictionary.\n exam_results = {\n 'exam': exam.description,\n 'career': career.description,\n 'signatures_results': signatures_results,\n 'period': exam.fk_period.period\n }\n\n # Add the exam results to the list that will be returned at the end.\n data.append(exam_results)\n\n return data"
] |
[
"0.5773605",
"0.550711",
"0.5505826",
"0.54748213",
"0.541358",
"0.5309723",
"0.522989",
"0.52046406",
"0.51748013",
"0.51410276",
"0.5131988",
"0.51249206",
"0.5116607",
"0.5065685",
"0.50522614",
"0.5045558",
"0.5037566",
"0.50146616",
"0.5013002",
"0.50122494",
"0.50120574",
"0.5007773",
"0.5007616",
"0.50019026",
"0.49764264",
"0.4969597",
"0.49615026",
"0.49510288",
"0.49492154",
"0.49171406"
] |
0.71122515
|
0
|
Test in_order_list() and make sure it returns an inorder list
|
def test_in_order_list(self):
_expected_list = [5, 13, 23, 57, 103]
_output_list = []
# Call in_order_list to test
in_order_list(self.root, _output_list)
# We just want to test the values
# so make a list from the list of objects
_sorted_output = [x.get_value() for x in _output_list]
assert len(_expected_list) == len(_output_list)
assert _expected_list == _sorted_output
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testin_order_0_4(bst_wiki):\n assert tuple(bst_wiki.in_order()) == (1, 2, 3, 4, 5, 6, 7, 8, 9)",
"def test_pre_order_list(self):\n _expected_list = [23, 5, 13, 57, 103]\n\n _output_list = []\n\n # Call pre_order_list to test\n pre_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _pre_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _pre_order_output",
"def test_in_order_traversal(our_bsts):\n bio = []\n for i in our_bsts[0].in_order():\n bio.append(i)\n assert bio == sorted(list(set(our_bsts[5])))",
"def in_order_list(root, lst):\n if None is root:\n return\n in_order_list(root.get_left(), lst)\n lst.append(root)\n in_order_list(root.get_right(), lst)",
"def test_binarytree_in_order_correct_on_given(given_list, capsys):\n expected = [11, 12, 14, 18, 19, 20, 22, 31, 33, 40]\n given_list.in_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual",
"def inorderUtil(self, root):\n if root:\n self.inorderUtil(root.left)\n self.inlist.append(root.key)\n self.inorderUtil(root.right)\n return self.inlist",
"def test_list_ordering(self) -> None:\n list1 = List.objects.create()\n item1 = Item.objects.create(list=list1, text=\"i1\")\n item2 = Item.objects.create(list=list1, text=\"item 2\")\n item3 = Item.objects.create(list=list1, text=\"3\")\n self.assertEqual(list(Item.objects.all()), [item1, item2, item3])",
"def inorder(self):\n\n traversal = []\n self.inorder_helper(self.root, traversal)\n return traversal",
"def test_get_order_items(self):\n pass",
"def test_level_order_list(self):\n _expected_list = [23, 5, 57, 13, 103]\n\n # Call level_order_list to test\n _lst = level_order_list(self.root)\n\n # We just want to test the values\n # so make a list from the list of objects\n _level_order_output = [x.get_value() for x in _lst]\n\n assert len(_expected_list) == len(_level_order_output)\n assert _expected_list == _level_order_output",
"def test_list_identity(self):\n pass",
"def test_in_order_0_1(bst_balanced):\n assert tuple(bst_balanced.in_order()) == (1, 2, 3, 5, 6, 7)",
"def test_in_order_0_3(bst_right_balance):\n assert tuple(bst_right_balance.in_order()) == (2, 5, 6, 7, 8, 9)",
"def test_in_order_0_2(bst_all_to_left):\n assert tuple(bst_all_to_left.in_order()) == (1, 2, 3, 4, 5)",
"def test_bst_empty_in_order(bst_empty):\n check_list = []\n bst_empty.in_order_trav(lambda x: check_list.append(x.val))\n assert check_list == []",
"def in_order_helper(self, node, alist=[], verbose=False):\n if node:\n in_order_helper(node.left, alist)\n if verbose:\n print(node.data)\n alist.append(node)\n in_order_helper(node.right, alist)",
"def test_in_list(self):\n\n # get available ids\n ids = list(DQ(\"(b.id) Book b\").tuples())\n ids = [id[0] for id in ids]\n\n # take just three of them\n c = {\"ids\": ids[:3]}\n dq = DQ(\"(b.id, b.name) Book{b.id in '$(ids)'} b\")\n r = list(dq.context(c).dicts())\n\n # make sure we got three of them\n self.assertEqual(len(r), 3)",
"def test_get_order(self):\n pass",
"def traverse_inorder(self, root, inorder):\n \n if root is not None:\n self.traverse_inorder(root.left, inorder)\n inorder.append(root)\n self.traverse_inorder(root.right, inorder)\n \n return inorder",
"def test_post_order_list(self):\n _expected_list = [13, 5, 103, 57, 23]\n\n _output_list = []\n\n # Call post_order_list to test\n post_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _post_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _post_order_output",
"def test_pre_order_traversal(our_bsts):\n bpo = []\n for i in our_bsts[0].pre_order():\n bpo.append(i)\n assert bpo == our_bsts[4]",
"def test_in_order_0_0(bst_empty):\n assert tuple(bst_empty.in_order()) == ()",
"def _inorder_traverse_to_list_helper(self, node):\n\t\tl = []\n\t\tif (node.lchild()):\n\t\t\tl += self._inorder_traverse_to_list_helper(node.lchild())\n\t\tl.append(node.value())\n\t\tif (node.rchild()):\n\t\t\tl += self._inorder_traverse_to_list_helper(node.rchild())\n\t\treturn l",
"def inorder_traversal(root, inorder):\r\n if root is None:\r\n return\r\n\r\n inorder_traversal(root.left, inorder)\r\n inorder.append(root.val)\r\n inorder_traversal(root.right, inorder)\r\n return inorder",
"def test_in_order_one_item_tree(bst_empty):\n bst_empty.insert(10)\n assert next(bst_empty.in_order()) == 10",
"def test_linked_list_instantiates_with_list_input():\n a = [5, 6, 7, 8]\n aa = LinkedList(a)\n # for i in a:\n # assert aa.includes(i) is False\n assert len(aa) == len(a)\n assert aa.includes(5)\n assert aa.includes(6)\n assert aa.includes(7)\n assert aa.includes(8)",
"def exec_list_contains(order_type):\n input_list = get_list_input()\n result = list_contains(input_list, order_type)\n print(result)",
"def inorder(self,root)->list:\n\t\tres=[]\n\t\tif root:\n\t\t\tres=self.inorder(root.left)\n\t\t\tres.append(root.data)\n\t\t\tres=res+self.inorder(root.right)\n\t\treturn res",
"def test_bst_in_order_normal(bst_ten_values_random):\n check_list = []\n bst_ten_values_random.in_order_trav(lambda x: check_list.append(x.val))\n assert check_list == [0,1,2,3,4,5,6,7,8,9]",
"def __iter__(self):\n return self.in_order"
] |
[
"0.71159047",
"0.68667185",
"0.65902406",
"0.6583254",
"0.65332425",
"0.64737827",
"0.6378989",
"0.63576657",
"0.63542444",
"0.6317717",
"0.629651",
"0.6267976",
"0.6235458",
"0.62261045",
"0.6203755",
"0.6186887",
"0.6052205",
"0.60516423",
"0.60353404",
"0.60251814",
"0.6012973",
"0.59481645",
"0.59411496",
"0.5937024",
"0.5891753",
"0.58866197",
"0.5834446",
"0.5809616",
"0.5788995",
"0.57834005"
] |
0.7938445
|
0
|
Test post_order_list() builds a proper list
|
def test_post_order_list(self):
_expected_list = [13, 5, 103, 57, 23]
_output_list = []
# Call post_order_list to test
post_order_list(self.root, _output_list)
# We just want to test the values
# so make a list from the list of objects
_post_order_output = [x.get_value() for x in _output_list]
assert len(_expected_list) == len(_output_list)
assert _expected_list == _post_order_output
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_post_order_traversal(our_bsts):\n bpost = []\n for i in our_bsts[0].post_order():\n bpost.append(i)\n assert bpost == our_bsts[6]",
"def test_pre_order_list(self):\n _expected_list = [23, 5, 13, 57, 103]\n\n _output_list = []\n\n # Call pre_order_list to test\n pre_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _pre_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _pre_order_output",
"def test_post_foods_list(self):\n pass",
"def test_postorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.post_order(lambda x: testlist.append(x))\n assert str(testlist) == str([1, 2, 3, 4, 0])",
"def test_list_ordering(self) -> None:\n list1 = List.objects.create()\n item1 = Item.objects.create(list=list1, text=\"i1\")\n item2 = Item.objects.create(list=list1, text=\"item 2\")\n item3 = Item.objects.create(list=list1, text=\"3\")\n self.assertEqual(list(Item.objects.all()), [item1, item2, item3])",
"def test_POST_list(self):\n\t\t# cleaner's lists should originally be empty\n\t\tdata = self.GET_data('/api/cleaner/' + self.cleaner['_id'])\n\t\tself.assertEqual([], data['lists'])\n\n\t\t# after posting list, cleaner's lists should contain just id of posted list\n\t\tself.POST_list()\n\t\tdata = self.GET_data('/api/cleaner/' + self.cleaner['_id'])\n\t\tself.assertEqual(1, len(data['lists']))\n\t\tself.assertEqual(self.list_id, data['lists'][0])",
"def test_get_order_items(self):\n pass",
"def test_binarytree_post_order_on_given(given_list, capsys):\n expected = [11, 14, 12, 19, 18, 22, 33, 31, 40, 20]\n given_list.post_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual",
"def test_post_order_0_4(bst_wiki):\n assert tuple(bst_wiki.post_order()) == (1, 3, 2, 5, 6, 4, 8, 9, 7)",
"def test_in_order_list(self):\n _expected_list = [5, 13, 23, 57, 103]\n _output_list = []\n \n # Call in_order_list to test\n in_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _sorted_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _sorted_output",
"def test_create_order_list(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def create_order():",
"def test_list(self):\n pass",
"def test_list(self):\n pass",
"def test_get_order(self):\n pass",
"def test_place_multiple_orders(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(len(self.orders_list.orders_list), 3)\n self.assertEqual(self.orders_list.orders_list[2].order_id, 2)",
"def test_level_order_list(self):\n _expected_list = [23, 5, 57, 13, 103]\n\n # Call level_order_list to test\n _lst = level_order_list(self.root)\n\n # We just want to test the values\n # so make a list from the list of objects\n _level_order_output = [x.get_value() for x in _lst]\n\n assert len(_expected_list) == len(_level_order_output)\n assert _expected_list == _level_order_output",
"def test_bst_post_order_normal(bst_ten_values_random):\n check_list = []\n bst_ten_values_random.post_order_trav(lambda x: check_list.append(x.val))\n assert check_list == [0,2,1,4,3,7,6,9,8,5]",
"def test_POST_send_list(self):\n\t\tself.POST_list()\n\t\tlist = self.GET_data('/api/list/' + self.list_id)\n\t\tself.POST_data('/api/list/' + self.list_id + '/send', data=list)",
"def test_get_order_list(self):\n response = self.client.get(reverse('get_all_or_create'))\n orders = PizzaOrder.objects.all()\n serializer = PizzaOrderSerializer(orders, many=True)\n self.assertEqual(response.data, serializer.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def POST_list(self):\n\t\trv = self.POST_data('/api/cleaner/{0}/list'.format(self.cleaner['_id']), data=TEST_LIST_DATA)\n\t\tself.list_id = json.loads(rv.data)['_id']",
"def test_list_field():",
"def generate_tree_postorder(node_lst, root_index):",
"def test_get_order_list(self):\n\n user = self.set_auth_token_header()\n\n # Order list API\n # User has no order\n url = reverse('orders-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, [])\n\n # User has orders\n data = [\n {\n 'stock': Stock.objects.get(code='AAPL'),\n 'order_type': OrderType.objects.get(code='BUY'),\n 'total_value': 18.75,\n 'status': OrderStatus.objects.get(code='FILLED'),\n 'quantity': 15.0,\n 'price': 1.25,\n 'account': user.account\n },\n ]\n data_obj = [Order(**item) for item in data]\n _ = Order.objects.bulk_create(data_obj)\n\n url = reverse('orders-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), len(data))",
"def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)",
"def test_get_list(self):\n pass",
"def test_booklist_ordered_by_due_date(self):\n # Change all book's status to loan('o')\n for book in BookInstance.objects.all():\n book.status = 'o'\n book.save()\n\n # Login into page\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK')\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Confirm that only 10 items are displayed per page\n self.assertEqual(len(response.context['bookinstancelist']), 10)\n\n last_date = 0\n for book in response.context['bookinstancelist']:\n if last_date == 0:\n last_date = book.due_back\n else:\n self.assertTrue(last_date <= book.due_back)\n last_date = book.due_back",
"def post_order_list(root, lst):\n if None is root:\n return\n post_order_list(root.get_left(), lst)\n post_order_list(root.get_right(), lst)\n lst.append(root)",
"def postorderUtil(self, root):\n if root:\n self.postorderUtil(root.left)\n self.postorderUtil(root.right)\n self.postlist.append(root.key)\n return self.postlist",
"def test_post_order_0_3(bst_right_balance):\n assert tuple(bst_right_balance.post_order()) == (2, 5, 7, 9, 8, 6)"
] |
[
"0.6714462",
"0.67056686",
"0.668058",
"0.66765714",
"0.6622983",
"0.65353596",
"0.6508708",
"0.6487586",
"0.64617425",
"0.6366691",
"0.6242497",
"0.62152255",
"0.6207949",
"0.6207949",
"0.61733276",
"0.6121865",
"0.6087537",
"0.59351695",
"0.58675224",
"0.586745",
"0.58402985",
"0.58368725",
"0.5834564",
"0.58329767",
"0.5799286",
"0.57728225",
"0.574634",
"0.57441086",
"0.57416844",
"0.5735677"
] |
0.82653
|
0
|
Test pre_order_list() builds a proper list
|
def test_pre_order_list(self):
_expected_list = [23, 5, 13, 57, 103]
_output_list = []
# Call pre_order_list to test
pre_order_list(self.root, _output_list)
# We just want to test the values
# so make a list from the list of objects
_pre_order_output = [x.get_value() for x in _output_list]
assert len(_expected_list) == len(_output_list)
assert _expected_list == _pre_order_output
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_preorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.pre_order(lambda x: testlist.append(x))\n assert str(testlist) == str([0, 1, 2, 3, 4])",
"def test_binarytree_pre_order_on_given(given_list, capsys):\n expected = [20, 18, 12, 11, 14, 19, 40, 31, 22, 33]\n given_list.pre_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual",
"def test_pre_order_traversal(our_bsts):\n bpo = []\n for i in our_bsts[0].pre_order():\n bpo.append(i)\n assert bpo == our_bsts[4]",
"def test_in_order_list(self):\n _expected_list = [5, 13, 23, 57, 103]\n _output_list = []\n \n # Call in_order_list to test\n in_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _sorted_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _sorted_output",
"def test_post_order_list(self):\n _expected_list = [13, 5, 103, 57, 23]\n\n _output_list = []\n\n # Call post_order_list to test\n post_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _post_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _post_order_output",
"def test_list_ordering(self) -> None:\n list1 = List.objects.create()\n item1 = Item.objects.create(list=list1, text=\"i1\")\n item2 = Item.objects.create(list=list1, text=\"item 2\")\n item3 = Item.objects.create(list=list1, text=\"3\")\n self.assertEqual(list(Item.objects.all()), [item1, item2, item3])",
"def pre_order_list(root, lst):\n if None is root:\n return\n lst.append(root)\n pre_order_list(root.get_left(), lst)\n pre_order_list(root.get_right(), lst)",
"def test_pre_order_0_4(bst_wiki):\n assert tuple(bst_wiki.pre_order()) == (7, 4, 2, 1, 3, 6, 5, 9, 8)",
"def test_level_order_list(self):\n _expected_list = [23, 5, 57, 13, 103]\n\n # Call level_order_list to test\n _lst = level_order_list(self.root)\n\n # We just want to test the values\n # so make a list from the list of objects\n _level_order_output = [x.get_value() for x in _lst]\n\n assert len(_expected_list) == len(_level_order_output)\n assert _expected_list == _level_order_output",
"def test_bst_pre_order_normal(bst_ten_values_random):\n check_list = []\n bst_ten_values_random.pre_order_trav(lambda x: check_list.append(x.val))\n assert check_list == [5,3,1,0,2,4,8,6,7,9]",
"def preorderUtil(self, root):\n if root:\n self.prelist.append(root.key)\n self.preorderUtil(root.left)\n self.preorderUtil(root.right)\n return self.prelist",
"def test_bst_empty_pre_order(bst_empty):\n check_list = []\n bst_empty.pre_order_trav(lambda x: check_list.append(x.val))\n assert check_list == []",
"def pre_order(self, verbose=False):\n if not self.root:\n return []\n alist = []\n self.pre_order_helper(self.root, alist, verbose)\n return alist",
"def test_get_order_items(self):\n pass",
"def preorder(self):\n\n traversal = []\n self.preorder_helper(self.root, traversal)\n return traversal",
"def test_pre_order_0_2(bst_all_to_left):\n assert tuple(bst_all_to_left.pre_order()) == (4, 2, 1, 3, 5)",
"def pre_order_helper(self, node, alist=[], verbose=False):\n if node:\n if verbose:\n print(node.data)\n alist.append(node)\n pre_order_helper(node.left, alist, verbose)\n pre_order_helper(node.right, alist, verbose)",
"def test_list(self):\n pass",
"def test_list(self):\n pass",
"def list_preorder(t: Tree) -> list:\n if t.value is None:\n return []\n else:\n return [t.value] + sum([list_preorder(s) for s in t.children], [])\n\n # if t.value is None:\n # return []\n # elif t.children == []:\n # return [t.value]\n # else:\n # pre = [t.value]\n # for subtree in t.children:\n # pre.extend(list_preorder(subtree))\n # return pre",
"def test_ordered_lists(self):\n\n list_str = '1. One'\n \n doc = parser.parse(list_str)\n self.assertEqual(len(doc.children()), 1)\n\n ol = doc.children()[0]\n self.assertTrue(isinstance(ol, parser.ListNode))\n\n self.assertEqual(str(doc), list_str)\n\n list_str = '- One\\n 1. OneOne\\n 2. OneTwo'\n\n doc = parser.parse(list_str)\n self.assertEqual(len(doc.children()), 1)\n\n ul = doc.children()[0]\n self.assertEqual(len(ul.children), 1)\n\n li = ul.children[0]\n ol = li.children[0]\n\n self.assertEqual(len(ol.children), 2)",
"def test_list_prepend_updates(self):\n partition = uuid4()\n cluster = 1\n original = [\"foo\"]\n TestQueryUpdateModel.objects.create(\n partition=partition, cluster=cluster, text_list=original)\n prepended = ['bar', 'baz']\n TestQueryUpdateModel.objects(\n partition=partition, cluster=cluster).update(\n text_list__prepend=prepended)\n obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\n expected = (prepended[::-1] if is_prepend_reversed() else prepended) + original\n self.assertEqual(obj.text_list, expected)",
"def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])",
"def __init__(self):\n self._order_list = []",
"def test_binarytree_in_order_correct_on_given(given_list, capsys):\n expected = [11, 12, 14, 18, 19, 20, 22, 31, 33, 40]\n given_list.in_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual",
"def test_pre_order_0_1(bst_balanced):\n assert tuple(bst_balanced.pre_order()) == (5, 2, 1, 3, 6, 7)",
"def preorderTraversal(self, root: TreeNode) -> List[int]:\n def preorder(root,seq):\n if root is None:\n return seq\n seq.append(root.val)\n preorder(root.left,seq)\n preorder(root.right,seq)\n return seq\n \n prelist= []\n return preorder(root,prelist)",
"def create_order():",
"def pre_order_traversal(self, root):\n\n def pre_order_traversal_helper(root):\n if root:\n result.append(root.data)\n pre_order_traversal_helper(root.left)\n pre_order_traversal_helper(root.right)\n\n result = []\n pre_order_traversal_helper(root)\n return result",
"def preorder(self,root)->list:\n\t\tres=[]\n\t\tif root:\n\t\t\tres.append(root.data)\n\t\t\tres=res+self.preorder(root.left)\n\t\t\tres=res+self.preorder(root.right)\n\t\treturn res"
] |
[
"0.7134197",
"0.6952254",
"0.69361955",
"0.69228834",
"0.6712876",
"0.66876805",
"0.66182965",
"0.6506275",
"0.6481896",
"0.6332289",
"0.622093",
"0.61910236",
"0.61716825",
"0.6148367",
"0.6133927",
"0.6091369",
"0.6089609",
"0.60648483",
"0.60648483",
"0.6035778",
"0.5958515",
"0.59499073",
"0.5933721",
"0.58888847",
"0.58643454",
"0.5862074",
"0.5833069",
"0.5828051",
"0.5822717",
"0.58135337"
] |
0.83723724
|
0
|
Test level_order_list() returns a proper list
|
def test_level_order_list(self):
_expected_list = [23, 5, 57, 13, 103]
# Call level_order_list to test
_lst = level_order_list(self.root)
# We just want to test the values
# so make a list from the list of objects
_level_order_output = [x.get_value() for x in _lst]
assert len(_expected_list) == len(_level_order_output)
assert _expected_list == _level_order_output
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_print_level_order(depth_one_tree):\n assert print_level_order(depth_one_tree) == ['0', '1 2 3 4']",
"def test_print_level_order_2(depth_one_tree):\n depth_one_tree.insert(5, 1)\n depth_one_tree.insert(6, 2)\n depth_one_tree.insert(7, 3)\n depth_one_tree.insert(8, 4)\n assert print_level_order(depth_one_tree) == ['0', '1 2 3 4', '5 6 7 8']",
"def get_order(parent_level):\n subset_lookup = {}\n for entry in [d for d in data if d[0] == parent_level]:\n start = entry[1]\n if start not in subset_lookup:\n subset_lookup[start] = []\n subset_lookup[start].append(entry)\n print(entry)\n pprint(subset_lookup)\n processing_order = []\n for items in reversed(list(subset_lookup.values())):\n for item in items:\n processing_order.append(item)\n processing_order.extend(get_order(item[2]))\n return processing_order",
"def levelOrder(self, root: 'Node') -> List[List[int]]:\n if not root: return []\n level = []\n waiting = []\n result = []\n level.append(root)\n while level:\n current = []\n while level:\n tmp = level.pop(0)\n if not tmp:\n continue\n current.append(tmp.val)\n waiting.append(tmp)\n if len(current) > 0:\n result.append(current)\n while waiting:\n tmp = waiting.pop(0)\n for ch in tmp.children:\n level.append(ch)\n return result",
"def test_in_order_list(self):\n _expected_list = [5, 13, 23, 57, 103]\n _output_list = []\n \n # Call in_order_list to test\n in_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _sorted_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _sorted_output",
"def levelOrder(self, root: TreeNode) -> List[List[int]]:\n\n result = []\n if(root is None):\n return result\n\n q = deque([root])\n while(q):\n n = len(q)\n level = []\n for i in range(0,n):\n f = q.popleft()\n level.append(f.val)\n\n if (f.left is not None):\n q.append(f.left)\n if (f.right is not None):\n q.append(f.right)\n\n if(len(level) > 0):\n result.append(level[:])\n level.clear()\n return result",
"def test_print_level_order():\n with pytest.raises(TypeError):\n print_level_order(1)\n with pytest.raises(TypeError):\n print_level_order('string')\n with pytest.raises(TypeError):\n print_level_order([1, 2, 3, 4])",
"def items_level_order(self):\n # TODO: Create a queue to store nodes not yet traversed in level-order\n queue = ...\n # Create an items list\n items = list()\n # TODO: Enqueue the root node if this tree is not empty\n if ...:\n queue...\n # TODO: Loop until the queue is empty\n while ...:\n # TODO: Dequeue the node at the front of the queue\n node = ...\n # TODO: Add this node's data to the items list\n ...\n # TODO: Enqueue this node's left child if it exists\n if ...:\n ...\n # TODO: Enqueue this node's right child if it exists\n if ...:\n ...\n # Return the items list\n return items",
"def __level_entries_list__(self):\n # | - __level_entries_list__\n level_entries_dict = self.level_entries\n level_labels = self.tree_level_labels\n\n level_entries_list = []\n for param_i in level_labels:\n # for name, params_list in level_entries_dict.iteritems():\n for name, params_list in level_entries_dict.items():\n if param_i == name:\n level_entries_list.append(params_list)\n\n return(level_entries_list)\n # __|",
"def levelorder(self):\n return (node for node in self.get_level_order(self.root))",
"def levelorder(root):\n h = height(root)\n for i in range(1, h + 1):\n print_level(root, i)",
"def _get_level(self, cr, uid, lll, tree, level=1, context=None):\n context = context and dict(context) or {}\n if not tree.get(level):\n tree[level] = {}\n # The search through level should be backwards from the deepest level\n # to the outmost level\n levels = tree.keys()\n levels.sort()\n levels.reverse()\n xlevel = False\n for nnn in levels:\n xlevel = isinstance(tree[nnn].get(lll.id), (set)) and nnn or xlevel\n if not xlevel:\n tree[level][lll.id] = set()\n elif xlevel < level:\n tree[level][lll.id] = tree[xlevel][lll.id]\n del tree[xlevel][lll.id]\n else: # xlevel >= level\n return True\n for jjj in set(lll.total_ids + lll.operand_ids):\n tree[level][lll.id].add(jjj.id)\n self._get_level(cr, uid, jjj, tree, level + 1, context=context)\n return True",
"def getPriorityList(self):",
"def test_pre_order_list(self):\n _expected_list = [23, 5, 13, 57, 103]\n\n _output_list = []\n\n # Call pre_order_list to test\n pre_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _pre_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _pre_order_output",
"def test_reorder_level(self):\n reorder_level = self._uncertain_demand.reorder_level\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n test_reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n self.assertEqual(float(reorder_level), test_reorder)",
"def test_binarytree_in_order_correct_on_given(given_list, capsys):\n expected = [11, 12, 14, 18, 19, 20, 22, 31, 33, 40]\n given_list.in_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual",
"def calc_levels(ply_order, n_plies_in_groups, n_groups):\r\n levels_in_groups = [None]*n_groups\r\n for ind_group in range(n_groups):\r\n levels_in_groups[ind_group] = []\r\n\r\n ind_all_plies = 0\r\n for ind_group in range(n_groups):\r\n for ind_plies in range(n_plies_in_groups[ind_group]):\r\n levels_in_groups[ind_group].append(ply_order[ind_all_plies])\r\n ind_all_plies += 1\r\n\r\n return levels_in_groups",
"def in_order(self, verbose=False):\n if not self.root:\n return []\n alist = []\n self.in_order_helper(self.root, alist, verbose)\n return alist",
"def test_level_discovery(self):\n defined_levels = find_defined_levels()\n level_values = defined_levels.values()\n for number in (0, 10, 20, 30, 40, 50):\n assert number in level_values",
"def getLevels():",
"def level_order_list(root):\n return_list = []\n queue = []\n queue.append(root)\n\n while 0 < len(queue):\n next_node = queue.pop(0)\n if next_node.get_left():\n queue.append(next_node.get_left())\n if next_node.get_right():\n queue.append(next_node.get_right())\n return_list.append(next_node)\n\n return return_list",
"def test_preorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.pre_order(lambda x: testlist.append(x))\n assert str(testlist) == str([0, 1, 2, 3, 4])",
"def getLevels(self):\n levels = self.levels.keys()\n levels.sort()\n a = str(levels)\n \n logger.info('[biospatial.gbif.taxonomy.NestedTaxonomy]\\n Available Levels %s' %a)\n return a",
"def items_level_order(self):\n items = []\n if not self.is_empty():\n # Traverse tree level-order from root, appending each node's item\n self._traverse_level_order_iterative(self.root, items.append)\n # Return level-order list of all items in tree\n return items",
"def test_postorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.post_order(lambda x: testlist.append(x))\n assert str(testlist) == str([1, 2, 3, 4, 0])",
"def get_level_order(self, node, more=None):\n if node is not None:\n if more is None:\n more = []\n more += [n for n in node.children if n is not None]\n yield node\n if more:\n for data in self.get_level_order(more[0], more[1:]):\n yield data",
"def test__get_level(self):\r\n\r\n # check regular case with and without prefix tags\r\n expected_output = 1\r\n output = _get_level(0.20, [0.25, 0.5, 0.75])\r\n self.assertEquals(output, expected_output)\r\n\r\n expected_output = 'level_bin_1_of_4'\r\n output = _get_level(0.20, [0.25, 0.5, 0.75], 'level_bin')\r\n self.assertEquals(output, expected_output)\r\n\r\n expected_output = 'level_bin_3_of_6'\r\n output = _get_level(0.20, [0.05, 0.15, 0.35, 0.8, 0.95], 'level_bin')\r\n self.assertEquals(output, expected_output)\r\n\r\n # edge cases with and without prefix tags\r\n expected_output = 2\r\n output = _get_level(0.25, [0.25, 0.5, 0.75])\r\n self.assertEquals(output, expected_output)\r\n\r\n expected_output = 4\r\n output = _get_level(1, [0.25, 0.5, 0.75])\r\n self.assertEquals(output, expected_output)\r\n\r\n expected_output = 'testing_bin_2_of_4'\r\n output = _get_level(0.25, [0.25, 0.5, 0.75], 'testing_bin')\r\n self.assertEquals(output, expected_output)\r\n\r\n expected_output = 'testing_bin_4_of_4'\r\n output = _get_level(1, [0.25, 0.5, 0.75], 'testing_bin')\r\n self.assertEquals(output, expected_output)\r\n\r\n # unwanted cases, greater than one and negative values\r\n with self.assertRaises(ValueError):\r\n output = _get_level(1.3, [0.5])\r\n\r\n with self.assertRaises(ValueError):\r\n output = _get_level(-1, [0.25, 0.5, 0.75])",
"def test_list_ordering(self) -> None:\n list1 = List.objects.create()\n item1 = Item.objects.create(list=list1, text=\"i1\")\n item2 = Item.objects.create(list=list1, text=\"item 2\")\n item3 = Item.objects.create(list=list1, text=\"3\")\n self.assertEqual(list(Item.objects.all()), [item1, item2, item3])",
"def test_binarytree_pre_order_on_given(given_list, capsys):\n expected = [20, 18, 12, 11, 14, 19, 40, 31, 22, 33]\n given_list.pre_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual",
"def make_order(self, root):\n order = []\n if root and isinstance(root[0], dict):\n keys = set()\n for item in root:\n for key in item.keys():\n keys.add(key)\n for key in args.order or []:\n key = self.get_key(key, keys)\n keys.remove(key)\n order.append(key)\n order += sorted(list(keys))\n return order"
] |
[
"0.7119945",
"0.6891942",
"0.6810224",
"0.67512816",
"0.66849685",
"0.65906245",
"0.654603",
"0.6543282",
"0.6518044",
"0.6412306",
"0.63750464",
"0.622505",
"0.6220062",
"0.6184824",
"0.6179744",
"0.61769027",
"0.610887",
"0.60863924",
"0.6083974",
"0.60619295",
"0.5994936",
"0.59627104",
"0.5942872",
"0.5897304",
"0.58702517",
"0.5861949",
"0.5848986",
"0.58338296",
"0.5827164",
"0.5801244"
] |
0.8427759
|
0
|
This function warps an image sequence (with corresponding flow and depth) to a stereo sequence: 1) first, it uses a heuristic to calculate an initial depth; 2) then, it smooths this initial depth by spatial smoothness if needed; 3) after that, it warps the image and interpolates to generate the right image, pushing the pixels [to the right] as the depth increases
|
def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam):
h, w, u = curImageInfo.originalImageResized.shape # shape after resize
K = 1
N = h * w * K
gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3
grs = cv2.GaussianBlur(gr, (5, 5), 1)
# One heuristic for converting depth to disparity
disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;
if conversionParam.spatialSmoothnessSwitch == True:
# Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients
# Vertical and Horizontal Edges
dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))
dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))
W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2
A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, "csc") \
+ (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \
+ (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))
b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()
[x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)
disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)
else:
disparity = disparity0
curImageInfo.leftImage = curImageInfo.originalImage
# The -ve sign to convert the white to black and black to white
warpright = -disparity
# only the warping interp2 is done on the original size image with no resizing to have good estimation
warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]),
interpolation=cv2.INTER_LINEAR)
curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright),
conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))
return disparity
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def smooth_stitch(*, input_dir, output_dir):\n image_paths = glob(os.path.join(input_dir, \"*.tif\"))\n if not image_paths:\n raise RuntimeError(\"%s does not contain any .tif file\" % (input_dir))\n\n # Get the profile and affine of some image as template for output image\n first_image = image_paths[0]\n with rasterio.open(first_image) as src:\n profile = src.profile.copy()\n src_res = src.res\n chip_size = src.width\n assert src.width == src.height\n\n with tempfile.TemporaryDirectory() as tmpdir:\n tmp_image_paths = generate_spline_window_chips(\n image_paths=image_paths, output_dir=tmpdir\n )\n\n # Get bounds from all images and build R-Tree index\n idx, (dst_w, dst_s, dst_e, dst_n) = build_bounds_index(tmp_image_paths)\n\n # Get affine transform for complete bounds\n logger.info(\"Output bounds: %r\", (dst_w, dst_s, dst_e, dst_n))\n output_transform = Affine.translation(dst_w, dst_n)\n logger.info(\"Output transform, before scaling: %r\", output_transform)\n\n output_transform *= Affine.scale(src_res[0], -src_res[1])\n logger.info(\"Output transform, after scaling: %r\", output_transform)\n\n # Compute output array shape. We guarantee it will cover the output\n # bounds completely. We need this to build windows list later.\n output_width = int(math.ceil((dst_e - dst_w) / src_res[0]))\n output_height = int(math.ceil((dst_n - dst_s) / src_res[1]))\n\n # Set width and height for output chips, and other attributes\n profile.update(width=chip_size, height=chip_size, tiled=True)\n\n windows = list(\n sliding_windows(chip_size, width=output_width, height=output_height)\n )\n logger.info(\"Num. windows: %d\", len(windows))\n\n for win, (i, j) in tqdm(windows):\n # Get window affine transform and bounds\n win_transform = rasterio.windows.transform(win, output_transform)\n win_bounds = rasterio.windows.bounds(win, output_transform)\n\n # Get chips that intersect with window\n intersect_chip_paths = [\n tmp_image_paths[i] for i in idx.intersection(win_bounds)\n ]\n\n if intersect_chip_paths:\n # Merge them with median method\n img = merge_chips(intersect_chip_paths, win_bounds=win_bounds)\n\n # Write output chip\n profile.update(transform=win_transform)\n output_path = os.path.join(output_dir, f\"{i}_{j}.tif\")\n\n os.makedirs(output_dir, exist_ok=True)\n with rasterio.open(output_path, \"w\", **profile) as dst:\n for i in range(img.shape[0]):\n dst.write(img[i, :, :], i + 1)",
"def pipeline(self,img,debug=0):\n\t\timg = self.cam.undist(img)\n\t\t#get warped binary image\n\t\tbinary_warped = self.cam.warp(Image(img).binary_th())\n\t\tbw_shape = binary_warped.shape\n\t\t\n\t\tif (self.leftLine.detected == True and self.rightLine.detected == True):\n\t\t\tself.quick_search(binary_warped,debug)\n\t\telse:\n\t\t\tself.blind_search(binary_warped,debug)\n\t\n\t\tif (self.leftLine.fit!=None and self.rightLine.fit!=None):\n\t\t\tpolygon = self.fill_lane(bw_shape)\n\t\t\tunwarped_polygon = self.cam.unwarp(polygon)\n\t\t\t# calculate position of lane's center \n\t\t\ttemp = np.nonzero(unwarped_polygon[-1,:,1])[0]\n\t\t\tleft, right = temp[0], temp[-1]\n\t\t\tself.center = (int(bw_shape[1]/2) - (int((right-left)/2)+int(left)))*7.4/1280\n\t\t\timg_lines = weighted_img(unwarped_polygon,img, α=1, β=0.5, λ=0.)\n\t\t\t# write text on image\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\ttext1 = 'Radius of Curvature: {:.0f}m'.format(np.mean((self.leftLine.radius, self.rightLine.radius)))\n\t\t\ttext2 = 'Distance is {:.2f}m {} of center'.format(abs(self.center), 'left' if self.center<0 else 'right')\n\n\t\t\tcv2.putText(img_lines, text1, (100,100), font, 1,(255,255,255),2)\n\t\t\tcv2.putText(img_lines, text2 ,(100,140), font, 1,(255,255,255),2)\n\t\t\t\n\t\t\tif (debug==1):\n\t\t\t\tshow_2gr(polygon, unwarped_polygon)\n\t\t\t\tshow_2gr(binary_warped, unwarped_polygon)\n\n\t\t\treturn img_lines\n\n\t\telse:\n\t\t\t# no lines detected and not fit available: return original image\n\t\t\t# without lines\n\t\t\treturn img",
"def warp_flow(img, flow):\n h, w = flow.shape[:2]\n flow = -flow\n flow[:, :, 0] += np.arange(w)\n flow[:, :, 1] += np.arange(h)[:, np.newaxis]\n #res = cv2.remap(img, flow, None, cv2.INTER_LINEAR,\n # borderValue=(1.0, 1.0, 1.0, 0.0))\n res = cv2.remap(img, flow, None, cv2.INTER_LINEAR,\n borderValue=(0.0, 0.0, 0.0, 0.0))\n return res",
"def warp_image(im, flow):\n from scipy import interpolate\n image_height = im.shape[0]\n image_width = im.shape[1]\n flow_height = flow.shape[0]\n flow_width = flow.shape[1]\n n = image_height * image_width\n (iy, ix) = np.mgrid[0:image_height, 0:image_width]\n (fy, fx) = np.mgrid[0:flow_height, 0:flow_width]\n fx += flow[:, :, 0]\n fy += flow[:, :, 1]\n mask = np.logical_or(fx < 0, fx > flow_width)\n mask = np.logical_or(mask, fy < 0)\n mask = np.logical_or(mask, fy > flow_height)\n fx = np.minimum(np.maximum(fx, 0), flow_width)\n fy = np.minimum(np.maximum(fy, 0), flow_height)\n points = np.concatenate((ix.reshape(n, 1), iy.reshape(n, 1)), axis=1)\n xi = np.concatenate((fx.reshape(n, 1), fy.reshape(n, 1)), axis=1)\n warp = np.zeros((image_height, image_width, im.shape[2]))\n for i in range(im.shape[2]):\n channel = im[:, :, i]\n plt.imshow(channel, cmap='gray')\n values = channel.reshape(n, 1)\n new_channel = interpolate.griddata(points, values, xi, method='cubic')\n new_channel = np.reshape(new_channel, [flow_height, flow_width])\n new_channel[mask] = 1\n warp[:, :, i] = new_channel.astype(np.uint8)\n\n return warp.astype(np.uint8)",
"def recreateImgsFromLapPyr(imgPyramid): \n layerNum = len(imgPyramid)\n curSrc=imgPyramid[-1].copy()\n for l in np.arange(layerNum-2, -1, -1):\n imgUp = cv2.resize(curSrc, (imgPyramid[l].shape[1], imgPyramid[l].shape[0]))\n imgBlurUp = cv2.GaussianBlur(imgUp, ksize=(0, 0), sigmaX=3)\n curSrc = imgBlurUp + imgPyramid[l]\n \n return(curSrc)",
"def morph(src_img, src_points, dest_img, dest_points,\n video, width=500, height=600, num_frames=20, fps=10,\n out_frames=None, out_video=None, alpha=False, plot=False):\n size = (height, width)\n stall_frames = np.clip(int(fps*0.15), 1, fps) # Show first & last longer\n plt = plotter.Plotter(plot, num_images=num_frames, out_folder=out_frames)\n num_frames -= (stall_frames * 2) # No need to process src and dest image\n label = plotter.Plotter(plot, num_images=2, out_folder=out_frames, label=True)\n label.plot_one(src_img, src_points)\n label.plot_one(dest_img, dest_points)\n label.show()\n plt.plot_one(src_img)\n video.write(src_img, 1)\n try:\n os.mkdir(os.path.join(os.getcwd(),'result'))\n os.mkdir(os.path.join(os.getcwd(),'result','src'))\n os.mkdir(os.path.join(os.getcwd(),'result','src_corners'))\n os.mkdir(os.path.join(os.getcwd(),'result','end'))\n os.mkdir(os.path.join(os.getcwd(),'result','average'))\n except Exception as e:\n print(e)\n\n # Produce morph frames!\n for percent in np.linspace(1, 0, num=num_frames):\n points = locator.weighted_average_points(src_points, dest_points, percent)\n src_face = warper.warp_image(src_img, src_points, points, size)\n end_face = warper.warp_image(dest_img, dest_points, points, size)\n average_face = blender.weighted_average(src_face, end_face, percent)\n average_face = alpha_image(average_face, points) if alpha else average_face\n average_face[:,:,:3] = correct_colours(src_face, average_face, np.matrix(points))\n corners = np.array([np.array([0,0]),np.array([0,height-2]),np.array([width-2,0]),np.array([width-2,height-2])])\n src_points_with_corners = np.concatenate((src_points, corners))\n points_with_corners = np.concatenate((points, corners))\n src_face_corners = warper.warp_image(src_img, src_points_with_corners, points_with_corners, size)\n average_face = process_edge(src_face_corners, average_face, width, height)\n plt.plot_one(average_face)\n filename = '%d.jpg' % int((1-percent)*num_frames)\n cv2.imwrite(os.path.join(os.getcwd(),'result','src',filename), src_face)\n cv2.imwrite(os.path.join(os.getcwd(),'result','src_corners',filename), src_face_corners)\n cv2.imwrite(os.path.join(os.getcwd(),'result','end',filename), end_face)\n cv2.imwrite(os.path.join(os.getcwd(),'result','average',filename), average_face)\n plt.save(average_face)\n video.write(average_face)\n\n plt.plot_one(dest_img)\n video.write(dest_img, stall_frames)\n plt.show()",
"def flow_warp(src_img, flow):\n batch, height, width, _ = src_img.get_shape().as_list()\n tgt_pixel_coords = meshgrid(batch, height, width, False)\n src_pixel_coords = tgt_pixel_coords + flow\n output_img = bilinear_sampler(src_img, src_pixel_coords)\n return output_img",
"def image_sources(dim, xs, order, rc):\n sources = np.zeros((number_of_sources(order)+1, 6))\n \"\"\"gain factor of sound source = 1\n number of the last hitted wall = 0\n propagation path = 0, because 0 wall hitted\"\"\"\n sources[0, :] = [xs[0], xs[1], xs[2], 1, 0, 0]\n\n c = 0 # counter to iterate\n r = 1 # variable to write data in the corresponding row\n while c <= number_of_sources(order - 1):\n sq = mirror_source(dim, [sources[c, 0], sources[c, 1],\n sources[c, 2]], sources[c, 3], sources[c, 4], rc,\n sources[c, 5])\n sources[r:r+sq.shape[0], :] = sq\n c += 1\n r += sq.shape[0]\n return(sources)",
"def smooth_depth_image(depth_image, max_hole_size=10):\n\tmask = np.zeros(depth_image.shape,dtype=np.uint8)\n\tmask[depth_image==0] = 1\n\n\t# Do not include in the mask the holes bigger than the maximum hole size\n\tkernel = np.ones((max_hole_size,max_hole_size),np.uint8)\n\terosion = cv2.erode(mask,kernel,iterations = 1)\n\tmask = mask - erosion\n\n\tsmoothed_depth_image = cv2.inpaint(depth_image.astype(np.uint16),mask,max_hole_size,cv2.INPAINT_NS)\n\n\treturn smoothed_depth_image",
"def ViewInterpolation(curImageInfo, conversionParam, globalParam):\n if np.max(np.max(np.abs(curImageInfo.depth))) < 0.1:\n # no need for interpolation the depth is the same for all the pixels\n curImageInfo.leftImage = curImageInfo.originalImage\n curImageInfo.rightImage = curImageInfo.originalImage\n \n vres, hres, u = curImageInfo.leftImage.shape\n curImageInfo.sideBySide = np.zeros((vres, hres*2, 3), dtype=np.float32)\n curImageInfo.sideBySide[:, 0:hres, :] = curImageInfo.leftImage\n curImageInfo.sideBySide[:, hres: , :] = curImageInfo.rightImage\n \n return\n \n # Warping\n disparity = stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam)\n \n # Create Side by Side image\n vres, hres, u = curImageInfo.leftImage.shape\n curImageInfo.sideBySide = np.zeros((vres, hres*2, 3), dtype=np.float32)\n curImageInfo.sideBySide[:, 0:hres, :] = curImageInfo.leftImage\n curImageInfo.sideBySide[:, hres: , :] = curImageInfo.rightImage",
"def burning_ship(pixels, width, height, max_iterations, re_start, re_end, im_start, im_end, color_hue,\n color_saturation, color_intensity):\n\n for x in prange(0, width):\n for y in prange(0, height):\n c = complex((re_start + (x / width) * (re_end - re_start)),\n (im_start + (y / height) * (im_end - im_start)))\n z = 0.0j\n\n iterations = 0\n while (abs(z) < 4.0) and iterations < max_iterations:\n abs_z = complex(abs(z.real), abs(z.imag))\n z = abs_z * abs_z + c\n iterations += 1\n\n # Color smoothing\n smooth_iterations = iterations - math.log(math.log(z.real * z.real + z.imag * z.imag)) + 4.0\n\n if iterations >= max_iterations:\n pixels[x, y, 0] = 0\n pixels[x, y, 1] = 0\n pixels[x, y, 2] = 0\n else:\n pixels[x, y, 0] = 255 * (color_hue / 360)\n pixels[x, y, 1] = 255 * color_saturation\n pixels[x, y, 2] = 255 * min(color_intensity * smooth_iterations / max_iterations, 1)",
"def warping(src, dst, H, ymin, ymax, xmin, xmax, direction='b'):\r\n\r\n h_src, w_src, ch = src.shape\r\n h_dst, w_dst, ch = dst.shape\r\n H_inv = np.linalg.inv(H)\r\n\r\n # TODO: 1.meshgrid the (x,y) coordinate pairs\r\n x = np.linspace(xmin, xmax-1, xmax-xmin)\r\n y = np.linspace(ymin, ymax-1, ymax-ymin)\r\n x, y = np.meshgrid(x, y)\r\n x = x.reshape(-1).astype(int)\r\n y = y.reshape(-1).astype(int)\r\n u = np.vstack((x, y, np.ones(len(x))))\r\n\r\n # TODO: 2.reshape the destination pixels as N x 3 homogeneous coordinate\r\n\r\n if direction == 'b':\r\n # TODO: 3.apply H_inv to the destination pixels and retrieve (u,v) pixels, then reshape to (ymax-ymin),(xmax-xmin)\r\n H_inv = np.linalg.inv(H)\r\n v = H_inv @ u\r\n vx = np.round(v[0] / v[2]).astype(int)\r\n vy = np.round(v[1] / v[2]).astype(int)\r\n\r\n # TODO: 4.calculate the mask of the transformed coordinate (should not exceed the boundaries of source image)\r\n mask = (vx >= 0) & (vx < w_src) & (vy >= 0) & (vy < h_src)\r\n\r\n # TODO: 5.sample the source image with the masked and reshaped transformed coordinates\r\n x = x[mask]\r\n y = y[mask]\r\n vx = vx[mask]\r\n vy = vy[mask]\r\n\r\n # TODO: 6. assign to destination image with proper masking\r\n dst[y, x] = src[vy, vx]\r\n\r\n elif direction == 'f':\r\n # TODO: 3.apply H to the source pixels and retrieve (u,v) pixels, then reshape to (ymax-ymin),(xmax-xmin)\r\n v = H @ u\r\n vx = np.round(v[0] / v[2]).astype(int)\r\n vy = np.round(v[1] / v[2]).astype(int)\r\n\r\n # TODO: 4.calculate the mask of the transformed coordinate (should not exceed the boundaries of destination image)\r\n mask = (vx >= 0) & (vx < w_dst) & (vy >= 0) & (vy < h_dst)\r\n\r\n # TODO: 5.filter the valid coordinates using previous obtained mask\r\n x = x[mask]\r\n y = y[mask]\r\n vx = vx[mask]\r\n vy = vy[mask]\r\n\r\n # TODO: 6. assign to destination image using advanced array indicing\r\n dst[vy, vx] = src[y, x]\r\n\r\n return dst",
"def up_to_step_4(imgs):\n # ... your code here ...\n for i in range(len(imgs)-1):\n \n detector = cv2.xfeatures2d.SURF_create(hessianThreshold = 3000,\n nOctaves = 4,\n nOctaveLayers = 3,\n upright = False,\n extended = False)\n gray1= cv2.cvtColor(imgs[i],cv2.COLOR_BGR2GRAY)\n kp1,des1 = detector.detectAndCompute(gray1,None)\n gray2= cv2.cvtColor(imgs[i+1],cv2.COLOR_BGR2GRAY)\n kp2,des2 = detector.detectAndCompute(gray2,None)\n# bf = cv2.BFMatcher()\n matches = knnmatch(des2,des1)\n# good = []\n# for m,n in matches:\n# if m.distance < 0.75*n.distance:\n# good.append(m)\n# \n src_pts = np.float32([ kp2[m.queryIdx].pt for m in matches ])\n dst_pts = np.float32([ kp1[m.trainIdx].pt for m in matches ])\n H = findhomography(src_pts, dst_pts, 3000)\n# H,mask = cv2.findHomography(src_pts,dst_pts,cv2.RANSAC)\n # warp = warpperspective(imgs[0],H)\n warp = cv2.warpPerspective(imgs[i+1], H, (imgs[i+1].shape[1]*2 , imgs[i+1].shape[0]*2))\n rows, cols = np.where(warp[:,:,0] !=0)\n min_row, max_row = min(rows), max(rows) +1\n min_col, max_col = min(cols), max(cols) +1\n result = warp[min_row:max_row,min_col:max_col,:]\n # imgs = warp\n # warp[0:imgs[0].shape[0], 0:imgs[0].shape[1]] = imgs[2]\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch((imgs[i],result))\n imgs[i+1] = result[1]\n imgs[0] = imgs[-2]\n return imgs[0]",
"def generateShrinkPyramid(image, depth):\n shrunkImages = [image]\n window = constants.GAUSS_BLUR_WINDOW_SIZE # TODO: Finetune Gaussian kernel size.\n for i in range(depth - 1):\n shrunkImages.append(utils.imageShrink(shrunkImages[-1], window))\n \n # Gauss window size needs to reduce as the image gets smaller,\n # else the blurring is excessive.\n window = window // 2\n if window % 2 == 0:\n window += 1\n\n return shrunkImages",
"def pipeline(image, plot = False):\n # undistort image\n undistorted_image = undistort_image(image)\n \n # R&S|sobel-x thresholding\n _, _, _, _, threshold_image = thresholding(undistorted_image)\n \n # yellow mask\n _, mask_yellow = filter_color(undistorted_image, np.array([20,100,100]), np.array([50,255,255]))\n \n # white mask\n _, mask_white = filter_color(undistorted_image, np.array([0,0,220]), np.array([255,35,255]))\n \n # combine yellow and white mask\n mask = cv2.bitwise_or(mask_yellow, mask_white)\n comb_image = np.zeros_like(threshold_image)\n \n # combine mask and thresholded image\n comb_image[(mask > 0)&(threshold_image == 1)] = 1\n \n # warp the binary image\n warped_image, Minv = warp(comb_image, src, dst)\n if plot:\n plt.figure(2)\n plt.imshow(warped_image, cmap = \"gray\")\n plt.title(\"Binary warped image\")\n \n # calculate polynomial fit\n left_fitx, right_fitx, ploty, left_curverad, right_curverad, offset = fit_polynomial(warped_image, plot)\n \n # superimpose lines on top of the polynomial\n superimposed_image = mark_lane_lines(undistorted_image, warped_image, ploty, left_fitx, right_fitx, Minv)\n cv2.putText(superimposed_image, \"Left curvature = \" + str(np.round(left_curverad, 2)) + \" m\",(40,40), \\\n cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2)\n cv2.putText(superimposed_image,\"Right curvature = \" + str(np.round(right_curverad, 2)) + \" m\",(40,80), \\\n cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2)\n cv2.putText(superimposed_image,\"Offset = \" + str(np.round(offset*100, 2)) + \" cm\",(40,120), \\\n cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2)\n \n return superimposed_image",
"def warp(\n image_pair, U, velocity_upsample_kind=\"linear\", direction=\"center\", nsteps=1, order=-1, radius=2\n) -> np.ndarray:\n\n # warping image pairs\n warped_frame_a, warped_frame_b = image_pair\n nr, nc = warped_frame_a.shape\n\n # interpolate velocity to pixel level\n if U.shape[1] != nr or U.shape[2] != nc:\n U = interpolate_to_pixel(U, warped_frame_a.shape, kind=velocity_upsample_kind)\n U_substep = U / nsteps\n\n # generate mapping grid\n image_coords = np.meshgrid(np.arange(nr), np.arange(nc), indexing=\"ij\")\n\n if order == -1:\n warp_func = partial(warp_whittaker, coords=image_coords, radius=radius)\n elif order == 1 or order == 3:\n warp_func = partial(warp_skimage, coords=image_coords, order=order)\n else:\n raise ValueError(\n \"Use order -1 for Whittaker-shannon interpolation and 1 or 3 for bi-linear and bi-cubic respectively.\"\n )\n\n # warp images in nsteps\n for istep in range(nsteps):\n if direction == \"forward\":\n warped_frame_a = warp_func(warped_frame_a, U_substep)\n elif direction == \"backward\":\n warped_frame_b = warp_func(warped_frame_b, -U_substep)\n elif direction == \"center\":\n warped_frame_a = warp_func(warped_frame_a, 0.5 * U_substep)\n warped_frame_b = warp_func(warped_frame_b, -0.5 * U_substep)\n else:\n raise ValueError(f\"Unknown warping direction: {direction}.\")\n\n return np.stack((warped_frame_a, warped_frame_b))",
"def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out",
"def dispersed_pixel(x0, y0, width, height, lams, flxs, order, wmin, wmax,\n sens_waves, sens_resp, seg_wcs, grism_wcs, ID, naxis,\n oversample_factor=2, extrapolate_sed=False, xoffset=0,\n yoffset=0):\n\n # Setup the transforms we need from the input WCS objects\n sky_to_imgxy = grism_wcs.get_transform('world', 'detector')\n imgxy_to_grismxy = grism_wcs.get_transform('detector', 'grism_detector')\n\n # Setup function for retrieving flux values at each dispersed wavelength\n if len(lams) > 1:\n # If we have direct image flux values from more than one filter (lambda),\n # we have the option to extrapolate the fluxes outside the\n # wavelength range of the direct images\n if extrapolate_sed is False:\n flux = interp1d(lams, flxs, fill_value=0., bounds_error=False)\n else:\n flux = interp1d(lams, flxs, fill_value=\"extrapolate\", bounds_error=False)\n else:\n # If we only have flux from one lambda, just use that\n # single flux value at all wavelengths\n def flux(x):\n return flxs[0]\n\n # Get x/y positions in the grism image corresponding to wmin and wmax:\n # Start with RA/Dec of the input pixel position in segmentation map,\n # then convert to x/y in the direct image frame corresponding\n # to the grism image,\n # then finally convert to x/y in the grism image frame\n x0_sky, y0_sky = seg_wcs(x0, y0)\n x0_xy, y0_xy, _, _ = sky_to_imgxy(x0_sky, y0_sky, 1, order)\n xwmin, ywmin = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, wmin, order)\n xwmax, ywmax = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, wmax, order)\n dxw = xwmax - xwmin\n dyw = ywmax - ywmin\n\n # Compute the delta-wave per pixel\n dw = np.abs((wmax - wmin) / (dyw - dxw))\n\n # Use a natural wavelength scale or the wavelength scale of the input SED/spectrum,\n # whichever is smaller, divided by oversampling requested\n input_dlam = np.median(lams[1:] - lams[:-1])\n if input_dlam < dw:\n dlam = input_dlam / oversample_factor\n else:\n # this value gets used when we only have 1 direct image wavelength\n dlam = dw / oversample_factor\n\n # Create list of wavelengths on which to compute dispersed pixels\n lambdas = np.arange(wmin, wmax + dlam, dlam)\n n_lam = len(lambdas)\n\n # Compute lists of x/y positions in the grism image for\n # the set of desired wavelengths:\n # As above, first get RA/Dec of segmentation map pixel positions,\n # then convert to x/y in image frame of grism image,\n # then convert to x/y in grism frame.\n x0_sky, y0_sky = seg_wcs([x0] * n_lam, [y0] * n_lam)\n x0_xy, y0_xy, _, _ = sky_to_imgxy(x0_sky, y0_sky, lambdas, [order] * n_lam)\n x0s, y0s = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, lambdas, [order] * n_lam)\n\n # If none of the dispersed pixel indexes are within the image frame,\n # return a null result without wasting time doing other computations\n if x0s.min() >= naxis[0] or x0s.max() < 0 or y0s.min() >= naxis[1] or y0s.max() < 0:\n return None\n\n # Compute arrays of dispersed pixel locations and areas\n padding = 1\n xs, ys, areas, index = get_clipped_pixels(\n x0s, y0s,\n padding,\n naxis[0], naxis[1],\n width, height\n )\n lams = np.take(lambdas, index)\n\n # If results give no dispersed pixels, return null result\n if xs.size <= 1:\n return None\n\n # compute 1D sensitivity array corresponding to list of wavelengths\n sens, no_cal = create_1d_sens(lams, sens_waves, sens_resp)\n\n # Compute countrates for dispersed pixels. 
Note that dispersed pixel\n # values are naturally in units of physical fluxes, so we divide out\n # the sensitivity (flux calibration) values to convert to units of\n # countrate (DN/s).\n counts = flux(lams) * areas / sens\n counts[no_cal] = 0. # set to zero where no flux cal info available\n\n return xs, ys, areas, lams, counts, ID",
"def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True, use_pad_mask=False):\n n, _, h, w = x.size()\n grid_y, grid_x = torch.meshgrid(torch.arange(0, h, dtype=x.dtype, device=x.device), torch.arange(0, w, dtype=x.dtype, device=x.device))\n grid = torch.stack((grid_x, grid_y), 2).float()\n grid.requires_grad = False\n vgrid = grid + flow\n if interp_mode == 'nearest4':\n vgrid_x_floor = 2.0 * torch.floor(vgrid[:, :, :, 0]) / max(w - 1, 1) - 1.0\n vgrid_x_ceil = 2.0 * torch.ceil(vgrid[:, :, :, 0]) / max(w - 1, 1) - 1.0\n vgrid_y_floor = 2.0 * torch.floor(vgrid[:, :, :, 1]) / max(h - 1, 1) - 1.0\n vgrid_y_ceil = 2.0 * torch.ceil(vgrid[:, :, :, 1]) / max(h - 1, 1) - 1.0\n output00 = F.grid_sample(x, torch.stack((vgrid_x_floor, vgrid_y_floor), dim=3), mode='nearest', padding_mode=padding_mode, align_corners=align_corners)\n output01 = F.grid_sample(x, torch.stack((vgrid_x_floor, vgrid_y_ceil), dim=3), mode='nearest', padding_mode=padding_mode, align_corners=align_corners)\n output10 = F.grid_sample(x, torch.stack((vgrid_x_ceil, vgrid_y_floor), dim=3), mode='nearest', padding_mode=padding_mode, align_corners=align_corners)\n output11 = F.grid_sample(x, torch.stack((vgrid_x_ceil, vgrid_y_ceil), dim=3), mode='nearest', padding_mode=padding_mode, align_corners=align_corners)\n return torch.cat([output00, output01, output10, output11], 1)\n else:\n vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0\n vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0\n vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)\n output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners)\n return output",
"def stacking_depth(cat,res_element, full_imagenoise):\n cat_ = Table.read(cat, format = \"ascii\")\n Number_of_sources = len(cat_[\"peak_flux\"])\n stacking_depth = Number_of_sources/res_element\n stacking_depth = int(stacking_depth)\n \n ## Making sure Not dominated by faint sources only, but including bright soirces too:\n above_stacked_noise_level = full_imagenoise/np.sqrt(stacking_depth) # select sources below the noise BUT: above the stacked noise level\n \n #mask_faint = np.argwhere((5*above_stacked_noise_level<=cat_[\"integrated_flux\"])&(cat_[\"integrated_flux\"]<=full_imagenoise*5))\n mask_faint = np.argwhere((cat_[\"integrated_flux\"]<=0.1e-6)) #stacking 5 factors below the noise\n mask_faint = np.ravel(mask_faint) # flattening from 2D to 1D\n\n #mask_bright = cat_[\"integrated_flux\"].argsort()[-100:][::-1] # selecting the indexes of the brightest 100 srcs\n #mask_bright = np.ravel(mask_bright) # flattening from 2D to 1D\n\n \n indices = []\n faint_sources_only = []\n np.random.seed(1)\n for i in range(stacking_depth):\n value = np.random.choice(mask_faint)\n indices.append(value)\n faint_sources_only.append(value)\n \n \"\"\"\n for j in range(mask_bright.size):\n value2 = np.random.choice(mask_bright)\n indices.append(value2)\n \"\"\"\n \n ra = cat_[\"ra_abs\"]\n dec = cat_[\"dec_abs\"]\n \n\n outfile_csv = open(path+'coords.csv', 'w')\n for k in faint_sources_only:\n srcline_csv = '%.10f,%.10f'%(ra[k],dec[k])\n print>>outfile_csv,srcline_csv\n outfile_csv.close()\n \n \n \n # Making new model image consists of stacking depth sources plus all bright sources\n trecs_cat_ = Table.read(cat, format = \"ascii\")\n trecs_ra = trecs_cat_[\"ra_abs\"]\n cat_ra = cat_[\"ra_abs\"][indices]\n \n indices2 = []\n for l in range(len(cat_ra)):\n value3 = np.argwhere(cat_ra[l] == trecs_ra)\n indices2.append(value3)\n \n indices_true = []\n for e in range(len(indices2)):\n value4 = indices2[e][0][0]\n indices_true.append(value4)\n \n \n outfile = open('simuclass/simuCLASS/T_recs_catalogues/catalogue_SFGs_stacking_depth.txt', 'w')\n\n print>> outfile, '#lon lat size flux e1 e2 gamma1 gamma2'\n\n for m in indices_true:\n srcline ='%.10f %.10f %.10f %.10f %.10f %.10f %.10f %.10f'%(trecs_cat_['ra_offset'][m],trecs_cat_['dec_offset'][m],trecs_cat_['size'][m],(trecs_cat_['integrated_flux'][m])*1e3,trecs_cat_['e1'][m],trecs_cat_['e2'][m],trecs_cat_['g1'][m],trecs_cat_['g2'][m])\n print>> outfile, srcline\n\n outfile.close()\n \n \n # catalogue of sizes and flux density for faint sources\n outfile_ = open(path+'faint_sourcesOnly.txt', 'w')\n\n print>> outfile_, '#size flux'\n\n for t in faint_sources_only:\n srcline_ ='%.10f %.10f'%(trecs_cat_['size'][t],trecs_cat_['integrated_flux'][t])\n print>> outfile_, srcline_\n\n outfile_.close()",
"def image_clip_to_segment_and_convert(image_ms_array, image_pan_array, mask_array, factor, image_height_size, \r\n image_width_size):\r\n \r\n img_pan_list = []\r\n img_ms_list = []\r\n mask_list = []\r\n \r\n n_bands = image_ms_array.shape[2]\r\n \r\n for i in range(0, image_pan_array.shape[0] - image_height_size, int(factor)):\r\n for j in range(0, image_pan_array.shape[1] - image_width_size, int(factor)):\r\n M_90 = cv2.getRotationMatrix2D((image_width_size / 2, image_height_size / 2), 90, 1.0)\r\n M_180 = cv2.getRotationMatrix2D((image_width_size / 2, image_height_size / 2), 180, 1.0)\r\n M_270 = cv2.getRotationMatrix2D((image_width_size / 2, image_height_size / 2), 270, 1.0)\r\n \r\n img_original = image_pan_array[i : i + image_height_size, j : j + image_width_size, 0]\r\n img_rotate_90 = cv2.warpAffine(img_original, M_90, (image_height_size, image_width_size))\r\n img_rotate_180 = cv2.warpAffine(img_original, M_180, (image_width_size, image_height_size))\r\n img_rotate_270 = cv2.warpAffine(img_original, M_270, (image_height_size, image_width_size))\r\n img_flip_hor = cv2.flip(img_original, 0)\r\n img_flip_vert = cv2.flip(img_original, 1)\r\n img_flip_both = cv2.flip(img_original, -1)\r\n img_pan_list.extend([img_original, img_rotate_90, img_rotate_180, img_rotate_270, img_flip_hor, img_flip_vert, \r\n img_flip_both])\r\n mask_patch = mask_array[i : i + image_height_size, j : j + image_width_size, 0]\r\n label = mask_patch[int(image_height_size / 2), int(image_width_size / 2)]\r\n mask_list.extend([label] * 7)\r\n \r\n for i in range(0, int(image_ms_array.shape[0] - (image_height_size / factor)), 1):\r\n for j in range(0, int(image_ms_array.shape[1] - (image_width_size / factor)), 1):\r\n M_90_ms = cv2.getRotationMatrix2D(((image_width_size / factor) / 2, (image_height_size / factor) / 2), 90, 1.0)\r\n M_180_ms = cv2.getRotationMatrix2D(((image_width_size / factor) / 2, (image_height_size / factor) / 2), 180, 1.0)\r\n M_270_ms = cv2.getRotationMatrix2D(((image_width_size / factor) / 2, (image_height_size / factor) / 2), 270, 1.0)\r\n \r\n img_original = image_ms_array[i : i + image_height_size, j : j + image_width_size, 0 : n_bands]\r\n img_rotate_90 = cv2.warpAffine(img_original, M_90_ms, (image_height_size, image_width_size))\r\n img_rotate_180 = cv2.warpAffine(img_original, M_180_ms, (image_width_size, image_height_size))\r\n img_rotate_270 = cv2.warpAffine(img_original, M_270_ms, (image_height_size, image_width_size))\r\n img_flip_hor = cv2.flip(img_original, 0)\r\n img_flip_vert = cv2.flip(img_original, 1)\r\n img_flip_both = cv2.flip(img_original, -1)\r\n img_ms_list.extend([img_original, img_rotate_90, img_rotate_180, img_rotate_270, img_flip_hor, img_flip_vert, \r\n img_flip_both])\r\n \r\n image_pan_segment_array = np.zeros((len(img_pan_list), image_height_size, image_width_size, image_pan_array.shape[2]))\r\n image_ms_segment_array = np.zeros((len(img_ms_list), int(image_height_size / factor), int(image_width_size / factor), \r\n image_ms_array.shape[2]))\r\n \r\n for index in range(len(img_pan_list)):\r\n image_pan_segment_array[index] = img_pan_list[index]\r\n image_ms_segment_array[index] = img_ms_list[index]\r\n \r\n mask_array = np.array(mask_list)\r\n \r\n return image_ms_segment_array, image_pan_segment_array, mask_array",
"def find_path(masked_image,start_pos, target_pos, size_compress_index, active_particle_size,\r\n compress = False):\r\n \r\n \r\n not_image = cv2.bitwise_not(masked_image)\r\n image_index = size_compress_index\r\n \r\n start_x,start_y = start_pos\r\n end_x, end_y = target_pos\r\n \r\n ker1=cv2.getStructuringElement(cv2.MORPH_RECT, (3,3),anchor =(-1,-1))\r\n not_image = cv2.dilate(not_image,ker1,iterations = active_particle_size//2)\r\n\r\n small_image = cv2.resize(not_image, (st_width//image_index, st_height//image_index),interpolation = cv2.INTER_AREA)\r\n ret,small_image = cv2.threshold(small_image,127,255,cv2.THRESH_BINARY)\r\n \r\n small_image = cv2.bitwise_not(small_image)\r\n # \r\n #cv2.imshow(\"thresh\", small_image)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows() \r\n \r\n \r\n matrix = small_image.tolist()\r\n grid = Grid(matrix=matrix)\r\n\r\n start = grid.node(int(start_x//image_index), int(start_y//image_index))\r\n end = grid.node(int(end_x//image_index), int(end_y//image_index))\r\n\r\n finder = AStarFinder(diagonal_movement = DiagonalMovement.never)\r\n path, runs = finder.find_path(start, end, grid)\r\n \r\n new_path = list()\r\n for p in path:\r\n x,y = p\r\n x = x*image_index\r\n y = y*image_index\r\n new_path.append((x,y))\r\n \r\n compressed_path = compress_path(new_path)\r\n \r\n if compress == True:\r\n res_path = compressed_path\r\n else:\r\n res_path = new_path\r\n \r\n return res_path, runs",
"def flattenFrames(stack):\n \n maxHeight=0\n frameList=[]\n \n \n print('\\n')\n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting shifts {:.2f}% done'.format(100.0*((i+1)/len(stack))),end='', flush=True)\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack",
"def sharpen(im):\n kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n im = cv2.filter2D(im, -1, kernel)\n return im",
"def stitchSpectra(lamb_list,count_list, method=\"scale\", edgeremove=(0, 0), shiftToPositive=False, dlambda=None):\r\n rawData=np.array([np.array(lamb_list),np.array(count_list)])\r\n rawData=rawData.swapaxes(0,1)\r\n coefficients = []\r\n print(\"Removing edges for stitching:\", *edgeremove)\r\n omitRight = rawData[0].shape[1] - math.floor(rawData[0].shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", 0, omitRight)\r\n processed = np.array(rawData[0][:, 0:omitRight]) \r\n if dlambda is None:\r\n dlambda = math.fabs(processed[0, 1] - processed[0, 0]) ## lambda steps of first spectrum are kept\r\n for i, spec in enumerate(rawData[1:]):\r\n omitLeft = math.floor(spec.shape[1] * edgeremove[0])\r\n omitRight = spec.shape[1] - math.floor(spec.shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", omitLeft, omitRight)\r\n if i == len(rawData)-2:\r\n spec = np.array(spec[:, omitLeft:]) ## do not shorten last array at end\r\n else:\r\n spec = np.array(spec[:, omitLeft:omitRight]) # shorten middle arrays at both sides\r\n print(\"Stitching spectrum in range\", np.min(spec[0,]), np.max(spec[0,]))\r\n # calculate overlap\r\n overlap = (np.min(spec[0,]), np.max(processed[0,])) \r\n #lambdas = np.arange(*overlap, dlambda)\r\n #leftfun = interp1d(processed[0,], processed[1,])\r\n #rightfun = interp1d(spec[0,], spec[1,])\r\n left = np.mean(processed[1, processed[0,] > overlap[0]]) ##mean of counts of overlap\r\n right = np.mean(spec[1, spec[0,] < overlap[1]])\r\n if method == \"shift\":\r\n # calculate offset in overlap region\r\n offset = left - right\r\n print(\"Stitching offset %s in overlap\", offset, *overlap)\r\n # add shifted spectrum\r\n spec[1,] = spec[1,] + offset\r\n coefficients.append(offset)\r\n elif method == \"scale\":\r\n # calculate factor in overlap region\r\n factor = left/right\r\n print(\"Stitching factor\"+str(factor)+\" in overlap \", *overlap)\r\n spec[1,] = spec[1,] * factor\r\n coefficients.append(factor)\r\n processed = np.concatenate([processed, spec], axis=1)\r\n # interpolate data on grid\r\n interpolated = interp1d(processed[0,], processed[1,])\r\n lambdas = np.arange(processed[0, 0], processed[0, -1], dlambda)\r\n specdata = interpolated(lambdas)\r\n # shift above 0\r\n if shiftToPositive:\r\n minimum = np.min(specdata)\r\n if minimum < 0:\r\n specdata += math.fabs(minimum)\r\n \r\n return (lambdas,specdata,coefficients)",
"def height_width_shift(img, steer, translate_range):\n rows, columns, channels = img.shape\n\n# y\n# ^\n# |\n# |____> x\n\n translate_x = WIDTH_SHIFT * np.random.uniform() - WIDTH_SHIFT / 2\n translate_y = HEIGHT_SHIFT * np.random.uniform() - HEIGHT_SHIFT / 2\n transform = np.float32([[1, 0 , translate_x], [0, 1, translate_y]])\n steering_angle = steer + translate_x/translate_range * 2 * .2\n\n transformed_img = cv2.warpAffine(img, transform, (columns, rows)) # example warpAffine from http://docs.opencv.org/trunk/da/d6e/tutorial_py_geometric_transformations.html\n return transformed_img, steering_angle, translate_x",
"def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img",
"def stitch_images(self):\n stitched_folder_name = self.parent_folder + 'stitched'\n print(\"Stitching images in:\")\n print(self.folder_list)\n print(\"Storing in: \" + str(stitched_folder_name))\n\n try:\n print(\"Making dir \" + str(stitched_folder_name) + \" for stitching\")\n os.mkdir(stitched_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this stitching??\")\n return\n\n photo_list = self.get_photo_list(self.parent_folder + '/' + self.folder_list[0])\n # get photo sizes\n print(self.parent_folder + '/' + self.folder_list[0] + '/' + photo_list[0])\n size_photo = cv2.imread(self.parent_folder + '/' + self.folder_list[0] +\n '/' + photo_list[0], cv2.IMREAD_ANYDEPTH)\n photo_height, photo_width = np.shape(size_photo)\n stitched_height = photo_height * 2\n stitched_width = photo_width * 4\n\n for photo in photo_list:\n stitched_photo = np.full((stitched_height, stitched_width), 0)\n\n for i, folder in enumerate(self.folder_list):\n print(i)\n print(folder)\n print(self.parent_folder + folder + '/' + photo)\n\n stitched_photo[(int((float(i) / 4.0)) * photo_height):(int(((float(i) / 4.0) + 1)) * photo_height),\n (int(i % 4) * photo_width):((int((i % 4) + 1)) * photo_width)] \\\n = cv2.imread(self.parent_folder + '/' + folder + '/' + photo, cv2.IMREAD_ANYDEPTH)\n\n stitched_photo = stitched_photo.astype(np.uint16)\n cv2.imwrite(stitched_folder_name + '/' + photo, stitched_photo, [cv2.IMWRITE_PNG_COMPRESSION, 0])\n\n return stitched_folder_name",
"def embossed_wiggle_image(f, channel=-1, nwiggle=80):\n img = imageio.imread(f)\n y = np.linspace(0, img.shape[0], img.shape[0])\n x = np.linspace(0, img.shape[1], img.shape[1])\n img = rgb_to_cmyk(img, RGB_SCALE=255.)[:,:,-1]\n from scipy import interpolate\n F = interpolate.interp2d(y, x, img.flatten(), kind='linear')\n sharp_wiggle = wiggle_fill((0,img.shape[1]), (0,img.shape[0]), nwiggle=nwiggle)\n list_wiggle=np.array(sharp_wiggle).reshape(-1,2)\n wiggle = smooth_path(list_wiggle)\n new_path = smooth_path(np.array(path_channel_distort(wiggle, F)).reshape(-1,2),window=4)\n P = np.array(new_path).reshape(-1,2)*np.array([[1.,-1.]])\n plt.plot(P[:,0],-1*P[:,1])\n # plt.plot(P[:,1]-P[:,1].min(),-1*P[:,0]+P[:,0].max())\n plt.show()\n return [P.tolist()]",
"def _deblend_source(data, segment_img, npixels, nlevels=32, contrast=0.001,\n mode='exponential', connectivity=8):\n\n from scipy import ndimage\n from skimage.morphology import watershed\n\n if nlevels < 1:\n raise ValueError('nlevels must be >= 1, got \"{0}\"'.format(nlevels))\n if contrast < 0 or contrast > 1:\n raise ValueError('contrast must be >= 0 or <= 1, got '\n '\"{0}\"'.format(contrast))\n\n if connectivity == 4:\n selem = ndimage.generate_binary_structure(2, 1)\n elif connectivity == 8:\n selem = ndimage.generate_binary_structure(2, 2)\n else:\n raise ValueError('Invalid connectivity={0}. '\n 'Options are 4 or 8'.format(connectivity))\n\n segm_mask = (segment_img.data > 0)\n source_values = data[segm_mask]\n source_min = np.nanmin(source_values)\n source_max = np.nanmax(source_values)\n if source_min == source_max:\n return segment_img # no deblending\n if source_min < 0:\n warnings.warn('Source \"{0}\" contains negative values, setting '\n 'deblending mode to \"linear\"'.format(\n segment_img.labels[0]), AstropyUserWarning)\n mode = 'linear'\n source_sum = float(np.nansum(source_values))\n\n steps = np.arange(1., nlevels + 1)\n if mode == 'exponential':\n if source_min == 0:\n source_min = source_max * 0.01\n thresholds = source_min * ((source_max / source_min) **\n (steps / (nlevels + 1)))\n elif mode == 'linear':\n thresholds = source_min + ((source_max - source_min) /\n (nlevels + 1)) * steps\n else:\n raise ValueError('\"{0}\" is an invalid mode; mode must be '\n '\"exponential\" or \"linear\"')\n\n # create top-down tree of local peaks\n segm_tree = []\n mask = ~segm_mask\n for level in thresholds[::-1]:\n segm_tmp = detect_sources(data, level, npixels=npixels,\n connectivity=connectivity, mask=mask)\n if segm_tmp.nlabels >= 2:\n fluxes = []\n for i in segm_tmp.labels:\n fluxes.append(np.nansum(data[segm_tmp == i]))\n idx = np.where((np.array(fluxes) / source_sum) >= contrast)[0]\n if len(idx >= 2):\n segm_tree.append(segm_tmp)\n\n nbranch = len(segm_tree)\n if nbranch == 0:\n return segment_img\n else:\n for j in np.arange(nbranch - 1, 0, -1):\n intersect_mask = (segm_tree[j].data *\n segm_tree[j - 1].data).astype(bool)\n intersect_labels = np.unique(segm_tree[j].data[intersect_mask])\n\n if segm_tree[j - 1].nlabels <= len(intersect_labels):\n segm_tree[j - 1] = segm_tree[j]\n else:\n # If a higher tree level has more peaks than in the\n # intersected label(s) with the level below, then remove\n # the intersected label(s) in the lower level, add the\n # higher level, and relabel.\n segm_tree[j].remove_labels(intersect_labels)\n new_segments = segm_tree[j].data + segm_tree[j - 1].data\n new_segm, nsegm = ndimage.label(new_segments)\n segm_tree[j - 1] = SegmentationImage(new_segm)\n\n return SegmentationImage(watershed(-data, segm_tree[0].data,\n mask=segment_img.data,\n connectivity=selem))"
] |
[
"0.6078032",
"0.59519064",
"0.59111696",
"0.58948994",
"0.57756174",
"0.5736996",
"0.5640896",
"0.5628331",
"0.5625276",
"0.55844927",
"0.5533586",
"0.5519274",
"0.55109257",
"0.5501204",
"0.54939526",
"0.5480183",
"0.54773796",
"0.546816",
"0.5466745",
"0.5464357",
"0.5446828",
"0.5441624",
"0.54063547",
"0.5385016",
"0.5380571",
"0.537933",
"0.53573734",
"0.53486234",
"0.533454",
"0.5332566"
] |
0.66762245
|
0
|
Returns whether multi-player mode with num_of_players is supported or not, calculated from the ratio columns/num_of_players. By experimental observation, if the ratio is greater than 40 then multi playing is supported, else not. (There are some exceptions: this formula doesn't work well for large terminal sizes.)
|
def multi_player_support(self, num_of_players):
if self.screen['columns'] / num_of_players > 40:
return True
else:
return False
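A minimal usage sketch of the check above (an assumption, not part of the dataset row): the Terminal class and its columns value are hypothetical stand-ins for whatever object carries the screen dict that the method reads.

class Terminal:
    def __init__(self, columns):
        # stand-in for whatever object holds the terminal geometry
        self.screen = {'columns': columns}

    def multi_player_support(self, num_of_players):
        # same ratio rule as above: columns per player must exceed 40
        return self.screen['columns'] / num_of_players > 40

term = Terminal(columns=120)
print(term.multi_player_support(2))  # 120 / 2 = 60 -> True
print(term.multi_player_support(4))  # 120 / 4 = 30 -> False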
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_winner(self, player: str) -> bool:\n total_result = self.current_state.hori_result + self.current_state.left_result + self.current_state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item == '2':\n p2_taken += 1\n if player == \"p1\":\n return float(p1_taken) >= total_line/2\n return float(p2_taken) >= total_line/2",
"def enough_players():\n return True",
"def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation",
"def number_of_players(self) -> int:\n return self.param.number_of_players",
"def numberOfPlayers(self):\r\n return len(self.playerPreparers)",
"def check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None",
"def __playersDriverNum(self):\n for _player in range(50): # self.Rf2Tele.mVehicles[0].mNumVehicles:\n if self.Rf2Scor.mVehicles[_player].mIsPlayer:\n break\n return _player",
"def determine_win(self):\n if self.match.radiant_win is True and self.player_slot < 5:\n return True\n if self.match.radiant_win is False and self.player_slot > 5:\n return True\n return False",
"def check_combo(self, matrix, player=None):\n if player is None:\n player = self.current_player\n \n if self.N * player in np.sum(matrix, axis=0):\n return player\n if self.N * player in np.sum(matrix, axis=1):\n return player\n if np.sum(matrix.diagonal()) == self.N * player:\n return player\n if np.sum(np.fliplr(matrix).diagonal()) == self.N * player:\n return player\n return 0",
"def min_players(self):\n return 2",
"def check_winner(self):\n if self.player1.chips <= BIG_BLIND_BET:\n return 2\n elif self.player2.chips <= BIG_BLIND_BET:\n return 1\n else:\n return 0",
"def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number",
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def game_tie(self):\n\n shape = self.board.shape\n if np.count_nonzero(self.board) == (shape[0] * shape[1]):\n # The board is full\n player = 0\n return True\n else:\n return False",
"def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed",
"def piece_evaluate(self):\r\n evaluation = 0\r\n for player in range(2):\r\n player_sign = player * 2 - 1\r\n evaluation += player_sign * 4 * count_bits(self.bitboard_king[player])\r\n evaluation += player_sign * 2 * count_bits(self.bitboard_pawns[player])\r\n\r\n return evaluation",
"def completeness_of_game(game):\n spaces = game.width * game.height\n played_spaces = len([x for x in game._board_state[:-3] if x == 1])\n return float(played_spaces / spaces)",
"def get_win_prob(self,hand_cards, board_cards,simulation_number,num_players):\n win = 0\n round=0\n evaluator = HandEvaluator()\n for i in range(simulation_number):\n\n board_cards_to_draw = 5 - len(board_cards) # 2\n board_sample = board_cards + self._pick_unused_card(board_cards_to_draw, board_cards + hand_cards)\n unused_cards = self._pick_unused_card((num_players - 1)*2, hand_cards + board_sample)\n opponents_hole = [unused_cards[2 * i:2 * i + 2] for i in range(num_players - 1)]\n\n try:\n opponents_score = [pow(evaluator.evaluate_hand(hole, board_sample), num_players) for hole in opponents_hole]\n # hand_sample = self._pick_unused_card(2, board_sample + hand_cards)\n my_rank = pow(evaluator.evaluate_hand(hand_cards, board_sample),num_players)\n if my_rank >= max(opponents_score):\n win += 1\n #rival_rank = evaluator.evaluate_hand(hand_sample, board_sample)\n round+=1\n except Exception, e:\n print e.message\n continue\n # The large rank value means strong hand card\n print \"Win:{}\".format(win)\n win_prob = win / float(round)\n print \"win_prob:{}\".format(win_prob)\n return win_prob",
"def getNumPlayers(self):\n return len(self.__colordict__.keys())",
"def _get_n_players(env):\n return len(env.action_space.spaces)",
"def get_result(state, winrate_predictor):\n teamA_picks = state[:, TEAM_A_PICK_INDICES]\n teamB_picks = state[:, TEAM_B_PICK_INDICES]\n team_comp = torch.cat((teamA_picks, teamB_picks), dim=1)\n winrate = winrate_predictor(team_comp)[0, 0]\n \n if winrate >= 0.5:\n return 0\n return 1",
"def test_players_per_type_num_players_less(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.Player]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.players_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer']",
"def checkWinAll(self, model, previousWin):\r\n previous = self.__render\r\n self.__render = Render.NOTHING # avoid rendering anything during execution of the check games\r\n\r\n win = 0\r\n lose = 0\r\n \r\n cellsRanking = {}\r\n sumForProb = 0\r\n for cell in self.allowableCells:\r\n if self.play(model, cell)[0] == Status.WIN:\r\n win += 1\r\n else:\r\n lose += 1\r\n\r\n self.__render = previous # restore previous rendering setting\r\n\r\n logging.info(\"won: {} | lost: {} | win rate: {:.5f}\".format(win, lose, win / (win + lose)))\r\n\r\n result = True if lose == 0 else False\r\n \r\n if lose == 0:\r\n previousWin += 1\r\n else:\r\n previousWin = 0 \r\n\r\n return result, win / (win + lose), previousWin",
"def parse_num_players(num_players):\n \n try:\n\tnum_players = int(num_players)\n except ValueError:\n\treturn None\n if num_players < 2 or num_players > 4:\n return None\n\n return num_players",
"def test_winners_per_type_num_players_less(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.Player]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.winners_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer']",
"def __countPlayers(self, players):\n\n numLow = sum(map(lambda p: p.lowFps, players))\n numHigh = sum(map(lambda p: p.highFps, players))\n numMed = len(players) - numLow - numHigh\n\n return '%s, %s, %s' % (numLow, numMed, numHigh)",
"def _test_player_list_size(self):\n return len(self.player_list)",
"def goals_ratio_to_num_matches(self, team_id, num_matches=1):\n # {{{\n if type(num_matches) is not int or num_matches == 0:\n num_matches = 1\n # this is fastest selecting to compared with concat and append\n # %timeit matches[(matches[\"HID\"] == team_id) | (matches[\"AID\"] == team_id)].sort_index()\n # 1.21 ms ± 14.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n # %timeit pd.concat([matches[matches[\"HID\"] == team_id], matches[matches[\"AID\"] == team_id]]).sort_index()\n # 3.26 ms ± 62.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n # %timeit matches[matches[\"HID\"]==team_id].append(matches[matches[\"AID\"]==team_id]).sort_index()\n # 3.31 ms ± 75.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n matches_containing_team = self.matches[(self.matches[\"HID\"] == team_id) |\n (self.matches[\"AID\"] == team_id)].sort_index()[-1-num_matches:-1]\n\n goals_conceded, goals_scored = np.nan, np.nan\n if not matches_containing_team.empty:\n goals_conceded = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['ASC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['HSC'].sum()\n goals_scored = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['HSC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['ASC'].sum()\n\n return goals_scored / goals_conceded if goals_conceded != 0 else goals_scored / (goals_conceded + 1)",
"def check_game_status(self):\n for player in (\"1\", \"2\"):\n row_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 1, self.board\n ).any()\n col_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 0, self.board\n ).any()\n d1_win = set(self.data[[0, 4, 8]]) == {player}\n d2_win = set(self.data[[2, 4, 6]]) == {player}\n if any([row_win, col_win, d1_win, d2_win]):\n return (\"win\", player)\n\n if self.counter[\"_\"] == 0:\n return (\"tie\", None)\n else:\n return (\"turn\", \"1\" if self.counter[\"1\"] == self.counter[\"2\"] else \"2\")",
"def is_acceptable_multiplier(m):\n return 1 < m < (2 ** 61 - 1)"
] |
[
"0.60528976",
"0.5894889",
"0.57506096",
"0.5736657",
"0.57216996",
"0.56766444",
"0.5639242",
"0.5638798",
"0.56313765",
"0.55557156",
"0.55519736",
"0.5526917",
"0.5416034",
"0.53952646",
"0.5383263",
"0.5374004",
"0.5368561",
"0.53543204",
"0.5328036",
"0.5291333",
"0.5289244",
"0.52601576",
"0.5239432",
"0.52233624",
"0.52138096",
"0.5198431",
"0.5177381",
"0.5173464",
"0.51596624",
"0.51470524"
] |
0.7676265
|
0
|
1. Build a list of tuples representing runs of identical characters, e.g. [("a", 3), ("b", 2), ("c", 1)]. 2. Count all palindromes that can be formed from each tuple. 3. Count how many palindromes can be formed with a single different character in between.
|
def substrCount(n, s):
lst = []
character = s[0]
count = 1
result = 0
for i in range(1, n):
if s[i] == character:
count += 1
else:
lst.append((character, count))
character = s[i]
count = 1
lst.append((character, count))
for tpl in lst:
"""calculate all possible palindromes created from same characters that are close to each other
E.g: aaa => 6 possibles (3*4//2 = 6)
"""
result += tpl[1] * (tpl[1] + 1) // 2
for i in range(1, len(lst) - 1):
if lst[i - 1][0] == lst[i + 1][0] and lst[i][1] == 1:
"""
check palindromes created from 3 tuples with a different character in between
"""
result += min(lst[i - 1][1], lst[i + 1][1])
return result
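Quick sanity checks for substrCount above, assuming the function is in scope; the sample strings are the usual "special palindrome" examples, not taken from the dataset row.

print(substrCount(5, "asasd"))    # 7  -> a, s, a, s, d, asa, sas
print(substrCount(4, "aaaa"))     # 10 -> every substring of a uniform string counts
print(substrCount(7, "abcbaba"))  # 10 -> singles plus bcb, bab, aba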
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def part1(input_string):\n twos, threes = 0, 0\n for line in input_string:\n letters_seen = {}\n for char in line:\n letters_seen[char] = letters_seen.setdefault(char, 0)+1\n if 2 in letters_seen.values():\n twos += 1\n if 3 in letters_seen.values():\n threes += 1\n return threes * twos",
"def combinations(s, n):\n return (\"\".join(x) for x in tuples(s,n))",
"def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq",
"def howmany_sequences(listOfTuples):\r\n #initialize number of pairs as 0\r\n pairs = 0\r\n #count pairs\r\n for n in listOfTuples:\r\n pairs += 1\r\n k = 1\r\n #find number of initial sequences \r\n while k*(k-1) != pairs*2:\r\n k += 1\r\n return(k)",
"def possibleTuples(nDifferentNodes):\n numberOfPossibleTuples = {}\n for nodeName, numberOfDifferent in nDifferentNodes.iteritems():\n numberOfPossibleTuples[nodeName] = sum([comb(numberOfDifferent,i,exact=True)for i in range(0,4)])\n return numberOfPossibleTuples",
"def solve(chars, length):\n return generate_greedy(generate_string_list(length, chars))",
"def _create_ngrams(tokens, n):\n\n ngrams = collections.Counter()\n for ngram in (tuple(tokens[i:i + n]) for i in xrange(len(tokens) - n + 1)):\n ngrams[ngram] += 1\n return ngrams",
"def _precook(s, n=4, out=False):\n if isinstance(s,str):\n words = s.split()\n # print(words)\n elif isinstance(s,list) or isinstance(s,tuple):\n words = s\n else:\n raise NotImplementedError(type(s))\n counts = defaultdict(int)\n for k in range(1, n + 1):\n for i in range(len(words) - k + 1):\n ngram = tuple(words[i:i + k])\n counts[ngram] += 1\n return (len(words), counts)",
"def _get_character_pairs(text):\n\n if not hasattr(text, \"upper\"):\n raise ValueError(\"Invalid argument\")\n\n results = dict()\n\n for word in text.upper().split():\n for pair in [word[i]+word[i+1] for i in range(len(word)-1)]:\n if pair in results:\n results[pair] += 1\n else:\n results[pair] = 1\n return results",
"def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))",
"def character_map(text):\n\n print(f\"Total character count: {len(text)}\\n\")\n\n characters = sorted(list(set(text))) # Get sorted list of individual characters\n n_to_char = {}\n char_to_n = {}\n\n num = 0\n for char in characters:\n n_to_char[num] = char\n char_to_n[char] = num\n num += 1\n\n return characters, n_to_char, char_to_n",
"def extract_char_ngrams(s: str, n: int) -> Counter:\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])",
"def letters_generator():\n def multiletters(seq):\n for n in itertools.count(1):\n for s in itertools.product(seq, repeat=n):\n yield \"\".join(s)\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return multiletters(letters)",
"def make_chains(text_string):\n\n chains = {}\n n = 2\n # n = int(raw_input(\"Enter the number of ngrams you want? >\"))\n # your code goes here\n words = text_string.split()\n for i in range(len(words) - 1):\n ngram = tuple(words[i: i + n])\n if i >= len(words) - n:\n chains[ngram] = None\n else:\n nth_word = words[i + n]\n if ngram not in chains:\n chains[ngram] = [nth_word]\n else:\n chains[ngram].append(nth_word)\n return (chains, n)",
"def make_chains(text_string, n):\n\n chains = {}\n i = 0\n i1 = 1\n i2 = 2\n \n # your code goes here\n # while i1 < len(text_string) - 1:\n # update_tuple = (text_string[i], text_string[i1])\n # chains[update_tuple] = chains.get(update_tuple, [])\n # chains[update_tuple].append(text_string[i2])\n # i += 1\n # i1 += 1\n # i2 += 1\n\n while n < len(text_string) - 1:\n update_tuple = tuple(text_string[i:n])\n chains[update_tuple] = chains.get(update_tuple, [])\n chains[update_tuple].append(text_string[n + 1])\n i += 1\n i1 += 1\n n += 1\n \n return chains",
"def make_chains(text_string, n):\n text_list = text_string.split()\n\n chains = {}\n\n for i in range(n):\n if text_list[i][0].isupper():\n starters = chains.get('START', [])\n starters.append(text_list[i:i+n])\n chains['START'] = starters\n\n # your code goes here\n for i in range(len(text_list)-n):\n n_gram = tuple(text_list[i:i+n])\n\n #bigram = (text_list[i], text_list[i+1])\n\n followers = chains.get(n_gram, [])\n followers.append(text_list[i+n])\n\n if n_gram[-1][-1] in {'.', '?', '!'}:\n followers.append('EOF')\n\n chains[n_gram] = followers\n\n if text_list[i+n][0].isupper():\n starters = chains.get('START', [])\n starters.append(text_list[i+n:i+(2*n)])\n chains['START'] = starters\n\n return chains",
"def find_substitutes(text):\n if CHAINS == {}:\n generate_food_chains()\n\n candidates = []\n subs = []\n for i in range(len(text)):\n char = text[i]\n if CHAINS.get(char):\n candidates = []\n candidates = CHAINS[char]\n else:\n if candidates != []:\n # choose the most popular option from candidates\n counts = {}\n for candidate in candidates:\n if counts.get(candidate):\n counts[candidate] += 1\n else:\n counts[candidate] = 1\n max_count = 0\n chosen = None\n for candidate, count in counts.iteritems():\n if count > max_count:\n max_count = count\n chosen = candidate\n if chosen:\n subs.append((chosen, i))\n\n candidates = []\n return subs",
"def precook(s, n=4, out=False):\n words = s.split()\n counts = defaultdict(int)\n for k in xrange(1,n+1):\n for i in xrange(len(words)-k+1):\n ngram = tuple(words[i:i+k])\n counts[ngram] += 1\n return counts",
"def get_num_cipher_matches(input_list):\n \n if isinstance(input_list, str):\n return 0\n \n count = 0\n for raw_string, cypher_string in combinations(input_list, 2):\n if is_match(raw_string, cypher_string):\n count += 1\n \n return count",
"def get_combo(un_lit):\n\n done_lit = []\n li_count = len(un_lit)\n\n for letter in un_lit: # for each letter in the provided\n placeholder = 0\n for num in range(li_count) # for each pos in list\n if letter.index == placeholder:\n temp_lit = \n\n elif letter.index > placeholder:\n \n elif letter.index < placeholder:\n\n done_lit.append(temp_lit)\n placeholder += 1",
"def genCharGroup(self):\n alphabet = list('abcdefghijklmnopqrstuvwxyz') #Creates a list of all the alphabet characters\n group = []\n count = 0\n while count != 3: #While the loop total does not equal 3\n i = random.choice(alphabet) #Make a random choice\n alphabet.remove(i) #Remove it from the alphabet\n group.append(i) #And add it to the group array\n count += 1 #Add one to the loop total\n return str(''.join(group)) #Return the string of 3 characters to the user",
"def get_subs(n):\n \n from itertools import product\n return [''.join(sub) for sub in product('CATGN', repeat=n)]",
"def generate_strings(char_list, length):\n if length <= 0:\n yield []\n elif length == 1:\n for char in char_list:\n yield [char]\n else:\n for char in char_list:\n for l in generate_strings(char_list, length-1):\n yield [char] + l",
"def exercise_b2_2():\r\n letters = ['a', 'e', 'i', 'o', 'u', 'u']\r\n combinations = list(permutations(letters))\r\n uniq_combinations = set(combinations)\r\n total_possibilities = len(combinations)\r\n total_uniq_possibilities = len(uniq_combinations)\r\n print(\"\\nThere are %s possible combinations and %s unique combinations for this set\\n\" \r\n % (total_possibilities, total_uniq_possibilities))\r\n return",
"def pattern_list(k):\r\n p_list=[]\r\n for i in list(itertools.product('ACGT', repeat=k)):\r\n x = ''.join(i)\r\n p_list.append(x)\r\n return p_list",
"def make_chains(text_string, n_gram):\n\n chains = {}\n words = text_string.split()\n for i in range(len(words)-n_gram):\n temp_list = []\n for z in range(n_gram):\n temp_list = temp_list + [words[i + z]]\n\n temp_tup = tuple(temp_list)\n\n # if temp_tup in chains:\n # chains[temp_tup] = chains[temp_tup] + [words[i + 2]]\n # else:\n # chains[temp_tup] = [words[i + 2]]\n chains.setdefault(temp_tup, [])\n chains[temp_tup].append(words[i + n_gram])\n #chains[temp_tup] = chains.get(temp_tup, []) + [words[i + 2]]\n print chains\n return chains",
"def generateNchars(inputChar, inputNum):\n return inputChar * int(inputNum)",
"def extract_char_ngrams(self, s: str, n: int) -> Counter:\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])",
"def extract_char_ngrams(self, s: str, n: int) -> Counter:\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])",
"def gen_ngrams(items, n):\n ngs = {}\n ilen = len(items)\n for i in xrange(ilen-n+1):\n ng = tuple(items[i:i+n])\n ngs[ng] = ngs.get(ng, 0) + 1\n return ngs"
] |
[
"0.6370319",
"0.631129",
"0.62461615",
"0.62214625",
"0.61855423",
"0.6170227",
"0.60728705",
"0.6035342",
"0.6032441",
"0.60059273",
"0.59726614",
"0.5971988",
"0.5968853",
"0.5966242",
"0.5942112",
"0.59405017",
"0.592574",
"0.5887282",
"0.58769894",
"0.5861784",
"0.58539724",
"0.58280814",
"0.5827111",
"0.58224607",
"0.58112085",
"0.57923615",
"0.578709",
"0.57832825",
"0.57832825",
"0.5741888"
] |
0.65442604
|
0
|
Generates a random hash
|
def create_hash(self):
return os.urandom(32).encode('hex')
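Note that bytes.encode('hex') exists only in Python 2, so the one-liner above fails on Python 3. A rough Python 3 equivalent (a sketch, not part of the original row) might be:

import os

def create_hash_py3():
    # 32 random bytes rendered as 64 hexadecimal characters
    return os.urandom(32).hex()

print(create_hash_py3())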
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def hash_gen(n):\n domain = \"abcdefghijklmnopqrstuvwxyz\"\n temp = \"\"\n for i in range(0, n):\n temp += domain[random.randrange(0, 26)]\n return temp",
"def seed_hash(*args):\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2**31)",
"def get_hash_str():\r\n\tli = \"\"\r\n\tfor i in range(5):\r\n\t\tli += str(int(int((6 * random.random()) + 1)))\r\n\treturn li",
"def randomHash(self):\n modulus = self.filter_size\n a, b = random.randint(1, modulus - 1), random.randint(1, modulus - 1)\n\n def f(x):\n return hash(x) % (a + b) % modulus\n\n return f",
"def create_hash() -> str:\n length = 6\n char = string.ascii_uppercase + string.digits + string.ascii_lowercase\n\n # Generate a new ID, until one is found that is unique\n while True:\n hash = \"\".join(random.choice(char) for _ in range(length))\n\n if not utils.cache_is_hash_taken(hash):\n return hash",
"def gensalt():\n return hexlify(os.urandom(24)).decode()",
"def generateHash(*length):\n from string import letters, digits\n from random import choice\n pool, size = letters + digits, length or 10\n hash = ''.join([choice(pool) for i in range(size)])\n return hash.lower()",
"def hash(x):\r\n return (randint(1,5*c)*x + randint(1,5*c))%c",
"def random_invite_hash():\n return ''.join(random.choice(string.ascii_lowercase) for i in range(25))",
"def randkey():\n return binascii.b2a_hex(os.urandom(15))",
"def nextRandom(self):\n # Apply SHA-256, interpreting digest output as integer\n # to yield 256-bit integer (a python \"long integer\")\n hash_output = self.basehash.digest()\n self.next()\n return hash_output",
"def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")",
"def genSeed():\n\tseed_length = int(''.join(random.SystemRandom().choice(string.digits) for _ in range(0, 3)))\n\tseed = os.urandom(seed_length)\n\thashing_algorithm = hashlib.shake_128()\n\thashing_algorithm.update(seed)\n\t# 2200 bytes from SHAKE-128 function is enough data to get 1024 coefficients\n\t# smaller than 5q, from Alkim, Ducas, Pöppelmann, Schwabe section 7:\n\tseed_hash = hashing_algorithm.digest(100)\n\treturn seed, seed_hash",
"def generate_key(self)->bytes:\n return os.urandom(32)",
"def mkRandHash(N):\n junk = \"\".join(random.choice(string.ascii_letters + string.digits)\n for unused in range(10))\n return lambda key: hash(junk + key) % N",
"def generate_key():\n return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())",
"def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()",
"def UUIDGen():\n\trandGen = random.Random()\n\trandGen.seed()\n\thashGen = sha.new(randStr512(randGen))\n\twhile 1:\n\t\thashGen.update(randStr512(randGen))\n\t\thashed = hashGen.digest()\n\t\tyield '%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x' % (\n\t\t\tord(hashed[0]), ord(hashed[1]), ord(hashed[2]), ord(hashed[3]),\n\t\t\tord(hashed[4]), ord(hashed[5]),\n\t\t\tord(hashed[6]) & 0x0F | 0x40, ord(hashed[7]),\n\t\t\tord(hashed[8]) & 0x3F | 0x80, ord(hashed[9]),\n\t\t\tord(hashed[10]), ord(hashed[11]),\n\t\t\tord(hashed[12]), ord(hashed[13]), ord(hashed[14]), ord(hashed[15]) )",
"def genKey(length=32):\r\n return os.urandom(length)",
"def random_h_function(key, N):\n\n if key is None:\n raise TypeError(\"Key must not be None\")\n\n\n hashcode = 0\n for i in range(len(key)):\n hashcode = (71 * hashcode + ord(key[i])) % N\n return hashcode",
"def deterministicrandom(x):\n\n i = hash_value(x)\n\n r = 1.0 * i / MAX_HASH_VALUE\n return r",
"def hash_generator(self, value):\n hash_string = hashlib.sha256(bytes(value))\n return hash_string.hexdigest()",
"def get_salt():\n return os.urandom(32)",
"def generate_hash(password):\n return pbkdf2_sha256.hash(password)",
"def generate_salt(size):\n return hexlify(urandom(size)).decode()",
"def _hashPassword(password):\n charset = './' + ascii_letters + digits\n return crypt.crypt(password, ''.join(random.sample(charset, 2)))",
"def GenerateRandomHexKey(length=_RANDOM_BYTE_LENGTH):\n # After encoded in hex, the length doubles.\n return os.urandom(length).encode('hex')",
"def generate_key(random=random.SystemRandom()):\n poly = 0\n while not is_acceptable_multiplier(poly):\n poly = random.getrandbits(61)\n oh = []\n for _ in range(2 * BLOCK_SIZE + TWISTING_COUNT):\n u64 = None\n while u64 is None or u64 in oh:\n u64 = random.getrandbits(64)\n oh.append(u64)\n return UmashKey(poly, oh)",
"def create_challenge():\n\treturn os.urandom(12)",
"def generate_hash(group_id, event_id, base_seed=0):\n hash = (base_seed + (group_id * GROUP_ID_HASH_CODE) % HASH_MOD_CODE +\n (event_id * EVENT_ID_HASH_CODE) % HASH_MOD_CODE) % HASH_MOD_CODE\n\n return hash"
] |
[
"0.7629578",
"0.758247",
"0.7516325",
"0.75082797",
"0.7490267",
"0.73890376",
"0.72695833",
"0.7265213",
"0.7222997",
"0.71885234",
"0.7061839",
"0.70534843",
"0.7019731",
"0.6947813",
"0.69440633",
"0.69429433",
"0.69230837",
"0.6919496",
"0.6906894",
"0.6899868",
"0.6898192",
"0.6851186",
"0.6849008",
"0.68262154",
"0.6806677",
"0.67981005",
"0.6763825",
"0.6761965",
"0.6742641",
"0.6705911"
] |
0.81974334
|
0
|
Send >count< ping to >dest_addr< with the given >timeout< and display the result.
|
def verbose_ping(dest_addr, timeout = 2, count = 4):
for i in range(count):
logging.info("%s: ping %s..." % (i, dest_addr))
try:
delay = do_one(dest_addr, timeout)
except socket.gaierror as e:
logging.error("failed. (socket error: '%s')" % (e[1]))
break
if delay == None:
logging.error("failed. (timeout within %ssec.)" % (timeout))
else:
delay = delay * 1000
logging.info("get ping in %0.4fms" % (delay))
logging.info('')
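A minimal way to exercise verbose_ping above (a sketch under some assumptions: do_one() comes from the same raw-socket ICMP ping module, which normally requires root privileges, and the socket import from that module is available; the target address is arbitrary). Also note that indexing the exception as e[1] is Python 2 behaviour; on Python 3 the equivalent would be e.args[1] or e.strerror.

import logging

logging.basicConfig(level=logging.INFO, format="%(message)s")
verbose_ping("8.8.8.8", timeout=2, count=4)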
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def SendTimeout(self) -> int:",
"def SendTimeout(self) -> int:",
"def send_command_timeout(self, sn: TransactionID, destination: tuple, source: tuple):\n pass",
"def ping(dst, count = 10, timeout = 30):\t\n\tpingTimes = []\n\tlogging.debug(\"ping %s (%i times)\", dst, count)\n\tcommand = Command(['ping', '-n', '-c', str(count), dst], timeout)\n\tcommand.run()\n\t# when it's executing here, the results have been available\n\tif command.out is not None:\n\t\tpattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tlines = command.out.split('\\n')\n\t\tfor line in lines:\n\t\t\tpingTime = re.search(pattern, line)\n\t\t\ttry:\n\t\t\t\tpingTimes.append(float(pingTime.group(1)))\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\treturn command.returncode, pingTimes",
"def send_message_timeout(self, sn: TransactionID, destination: tuple, source: tuple):\n pass",
"def atraso(myStats, destIP, hostname, timeout, mySeqNumber, packet_size, quiet=False):\n delay = None\n \n \"\"\"\n socket.AF_INET, é uma string que representa um nome de host na notação de domínio da Internet \n como 'daring.cwi.nl' ou um endereço IPv4 como '100.50.200.5' e porta é um inteiro.\"\"\"\n \"\"\"\n socket.getprotobyname(protocolname), Traduz um nome de protocolo da Internet (por exemplo, 'icmp') \n para uma constante adequada para passar como o terceiro argumento (opcional) \n para a função socket (). Isso geralmente é necessário apenas para soquetes abertos \n no modo \"bruto\" (SOCK_RAW); para os modos normais de soquete, o protocolo correto é \n escolhido automaticamente se o protocolo for omitido ou zero.\n \"\"\"\n\n try: \n mySocket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname(\"icmp\"))\n except socket.error as e:\n print(\"Falhou!!!. (Erro de socket: '%s')\" % e.args[1])\n\n my_ID = os.getpid() & 0xFFFF # retorna a identificação do processo atual.\n\n sentTime = send_one_ping(mySocket, destIP, my_ID, mySeqNumber, packet_size) # retorna o tempo enviado\n if sentTime == None:\n mySocket.close()\n return delay\n\n myStats.pktsSent += 1 # contador de pacotes enviados\n\n # retorna o tempo de resposta, o tamanho dado, o ping pingado, o numero de seq, id e timeout \n recvTime, dataSize, iphSrcIP, icmpSeqNumber, iphTTL = receive_one_ping(\n mySocket, my_ID, timeout) \n mySocket.close()\n\n if recvTime: # tempo de resposta for verdadeiro\n delay = (recvTime-sentTime)*1000 # tempo de resposta - tempo de envio = delay(ms)\n if not quiet:\n # exibição das respostas do ping\n responseServer = {'bytes': dataSize, 'ip': socket.inet_ntoa(struct.pack(\"!I\", iphSrcIP)), 'sequencia': icmpSeqNumber, 'ttl': iphTTL, 'tempo': round(delay,2)}\n listResponse.append(responseServer)\n\n\n myStats.pktsRcvd += 1 # contador de pacotes recebidos\n myStats.totTime += delay # contador do tempo total (todos os times desse host) \n if myStats.minTime > delay:\n myStats.minTime = delay # contador tempo mínimo\n if myStats.maxTime < delay: \n myStats.maxTime = delay # contador tempo máximo\n else:\n delay = None\n print(\"Requesição excedeu o tempo limite.\")\n\n return delay",
"def ping(destination,\n timeout=2,\n count=5,\n size=24,\n payload=None,\n sweep_start=None,\n sweep_end=None,\n dont_fragment=False,\n verbose=False,\n output=sys.stdout,\n match=False):\n\n ping_command = PingCommand(destination,\n timeout,\n count,\n size,\n payload,\n sweep_start,\n sweep_end,\n dont_fragment,\n verbose,\n output,\n match)\n\n print(f'\\nPinging {ping_command.destination}:\\n', file=output)\n\n ping_responses = ping_command.run()\n\n print(f'\\nPing statistics for {ping_command.destination}: ')\n\n print(f'\\tPackets: Sent = {ping_command.count}, '\n f'Received = {(ping_command.count - ping_responses.packets_lost) :.0f}, '\n f'Lost = {ping_responses.packets_lost :.0f} '\n f'<{((ping_responses.packets_lost / ping_command.count) * 100) :.0f}% loss>',\n file=output)\n\n print('\\nApproximate round trip times in milliseconds:')\n\n print(f'\\tMinimum = {ping_responses.rtt_min_ms}ms, '\n f'Maximum = {ping_responses.rtt_max_ms}ms, '\n f'Average = {ping_responses.rtt_avg_ms}ms',\n file=output)",
"def verbose_ping(hostname, timeout=WAIT_TIMEOUT, count=NUM_PACKETS,\n packet_size=PACKET_SIZE):\n myStats = MyStats() # Reseta o status\n\n mySeqNumber = 0 # Inicializando mySeqNumber\n\n try:\n destIP = socket.gethostbyname(hostname) # Pega endereco do IP do host\n except socket.gaierror as e: # Essa gerada para erros ao host\n return\n\n myStats.thisIP = destIP # atribui o ip destino do host ao ip do Objeto myStats\n\n for i in range(count):\n delay = atraso(myStats, destIP, hostname, \n timeout, mySeqNumber, packet_size)\n\n if delay == None:\n delay = 0\n\n mySeqNumber += 1\n\n # Aguarde para o restante do período MAX_SLEEP (se for verdade)\n if (MAX_SLEEP > delay):\n time.sleep((MAX_SLEEP - delay)/1000)\n\n dump_stats(myStats)",
"def ReceiveTimeout(self) -> int:",
"def ReceiveTimeout(self) -> int:",
"def tcpdump(timeout, q, interface):\t\n\tlogging.debug('tcpdump -s 1024 -lqnAt tcp port 80 -i eth0')\n\t# tcpdump -s 1024 -lqnAt tcp port 80\n\t\t\n\tcommand = Command(['/usr/sbin/tcpdump', '-s 1024', '-lnAq', '-i', interface], timeout)\n\tcommand.run()\n\n\t# when it's executing here, the results have been available\n\t# print command.out\n\n\tif command.out is not None:\n\t\t# pattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tip_pattern = \"IP ([0-9]+.[0-9]+.[0-9]+.[0-9]+).[0-9]+ > [0-9]+.[0-9]+.[0-9]+.[0-9]+.[0-9]\"\n\t\tgoogle_pattern = \"domain=.google.com\"\n\t\tlines = command.out.split('\\n')\n\t\tlast_ip = None\n\n\t\t# first time scan for google's return ip\n\t\tfor line in lines:\n\t\t\tip_src = re.search(ip_pattern, line)\n\t\t\tif ip_src is not None:\n\t\t\t\tlast_ip = ip_src.group(1)\n\t\t\tif re.search(google_pattern, line):\n\t\t\t\tprint last_ip\n\t\t\t\tbreak\n\n\t\tgEntries = []\n\t\tif last_ip is not None:\n\t\t\t\n\t\t\t# second time scan parse tcpdump for query entries\n\t\t\tfor line in lines:\n\t\t\t\tlast_ip_pos = re.search(last_ip, line)\n\t\t\t\tif last_ip_pos is None:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tif line.index('>') > last_ip_pos.start():\n\t\t\t\t\t# from remote to this place\n\t\t\t\t\ttraffic_type = 1\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# out to remote\n\t\t\t\t\ttraffic_type = 0\n\t\t\t\n\t\t\t\ttime_pattern = \"([0-9]+:[0-9]+:[0-9]+.[0-9]+) IP\"\n\t\t\t\ttimestamp = re.search(time_pattern, line)\n\t\t\t\tif timestamp is not None:\n\t\t\t\t\ttime_str = timestamp.group(1)\n\t\t\t\t\th, m, s, ms = map(int, re.split(r'[.:]+', time_str))\n\t\t\t\t\ttimestamp_delta = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)\n\t\t\t\t\tgEntries.append( (timestamp_delta, traffic_type) )\n\t\t\t\telse:\n\t\t\t\t\tgEntries.append( (None, -1))\n\n\t\tq.put((command.returncode, last_ip, gEntries))\n\t\treturn",
"def icmp_ping(ip_addr, timeout = 6, count = 1024):\n for i in range(count):\n print('Ping wait:')\n try:\n delay = ping_wait(ip_addr, timeout)\n except socket.gaierror as e:\n print('Failed. (socket error: %s)' % e[1])\n break\n\n if delay == None:\n print('Failed. (timeout within %s second.)' % timeout)\n else:\n print('get ICMP in %0.4f ms' % (delay * 1000))",
"def timeout():\n\t\ttimeout_ip = request.forms.get('ip')\n\t\tprint(\"timeout : {}\".format(timeout_ip))\n\t\tremove_vessel(timeout_ip)\n\t\treturn False",
"def output_timeout(text):\n if conf.eval_output:\n info_dict = {'type':'timeout', 'text' : text}\n output_result_eval(info_dict)\n else:\n output_result('[TIMEOUT] ' + text)",
"def do_one(dest_addr, timeout):\n icmp = socket.getprotobyname(\"icmp\")\n try:\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)\n except socket.error, (errno, msg):\n if errno == 1:\n # Operation not permitted\n msg = msg + (\n \" - Note that ICMP messages can only be sent from processes\"\n \" running as root.\"\n )\n raise socket.error(msg)\n raise # raise the original error\n \n my_ID = os.getpid() & 0xFFFF\n \n send_one_ping(my_socket, dest_addr, my_ID)\n delay = receive_one_ping(my_socket, my_ID, timeout)\n \n my_socket.close()\n return delay",
"def get_timeout(self) -> int:",
"def ping(self,dest):\n\t\tself.tn.write('ping -c 4 %s\\n'%(dest))\n\t\tself.tn.write('exit\\n')\n\t\tresp = self.tn.read_all()\n\t\treturn resp",
"def udp_timeout(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n seconds = 300\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_udp_timeout\": seconds})",
"def icmp_ping(ip_addr, timeout=0.5, count=4):\n is_connect = 1\n\n for i in range(count):\n try:\n delay = ping_once(ip_addr, timeout)\n except socket.gaierror, e:\n print \"failed. (socket error: '%s')\" % e[1]\n if delay == None:\n print 'failed. (timeout within %s second.)' % timeout\n is_connect = 0\n else:\n pass\n result = [ip_addr, round(delay, 4), is_connect]\n return result",
"def ping_test(hostaddr, count=2, timeout=1):\n xping = subprocess.Popen(['/bin/ping', '-W', str(timeout), '-c', str(count), hostaddr],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n xping.communicate()\n xping.wait()\n return not bool(xping.returncode)",
"def send_packet(self, src_packet, dest, N):\n\t\t# return a delay time\n\n\t\tnumHops = src_packet.src - dest\n\t\tif numHops < 0:\n\t\t\tnumHops = N - abs(numHops)\n\t\treturn numHops * 10e-5",
"def timeout(self):\n pf.debug(\"TIMEOUT\")\n self.acceptData(TIMEOUT)",
"def send_udp(target, count=500, port=util.DEFAULT_DST_PORT, tos=0x00,\n timeout=util.DEFAULT_TIMEOUT):\n sender = udp.Sender(target, port, count, tos, timeout)\n sender.run()\n return ProbeResults(sender.stats.loss, sender.stats.rtt_avg, target)",
"def sendArpReply(logger, device, destination, count=3, quiet=False, blocking=True):\n\n args = [Arping.ARPING_COMMAND_NAME, \n Arping.INTERFACE_OPTION, device, \n Arping.COUNT_OPTION, str(count),\n Arping.ARP_REPLY_OPTION]\n\n if quiet is True:\n args.append(Arping.QUIET_OPTION)\n\n # must set destination as last arg\n args.append(destination) \n\n rc = Command.execute(logger, Arping.ARPING_COMMAND_NAME, args, blocking=blocking)\n\n return rc",
"def handle_timer(self):\n\n for dest in self.hosts_to_unused_ports:\n self.hosts_to_unused_ports[dest] = [host for host in self.hosts_to_unused_ports[dest] if api.current_time() != host.time_to_live] \n self.hosts_to_ports[dest] = self.find_minium_latency_unused_ports(self.hosts_to_unused_ports[dest])\n\n #Send the reachable routes (must be less than infinity)\n for dest in self.hosts_to_ports:\n if self.hosts_to_ports[dest].latency < INFINITY: \n distance_vector = self.hosts_to_ports[dest] \n host_latency = distance_vector.latency\n\n distance_vector = self.hosts_to_ports[dest]\n\n # Send normal route packet\n packet = basics.RoutePacket(dest, host_latency)\n self.send(packet, distance_vector.port)\n\n # Send poison packet if POISON_MODE is true\n if self.POISON_MODE == True:\n poison_packet = basics.RoutePacket(dest, INFINITY)\n self.send(poison_packet, distance_vector.port)",
"def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count",
"def check_timer():\n end = time.time()\n time_elapsed = end - target_time[0]\n durationMSG = fg.cyan + f\"Scans Completed for {args.target} in: \" + fg.rs\n print(durationMSG, display_time(time_elapsed))",
"def udp_timeout(ctx, seconds):\n config_db = ConfigDBConnector()\n config_db.connect()\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_udp_timeout\": seconds})",
"def TODO_testTimeout(self):\n return \"\"\"TODO: Highly dependent on hardcoded downstream timeout val\"\"\"\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n\n self.client_send('get time0\\r\\n', 0)\n self.mock_recv('get time0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # downstream timeout logic should kick in,\n # without our mock server having to send anything.\n\n self.wait(210)\n\n self.client_recv('END\\r\\n', 0)\n\n # TODO: The number of server sessions should be 0,\n # except the close might not have propagated.",
"def recv_timeout(s, timeout=2):\n print 'Receive data in timeout mode'\n s.setblocking(0) # Non-blocking\n\n total_data = []\n data = ''\n\n begin = time.time()\n\n while 1:\n if total_data and time.time() - begin > timeout:\n break\n\n elif time.time() - begin > timeout:\n break\n\n try:\n data = s.recv(128)\n if data:\n total_data.append(data)\n begin = time.time()\n else:\n time.sleep(0.1)\n except Exception as e:\n # print 'Error:', e\n pass\n\n print ''.join(total_data)"
] |
[
"0.6299477",
"0.6299477",
"0.6273986",
"0.62287384",
"0.6068203",
"0.60135263",
"0.59908974",
"0.5865602",
"0.5819534",
"0.5819534",
"0.5675437",
"0.5655426",
"0.56502455",
"0.56440985",
"0.5607505",
"0.5594451",
"0.55888224",
"0.5520488",
"0.55140877",
"0.5496508",
"0.54870164",
"0.5475514",
"0.5448886",
"0.54449147",
"0.5439848",
"0.54396564",
"0.54143095",
"0.53672373",
"0.53223133",
"0.5314086"
] |
0.6857996
|
0
|