desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
'Get a list of blog articles related to an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of blog document dicts; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artist(\'bob marley\')
>>> blogs = a.get_blogs(results=1,start=4)
>>> blogs.total
4068
>>> blogs[0][\'summary\']
But the Kenyans I know relate to music about the same way Americans do. They like their Congolese afropop,
and I\'ve known some to be big fans of international acts like <span>Bob</span> <span>Marley</span> and Dolly Parton.
They rarely talk about music that\'s indigenous in the way a South African or Malian or Zimbabwean would, and it\'s
even rarer to actually hear such indigenous music. I do sometimes hear ceremonial chanting from the Maasai, but only
when they\'re dancing for tourists. If East Africa isn\'t the most musical part ... "'
def get_blogs(self, results=15, start=0, cache=True, high_relevance=False):
    """Return blog documents related to this artist, caching the default query."""
    is_default_query = (results == 15) and (start == 0)
    if cache and is_default_query and (not high_relevance) and ('blogs' in self.cache):
        return self.cache['blogs']
    relevance_flag = 'true' if high_relevance else 'false'
    response = self.get_attribute('blogs', results=results, start=start, high_relevance=relevance_flag)
    docs, total = response['blogs'], response['total']
    if is_default_query:
        self.cache['blogs'] = ResultList(docs, 0, total)
    return ResultList(docs, start, total)
|
'Get our numerical estimation of how familiar an artist currently is to the world
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing familiarity.
Example:
>>> a = artist.Artist(\'frank sinatra\')
>>> a.get_familiarity()
0.65142555825947457
>>> a.familiarity
0.65142555825947457'
def get_familiarity(self, cache=True):
    """Return the artist's familiarity score, fetching it on a cache miss."""
    if cache and 'familiarity' in self.cache:
        return self.cache['familiarity']
    response = self.get_attribute('familiarity')
    self.cache['familiarity'] = response['artist']['familiarity']
    return self.cache['familiarity']
|
'Get the foreign id for this artist for a specific id space
Args:
Kwargs:
idspace (str): A string indicating the idspace to fetch a foreign id for.
Returns:
A foreign ID string
Example:
>>> a = artist.Artist(\'fabulous\')
>>> a.get_foreign_id(\'7digital\')
u\'7digital:artist:186042\''
def get_foreign_id(self, idspace='musicbrainz', cache=True):
    """Return the artist's foreign id in `idspace`, or None when absent.

    Fixes: the cache lookup previously matched the raw `idspace` while the
    final lookup matched `util.map_idspace(idspace)`, so cached entries in
    mapped idspaces were refetched; both now use the mapped name (matching
    Song.get_foreign_id). `filter` results are materialized as lists so
    truthiness and indexing also behave correctly on Python 3.
    """
    mapped = util.map_idspace(idspace)
    cached = [d for d in self.cache.get('foreign_ids', []) if d.get('catalog') == mapped]
    if not (cache and cached):
        response = self.get_attribute('profile', bucket=[('id:' + idspace)])
        foreign_ids = response['artist'].get('foreign_ids', [])
        self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids
        cached = [d for d in self.cache['foreign_ids'] if d.get('catalog') == mapped]
    return cached[0].get('foreign_id') if cached else None
|
'Get the twitter id for this artist if it exists
Args:
Kwargs:
Returns:
A twitter ID string
Example:
>>> a = artist.Artist(\'big boi\')
>>> a.get_twitter_id()
u\'BigBoi\''
def get_twitter_id(self, cache=True):
    """Return the artist's twitter handle (may be None), using the cache."""
    if not cache or 'twitter' not in self.cache:
        profile = self.get_attribute('twitter')
        self.cache['twitter'] = profile['artist'].get('twitter')
    return self.cache['twitter']
|
'Get our numerical description of how hottt an artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
float: the hotttnesss value
Example:
>>> a = artist.Artist(\'hannah montana\')
>>> a.get_hotttnesss()
0.59906022155998995
>>> a.hotttnesss
0.59906022155998995'
def get_hotttnesss(self, cache=True):
    """Return the artist's hotttnesss score, using the cached value when allowed."""
    if cache and 'hotttnesss' in self.cache:
        return self.cache['hotttnesss']
    response = self.get_attribute('hotttnesss')
    self.cache['hotttnesss'] = response['artist']['hotttnesss']
    return self.cache['hotttnesss']
|
'Get a list of artist images
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
license (str): A string specifying the desired license type
Returns:
A list of image document dicts; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artist(\'Captain Beefheart\')
>>> images = a.get_images(results=1)
>>> images.total
49
>>> images[0][\'url\']
u\'http://c4.ac-images.myspacecdn.com/images01/5/l_e1a329cdfdb16a848288edc6d578730f.jpg\''
def get_images(self, results=15, start=0, license=None, cache=True):
    """Return image documents for this artist as a ResultList.

    Only the default query (results=15, start=0, no license) is cached.
    Fix: use `is None` rather than `== None` for the identity comparison
    (PEP 8; `==` can be overridden by operand types).
    """
    default_query = (results == 15) and (start == 0) and (license is None)
    if cache and default_query and ('images' in self.cache):
        return self.cache['images']
    response = self.get_attribute('images', results=results, start=start, license=license)
    # 'total' may be missing or None in the response; treat both as zero.
    total = response.get('total') or 0
    if default_query:
        self.cache['images'] = ResultList(response['images'], 0, total)
    return ResultList(response['images'], start, total)
|
'Get a list of news articles found on the web related to an artist
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of news document dicts; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artist(\'Henry Threadgill\')
>>> news = a.news
>>> news.total
41
>>> news[0][\'name\']
u\'Jazz Journalists Association Announces 2010 Jazz Award Winners\''
def get_news(self, results=15, start=0, cache=True, high_relevance=False):
    """Return news documents related to this artist, caching the default query."""
    defaults = (results == 15 and start == 0)
    if cache and defaults and not high_relevance and 'news' in self.cache:
        return self.cache['news']
    flag = 'true' if high_relevance else 'false'
    resp = self.get_attribute('news', results=results, start=start, high_relevance=flag)
    if defaults:
        self.cache['news'] = ResultList(resp['news'], 0, resp['total'])
    return ResultList(resp['news'], start, resp['total'])
|
'Get reviews related to an artist\'s work
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of review document dicts; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artist(\'Ennio Morricone\')
>>> reviews = a.reviews
>>> reviews.total
17
>>> reviews[0][\'release\']
u\'For A Few Dollars More\''
def get_reviews(self, results=15, start=0, cache=True):
    """Return review documents for this artist's work, caching the default query."""
    defaults = (results, start) == (15, 0)
    if cache and defaults and 'reviews' in self.cache:
        return self.cache['reviews']
    resp = self.get_attribute('reviews', results=results, start=start)
    if defaults:
        self.cache['reviews'] = ResultList(resp['reviews'], 0, resp['total'])
    return ResultList(resp['reviews'], start, resp['total'])
|
'Return similar artists to this one
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
max_familiarity (float): A float specifying the max familiarity of artists to search for
min_familiarity (float): A float specifying the min familiarity of artists to search for
max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
reverse (bool): A boolean indicating whether or not to return dissimilar artists (wrecommender). Defaults to False.
Returns:
A list of similar Artist objects
Example:
>>> a = artist.Artist(\'Sleater Kinney\')
>>> similars = a.similar[:5]
>>> similars
[<artist - Bikini Kill>, <artist - Pretty Girls Make Graves>, <artist - Huggy Bear>, <artist - Bratmobile>, <artist - Team Dresch>]'
def get_similar(self, results=15, start=0, buckets=None, limit=False, cache=True, max_familiarity=None, min_familiarity=None, max_hotttnesss=None, min_hotttnesss=None, min_results=None, reverse=False, artist_start_year_before=None, artist_start_year_after=None, artist_end_year_before=None, artist_end_year_after=None):
    """Return a list of Artist objects similar to this one.

    Only the bare default query (results=15, start=0, no filters) is cached.
    """
    kwargs = {}
    # Optional filters are forwarded only when truthy, so absent/zero values
    # are omitted from the request exactly as before.
    optional = (
        ('max_familiarity', max_familiarity),
        ('min_familiarity', min_familiarity),
        ('max_hotttnesss', max_hotttnesss),
        ('min_hotttnesss', min_hotttnesss),
        ('min_results', min_results),
        ('artist_start_year_before', artist_start_year_before),
        ('artist_start_year_after', artist_start_year_after),
        ('artist_end_year_before', artist_end_year_before),
        ('artist_end_year_after', artist_end_year_after),
    )
    for key, value in optional:
        if value:
            kwargs[key] = value
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    if reverse:
        kwargs['reverse'] = 'true'
    defaults = (results == 15) and (start == 0) and (not kwargs)
    if cache and defaults and 'similar' in self.cache:
        return [Artist(**util.fix(a)) for a in self.cache['similar']]
    response = self.get_attribute('similar', results=results, start=start, **kwargs)
    if defaults:
        self.cache['similar'] = response['artists']
    return [Artist(**util.fix(a)) for a in response['artists']]
|
'Get the songs associated with an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of Song objects; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artist(\'Strokes\')
>>> a.get_songs(results=5)
[<song - Fear Of Sleep>, <song - Red Light>, <song - Ize Of The World>, <song - Evening Sun>, <song - Juicebox>]'
def get_songs(self, cache=True, results=15, start=0):
    """Return this artist's songs as Song objects.

    Cached results are lazily upgraded from plain dicts to Song objects.
    Bug fix: an empty cached song list previously raised IndexError when
    probing its first element; the guard now checks the list is non-empty.
    """
    if cache and 'songs' in self.cache and results == 15 and start == 0:
        cached = self.cache['songs']
        # Upgrade cached plain dicts to Song objects exactly once.
        if cached and not isinstance(cached[0], Song):
            self.cache['songs'] = [
                Song(id=s['id'], title=s['title'], artist_name=self.name, artist_id=self.id)
                for s in cached
            ]
        return self.cache['songs']
    response = self.get_attribute('songs', results=results, start=start)
    for s in response['songs']:
        s.update({'artist_id': self.id, 'artist_name': self.name})
    songs = [Song(**util.fix(s)) for s in response['songs']]
    if results == 15 and start == 0:
        self.cache['songs'] = ResultList(songs, 0, response['total'])
    return ResultList(songs, start, response['total'])
|
'Get the terms associated with an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
sort (str): A string specifying the desired sorting type (weight or frequency)
Returns:
A list of term document dicts
Example:
>>> a = artist.Artist(\'tom petty\')
>>> a.terms
[{u\'frequency\': 1.0, u\'name\': u\'heartland rock\', u\'weight\': 1.0},
{u\'frequency\': 0.88569401860168606,
u\'name\': u\'jam band\',
u\'weight\': 0.9116501862732439},
{u\'frequency\': 0.9656145118557401,
u\'name\': u\'pop rock\',
u\'weight\': 0.89777934440040685},
{u\'frequency\': 0.8414744288140491,
u\'name\': u\'southern rock\',
u\'weight\': 0.8698567153186606},
{u\'frequency\': 0.9656145118557401,
u\'name\': u\'hard rock\',
u\'weight\': 0.85738022655218893},
{u\'frequency\': 0.88569401860168606,
u\'name\': u\'singer-songwriter\',
u\'weight\': 0.77427243392312772},
{u\'frequency\': 0.88569401860168606,
u\'name\': u\'rock\',
u\'weight\': 0.71158718989399083},
{u\'frequency\': 0.60874110500110956,
u\'name\': u\'album rock\',
u\'weight\': 0.69758668733499629},
{u\'frequency\': 0.74350792060935744,
u\'name\': u\'psychedelic\',
u\'weight\': 0.68457367494207944},
{u\'frequency\': 0.77213698386292873,
u\'name\': u\'pop\',
u\'weight\': 0.65039556639337293},
{u\'frequency\': 0.41747136183050298,
u\'name\': u\'bar band\',
u\'weight\': 0.54974975024767025}]'
def get_terms(self, sort='weight', cache=True):
    """Return term documents for this artist, caching the weight-sorted default."""
    if cache and sort == 'weight' and 'terms' in self.cache:
        return self.cache['terms']
    terms = self.get_attribute('terms', sort=sort)['terms']
    if sort == 'weight':
        self.cache['terms'] = terms
    return terms
|
'Get the urls for an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A url document dict
Example:
>>> a = artist.Artist(\'the unicorns\')
>>> a.get_urls()
{u\'amazon_url\': u\'http://www.amazon.com/gp/search?ie=UTF8&keywords=The Unicorns&tag=httpechonecom-20&index=music\',
u\'aolmusic_url\': u\'http://music.aol.com/artist/the-unicorns\',
u\'itunes_url\': u\'http://itunes.com/TheUnicorns\',
u\'lastfm_url\': u\'http://www.last.fm/music/The+Unicorns\',
u\'mb_url\': u\'http://musicbrainz.org/artist/603c5f9f-492a-4f21-9d6f-1642a5dbea2d.html\',
u\'myspace_url\': u\'http://www.myspace.com/iwasbornunicorn\'}'
def get_urls(self, cache=True):
    """Return the url document for this artist, fetching on a cache miss."""
    if cache and 'urls' in self.cache:
        return self.cache['urls']
    self.cache['urls'] = self.get_attribute('urls')['urls']
    return self.cache['urls']
|
'Get a list of video documents found on the web related to an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of video document dicts; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artist(\'the vapors\')
>>> a.get_video(results=1, start=2)
[{u\'date_found\': u\'2009-12-28T08:27:48\',
u\'id\': u\'d02f9e6dc7904f70402d4676516286b9\',
u\'image_url\': u\'http://i1.ytimg.com/vi/p6c0wOFL3Us/default.jpg\',
u\'site\': u\'youtube\',
u\'title\': u\'The Vapors-Turning Japanese (rectangular white vinyl promo)\',
u\'url\': u\'http://youtube.com/watch?v=p6c0wOFL3Us\'}]'
def get_video(self, results=15, start=0, cache=True):
    """Return video documents related to this artist, caching the default query."""
    defaults = results == 15 and start == 0
    if cache and defaults and 'video' in self.cache:
        return self.cache['video']
    resp = self.get_attribute('video', results=results, start=start)
    videos, total = resp['video'], resp['total']
    if defaults:
        self.cache['video'] = ResultList(videos, 0, total)
    return ResultList(videos, start, total)
|
'Get a list of years active dictionaries for an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A list of years active dictionaries; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artist(\'yelle\')
>>> a.get_years_active()
[{ start: 2005 }]'
def get_years_active(self, cache=True):
    """Return the artist's years-active ranges, fetching on a cache miss."""
    if not (cache and 'years_active' in self.cache):
        profile = self.get_attribute('profile', bucket=['years_active'])
        self.cache['years_active'] = profile['artist']['years_active']
    return self.cache['years_active']
|
'Get the number of related documents of various types for the artist.
The types include audio, biographies, blogs, images, news, reviews, songs, videos.
Note that these documents can be retrieved by calling artist.<document type>, for example,
artist.biographies.
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available).
Defaults to True.
Returns:
A dictionary with one key for each document type, mapped to an integer count of documents.
Example:
>>> a = artist.Artist("The Kinks")
>>> a.get_doc_counts()
{u\'audio\': 194,
u\'biographies\': 9,
u\'blogs\': 379,
u\'images\': 177,
u\'news\': 84,
u\'reviews\': 110,
u\'songs\': 499,
u\'videos\': 340}'
def get_doc_counts(self, cache=True):
    """Return a mapping of related-document type to count for this artist."""
    if cache and 'doc_counts' in self.cache:
        return self.cache['doc_counts']
    profile = self.get_attribute('profile', bucket='doc_counts')
    self.cache['doc_counts'] = profile['artist']['doc_counts']
    return self.cache['doc_counts']
|
'Song class
Args:
id (str): a song ID
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
Returns:
A Song object
Example:
>>> s = song.Song(\'SOPEXHZ12873FD2AC7\', buckets=[\'song_hotttnesss\', \'artist_hotttnesss\'])
>>> s.song_hotttnesss
0.58602500000000002
>>> s.artist_hotttnesss
0.80329715999999995'
def __init__(self, id, buckets=None, **kwargs):
    """Create a Song; `buckets` lists which document buckets to retrieve."""
    # Normalize a missing bucket list to [] before delegating upward.
    super(Song, self).__init__(id, buckets or [], **kwargs)
|
'Get an audio summary of a song containing mode, tempo, key, duration, time signature, loudness, danceability, energy, and analysis_url.
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A dictionary containing mode, tempo, key, duration, time signature, loudness, danceability, energy and analysis_url keys.
Example:
>>> s = song.Song(\'SOGNMKX12B0B806320\')
>>> s.audio_summary
{u\'analysis_url\': u\'https://echonest-analysis.s3.amazonaws.com/TR/RnMKCg47J5LgQZr0SISyoPuRxKVQx3Z_YSuhVa/3/full.json?Signature=KBUbewLiP3sZ2X6rRZzXhrgh8fw%3D&Expires=1349809604&AWSAccessKeyId=AKIAJRDFEY23UEVW42BQ\',
u\'audio_md5\': u\'ca3fdfa72eed23d5ad89872c38cecc0e\',
u\'danceability\': 0.33712086491871546,
u\'duration\': 470.70666999999997,
u\'energy\': 0.58186979146361684,
u\'key\': 0,
u\'liveness\': 0.08676759933615498,
u\'loudness\': -9.5960000000000001,
u\'mode\': 1,
u\'speechiness\': 0.036938896635994867,
u\'tempo\': 126.949,
u\'time_signature\': 4}'
def get_audio_summary(self, cache=True):
    """Return the song's audio_summary dict ({} when the profile lacks one)."""
    if cache and 'audio_summary' in self.cache:
        return self.cache['audio_summary']
    response = self.get_attribute('profile', bucket='audio_summary')
    songs = response['songs']
    summary = songs[0]['audio_summary'] if (songs and 'audio_summary' in songs[0]) else {}
    self.cache['audio_summary'] = summary
    return self.cache['audio_summary']
|
'Get our numerical description of how hottt a song currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song(\'SOLUHKP129F0698D49\')
>>> s.get_song_hotttnesss()
0.57344379999999995
>>> s.song_hotttnesss
0.57344379999999995'
def get_song_hotttnesss(self, cache=True):
    """Return the song's hotttnesss score, fetching it on a cache miss."""
    if cache and 'song_hotttnesss' in self.cache:
        return self.cache['song_hotttnesss']
    profile = self.get_attribute('profile', bucket='song_hotttnesss')
    self.cache['song_hotttnesss'] = profile['songs'][0]['song_hotttnesss']
    return self.cache['song_hotttnesss']
|
'Get the types of a song.
Args:
cache (boolean): A boolean indicating whether or not the cached value should be used
(if available). Defaults to True.
Returns:
A list of strings, each representing a song type: \'christmas\', for example.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.song_type
[u\'christmas\']'
def get_song_type(self, cache=True):
    """Return the song's type tags (e.g. ['christmas']), [] when absent.

    Fix: `dict.has_key` was removed in Python 3; `dict.get` with a default
    is equivalent and works on both Python 2 and 3.
    """
    if not (cache and 'song_type' in self.cache):
        response = self.get_attribute('profile', bucket='song_type')
        # The API omits 'song_type' entirely for songs with no type tags.
        self.cache['song_type'] = response['songs'][0].get('song_type', [])
    return self.cache['song_type']
|
'Get our numerical description of how hottt a song\'s artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song(\'SOOLGAZ127F3E1B87C\')
>>> s.artist_hotttnesss
0.45645633000000002
>>> s.get_artist_hotttnesss()
0.45645633000000002'
def get_artist_hotttnesss(self, cache=True):
    """Return the hotttnesss of this song's artist, using the cache when allowed."""
    if cache and 'artist_hotttnesss' in self.cache:
        return self.cache['artist_hotttnesss']
    resp = self.get_attribute('profile', bucket='artist_hotttnesss')
    self.cache['artist_hotttnesss'] = resp['songs'][0]['artist_hotttnesss']
    return self.cache['artist_hotttnesss']
|
'Get our numerical estimation of how familiar a song\'s artist currently is to the world
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing familiarity.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.get_artist_familiarity()
0.639626025843539
>>> s.artist_familiarity
0.639626025843539'
def get_artist_familiarity(self, cache=True):
    """Return the familiarity of this song's artist, fetching on a cache miss."""
    if cache and 'artist_familiarity' in self.cache:
        return self.cache['artist_familiarity']
    profile = self.get_attribute('profile', bucket='artist_familiarity')
    self.cache['artist_familiarity'] = profile['songs'][0]['artist_familiarity']
    return self.cache['artist_familiarity']
|
'Get the location of a song\'s artist.
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
An artist location object.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.artist_location
{u\'latitude\': 34.053489999999996, u\'location\': u\'Los Angeles, CA\', u\'longitude\': -118.24532000000001}'
def get_artist_location(self, cache=True):
    """Return the location document of this song's artist, using the cache."""
    if cache and 'artist_location' in self.cache:
        return self.cache['artist_location']
    profile = self.get_attribute('profile', bucket='artist_location')
    self.cache['artist_location'] = profile['songs'][0]['artist_location']
    return self.cache['artist_location']
|
'Get the foreign id for this song for a specific id space
Args:
Kwargs:
idspace (str): A string indicating the idspace to fetch a foreign id for.
Returns:
A foreign ID string
Example:
>>> s = song.Song(\'SOYRVMR12AF729F8DC\')
>>> s.get_foreign_id(\'CAGPXKK12BB06F9DE9\')'
def get_foreign_id(self, idspace='', cache=True):
    """Return this song's foreign id in `idspace`, or None when absent.

    Fix: materialize `filter` results as lists so the truthiness test and
    index access behave correctly on Python 3, where `filter` returns a
    lazy iterator that is always truthy and not subscriptable.
    """
    idspace = util.map_idspace(idspace)
    matches = [d for d in self.cache.get('foreign_ids', []) if d.get('catalog') == idspace]
    if not (cache and matches):
        response = self.get_attribute('profile', bucket=[('id:' + idspace)])
        rsongs = response['songs']
        if len(rsongs) == 0:
            return None
        foreign_ids = rsongs[0].get('foreign_ids', [])
        self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids
        matches = [d for d in self.cache['foreign_ids'] if d.get('catalog') == idspace]
    return matches[0].get('foreign_id') if matches else None
|
'Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing a song\'s discovery rank.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.get_song_discovery()
0.639626025843539
>>> s.song_discovery
0.639626025843539'
def get_song_discovery(self, cache=True):
    """Return the song's discovery rank, fetching it on a cache miss."""
    if cache and 'song_discovery' in self.cache:
        return self.cache['song_discovery']
    resp = self.get_attribute('profile', bucket='song_discovery')
    self.cache['song_discovery'] = resp['songs'][0]['song_discovery']
    return self.cache['song_discovery']
|
'Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing a song\'s currency rank.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.get_song_currency()
0.639626025843539
>>> s.song_currency
0.639626025843539'
def get_song_currency(self, cache=True):
    """Return the song's currency rank, using the cached value when allowed."""
    if cache and 'song_currency' in self.cache:
        return self.cache['song_currency']
    profile = self.get_attribute('profile', bucket='song_currency')
    self.cache['song_currency'] = profile['songs'][0]['song_currency']
    return self.cache['song_currency']
|
'Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song(\'SOWDASQ12A6310F24F\')
>>> s.get_tracks(\'7digital\')[0]
{u\'catalog\': u\'7digital\',
u\'foreign_id\': u\'7digital:track:8445818\',
u\'id\': u\'TRJGNNY12903CC625C\',
u\'preview_url\': u\'http://previews.7digital.com/clips/34/8445818.clip.mp3\',
u\'release_image\': u\'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg\'}'
def get_tracks(self, catalog, cache=True):
    """Return track dicts for this song in the given catalog.

    Fix: replace `filter` calls with list comprehensions so the cache test
    and the return value behave correctly on Python 3, where `filter`
    yields a lazy, always-truthy iterator instead of a list.
    """
    cached_catalogs = [td['catalog'] for td in self.cache.get('tracks', [])]
    if not (cache and 'tracks' in self.cache and catalog in cached_catalogs):
        kwargs = {'bucket': ['tracks', ('id:%s' % catalog)]}
        response = self.get_attribute('profile', **kwargs)
        self.cache.setdefault('tracks', [])
        potential_tracks = response['songs'][0].get('tracks', [])
        # Merge only tracks we have not already cached.
        existing_track_ids = [tr['foreign_id'] for tr in self.cache['tracks']]
        new_tds = [tr for tr in potential_tracks if tr['foreign_id'] not in existing_track_ids]
        self.cache['tracks'].extend(new_tds)
    return [tr for tr in self.cache['tracks'] if tr['catalog'] == util.map_idspace(catalog)]
|
'Logs the start of each task'
def playbook_on_task_start(self, name, is_conditional):
    """Record the start time of each task, closing out the previous one."""
    if self.current is not None:
        # Replace the stored start timestamp with the elapsed duration.
        self.stats[self.current] = time.time() - self.stats[self.current]
    self.current = name
    self.stats[name] = time.time()
|
'Prints the timings'
def playbook_on_stats(self, stats):
    """Print the ten slowest tasks, longest first.

    Fix: use the parenthesized, single-argument print form, which works
    identically on Python 2 and Python 3 (the bare print statement is a
    SyntaxError under Python 3).
    """
    if self.current is not None:
        # Close out the still-running task before reporting.
        self.stats[self.current] = time.time() - self.stats[self.current]
    timings = sorted(self.stats.items(), key=lambda item: item[1], reverse=True)[:10]
    for name, elapsed in timings:
        print('{0:-<70}{1:->9}'.format('{0} '.format(name), ' {0:.02f}s'.format(elapsed)))
|
'Get ZooKeeper server stats as a map'
def get_stats(self):
    """Return ZooKeeper server statistics parsed from the 'mntr' command."""
    return self._parse(self._send_cmd('mntr'))
|
'Send a 4letter word command to the server'
def _send_cmd(self, cmd):
    """Send a four-letter-word command to the ZooKeeper server and return the raw reply.

    Fixes: the socket is now closed even when connect/send/recv raises
    (previously it leaked on any error), and sendall() is used so a short
    write cannot silently truncate the command.
    """
    s = self._create_socket()
    try:
        s.settimeout(self._timeout)
        s.connect(self._address)
        s.sendall(cmd)
        data = s.recv(2048)
    finally:
        s.close()
    return data
|
'Parse the output from the \'mntr\' 4letter word command'
def _parse(self, data):
    """Parse 'mntr' output into a dict, dropping non-numeric status keys."""
    result = {}
    for line in StringIO(data):
        try:
            key, value = self._parse_line(line)
        except ValueError:
            # Malformed lines are silently ignored.
            continue
        if key not in ('zk_server_state', 'zk_version'):
            result[key] = value
    return result
|
'Checks the equality of two `UserMixin` objects using `get_id`.'
def __eq__(self, other):
    """Compare two `UserMixin` users by their `get_id` values."""
    if not isinstance(other, UserMixin):
        return NotImplemented
    return self.get_id() == other.get_id()
|
'Checks the inequality of two `UserMixin` objects using `get_id`.'
def __ne__(self, other):
    """Inverse of `__eq__`, propagating `NotImplemented`."""
    result = self.__eq__(other)
    return result if result is NotImplemented else (not result)
|
'This method has been deprecated. Please use
:meth:`LoginManager.init_app` instead.'
def setup_app(self, app, add_context_processor=True):
    """Deprecated alias for :meth:`LoginManager.init_app`."""
    message = 'Warning setup_app is deprecated. Please use init_app.'
    warnings.warn(message, DeprecationWarning)
    self.init_app(app, add_context_processor)
|
'Configures an application. This registers an `after_request` call, and
attaches this `LoginManager` to it as `app.login_manager`.
:param app: The :class:`flask.Flask` object to configure.
:type app: :class:`flask.Flask`
:param add_context_processor: Whether to add a context processor to
the app that adds a `current_user` variable to the template.
Defaults to ``True``.
:type add_context_processor: bool'
def init_app(self, app, add_context_processor=True):
    """Attach this LoginManager to `app` as ``app.login_manager``.

    Registers the remember-cookie ``after_request`` hook, honors the
    ``LOGIN_DISABLED`` config flag, and optionally installs a template
    context processor that exposes ``current_user``.
    """
    self._login_disabled = app.config.get('LOGIN_DISABLED', False)
    app.login_manager = self
    app.after_request(self._update_remember_cookie)
    if add_context_processor:
        app.context_processor(_user_context_processor)
|
'This is called when the user is required to log in. If you register a
callback with :meth:`LoginManager.unauthorized_handler`, then it will
be called. Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.login_message` to the user.
- If the app is using blueprints find the login view for
the current blueprint using `blueprint_login_views`. If the app
is not using blueprints or the login view for the current
blueprint is not specified use the value of `login_view`.
- Redirect the user to the login view. (The page they were
attempting to access will be passed in the ``next`` query
string variable, so you can redirect there if present instead
of the homepage. Alternatively, it will be added to the session
as ``next`` if USE_SESSION_FOR_NEXT is set.)
If :attr:`LoginManager.login_view` is not defined, then it will simply
raise a HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.'
def unauthorized(self):
    """Handle an access attempt by an unauthenticated user.

    If an ``unauthorized_handler`` callback is registered it is invoked and
    its response returned. Otherwise: flash the login message (localized if
    a localize_callback is set), resolve the login view (per-blueprint view
    wins over the global ``login_view``), and redirect there — carrying the
    originally requested URL either in the session (``USE_SESSION_FOR_NEXT``)
    or in the ``next`` query parameter. Aborts with HTTP 401 if no login
    view is configured.
    """
    # Notify subscribers before anything else happens.
    user_unauthorized.send(current_app._get_current_object())
    if self.unauthorized_callback:
        # A custom handler completely replaces the default behavior below.
        return self.unauthorized_callback()
    # Per-blueprint login views take precedence over the global one.
    if (request.blueprint in self.blueprint_login_views):
        login_view = self.blueprint_login_views[request.blueprint]
    else:
        login_view = self.login_view
    if (not login_view):
        # No login view configured: respond 401 rather than redirect.
        abort(401)
    if self.login_message:
        if (self.localize_callback is not None):
            flash(self.localize_callback(self.login_message), category=self.login_message_category)
        else:
            flash(self.login_message, category=self.login_message_category)
    config = current_app.config
    if config.get('USE_SESSION_FOR_NEXT', USE_SESSION_FOR_NEXT):
        # Store the post-login destination in the session instead of the URL.
        login_url = expand_login_view(login_view)
        session['next'] = make_next_param(login_url, request.url)
        redirect_url = make_login_url(login_view)
    else:
        redirect_url = make_login_url(login_view, next_url=request.url)
    return redirect(redirect_url)
|
'This sets the callback for reloading a user from the session. The
function you set should take a user ID (a ``unicode``) and return a
user object, or ``None`` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable'
def user_loader(self, callback):
    """Register *callback* as the session user loader and return it.

    The callback receives a user ID (a ``unicode``) and must return the
    matching user object, or ``None`` when no such user exists.
    """
    self.user_callback = callback
    return self.user_callback
|
'This function has been deprecated. Please use
:meth:`LoginManager.request_loader` instead.
This sets the callback for loading a user from a header value.
The function you set should take an authentication token and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable'
def header_loader(self, callback):
    """Register *callback* to load a user from a header value (deprecated).

    Prefer :meth:`LoginManager.request_loader`.  The callback takes an
    authentication token and returns a user object or ``None``.
    """
    setattr(self, 'header_callback', callback)
    return callback
|
'This sets the callback for loading a user from a Flask request.
The function you set should take Flask request object and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable'
def request_loader(self, callback):
    """Register *callback* to load a user from a Flask request.

    The callback takes the request object and returns a user object or
    ``None`` when no user can be derived from the request.
    """
    setattr(self, 'request_callback', callback)
    return callback
|
'This will set the callback for the `unauthorized` method, which among
other things is used by `login_required`. It takes no arguments, and
should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: callable'
def unauthorized_handler(self, callback):
    """Register *callback* as the handler used by :meth:`unauthorized`.

    The callback takes no arguments and returns the response to send to
    the user instead of their normal view.
    """
    self.unauthorized_callback = callback
    return self.unauthorized_callback
|
'This will set the callback for the `needs_refresh` method, which among
other things is used by `fresh_login_required`. It takes no arguments,
and should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: callable'
def needs_refresh_handler(self, callback):
    """Register *callback* as the handler used by :meth:`needs_refresh`.

    The callback takes no arguments and returns the response to send to
    the user instead of their normal view.
    """
    self.needs_refresh_callback = callback
    return self.needs_refresh_callback
|
'This is called when the user is logged in, but they need to be
reauthenticated because their session is stale. If you register a
callback with `needs_refresh_handler`, then it will be called.
Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.needs_refresh_message` to the user.
- Redirect the user to :attr:`LoginManager.refresh_view`. (The page
they were attempting to access will be passed in the ``next``
query string variable, so you can redirect there if present
instead of the homepage.)
If :attr:`LoginManager.refresh_view` is not defined, then it will
simply raise a HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.'
def needs_refresh(self):
    """Handle a stale (non-fresh) login session.

    Fires ``user_needs_refresh``; then delegates to a registered
    handler, aborts with HTTP 401 when no refresh view is configured, or
    flashes the refresh message and redirects to the refresh view,
    remembering the originally requested URL.
    """
    user_needs_refresh.send(current_app._get_current_object())
    if self.needs_refresh_callback:
        return self.needs_refresh_callback()
    if not self.refresh_view:
        abort(401)

    message = (self.needs_refresh_message
               if self.localize_callback is None
               else self.localize_callback(self.needs_refresh_message))
    flash(message, category=self.needs_refresh_message_category)

    config = current_app.config
    if config.get('USE_SESSION_FOR_NEXT', USE_SESSION_FOR_NEXT):
        # Stash the destination in the session instead of the URL.
        login_url = expand_login_view(self.refresh_view)
        session['next'] = make_next_param(login_url, request.url)
        redirect_url = make_login_url(self.refresh_view)
    else:
        redirect_url = make_login_url(self.refresh_view,
                                      next_url=request.url)
    return redirect(redirect_url)
|
'Loads user from session or remember_me cookie as applicable'
def _load_user(self):
    """Load the current user, trying the session first and falling back
    to the remember cookie, the request callback, then the auth header.
    """
    user_accessed.send(current_app._get_current_object())

    config = current_app.config
    if config.get('SESSION_PROTECTION', self.session_protection):
        # Session protection may invalidate the session; reload fresh
        # when it does.
        if self._session_protection():
            return self.reload_user()

    if 'user_id' not in session:
        cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
        header_name = config.get('AUTH_HEADER_NAME', AUTH_HEADER_NAME)
        remember_cleared = session.get('remember') == 'clear'
        if cookie_name in request.cookies and not remember_cleared:
            return self._load_from_cookie(request.cookies[cookie_name])
        if self.request_callback:
            return self._load_from_request(request)
        if header_name in request.headers:
            return self._load_from_header(request.headers[header_name])

    return self.reload_user()
|
'Shared assertions for simple tests.'
def simple_assertions(self, updates, num_bricks=2, num_updates=4, mean_only=False):
    """Shared assertions for simple batch-normalization tests."""
    assert len(updates) == num_updates
    assert all(is_shared_variable(pair[0]) for pair in updates)

    means = {pair[0] for pair in updates
             if has_roles(pair[0], [BATCH_NORM_POPULATION_MEAN])}
    stdevs = {pair[0] for pair in updates
              if has_roles(pair[0], [BATCH_NORM_POPULATION_STDEV])}
    # Every update target is either a mean or a stdev, never both.
    assert means.isdisjoint(stdevs)
    assert len({get_brick(v) for v in means}) == num_bricks
    if mean_only:
        assert len(stdevs) == 0
    else:
        assert len({get_brick(v) for v in stdevs}) == num_bricks
|
'Test that get_batch_normalization_updates works as expected.'
def test_get_batch_normalization_updates(self):
    """Test that get_batch_normalization_updates works as expected."""
    with batch_normalization(self.mlp):
        bn_output = self.mlp.apply(self.x)
    cg = ComputationGraph([bn_output])
    self.simple_assertions(get_batch_normalization_updates(cg))
|
'Test get_batch_normalization_updates with mean_only bricks.'
def test_get_batch_normalization_updates_mean_only(self):
    """Test get_batch_normalization_updates with mean_only bricks."""
    network = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9],
                                 mean_only=True)
    with batch_normalization(network):
        bn_output = network.apply(self.x)
    cg = ComputationGraph([bn_output])
    self.simple_assertions(get_batch_normalization_updates(cg),
                           num_updates=2, mean_only=True)
|
'Test updates extraction in a graph with a non-training apply.'
def test_get_batch_normalization_updates_non_training_applications(self):
    """Test updates extraction in a graph with a non-training apply."""
    inference_output = self.mlp.apply(self.x)
    with batch_normalization(self.mlp):
        training_output = self.mlp.apply(self.x)
    cg = ComputationGraph([training_output, inference_output])
    self.simple_assertions(get_batch_normalization_updates(cg))
|
'Test for exception if there are no training-mode nodes.'
def test_get_batch_normalization_updates_no_training(self):
    """Test for an exception when there are no training-mode nodes."""
    cg = ComputationGraph([self.mlp.apply(self.x)])
    numpy.testing.assert_raises(
        ValueError, get_batch_normalization_updates, cg)
|
'Test that we get an error by default on multiple apply.'
def test_get_batch_normalization_updates_duplicates_error(self):
    """Test that we get an error by default on multiple apply."""
    with batch_normalization(self.mlp):
        first = self.mlp.apply(self.x)
        second = self.mlp.apply(self.x)
    cg = ComputationGraph([first, second])
    numpy.testing.assert_raises(
        ValueError, get_batch_normalization_updates, cg)
|
'Test get_batch_normalization_updates(allow_duplicates=True).'
def test_get_batch_normalization_updates_allow_duplicates(self):
    """Test get_batch_normalization_updates(allow_duplicates=True)."""
    with batch_normalization(self.mlp):
        first = self.mlp.apply(self.x)
        second = self.mlp.apply(self.x)
    cg = ComputationGraph([first, second])
    self.simple_assertions(
        get_batch_normalization_updates(cg, allow_duplicates=True),
        num_bricks=2, num_updates=8)
|
'Create main loop and run it.'
def setUp(self):
    """Create a main loop, run it, and checkpoint to 'myweirdmodel.tar'."""
    network = MLP(activations=[None], dims=[10, 10],
                  weights_init=Constant(1.0), use_bias=False)
    network.initialize()
    self.W = network.linear_transformations[0].W
    features = tensor.vector('data')
    cost = network.apply(features).mean()
    samples = numpy.random.rand(10, 10).astype(theano.config.floatX)
    self.data_stream = IterableDataset(samples).get_example_stream()
    self.model = Model(cost)
    self.algorithm = GradientDescent(cost=cost, parameters=[self.W])
    self.main_loop = MainLoop(
        model=self.model,
        data_stream=self.data_stream,
        algorithm=self.algorithm,
        extensions=[FinishAfter(after_n_batches=5),
                    Checkpoint('myweirdmodel.tar',
                               save_separately=['log'])])
    self.main_loop.run()
|
'Check that main loop have been saved properly.'
def test_save_and_load(self):
    """Check that the main loop has been saved properly."""
    original_W = self.W.get_value()
    # Corrupt the parameter, then load it back from the checkpoint.
    self.W.set_value(original_W * 2)
    loader_loop = MainLoop(model=self.model,
                           data_stream=self.data_stream,
                           algorithm=self.algorithm,
                           extensions=[Load('myweirdmodel.tar')])
    loader_loop.extensions[0].main_loop = loader_loop
    loader_loop._run_extensions('before_training')
    assert_allclose(self.W.get_value(), original_W)
|
'Check we can save the log and iteration state separately.'
def test_load_log_and_iteration_state(self):
    """Check we can save the log and iteration state separately."""
    skip_if_configuration_set('log_backend', 'sqlite',
                              'Bug with log.status["resumed_from"]')
    loader_loop = MainLoop(model=self.model,
                           data_stream=self.data_stream,
                           algorithm=self.algorithm,
                           extensions=[Load('myweirdmodel.tar',
                                            True, True)])
    loader_loop.extensions[0].main_loop = loader_loop
    loader_loop._run_extensions('before_training')
    # The restored log must carry exactly the same status entries.
    new_keys = sorted(loader_loop.log.status.keys())
    old_keys = sorted(self.main_loop.log.status.keys())
    for new_key, old_key in zip(new_keys, old_keys):
        assert new_key == old_key
        assert (loader_loop.log.status[new_key]
                == self.main_loop.log.status[old_key])
    # The restored iteration state must resume at the same batch.
    assert_allclose(next(loader_loop.iteration_state[1])['data'],
                    next(self.main_loop.iteration_state[1])['data'])
|
'Check behaviour when loading nonexisting main loop.'
def test_load_nonexisting(self):
    """Check behaviour when loading a nonexistent main loop."""
    loader = Load('mynonexisting.tar')
    loader.main_loop = self.main_loop
    loader.do()
|
'Check loading exception.'
def test_loading_exception(self):
    """Check that loading a non-archive file raises ReadError."""
    with tempfile.NamedTemporaryFile(delete=False) as handle:
        handle.write(b'a')  # not a valid tar archive
    loader = Load(handle.name)
    loader.main_loop = self.main_loop
    self.assertRaises(tarfile.ReadError, loader.do)
|
'Check checkpoint exception.'
def test_checkpoint_exception(self):
    """Check the checkpoint exception for a bad save_separately key."""
    checkpointer = Checkpoint(None, save_separately=['foo'])
    checkpointer.main_loop = self.main_loop
    self.assertRaises(AttributeError, checkpointer.do, None)
|
'Cleaning.'
def tearDown(self):
    """Remove the checkpoint file created by setUp, if present."""
    checkpoint_path = 'myweirdmodel.tar'
    if os.path.exists(checkpoint_path):
        os.remove(checkpoint_path)
|
'Compute MSE.'
@application
def cost(self, readouts, outputs):
    """Compute the sum of squared errors over the last axis (MSE cost)."""
    squared_error = (readouts - outputs) ** 2
    return squared_error.sum(axis=readouts.ndim - 1)
|
'Attach an auxiliary variable to the graph.
Auxiliary variables are Theano variables that are not part of a
brick\'s output, but can be useful nonetheless e.g. as a regularizer
or to monitor during training progress.
Parameters
variable : :class:`~tensor.TensorVariable`
The variable you want to add.
roles : list of :class:`.VariableRole` instances, optional
The roles of this variable. The :const:`.AUXILIARY`
role will automatically be added. Other options are
:const:`.COST`, :const:`.WEIGHT`, etc.
name : str, optional
Name to give to the variable. If the variable already has a
name it will be overwritten.
Examples
>>> from blocks.bricks.base import application, Brick
>>> from blocks.roles import COST
>>> from blocks.utils import shared_floatx_nans
>>> class Foo(Brick):
... def _allocate(self):
... W = shared_floatx_nans((10, 10))
... self.add_auxiliary_variable(W.mean(), name=\'mean_W\')
... @application
... def apply(self, x, application_call):
... application_call.add_auxiliary_variable(
... x - 1, name=\'x_minus_1\')
... application_call.add_auxiliary_variable(
... x.mean(), roles=[COST], name=\'mean_x\')
... return x + 1
>>> from theano import tensor
>>> x = tensor.vector()
>>> y = Foo().apply(x)
>>> from blocks.graph import ComputationGraph
>>> cg = ComputationGraph([y])
>>> from blocks.filter import VariableFilter
>>> var_filter = VariableFilter(roles=[AUXILIARY])
>>> var_filter(cg.variables) # doctest: +SKIP
{x_minus_1, mean_W, mean_x}
>>> var_filter = VariableFilter(roles=[COST])
>>> var_filter(cg.variables) # doctest: +SKIP
{mean_x}'
def add_auxiliary_variable(self, variable, roles=None, name=None):
    """Attach an auxiliary variable to the graph.

    Annotates *variable* with this object, optionally (re)names it,
    tags it with the AUXILIARY role plus any extra *roles*, and records
    it in ``self.auxiliary_variables``.
    """
    add_annotation(variable, self)
    if name is not None:
        # Overwrite both the variable's own name and its tag's name.
        variable.name = name
        variable.tag.name = name
    add_role(variable, AUXILIARY)
    for role in (roles or []):
        add_role(variable, role)
    self.auxiliary_variables.append(variable)
|
'Inputs to the graph, excluding constants and shared variables.'
@property
def inputs(self):
    """Inputs to the graph, excluding constants and shared variables."""
    return list(filter(is_graph_input, self.variables))
|
'Variables of Scan ops.'
@property
def scan_variables(self):
    """Variables of the inner graphs of all Scan ops."""
    collected = []
    for inner_graph in self._scan_graphs:
        collected.extend(inner_graph.variables)
    return collected
|
'Collect variables, updates and auxiliary variables.
In addition collects all :class:`.Scan` ops and recurses in the
respective inner Theano graphs.'
| def _get_variables(self):
| # Collect variables, updates and auxiliary variables; also collects
# all Scan ops and recurses into their inner graphs.
updates = OrderedDict()
# Shared variables that appear directly as outputs need no traversal.
shared_outputs = [o for o in self.outputs if is_shared_variable(o)]
usual_outputs = [o for o in self.outputs if (not is_shared_variable(o))]
variables = shared_outputs
if usual_outputs:
# Topologically sort the apply nodes reachable from the outputs.
inputs = graph.inputs(self.outputs)
sorted_apply_nodes = graph.io_toposort(inputs, usual_outputs)
# Deduplicate Scan ops by identity and build a sub-graph for each.
self.scans = list(unique([node.op for node in sorted_apply_nodes if isinstance(node.op, Scan)], key=(lambda op: id(op))))
self._scan_graphs = [ComputationGraph(scan.outputs) for scan in self.scans]
# NOTE: `seen.add(var)` inside the condition is a deliberate
# dedup-while-building trick (set.add returns None, so the `or`
# both records the variable and keeps the predicate truthy once).
seen = set()
main_vars = ([var for var in list(chain(*[apply_node.inputs for apply_node in sorted_apply_nodes])) if (not ((var in seen) or seen.add(var)))] + [var for var in self.outputs if (var not in seen)])
# Walk annotations of every variable, pulling in their auxiliary
# variables (deduplicated against everything already collected)
# and merging their updates.
seen = set()
seen_avs = set(main_vars)
variables = []
for var in main_vars:
variables.append(var)
for annotation in getattr(var.tag, 'annotations', []):
if (annotation not in seen):
seen.add(annotation)
new_avs = [av for av in annotation.auxiliary_variables if (not ((av in seen_avs) or seen_avs.add(av)))]
variables.extend(new_avs)
updates = dict_union(updates, annotation.updates)
self.variables = variables
self.updates = updates
|
'Return a mapping from an input name to the input.'
def dict_of_inputs(self):
    """Return a mapping from an input's name to the input variable."""
    return dict((var.name, var) for var in self.inputs)
|
'Replace certain variables in the computation graph.
Parameters
replacements : dict
The mapping from variables to be replaced to the corresponding
substitutes.
Examples
>>> import theano
>>> from theano import tensor, function
>>> x = tensor.scalar(\'x\')
>>> y = x + 2
>>> z = y + 3
>>> a = z + 5
Let\'s suppose we have dependent replacements like
>>> replacements = {y: x * 2, z: y * 3}
>>> cg = ComputationGraph([a])
>>> theano.pprint(a) # doctest: +NORMALIZE_WHITESPACE
\'(((x + TensorConstant{2}) + TensorConstant{3}) +
TensorConstant{5})\'
>>> cg_new = cg.replace(replacements)
>>> theano.pprint(
... cg_new.outputs[0]) # doctest: +NORMALIZE_WHITESPACE
\'(((x * TensorConstant{2}) * TensorConstant{3}) +
TensorConstant{5})\'
First two sums turned into multiplications
>>> float(function(cg_new.inputs, cg_new.outputs)(3.)[0])
23.0'
| def replace(self, replacements):
| # Replace certain variables in the computation graph; handles
# dependent replacements by applying them one at a time while
# threading the not-yet-applied keys/values through theano.clone so
# they stay consistent with the partially rewritten graph.
replacements = OrderedDict(replacements)
outputs_cur = self.outputs
replacement_keys_cur = []
replacement_vals_cur = []
# Keep only replacements whose keys actually occur in this graph,
# preserving graph traversal order; warn about the rest below.
remaining_replacements = replacements.copy()
for variable in self.variables:
if (variable in replacements):
if has_roles(variable, [AUXILIARY]):
warnings.warn('replace method was asked to replace a variable ({}) that is an auxiliary variable.'.format(variable))
replacement_keys_cur.append(variable)
replacement_vals_cur.append(remaining_replacements.pop(variable))
if remaining_replacements:
warnings.warn('replace method was asked to replace a variable(s) ({}) that is not a part of the computational graph.'.format(str(remaining_replacements.keys())))
# Apply one replacement per iteration.  The pending keys and values
# are cloned together with the outputs so later replacements refer
# to the rewritten graph, then sliced back out of the clone result.
while replacement_keys_cur:
replace_what = replacement_keys_cur[0]
replace_by = replacement_vals_cur[0]
outputs_new = theano.clone(((outputs_cur + replacement_keys_cur[1:]) + replacement_vals_cur[1:]), replace={replace_what: replace_by})
outputs_cur = outputs_new[:len(outputs_cur)]
replacement_keys_cur = outputs_new[len(outputs_cur):((len(outputs_cur) + len(replacement_keys_cur)) - 1)]
replacement_vals_cur = outputs_new[(len(outputs_cur) + len(replacement_keys_cur)):]
return ComputationGraph(outputs_cur)
|
'Create Theano function from the graph contained.
Parameters
\*\*kwargs : dict
Keyword arguments to theano.function.
Useful for specifying compilation modes or profiling.'
def get_theano_function(self, additional_updates=None, **kwargs):
    """Create a Theano function from the contained graph.

    Parameters
    ----------
    additional_updates : iterable of update pairs, optional
        Extra updates merged with the graph's own updates.
    **kwargs : dict
        Keyword arguments forwarded to ``theano.function``; useful for
        specifying compilation modes or profiling.
    """
    merged_updates = self.updates
    if additional_updates:
        merged_updates = dict_union(
            merged_updates, OrderedDict(additional_updates))
    return theano.function(self.inputs, self.outputs,
                           updates=merged_updates, **kwargs)
|
'Evaluate all role-carrying Theano variables on given data.
Parameters
data : dict of (data source, data) pairs
Data for input variables. The sources should match with the
names of the input variables.
Returns
Dictionary of (variable, variable value on given data) pairs.'
def get_snapshot(self, data):
    """Evaluate all role-carrying Theano variables on the given data.

    Parameters
    ----------
    data : dict
        Maps data source names (matching the input variables' names)
        to the arrays to feed.

    Returns
    -------
    OrderedDict mapping each role-carrying variable to its value.
    """
    role_variables = [var for var in self.variables
                      if hasattr(var.tag, 'roles')
                      and not is_shared_variable(var)]
    # Capture each value in a matching shared variable via updates.
    value_holders = [shared_like(var) for var in role_variables]
    evaluate = self.get_theano_function(
        equizip(value_holders, role_variables))
    evaluate(*(data[input_.name] for input_ in self.inputs))
    return OrderedDict(
        (var, holder.get_value(borrow=True))
        for var, holder in equizip(role_variables, value_holders))
|
'Check if a variable depends on input variables.
Returns
bool
``True`` if the given variable depends on input variables,
``False`` otherwise.'
def has_inputs(self, variable):
    """Check whether *variable* (transitively) depends on graph inputs.

    Results are memoized in ``self._has_inputs``.

    Returns
    -------
    bool
        ``True`` iff the variable depends on an input variable.
    """
    if variable in self._has_inputs:
        return self._has_inputs[variable]
    depends = False
    if is_graph_input(variable):
        depends = True
    elif getattr(variable, 'owner', None):
        # Check (and memoize) every parent; no early exit, matching
        # the original's full traversal.
        for parent in variable.owner.inputs:
            if self.has_inputs(parent):
                depends = True
    self._has_inputs[variable] = depends
    return depends
|
'Compile all Theano functions used.'
| def compile(self):
| # Compile all Theano functions used, then flag the instance as
# ready so search() can skip recompilation.
self._compile_initial_state_and_context_computer()
self._compile_next_state_computer()
self._compile_logprobs_computer()
self.compiled = True
|
'Computes initial states and contexts from inputs.
Parameters
inputs : dict
Dictionary of input arrays.
Returns
A tuple containing a {name: :class:`numpy.ndarray`} dictionary of
contexts ordered like `self.context_names` and a
{name: :class:`numpy.ndarray`} dictionary of states ordered like
`self.state_names`.'
def compute_initial_states_and_contexts(self, inputs):
    """Compute initial states and contexts from the input dictionary.

    Returns
    -------
    A ``(contexts, initial_states, beam_size)`` tuple: contexts are
    keyed and ordered by ``self.context_names``; whatever the computer
    produced beyond contexts and ``beam_size`` forms the states.
    """
    arguments = [inputs[var] for var in self.inputs]
    computed = self.initial_state_and_context_computer(*arguments)
    contexts = OrderedDict(
        (name, computed.pop(name)) for name in self.context_names)
    beam_size = computed.pop('beam_size')
    # The leftovers after popping contexts and beam size are states.
    return (contexts, computed, beam_size)
|
'Compute log probabilities of all possible outputs.
Parameters
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
Returns
A :class:`numpy.ndarray` of the (beam size, number of possible
outputs) shape.'
def compute_logprobs(self, contexts, states):
    """Compute log probabilities of all possible outputs.

    Feeds the context values followed by the input-state values to the
    compiled log-probability function and returns its result, shaped
    (beam size, number of possible outputs).
    """
    arguments = list(contexts.values())
    arguments.extend(states[name] for name in self.input_state_names)
    return self.logprobs_computer(*arguments)
|
'Computes next states.
Parameters
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
outputs : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of this step outputs.
Returns
A {name: numpy.array} dictionary of next states.'
def compute_next_states(self, contexts, states, outputs):
    """Compute the next states.

    Feeds contexts, the input-state values, and this step's outputs to
    the compiled next-state function.

    Returns
    -------
    OrderedDict mapping each state name to its next value.
    """
    arguments = list(contexts.values())
    arguments.extend(states[name] for name in self.input_state_names)
    arguments.append(outputs)
    next_values = self.next_state_computer(*arguments)
    return OrderedDict(equizip(self.state_names, next_values))
|
'Find k smallest elements of a matrix.
Parameters
matrix : :class:`numpy.ndarray`
The matrix.
k : int
The number of smallest elements required.
only_first_row : bool, optional
Consider only elements of the first row.
Returns
Tuple of ((row numbers, column numbers), values).'
| @staticmethod
def _smallest(matrix, k, only_first_row=False):
| if only_first_row:
flatten = matrix[:1, :].flatten()
else:
flatten = matrix.flatten()
args = numpy.argpartition(flatten, k)[:k]
args = args[numpy.argsort(flatten[args])]
return (numpy.unravel_index(args, matrix.shape), flatten[args])
|
'Performs beam search.
If the beam search was not compiled, it also compiles it.
Parameters
input_values : dict
A {:class:`~theano.Variable`: :class:`~numpy.ndarray`}
dictionary of input values. The shapes should be
the same as if you ran sampling with batch size equal to
`beam_size`. Put it differently, the user is responsible
for duplicating the inputs the necessary number of times, because
this class has insufficient information to do it properly.
eol_symbol : int
End of sequence symbol, the search stops when the symbol is
generated.
max_length : int
Maximum sequence length, the search stops when it is reached.
ignore_first_eol : bool, optional
When ``True``, the end of sequence symbol generated at the
first iteration are ignored. This useful when the sequence
generator was trained on data with identical symbols for
sequence start and sequence end.
as_arrays : bool, optional
If ``True``, the internal representation of search results
is returned, that is a (matrix of outputs, mask,
costs of all generated outputs) tuple.
Returns
outputs : list of lists of ints
A list of the `beam_size` best sequences found in the order
of decreasing likelihood.
costs : list of floats
A list of the costs for the `outputs`, where cost is the
negative log-likelihood.'
| def search(self, input_values, eol_symbol, max_length, ignore_first_eol=False, as_arrays=False):
| # Performs beam search; compiles the Theano functions lazily.
if (not self.compiled):
self.compile()
(contexts, states, beam_size) = self.compute_initial_states_and_contexts(input_values)
# Rows are time steps, columns are beam hypotheses.
all_outputs = states['outputs'][None, :]
all_masks = numpy.ones_like(all_outputs, dtype=config.floatX)
all_costs = numpy.zeros_like(all_outputs, dtype=config.floatX)
for i in range(max_length):
# Stop once every hypothesis has emitted the end symbol.
if (all_masks[(-1)].sum() == 0):
break
logprobs = self.compute_logprobs(contexts, states)
# Accumulate costs; finished hypotheses contribute nothing new.
next_costs = (all_costs[(-1), :, None] + (logprobs * all_masks[(-1), :, None]))
# Force finished hypotheses to keep emitting only eol_symbol.
(finished,) = numpy.where((all_masks[(-1)] == 0))
next_costs[finished, :eol_symbol] = numpy.inf
next_costs[finished, (eol_symbol + 1):] = numpy.inf
# On the first step all rows are identical, so search row 0 only.
((indexes, outputs), chosen_costs) = self._smallest(next_costs, beam_size, only_first_row=(i == 0))
# Reorder the beam to match the chosen hypotheses.
for name in states:
states[name] = states[name][indexes]
all_outputs = all_outputs[:, indexes]
all_masks = all_masks[:, indexes]
all_costs = all_costs[:, indexes]
states.update(self.compute_next_states(contexts, states, outputs))
all_outputs = numpy.vstack([all_outputs, outputs[None, :]])
all_costs = numpy.vstack([all_costs, chosen_costs[None, :]])
mask = (outputs != eol_symbol)
# Optionally ignore an end symbol on the very first step (for
# generators trained with identical start/end symbols).
if (ignore_first_eol and (i == 0)):
mask[:] = 1
all_masks = numpy.vstack([all_masks, mask[None, :]])
# Drop the seed row, trim the trailing mask row, and convert
# cumulative costs back into per-step costs.
all_outputs = all_outputs[1:]
all_masks = all_masks[:(-1)]
all_costs = (all_costs[1:] - all_costs[:(-1)])
result = (all_outputs, all_masks, all_costs)
if as_arrays:
return result
return self.result_to_lists(result)
|
'Add a configuration setting.
Parameters
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
type : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.'
def add_config(self, key, type_, default=NOT_SET, env_var=None):
    """Add a configuration setting.

    Parameters
    ----------
    key : str
        Name of the setting (a valid Python attribute name).
    type_ : callable
        Converts a raw value (a string from the environment, or an
        already-parsed YAML value) to the proper type.
    default : object, optional
        Value used when the setting is unset; without it an error is
        raised on access instead.
    env_var : str, optional
        Environment variable that can supply this setting; if omitted
        the setting can only come from the YAML file.
    """
    entry = {'type': type_}
    if env_var is not None:
        entry['env_var'] = env_var
    if default is not NOT_SET:
        entry['default'] = default
    self.config[key] = entry
|
'Initialize the training algorithm.'
| @abstractmethod
def initialize(self, **kwargs):
| pass  # abstract: concrete training algorithms set up their state here
|
'Process a batch of training data.
Attributes
batch : dict
A dictionary of (source name, data) pairs.'
| @abstractmethod
def process_batch(self, batch):
| pass  # abstract: `batch` is a dict of (source name, data) pairs
|
'Add updates to the training process.
The updates will be done _before_ the parameters are changed.
Parameters
updates : list of tuples or :class:`~collections.OrderedDict`
The updates to add.'
def add_updates(self, updates):
    """Add updates to the training process.

    The updates will be performed *before* the parameters are changed.

    Parameters
    ----------
    updates : list of tuples or :class:`~collections.OrderedDict`
        The updates to add.

    Raises
    ------
    ValueError
        If *updates* is neither a list nor an OrderedDict.
    """
    if isinstance(updates, OrderedDict):
        updates = list(updates.items())
    if not isinstance(updates, list):
        # Name the offending type instead of raising a bare ValueError.
        raise ValueError(
            'updates must be a list of (variable, expression) pairs or '
            'an OrderedDict, got {}'.format(type(updates).__name__))
    self.updates.extend(updates)
|
'Build a Theano expression for the step for a parameter.
This method is called by default implementation of
:meth:`compute_steps`, it relieves from writing a loop each time.
Parameters
parameter : :class:`~tensor.TensorSharedVariable`
The parameter.
previous_step : :class:`~tensor.TensorVariable`
Some quantity related to the gradient of the cost with respect
to the parameter, either the gradient itself or a step in a
related direction.
Returns
step : :class:`~theano.Variable`
Theano variable for the step to take.
updates : list
A list of tuples representing updates to be performed. This
is useful for stateful rules such as :class:`Momentum` which
need to update shared variables after iterations.'
| def compute_step(self, parameter, previous_step):
| # Abstract hook used by compute_steps(): subclasses return a
# (step, updates) pair for a single parameter.
raise NotImplementedError
|
'Build a Theano expression for steps for all parameters.
Override this method if you want to process the steps
with respect to all parameters as a whole, not parameter-wise.
Parameters
previous_steps : OrderedDict
An :class:`~OrderedDict` of
(:class:`~tensor.TensorSharedVariable`
:class:`~tensor.TensorVariable`) pairs. The keys are the
parameters being trained, the values are the expressions for
quantities related to gradients of the cost with respect to
the parameters, either the gradients themselves or steps in
related directions.
Returns
steps : OrderedDict
A dictionary of the proposed steps in the same form as
`previous_steps`.
updates : list
A list of tuples representing updates to be performed.'
def compute_steps(self, previous_steps):
    """Build Theano expressions for the steps of all parameters.

    Applies :meth:`compute_step` parameter-wise and merges the results.
    Override to process all parameters as a whole instead.

    Returns
    -------
    A ``(steps, updates)`` pair: ``steps`` maps each parameter to its
    proposed step (same form as *previous_steps*); ``updates`` is the
    concatenation of all per-parameter update lists.
    """
    per_parameter = [self.compute_step(parameter,
                                       previous_steps[parameter])
                     for parameter in previous_steps]
    steps, update_lists = equizip(*per_parameter)
    steps = OrderedDict(equizip(previous_steps.keys(), steps))
    updates = list(itertools.chain.from_iterable(update_lists))
    return (steps, updates)
|
'Constructs a path from its string representation.
.. todo::
More error checking.
Parameters
string : str
String representation of the path.'
@staticmethod
def parse(string):
    """Construct a path from its string representation.

    Alternating separator/part pairs are parsed into
    :class:`Path.BrickName` nodes (brick separator) and
    :class:`Path.ParameterName` nodes (parameter separator).

    Parameters
    ----------
    string : str
        String representation of the path.

    Raises
    ------
    ValueError
        If the string is malformed or contains an unknown separator.
    """
    elements = Path.separator_re.split(string)[1:]
    separators = elements[::2]
    parts = elements[1::2]
    if not (len(elements) == 2 * len(separators) == 2 * len(parts)):
        raise ValueError
    nodes = []
    for separator, part in equizip(separators, parts):
        if separator == Path.separator:
            nodes.append(Path.BrickName(part))
        # BUG FIX: the original compared Path.parameter_separator with
        # itself (always true), so an unknown separator could never
        # reach the error branch below.
        elif separator == Path.parameter_separator:
            nodes.append(Path.ParameterName(part))
        else:
            raise ValueError('Wrong separator {}'.format(separator))
    return Path(nodes)
|
'Select a subset of current selection matching the path given.
.. warning::
Current implementation is very inefficient (theoretical
complexity is :math:`O(n^3)`, where :math:`n` is the number
of bricks in the hierarchy). It can be sped up easily.
Parameters
path : :class:`Path` or str
The path for the desired selection. If a string is given
it is parsed into a path.
Returns
Depending on the path given, one of the following:
* :class:`Selector` with desired bricks.
* list of :class:`~tensor.SharedTensorVariable`.'
def select(self, path):
    """Select a subset of the current selection matching the given path.

    .. warning::
        The current implementation is very inefficient (roughly cubic
        in the number of bricks in the hierarchy).

    Parameters
    ----------
    path : :class:`Path` or str
        Desired selection; strings are parsed into a :class:`Path`.

    Returns
    -------
    A :class:`Selector` over the matched bricks, or a list of shared
    variables when the path ends in a parameter name.
    """
    if isinstance(path, six.string_types):
        path = Path.parse(path)

    # ``None`` stands for the root, i.e. ``self.bricks``.
    current_bricks = [None]
    for node in path.nodes:
        matched = []
        if isinstance(node, Path.ParameterName):
            # A parameter name terminates the search immediately.
            return list(
                Selector(current_bricks).get_parameters(node).values())
        if isinstance(node, Path.BrickName):
            for brick in current_bricks:
                children = brick.children if brick else self.bricks
                for child in children:
                    if child.name == node and child not in matched:
                        matched.append(child)
        current_bricks = matched
    return Selector(current_bricks)
|
'Returns parameters from selected bricks and their descendants.
Parameters
parameter_name : :class:`Path.ParameterName`, optional
If given, only parameters with a `name` attribute equal to
`parameter_name` are returned.
Returns
parameters : OrderedDict
A dictionary of (`path`, `parameter`) pairs, where `path` is
a string representation of the path in the brick hierarchy
to the parameter (i.e. the slash-delimited path to the brick
that owns the parameter, followed by a dot, followed by the
parameter\'s name), and `parameter` is the Theano variable
representing the parameter.
Examples
>>> from blocks.bricks import MLP, Tanh
>>> mlp = MLP([Tanh(), Tanh(), Tanh()], [5, 7, 11, 2])
>>> mlp.allocate()
>>> selector = Selector([mlp])
>>> selector.get_parameters() # doctest: +NORMALIZE_WHITESPACE
OrderedDict([(\'/mlp/linear_0.W\', W), (\'/mlp/linear_0.b\', b),
(\'/mlp/linear_1.W\', W), (\'/mlp/linear_1.b\', b),
(\'/mlp/linear_2.W\', W), (\'/mlp/linear_2.b\', b)])
Or, select just the weights of the MLP by passing the parameter
name `W`:
>>> w_select = Selector([mlp])
>>> w_select.get_parameters(\'W\') # doctest: +NORMALIZE_WHITESPACE
OrderedDict([(\'/mlp/linear_0.W\', W), (\'/mlp/linear_1.W\', W),
(\'/mlp/linear_2.W\', W)])'
| def get_parameters(self, parameter_name=None):
| # Returns parameters from the selected bricks and their descendants
# as an OrderedDict of (string path, parameter) pairs; optionally
# filtered to parameters named `parameter_name`.
def recursion(brick):
# Parameters owned directly by this brick, keyed by a path made
# of the brick's name plus the parameter's name.
result = [(Path([Path.BrickName(brick.name), Path.ParameterName(parameter.name)]), parameter) for parameter in brick.parameters if ((not parameter_name) or (parameter.name == parameter_name))]
result = OrderedDict(result)
# Recurse into children, prefixing their paths with this brick's
# name and refusing duplicate paths.
for child in brick.children:
for (path, parameter) in recursion(child).items():
new_path = (Path([Path.BrickName(brick.name)]) + path)
if (new_path in result):
raise ValueError((('Name collision encountered while retrieving ' + 'parameters.') + name_collision_error_message.format(new_path)))
result[new_path] = parameter
return result
result = dict_union(*[recursion(brick) for brick in self.bricks])
return OrderedDict(((str(key), value) for (key, value) in result.items()))
|
'Runs callback with the given name.
The reason for having this method is to allow
the descendants of the :class:`TrainingExtension` to intercept
callback invocations and do something with them, e.g. block
when certain condition does not hold. The default implementation
simply invokes the callback by its name.'
def dispatch(self, callback_name, *args):
    """Run the callback with the given name.

    Exists so descendants of :class:`TrainingExtension` can intercept
    callback invocations (e.g. block them conditionally); the default
    implementation simply invokes the callback by name.
    """
    handler = getattr(self, str(callback_name))
    handler(*args)
|
'The callback invoked after training is resumed.'
| @callback
def on_resumption(self):
| pass  # intentional no-op hook (after training resumes); subclasses override
|
'The callback invoked before training is started.'
| @callback
def before_training(self):
| pass  # intentional no-op hook (before training starts); subclasses override
|
'The callback invoked before starting an epoch.'
| @callback
def before_epoch(self):
| pass  # intentional no-op hook (before each epoch); subclasses override
|
'The callback invoked before a batch is processed.
Parameters
batch : object
The data batch to be processed.'
| @callback
def before_batch(self, batch):
| pass  # intentional no-op hook (before a batch is processed); subclasses override
|
'The callback invoked after a batch is processed.
Parameters
batch : object
The data batch just processed.'
| @callback
def after_batch(self, batch):
| pass  # intentional no-op hook (after a batch is processed); subclasses override
|
'The callback invoked after an epoch is finished.'
| @callback
def after_epoch(self):
| pass  # intentional no-op hook (after each epoch); subclasses override
|
'The callback invoked after training is finished.'
| @callback
def after_training(self):
| pass  # intentional no-op hook (after training finishes); subclasses override
|
'The callback invoked when training is interrupted.'
| @callback
def on_interrupt(self):
| pass  # intentional no-op hook (when training is interrupted); subclasses override
|
def set_conditions(self, **kwargs):
    """Set the conditions for which this extension should be run.

    Any previously registered conditions are discarded first.  See the
    :class:`SimpleExtension` docstring for the accepted keyword
    arguments.

    Returns
    -------
    The extension object itself (allows chaining calls).

    Raises
    ------
    KeyError
        If a keyword is in neither ``self.BOOLEAN_TRIGGERS`` nor
        ``self.INTEGER_TRIGGERS``.
    """
    # Clear in place so any external holders of the list see the reset.
    self._conditions[:] = []
    # Trigger-specific predicates; has_done_epochs guards
    # 'before_first_epoch' so it fires only before any epoch completed.
    predicates = {'before_first_epoch': has_done_epochs}
    # Maps a trigger keyword to the callback it should hook into; any
    # keyword not listed here is used as the callback name directly.
    conditions = {'before_first_epoch': 'before_epoch', 'after_epoch': 'after_epoch', 'after_batch': 'after_batch', 'every_n_batches': 'after_batch', 'every_n_epochs': 'after_epoch', 'after_n_batches': 'after_batch', 'after_n_epochs': 'after_epoch'}
    for (key, value) in kwargs.items():
        if value:
            if (key in self.BOOLEAN_TRIGGERS):
                self.add_condition([conditions.get(key, key)], predicate=predicates.get(key, None))
            elif (key in self.INTEGER_TRIGGERS):
                # Integer triggers (e.g. every_n_batches=5) build a
                # Predicate parameterized by the supplied value.
                predicate = Predicate(key, value)
                self.add_condition([conditions.get(key, key)], predicate=predicate)
            else:
                raise KeyError('Invalid condition: {}'.format(key))
    return self
|
def add_condition(self, callbacks_names, predicate=None, arguments=None):
    """Add a condition under which :meth:`do` is called.

    Parameters
    ----------
    callbacks_names : list of str
        Names of the callbacks in which the method should run.
    predicate : function, optional
        A function taking the main loop's log as its single argument
        and returning ``True`` when the method should be called.  When
        ``None``, an always-``True`` predicate is used.
    arguments : iterable, optional
        Extra arguments passed to :meth:`do`, concatenated after the
        ones coming from the main loop (e.g. the batch for the
        ``after_batch`` callback).

    Returns
    -------
    The extension object itself (allows chaining calls).

    Raises
    ------
    ValueError
        If ``callbacks_names`` is not a list or tuple.
    """
    if not isinstance(callbacks_names, (list, tuple)):
        raise ValueError('callbacks_names must be list or tuple.')
    # Resolve defaults once; the same objects are shared by every
    # registered condition, exactly as before.
    extra_arguments = arguments if arguments else []
    checker = predicate if predicate else always_true
    for name in callbacks_names:
        self._conditions.append((name, checker, extra_arguments))
    return self
|
@abstractmethod
def do(self, which_callback, *args):
    """Do the job of the training extension.

    Parameters
    ----------
    which_callback : str
        The name of the callback in the context of which :meth:`do`
        is run.
    \*args : tuple
        The arguments from the main loop concatenated with additional
        arguments from the user.

    Notes
    -----
    Subclasses *must* accept additional positional arguments in their
    call signature for this method, even if they are unused.
    """
    pass
|
def dispatch(self, callback_invoked, *from_main_loop):
    """Check conditions and call the :meth:`do` method.

    For every registered condition matching the invoked callback whose
    predicate accepts the main loop's log, :meth:`do` is invoked with
    the main-loop arguments followed by the condition's extra
    arguments.

    .. todo::
        Add a check for the situation where several conditions are met
        at the same time and do something.
    """
    for (name, predicate, extra) in self._conditions:
        if name != callback_invoked:
            continue
        # The log is re-read each iteration: a do() call may update it.
        if predicate(self.main_loop.log):
            self.do(callback_invoked, *(from_main_loop + tuple(extra)))
|
@staticmethod
def parse_args(which_callback, args):
    """Separate :meth:`do` arguments coming from different sources.

    When a :meth:`do` method receives arguments from both the main
    loop (e.g. a batch) and the user, it often has to separate them.
    This method is the right tool to use.

    Parameters
    ----------
    which_callback : str
        The name of the callback.
    args : iterable
        The arguments.

    Returns
    -------
    from_main_loop : tuple
    from_user : tuple
    """
    all_args = tuple(args)
    if which_callback in ('after_batch', 'before_batch'):
        # The first argument is the batch supplied by the main loop.
        return ((all_args[0],), all_args[1:])
    return ((), all_args)
|
def get_iter_per_epoch(self):
    """Try to infer the number of iterations per epoch.

    Returns
    -------
    int or None
        The inferred iteration count, or ``None`` when the iteration
        scheme exposes neither an index list nor example/batch counts.
    """
    scheme = self.main_loop.data_stream.iteration_scheme
    has_indices = hasattr(scheme, 'indices')
    has_batch_size = hasattr(scheme, 'batch_size')
    if has_indices and not has_batch_size:
        # One example per iteration.
        return len(scheme.indices)
    if has_indices and has_batch_size:
        return len(scheme.indices) // scheme.batch_size
    if hasattr(scheme, 'num_examples') and has_batch_size:
        return scheme.num_examples // scheme.batch_size
    return None
|
def create_bar(self):
    """Create a new progress bar.

    Calls :meth:`get_iter_per_epoch`, selects an appropriate set of
    widgets and creates a ProgressBar.

    Returns
    -------
    progressbar.ProgressBar
        A freshly configured bar for the current epoch.
    """
    iter_per_epoch = self.get_iter_per_epoch()
    epochs_done = self.main_loop.log.status['epochs_done']
    if (iter_per_epoch is None):
        # Epoch length unknown: show a bouncing bar with a step counter
        # and elapsed time only (no percentage/ETA possible).
        widgets = ['Epoch {}, step '.format(epochs_done), progressbar.Counter(), ' ', progressbar.BouncingBar(), ' ', progressbar.Timer()]
        iter_per_epoch = progressbar.UnknownLength
    else:
        # Epoch length known: include percentage, a real bar and an ETA.
        widgets = ['Epoch {}, step '.format(epochs_done), progressbar.Counter(), ' (', progressbar.Percentage(), ') ', progressbar.Bar(), ' ', progressbar.Timer(), ' ', progressbar.ETA()]
    return progressbar.ProgressBar(widgets=widgets, max_value=iter_per_epoch)
|
'The record name for a variable name.'
| def _record_name(self, name):
| if (not isinstance(name, str)):
raise ValueError('record name must be a string')
return self.SEPARATOR.join([morpheme for morpheme in [self.prefix, name, self.suffix] if (morpheme is not None)])
|
def record_name(self, variable):
    """Return the record name for a variable, derived from its name."""
    variable_name = variable.name
    return self._record_name(variable_name)
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.