desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Get a list of blog articles related to an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An ingteger starting value for the result set
Returns:
A list of blog doc... | def get_blogs(self, results=15, start=0, cache=True, high_relevance=False):
| if (cache and ('blogs' in self.cache) and (results == 15) and (start == 0) and (not high_relevance)):
return self.cache['blogs']
else:
high_relevance = ('true' if high_relevance else 'false')
response = self.get_attribute('blogs', results=results, start=start, high_relevance=high_relevan... |
'Get our numerical estimation of how familiar an artist currently is to the world
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing familiarity.
Example:
>>> a = artist.Artist(\'frank sinatra\')
>>> a.get_famil... | def get_familiarity(self, cache=True):
| if (not (cache and ('familiarity' in self.cache))):
response = self.get_attribute('familiarity')
self.cache['familiarity'] = response['artist']['familiarity']
return self.cache['familiarity']
|
'Get the foreign id for this artist for a specific id space
Args:
Kwargs:
idspace (str): A string indicating the idspace to fetch a foreign id for.
Returns:
A foreign ID string
Example:
>>> a = artist.Artist(\'fabulous\')
>>> a.get_foreign_id(\'7digital\')
u\'7digital:artist:186042\''
| def get_foreign_id(self, idspace='musicbrainz', cache=True):
| if (not (cache and ('foreign_ids' in self.cache) and filter((lambda d: (d.get('catalog') == idspace)), self.cache['foreign_ids']))):
response = self.get_attribute('profile', bucket=[('id:' + idspace)])
foreign_ids = response['artist'].get('foreign_ids', [])
self.cache['foreign_ids'] = (self.... |
'Get the twitter id for this artist if it exists
Args:
Kwargs:
Returns:
A twitter ID string
Example:
>>> a = artist.Artist(\'big boi\')
>>> a.get_twitter_id()
u\'BigBoi\''
| def get_twitter_id(self, cache=True):
| if (not (cache and ('twitter' in self.cache))):
response = self.get_attribute('twitter')
self.cache['twitter'] = response['artist'].get('twitter')
return self.cache['twitter']
|
'Get our numerical description of how hottt an artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
float: the hotttnesss value
Example:
>>> a = artist.Artist(\'hannah montana\')
>>> a.get_hotttnesss()
0.5990602215... | def get_hotttnesss(self, cache=True):
| if (not (cache and ('hotttnesss' in self.cache))):
response = self.get_attribute('hotttnesss')
self.cache['hotttnesss'] = response['artist']['hotttnesss']
return self.cache['hotttnesss']
|
'Get a list of artist images
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
license (str): A string specifying the desired license ty... | def get_images(self, results=15, start=0, license=None, cache=True):
| if (cache and ('images' in self.cache) and (results == 15) and (start == 0) and (license == None)):
return self.cache['images']
else:
response = self.get_attribute('images', results=results, start=start, license=license)
total = (response.get('total') or 0)
if ((results == 15) an... |
'Get a list of news articles found on the web related to an artist
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of ... | def get_news(self, results=15, start=0, cache=True, high_relevance=False):
| if (cache and ('news' in self.cache) and (results == 15) and (start == 0) and (not high_relevance)):
return self.cache['news']
else:
high_relevance = ('true' if high_relevance else 'false')
response = self.get_attribute('news', results=results, start=start, high_relevance=high_relevance)... |
'Get reviews related to an artist\'s work
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of review document d... | def get_reviews(self, results=15, start=0, cache=True):
| if (cache and ('reviews' in self.cache) and (results == 15) and (start == 0)):
return self.cache['reviews']
else:
response = self.get_attribute('reviews', results=results, start=start)
if ((results == 15) and (start == 0)):
self.cache['reviews'] = ResultList(response['reviews... |
'Return similar artists to this one
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
max_familiarity (float): A float specifyin... | def get_similar(self, results=15, start=0, buckets=None, limit=False, cache=True, max_familiarity=None, min_familiarity=None, max_hotttnesss=None, min_hotttnesss=None, min_results=None, reverse=False, artist_start_year_before=None, artist_start_year_after=None, artist_end_year_before=None, artist_end_year_after=None):
| buckets = (buckets or [])
kwargs = {}
if max_familiarity:
kwargs['max_familiarity'] = max_familiarity
if min_familiarity:
kwargs['min_familiarity'] = min_familiarity
if max_hotttnesss:
kwargs['max_hotttnesss'] = max_hotttnesss
if min_hotttnesss:
kwargs['min_hotttn... |
'Get the songs associated with an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Results:
A list of Song objects; list... | def get_songs(self, cache=True, results=15, start=0):
| if (cache and ('songs' in self.cache) and (results == 15) and (start == 0)):
if (not isinstance(self.cache['songs'][0], Song)):
song_objects = []
for s in self.cache['songs']:
song_objects.append(Song(id=s['id'], title=s['title'], artist_name=self.name, artist_id=self... |
'Get the terms associated with an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
sort (str): A string specifying the desired sorting type (weight or frequency)
Results:
A list of term document dicts
Example:
>>> a = artist.Artist(... | def get_terms(self, sort='weight', cache=True):
| if (cache and ('terms' in self.cache) and (sort == 'weight')):
return self.cache['terms']
else:
response = self.get_attribute('terms', sort=sort)
if (sort == 'weight'):
self.cache['terms'] = response['terms']
return response['terms']
|
'Get the urls for an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Results:
A url document dict
Example:
>>> a = artist.Artist(\'the unicorns\')
>>> a.get_urls()
{u\'amazon_url\': u\'http://www.amazon.com/gp/search?ie=UTF8&keywor... | def get_urls(self, cache=True):
| if (not (cache and ('urls' in self.cache))):
response = self.get_attribute('urls')
self.cache['urls'] = response['urls']
return self.cache['urls']
|
'Get a list of video documents found on the web related to an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
... | def get_video(self, results=15, start=0, cache=True):
| if (cache and ('video' in self.cache) and (results == 15) and (start == 0)):
return self.cache['video']
else:
response = self.get_attribute('video', results=results, start=start)
if ((results == 15) and (start == 0)):
self.cache['video'] = ResultList(response['video'], 0, res... |
'Get a list of years active dictionaries for an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A list of years active dictionaries; list contains additional attributes \'start\' and \'total\'
Example:
>>> a = artist.Artis... | def get_years_active(self, cache=True):
| if (cache and ('years_active' in self.cache)):
return self.cache['years_active']
else:
response = self.get_attribute('profile', bucket=['years_active'])
self.cache['years_active'] = response['artist']['years_active']
return response['artist']['years_active']
|
'Get the number of related documents of various types for the artist.
The types include audio, biographies, blogs, images, news, reviews, songs, videos.
Note that these documents can be retrieved by calling artist.<document type>, for example,
artist.biographies.
Args:
Kwargs:
cache (bool): A boolean indicating whether... | def get_doc_counts(self, cache=True):
| if ((not cache) or (not ('doc_counts' in self.cache))):
response = self.get_attribute('profile', bucket='doc_counts')
self.cache['doc_counts'] = response['artist']['doc_counts']
return self.cache['doc_counts']
|
'Song class
Args:
id (str): a song ID
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
Returns:
A Song object
Example:
>>> s = song.Song(\'SOPEXHZ12873FD2AC7\', buckets=[\'song_hotttnesss\', \'artist_hotttnesss\'])
>>> s.song_hotttnesss
0.58602500000000002
>>> s.artist_hotttnesss
0.8032971... | def __init__(self, id, buckets=None, **kwargs):
| buckets = (buckets or [])
super(Song, self).__init__(id, buckets, **kwargs)
|
'Get an audio summary of a song containing mode, tempo, key, duration, time signature, loudness, danceability, energy, and analysis_url.
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A dictionary containing mode, tempo, key, du... | def get_audio_summary(self, cache=True):
| if (not (cache and ('audio_summary' in self.cache))):
response = self.get_attribute('profile', bucket='audio_summary')
if (response['songs'] and ('audio_summary' in response['songs'][0])):
self.cache['audio_summary'] = response['songs'][0]['audio_summary']
else:
self.... |
'Get our numerical description of how hottt a song currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song(\'SOLUHKP129F0698D49\')
>>> s.get_song_hotttnesss()
0.573... | def get_song_hotttnesss(self, cache=True):
| if (not (cache and ('song_hotttnesss' in self.cache))):
response = self.get_attribute('profile', bucket='song_hotttnesss')
self.cache['song_hotttnesss'] = response['songs'][0]['song_hotttnesss']
return self.cache['song_hotttnesss']
|
'Get the types of a song.
Args:
cache (boolean): A boolean indicating whether or not the cached value should be used
(if available). Defaults to True.
Returns:
A list of strings, each representing a song type: \'christmas\', for example.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.song_type
[u\'christmas\... | def get_song_type(self, cache=True):
| if (not (cache and ('song_type' in self.cache))):
response = self.get_attribute('profile', bucket='song_type')
if response['songs'][0].has_key('song_type'):
self.cache['song_type'] = response['songs'][0]['song_type']
else:
self.cache['song_type'] = []
return self.... |
'Get our numerical description of how hottt a song\'s artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song(\'SOOLGAZ127F3E1B87C\')
>>> s.artist_hotttnesss... | def get_artist_hotttnesss(self, cache=True):
| if (not (cache and ('artist_hotttnesss' in self.cache))):
response = self.get_attribute('profile', bucket='artist_hotttnesss')
self.cache['artist_hotttnesss'] = response['songs'][0]['artist_hotttnesss']
return self.cache['artist_hotttnesss']
|
'Get our numerical estimation of how familiar a song\'s artist currently is to the world
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing familiarity.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.get_artis... | def get_artist_familiarity(self, cache=True):
| if (not (cache and ('artist_familiarity' in self.cache))):
response = self.get_attribute('profile', bucket='artist_familiarity')
self.cache['artist_familiarity'] = response['songs'][0]['artist_familiarity']
return self.cache['artist_familiarity']
|
'Get the location of a song\'s artist.
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
An artist location object.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.artist_location
{u\'latitude\': 34.053489999999996, u\'location\':... | def get_artist_location(self, cache=True):
| if (not (cache and ('artist_location' in self.cache))):
response = self.get_attribute('profile', bucket='artist_location')
self.cache['artist_location'] = response['songs'][0]['artist_location']
return self.cache['artist_location']
|
'Get the foreign id for this song for a specific id space
Args:
Kwargs:
idspace (str): A string indicating the idspace to fetch a foreign id for.
Returns:
A foreign ID string
Example:
>>> s = song.Song(\'SOYRVMR12AF729F8DC\')
>>> s.get_foreign_id(\'CAGPXKK12BB06F9DE9\')'
| def get_foreign_id(self, idspace='', cache=True):
| idspace = util.map_idspace(idspace)
if (not (cache and ('foreign_ids' in self.cache) and filter((lambda d: (d.get('catalog') == idspace)), self.cache['foreign_ids']))):
response = self.get_attribute('profile', bucket=[('id:' + idspace)])
rsongs = response['songs']
if (len(rsongs) == 0):
... |
'Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing a song\'s discovery rank.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.get_song_discovery()
0.639626025843539
>>> s.song_discovery
0.639626025843539'
| def get_song_discovery(self, cache=True):
| if (not (cache and ('song_discovery' in self.cache))):
response = self.get_attribute('profile', bucket='song_discovery')
self.cache['song_discovery'] = response['songs'][0]['song_discovery']
return self.cache['song_discovery']
|
'Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing a song\'s currency rank.
Example:
>>> s = song.Song(\'SOQKVPH12A58A7AF4D\')
>>> s.get_song_currency()
0.639626025843539
>>> s.song_currency
0.639626025843539'
| def get_song_currency(self, cache=True):
| if (not (cache and ('song_currency' in self.cache))):
response = self.get_attribute('profile', bucket='song_currency')
self.cache['song_currency'] = response['songs'][0]['song_currency']
return self.cache['song_currency']
|
'Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song(\'SOWDASQ12A6310F24F\')
>>> s.get_tracks(\'7digital\')[0]
{u\'catalog\': u\'7digital\',
u\'foreign_id\': u\'7digital:track:84458... | def get_tracks(self, catalog, cache=True):
| if (not (cache and ('tracks' in self.cache) and (catalog in [td['catalog'] for td in self.cache['tracks']]))):
kwargs = {'bucket': ['tracks', ('id:%s' % catalog)]}
response = self.get_attribute('profile', **kwargs)
if (not ('tracks' in self.cache)):
self.cache['tracks'] = []
... |
'Logs the start of each task'
| def playbook_on_task_start(self, name, is_conditional):
| if (self.current is not None):
self.stats[self.current] = (time.time() - self.stats[self.current])
self.current = name
self.stats[self.current] = time.time()
|
'Prints the timings'
| def playbook_on_stats(self, stats):
| if (self.current is not None):
self.stats[self.current] = (time.time() - self.stats[self.current])
results = sorted(self.stats.items(), key=(lambda value: value[1]), reverse=True)
results = results[:10]
for (name, elapsed) in results:
print '{0:-<70}{1:->9}'.format('{0} '.format(name)... |
'Get ZooKeeper server stats as a map'
| def get_stats(self):
| data = self._send_cmd('mntr')
return self._parse(data)
|
'Send a 4letter word command to the server'
| def _send_cmd(self, cmd):
| s = self._create_socket()
s.settimeout(self._timeout)
s.connect(self._address)
s.send(cmd)
data = s.recv(2048)
s.close()
return data
|
'Parse the output from the \'mntr\' 4letter word command'
| def _parse(self, data):
| h = StringIO(data)
result = {}
for line in h.readlines():
try:
(key, value) = self._parse_line(line)
if (key not in ['zk_server_state', 'zk_version']):
result[key] = value
except ValueError:
pass
return result
|
'Checks the equality of two `UserMixin` objects using `get_id`.'
| def __eq__(self, other):
| if isinstance(other, UserMixin):
return (self.get_id() == other.get_id())
return NotImplemented
|
'Checks the inequality of two `UserMixin` objects using `get_id`.'
| def __ne__(self, other):
| equal = self.__eq__(other)
if (equal is NotImplemented):
return NotImplemented
return (not equal)
|
'This method has been deprecated. Please use
:meth:`LoginManager.init_app` instead.'
| def setup_app(self, app, add_context_processor=True):
| warnings.warn('Warning setup_app is deprecated. Please use init_app.', DeprecationWarning)
self.init_app(app, add_context_processor)
|
'Configures an application. This registers an `after_request` call, and
attaches this `LoginManager` to it as `app.login_manager`.
:param app: The :class:`flask.Flask` object to configure.
:type app: :class:`flask.Flask`
:param add_context_processor: Whether to add a context processor to
the app that adds a `current_us... | def init_app(self, app, add_context_processor=True):
| app.login_manager = self
app.after_request(self._update_remember_cookie)
self._login_disabled = app.config.get('LOGIN_DISABLED', False)
if add_context_processor:
app.context_processor(_user_context_processor)
|
'This is called when the user is required to log in. If you register a
callback with :meth:`LoginManager.unauthorized_handler`, then it will
be called. Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.login_message` to the user.
- If the app is using blueprints find the login view for
the curr... | def unauthorized(self):
| user_unauthorized.send(current_app._get_current_object())
if self.unauthorized_callback:
return self.unauthorized_callback()
if (request.blueprint in self.blueprint_login_views):
login_view = self.blueprint_login_views[request.blueprint]
else:
login_view = self.login_view
if ... |
'This sets the callback for reloading a user from the session. The
function you set should take a user ID (a ``unicode``) and return a
user object, or ``None`` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable'
| def user_loader(self, callback):
| self.user_callback = callback
return callback
|
'This function has been deprecated. Please use
:meth:`LoginManager.request_loader` instead.
This sets the callback for loading a user from a header value.
The function you set should take an authentication token and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving... | def header_loader(self, callback):
| self.header_callback = callback
return callback
|
'This sets the callback for loading a user from a Flask request.
The function you set should take Flask request object and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable'
| def request_loader(self, callback):
| self.request_callback = callback
return callback
|
'This will set the callback for the `unauthorized` method, which among
other things is used by `login_required`. It takes no arguments, and
should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: callable'
| def unauthorized_handler(self, callback):
| self.unauthorized_callback = callback
return callback
|
'This will set the callback for the `needs_refresh` method, which among
other things is used by `fresh_login_required`. It takes no arguments,
and should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: callable'
| def needs_refresh_handler(self, callback):
| self.needs_refresh_callback = callback
return callback
|
'This is called when the user is logged in, but they need to be
reauthenticated because their session is stale. If you register a
callback with `needs_refresh_handler`, then it will be called.
Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.needs_refresh_message` to the user.
- Redirect the u... | def needs_refresh(self):
| user_needs_refresh.send(current_app._get_current_object())
if self.needs_refresh_callback:
return self.needs_refresh_callback()
if (not self.refresh_view):
abort(401)
if (self.localize_callback is not None):
flash(self.localize_callback(self.needs_refresh_message), category=self.... |
'Loads user from session or remember_me cookie as applicable'
| def _load_user(self):
| user_accessed.send(current_app._get_current_object())
config = current_app.config
if config.get('SESSION_PROTECTION', self.session_protection):
deleted = self._session_protection()
if deleted:
return self.reload_user()
is_missing_user_id = ('user_id' not in session)
if is... |
'Shared assertions for simple tests.'
| def simple_assertions(self, updates, num_bricks=2, num_updates=4, mean_only=False):
| assert (len(updates) == num_updates)
assert all((is_shared_variable(u[0]) for u in updates))
means = set((u[0] for u in updates if has_roles(u[0], [BATCH_NORM_POPULATION_MEAN])))
stdevs = set((u[0] for u in updates if has_roles(u[0], [BATCH_NORM_POPULATION_STDEV])))
assert means.isdisjoint(stdevs)
... |
'Test that get_batch_normalization_updates works as expected.'
| def test_get_batch_normalization_updates(self):
| with batch_normalization(self.mlp):
y_bn = self.mlp.apply(self.x)
graph = ComputationGraph([y_bn])
updates = get_batch_normalization_updates(graph)
self.simple_assertions(updates)
|
'Test get_batch_normalization_updates with mean_only bricks.'
| def test_get_batch_normalization_updates_mean_only(self):
| mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9], mean_only=True)
with batch_normalization(mlp):
y_bn = mlp.apply(self.x)
graph = ComputationGraph([y_bn])
updates = get_batch_normalization_updates(graph)
self.simple_assertions(updates, num_updates=2, mean_only=True)
|
'Test updates extracton in graph with non-training apply.'
| def test_get_batch_normalization_updates_non_training_applications(self):
| y = self.mlp.apply(self.x)
with batch_normalization(self.mlp):
y_bn = self.mlp.apply(self.x)
graph = ComputationGraph([y_bn, y])
updates = get_batch_normalization_updates(graph)
self.simple_assertions(updates)
|
'Test for exception if there are no training-mode nodes.'
| def test_get_batch_normalization_updates_no_training(self):
| y = self.mlp.apply(self.x)
graph = ComputationGraph([y])
numpy.testing.assert_raises(ValueError, get_batch_normalization_updates, graph)
|
'Test that we get an error by default on multiple apply.'
| def test_get_batch_normalization_updates_duplicates_error(self):
| with batch_normalization(self.mlp):
y = self.mlp.apply(self.x)
y2 = self.mlp.apply(self.x)
graph = ComputationGraph([y, y2])
numpy.testing.assert_raises(ValueError, get_batch_normalization_updates, graph)
|
'Test get_batch_normalization_updates(allow_duplicates=True).'
| def test_get_batch_normalization_updates_allow_duplicates(self):
| with batch_normalization(self.mlp):
y = self.mlp.apply(self.x)
y2 = self.mlp.apply(self.x)
graph = ComputationGraph([y, y2])
updates = get_batch_normalization_updates(graph, allow_duplicates=True)
self.simple_assertions(updates, num_bricks=2, num_updates=8)
|
'Create main loop and run it.'
| def setUp(self):
| mlp = MLP(activations=[None], dims=[10, 10], weights_init=Constant(1.0), use_bias=False)
mlp.initialize()
self.W = mlp.linear_transformations[0].W
x = tensor.vector('data')
cost = mlp.apply(x).mean()
data = numpy.random.rand(10, 10).astype(theano.config.floatX)
self.data_stream = IterableDat... |
'Check that main loop have been saved properly.'
| def test_save_and_load(self):
| old_value = self.W.get_value()
self.W.set_value((old_value * 2))
new_main_loop = MainLoop(model=self.model, data_stream=self.data_stream, algorithm=self.algorithm, extensions=[Load('myweirdmodel.tar')])
new_main_loop.extensions[0].main_loop = new_main_loop
new_main_loop._run_extensions('before_train... |
'Check we can save the log and iteration state separately.'
| def test_load_log_and_iteration_state(self):
| skip_if_configuration_set('log_backend', 'sqlite', 'Bug with log.status["resumed_from"]')
new_main_loop = MainLoop(model=self.model, data_stream=self.data_stream, algorithm=self.algorithm, extensions=[Load('myweirdmodel.tar', True, True)])
new_main_loop.extensions[0].main_loop = new_main_loop
new_... |
'Check behaviour when loading nonexisting main loop.'
| def test_load_nonexisting(self):
| load = Load('mynonexisting.tar')
load.main_loop = self.main_loop
load.do()
|
'Check loading exception.'
| def test_loading_exception(self):
| with tempfile.NamedTemporaryFile(delete=False) as f:
f.write('a'.encode('utf-8'))
load = Load(f.name)
load.main_loop = self.main_loop
self.assertRaises(tarfile.ReadError, load.do)
|
'Check checkpoint exception.'
| def test_checkpoint_exception(self):
| checkpoint = Checkpoint(None, save_separately=['foo'])
checkpoint.main_loop = self.main_loop
self.assertRaises(AttributeError, checkpoint.do, None)
|
'Cleaning.'
| def tearDown(self):
| if os.path.exists('myweirdmodel.tar'):
os.remove('myweirdmodel.tar')
|
'Compute MSE.'
| @application
def cost(self, readouts, outputs):
| return ((readouts - outputs) ** 2).sum(axis=(readouts.ndim - 1))
|
'Attach an auxiliary variable to the graph.
Auxiliary variables are Theano variables that are not part of a
brick\'s output, but can be useful nonetheless e.g. as a regularizer
or to monitor during training progress.
Parameters
variable : :class:`~tensor.TensorVariable`
The variable you want to add.
roles : list of :cl... | def add_auxiliary_variable(self, variable, roles=None, name=None):
| add_annotation(variable, self)
if (name is not None):
variable.name = name
variable.tag.name = name
add_role(variable, AUXILIARY)
if (roles is not None):
for role in roles:
add_role(variable, role)
self.auxiliary_variables.append(variable)
|
'Inputs to the graph, excluding constants and shared variables.'
| @property
def inputs(self):
| return [var for var in self.variables if is_graph_input(var)]
|
'Variables of Scan ops.'
| @property
def scan_variables(self):
| return list(chain(*[g.variables for g in self._scan_graphs]))
|
'Collect variables, updates and auxiliary variables.
In addition collects all :class:`.Scan` ops and recurses in the
respective inner Theano graphs.'
| def _get_variables(self):
| updates = OrderedDict()
shared_outputs = [o for o in self.outputs if is_shared_variable(o)]
usual_outputs = [o for o in self.outputs if (not is_shared_variable(o))]
variables = shared_outputs
if usual_outputs:
inputs = graph.inputs(self.outputs)
sorted_apply_nodes = graph.io_toposort... |
'Return a mapping from an input name to the input.'
| def dict_of_inputs(self):
| return {var.name: var for var in self.inputs}
|
'Replace certain variables in the computation graph.
Parameters
replacements : dict
The mapping from variables to be replaced to the corresponding
substitutes.
Examples
>>> import theano
>>> from theano import tensor, function
>>> x = tensor.scalar(\'x\')
>>> y = x + 2
>>> z = y + 3
>>> a = z + 5
Let\'s suppose we have... | def replace(self, replacements):
| replacements = OrderedDict(replacements)
outputs_cur = self.outputs
replacement_keys_cur = []
replacement_vals_cur = []
remaining_replacements = replacements.copy()
for variable in self.variables:
if (variable in replacements):
if has_roles(variable, [AUXILIARY]):
... |
'Create Theano function from the graph contained.
Parameters
\*\*kwargs : dict
Keyword arguments to theano.function.
Useful for specifying compilation modes or profiling.'
| def get_theano_function(self, additional_updates=None, **kwargs):
| updates = self.updates
if additional_updates:
updates = dict_union(updates, OrderedDict(additional_updates))
return theano.function(self.inputs, self.outputs, updates=updates, **kwargs)
|
'Evaluate all role-carrying Theano variables on given data.
Parameters
data : dict of (data source, data) pairs
Data for input variables. The sources should match with the
names of the input variables.
Returns
Dictionary of (variable, variable value on given data) pairs.'
| def get_snapshot(self, data):
| role_variables = [var for var in self.variables if (hasattr(var.tag, 'roles') and (not is_shared_variable(var)))]
value_holders = [shared_like(var) for var in role_variables]
function = self.get_theano_function(equizip(value_holders, role_variables))
function(*(data[input_.name] for input_ in self.input... |
'Check if a variable depends on input variables.
Returns
bool
``True`` if the given variable depends on input variables,
``False`` otherwise.'
| def has_inputs(self, variable):
| if (variable not in self._has_inputs):
self._has_inputs[variable] = False
if is_graph_input(variable):
self._has_inputs[variable] = True
elif getattr(variable, 'owner', None):
for dependancy in variable.owner.inputs:
if self.has_inputs(dependancy):
... |
'Compile all Theano functions used.'
| def compile(self):
| self._compile_initial_state_and_context_computer()
self._compile_next_state_computer()
self._compile_logprobs_computer()
self.compiled = True
|
'Computes initial states and contexts from inputs.
Parameters
inputs : dict
Dictionary of input arrays.
Returns
A tuple containing a {name: :class:`numpy.ndarray`} dictionary of
contexts ordered like `self.context_names` and a
{name: :class:`numpy.ndarray`} dictionary of states ordered like
`self.state_names`.'
| def compute_initial_states_and_contexts(self, inputs):
| outputs = self.initial_state_and_context_computer(*[inputs[var] for var in self.inputs])
contexts = OrderedDict(((n, outputs.pop(n)) for n in self.context_names))
beam_size = outputs.pop('beam_size')
initial_states = outputs
return (contexts, initial_states, beam_size)
|
'Compute log probabilities of all possible outputs.
Parameters
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
Returns
A :class:`numpy.ndarray` of the (beam size, number of possible
outputs) shape.'
| def compute_logprobs(self, contexts, states):
| input_states = [states[name] for name in self.input_state_names]
return self.logprobs_computer(*(list(contexts.values()) + input_states))
|
'Computes next states.
Parameters
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
outputs : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of this step outputs.
Returns
A {name: numpy.array} dictionary of next states.'
| def compute_next_states(self, contexts, states, outputs):
| input_states = [states[name] for name in self.input_state_names]
next_values = self.next_state_computer(*((list(contexts.values()) + input_states) + [outputs]))
return OrderedDict(equizip(self.state_names, next_values))
|
'Find k smallest elements of a matrix.
Parameters
matrix : :class:`numpy.ndarray`
The matrix.
k : int
The number of smallest elements required.
only_first_row : bool, optional
Consider only elements of the first row.
Returns
Tuple of ((row numbers, column numbers), values).'
| @staticmethod
def _smallest(matrix, k, only_first_row=False):
| if only_first_row:
flatten = matrix[:1, :].flatten()
else:
flatten = matrix.flatten()
args = numpy.argpartition(flatten, k)[:k]
args = args[numpy.argsort(flatten[args])]
return (numpy.unravel_index(args, matrix.shape), flatten[args])
|
'Performs beam search.
If the beam search was not compiled, it also compiles it.
Parameters
input_values : dict
A {:class:`~theano.Variable`: :class:`~numpy.ndarray`}
dictionary of input values. The shapes should be
the same as if you ran sampling with batch size equal to
`beam_size`. Put it differently, the user is re... | def search(self, input_values, eol_symbol, max_length, ignore_first_eol=False, as_arrays=False):
| if (not self.compiled):
self.compile()
(contexts, states, beam_size) = self.compute_initial_states_and_contexts(input_values)
all_outputs = states['outputs'][None, :]
all_masks = numpy.ones_like(all_outputs, dtype=config.floatX)
all_costs = numpy.zeros_like(all_outputs, dtype=config.floatX)
... |
def add_config(self, key, type_, default=NOT_SET, env_var=None):
    """Register a configuration setting.

    Parameters
    ----------
    key : str
        Name of the setting; must be a valid Python attribute name,
        i.e. alphanumeric with underscores.
    type_ : callable
        Converter (such as ``float``, ``int`` or ``str``) applied to
        the raw configuration value to produce the correct type.
    default : object, optional
        Value used when the setting is not otherwise provided; the
        entry is recorded without a default when this is left as the
        NOT_SET sentinel.
    env_var : str, optional
        Environment variable from which the setting may be read.
    """
    entry = self.config[key] = {'type': type_}
    if env_var is not None:
        entry['env_var'] = env_var
    if default is not NOT_SET:
        entry['default'] = default
|
@abstractmethod
def initialize(self, **kwargs):
    """Initialize the training algorithm."""
|
@abstractmethod
def process_batch(self, batch):
    """Process one batch of training data.

    Parameters
    ----------
    batch : dict
        A dictionary of (source name, data) pairs.
    """
|
def add_updates(self, updates):
    """Add updates to the training process.

    The updates will be applied *before* the parameters are changed.

    Parameters
    ----------
    updates : list of tuples or :class:`~collections.OrderedDict`
        The updates to add.

    Raises
    ------
    ValueError
        If *updates* is neither a list nor an OrderedDict.
    """
    # Normalise an OrderedDict into the canonical list-of-pairs form.
    if isinstance(updates, OrderedDict):
        updates = list(updates.items())
    elif not isinstance(updates, list):
        raise ValueError
    self.updates.extend(updates)
|
def compute_step(self, parameter, previous_step):
    """Build a Theano expression for the step of a single parameter.

    Called by the default implementation of :meth:`compute_steps`,
    which relieves subclasses from writing the per-parameter loop.

    Parameters
    ----------
    parameter : :class:`~tensor.TensorSharedVariable`
        The parameter to compute the step for.
    previous_step : :class:`~tensor.TensorVariable`
        A quantity related to the parameter (e.g. its gradient).

    Raises
    ------
    NotImplementedError
        Always; subclasses must override this method.
    """
    raise NotImplementedError
|
'Build a Theano expression for steps for all parameters.
Override this method if you want to process the steps
with respect to all parameters as a whole, not parameter-wise.
Parameters
previous_steps : OrderedDict
An :class:`~OrderedDict` of
(:class:`~tensor.TensorSharedVariable`
:class:`~tensor.TensorVariable`) pairs.... | def compute_steps(self, previous_steps):
| parameter_wise = [self.compute_step(parameter, previous_steps[parameter]) for parameter in previous_steps]
(steps, updates) = equizip(*parameter_wise)
steps = OrderedDict(((parameter, step) for (parameter, step) in equizip(previous_steps.keys(), steps)))
updates = list(itertools.chain(*updates))
ret... |
'Constructs a path from its string representation.
.. todo::
More error checking.
Parameters
string : str
String representation of the path.'
| @staticmethod
def parse(string):
| elements = Path.separator_re.split(string)[1:]
separators = elements[::2]
parts = elements[1::2]
if (not (len(elements) == (2 * len(separators)) == (2 * len(parts)))):
raise ValueError
nodes = []
for (separator, part) in equizip(separators, parts):
if (separator == Path.separator... |
'Select a subset of current selection matching the path given.
.. warning::
Current implementation is very inefficient (theoretical
complexity is :math:`O(n^3)`, where :math:`n` is the number
of bricks in the hierarchy). It can be sped up easily.
Parameters
path : :class:`Path` or str
The path for the desired selection... | def select(self, path):
| if isinstance(path, six.string_types):
path = Path.parse(path)
current_bricks = [None]
for node in path.nodes:
next_bricks = []
if isinstance(node, Path.ParameterName):
return list(Selector(current_bricks).get_parameters(node).values())
if isinstance(node, Path.Br... |
'Returns parameters from selected bricks and their descendants.
Parameters
parameter_name : :class:`Path.ParameterName`, optional
If given, only parameters with a `name` attribute equal to
`parameter_name` are returned.
Returns
parameters : OrderedDict
A dictionary of (`path`, `parameter`) pairs, where `path` is
a stri... | def get_parameters(self, parameter_name=None):
| def recursion(brick):
result = [(Path([Path.BrickName(brick.name), Path.ParameterName(parameter.name)]), parameter) for parameter in brick.parameters if ((not parameter_name) or (parameter.name == parameter_name))]
result = OrderedDict(result)
for child in brick.children:
for (pa... |
def dispatch(self, callback_name, *args):
    """Run the callback with the given name.

    Exists so that descendants of :class:`TrainingExtension` can
    intercept callback invocations and act on them, e.g. block while a
    certain condition does not hold. This default implementation simply
    looks the callback up by name and invokes it.
    """
    handler = getattr(self, str(callback_name))
    handler(*args)
|
@callback
def on_resumption(self):
    """Invoked after training is resumed."""
|
@callback
def before_training(self):
    """Invoked before training is started."""
|
@callback
def before_epoch(self):
    """Invoked before starting an epoch."""
|
@callback
def before_batch(self, batch):
    """Invoked before a batch is processed.

    Parameters
    ----------
    batch : object
        The data batch to be processed.
    """
|
@callback
def after_batch(self, batch):
    """Invoked after a batch is processed.

    Parameters
    ----------
    batch : object
        The data batch just processed.
    """
|
@callback
def after_epoch(self):
    """Invoked after an epoch is finished."""
|
@callback
def after_training(self):
    """Invoked after training is finished."""
|
@callback
def on_interrupt(self):
    """Invoked when training is interrupted."""
|
'Set the conditions for which this extension should be run.
Parameters
See the :class:`SimpleExtension` docstring for a list of
possible parameters.'
| def set_conditions(self, **kwargs):
| self._conditions[:] = []
predicates = {'before_first_epoch': has_done_epochs}
conditions = {'before_first_epoch': 'before_epoch', 'after_epoch': 'after_epoch', 'after_batch': 'after_batch', 'every_n_batches': 'after_batch', 'every_n_epochs': 'after_epoch', 'after_n_batches': 'after_batch', 'after_n_epochs':... |
'Adds a condition under which a :meth:`do` is called.
Parameters
callbacks_names : list of str
The names of the callback in which the method.
predicate : function
A predicate function the main loop\'s log as the
single parameter and returning ``True`` when the method
should be called and ``False`` when should not. If `... | def add_condition(self, callbacks_names, predicate=None, arguments=None):
| if (not isinstance(callbacks_names, (list, tuple))):
raise ValueError('callbacks_names must be list or tuple.')
for _callback_name in callbacks_names:
if (not arguments):
arguments = []
if (not predicate):
self._conditions.append((_callback_name, al... |
@abstractmethod
def do(self, which_callback, *args):
    """Carry out the extension's job.

    Parameters
    ----------
    which_callback : str
        The name of the callback in whose context :meth:`do` is run.
    *args : tuple
        The arguments from the main loop concatenated with additional
        user-supplied arguments.

    Notes
    -----
    Subclasses *must* accept additional positional arguments in their
    call signature.
    """
|
def dispatch(self, callback_invoked, *from_main_loop):
    """Check conditions and invoke :meth:`do` for those that match.

    Extra user-supplied arguments registered with a condition are
    appended after the arguments coming from the main loop.

    .. todo::
        Add a check for the situation when several conditions are met
        at the same time and decide what to do.
    """
    for name, predicate, extra_args in self._conditions:
        if name != callback_invoked:
            continue
        if predicate(self.main_loop.log):
            self.do(callback_invoked, *(from_main_loop + tuple(extra_args)))
|
'Separates :meth:`do` arguments coming from different sources.
When a :meth:`do` method receives arguments from both the main
loop (e.g. a batch) and the user, it often has to separate them.
This method is the right tool to use.
Parameters
which_callback : str
The name of the callback.
args : iterable
The arguments.
Re... | @staticmethod
def parse_args(which_callback, args):
| args = tuple(args)
if ((which_callback == 'after_batch') or (which_callback == 'before_batch')):
return ((args[0],), args[1:])
return ((), args)
|
'Try to infer the number of iterations per epoch.'
| def get_iter_per_epoch(self):
| iter_scheme = self.main_loop.data_stream.iteration_scheme
if (hasattr(iter_scheme, 'indices') and (not hasattr(iter_scheme, 'batch_size'))):
return len(iter_scheme.indices)
elif (hasattr(iter_scheme, 'indices') and hasattr(iter_scheme, 'batch_size')):
return (len(iter_scheme.indices) // iter... |
'Create a new progress bar.
Calls `self.get_iter_per_epoch()`, selects an appropriate
set of widgets and creates a ProgressBar.'
| def create_bar(self):
| iter_per_epoch = self.get_iter_per_epoch()
epochs_done = self.main_loop.log.status['epochs_done']
if (iter_per_epoch is None):
widgets = ['Epoch {}, step '.format(epochs_done), progressbar.Counter(), ' ', progressbar.BouncingBar(), ' ', progressbar.Timer()]
iter_per_epoch = pr... |
'The record name for a variable name.'
| def _record_name(self, name):
| if (not isinstance(name, str)):
raise ValueError('record name must be a string')
return self.SEPARATOR.join([morpheme for morpheme in [self.prefix, name, self.suffix] if (morpheme is not None)])
|
def record_name(self, variable):
    """Return the record name for a variable (derived from its ``name``)."""
    return self._record_name(variable.name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.