| _id (string) | title (string) | partition (string) | text (string) | language (string) | meta_information (dict) |
|---|---|---|---|---|---|
q10500
|
compute_objective_value
|
train
|
def compute_objective_value(objective_func, parameters, data=None, cl_runtime_info=None):
"""Calculate and return the objective function value of the given model for the given parameters.
Args:
objective_func (mot.lib.cl_function.CLFunction): A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* objective_list);
parameters (ndarray): The parameters to use in the evaluation of the model, a (d, p) matrix
with d problems and p parameters.
data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
Returns:
ndarray: vector with the objective function value for each problem
"""
return objective_func.evaluate({'data': data, 'parameters': Array(parameters, 'mot_float_type', mode='r')},
parameters.shape[0], use_local_reduction=True, cl_runtime_info=cl_runtime_info)
|
python
|
{
"resource": ""
}
|
q10501
|
get_log
|
train
|
def get_log(username):
"""
Return a list of page views.
Each item is a dict with `datetime`, `method`, `path` and `code` keys.
"""
redis = get_redis_client()
log_key = 'log:{}'.format(username)
raw_log = redis.lrange(log_key, 0, -1)
log = []
for raw_item in raw_log:
item = json.loads(raw_item.decode())
item['datetime'] = convert_timestamp(item.pop('time'))
log.append(item)
return log
|
python
|
{
"resource": ""
}
|
q10502
|
get_token
|
train
|
def get_token(username, length=20, timeout=20):
"""
Obtain an access token that can be passed to a websocket client.
"""
redis = get_redis_client()
token = get_random_string(length)
token_key = 'token:{}'.format(token)
redis.set(token_key, username)
redis.expire(token_key, timeout)
return token
|
python
|
{
"resource": ""
}
|
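The two helpers above implement a short-lived access-token pattern on top of Redis. Below is a minimal sketch of the same idea using redis-py directly; the connection setup, key prefix, and token generator are illustrative assumptions, not part of the original module (which obtains its client via get_redis_client).

import secrets

import redis

r = redis.Redis()  # assumed local connection; the original uses get_redis_client()

def issue_token(username, length=20, timeout=20):
    # Store token -> username and let Redis expire the key after `timeout` seconds,
    # mirroring the set()/expire() calls in get_token above.
    token = secrets.token_urlsafe(length)
    r.set('token:{}'.format(token), username, ex=timeout)
    return token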
q10503
|
UserLogMiddleware.get_log
|
train
|
def get_log(self, request, response):
"""
Return a dict of data to log for a given request and response.
Override this method if you need to log a different set of values.
"""
return {
'method': request.method,
'path': request.get_full_path(),
'code': response.status_code,
'time': time.time(),
}
|
python
|
{
"resource": ""
}
|
q10504
|
MusicManager.download
|
train
|
def download(self, song):
"""Download a song from a Google Music library.
Parameters:
song (dict): A song dict.
Returns:
tuple: Song content as bytestring, suggested filename.
"""
song_id = song['id']
response = self._call(
mm_calls.Export,
self.uploader_id,
song_id)
audio = response.body
suggested_filename = unquote(
response.headers['Content-Disposition'].split("filename*=UTF-8''")[-1]
)
return (audio, suggested_filename)
|
python
|
{
"resource": ""
}
|
q10505
|
MusicManager.quota
|
train
|
def quota(self):
"""Get the uploaded track count and allowance.
Returns:
tuple: Number of uploaded tracks, number of tracks allowed.
"""
response = self._call(
mm_calls.ClientState,
self.uploader_id
)
client_state = response.body.clientstate_response
return (client_state.total_track_count, client_state.locker_track_limit)
|
python
|
{
"resource": ""
}
|
q10506
|
MusicManager.songs
|
train
|
def songs(self, *, uploaded=True, purchased=True):
"""Get a listing of Music Library songs.
Returns:
list: Song dicts.
"""
if not uploaded and not purchased:
raise ValueError("'uploaded' and 'purchased' cannot both be False.")
if purchased and uploaded:
song_list = []
for chunk in self.songs_iter(export_type=1):
song_list.extend(chunk)
elif purchased:
song_list = []
for chunk in self.songs_iter(export_type=2):
song_list.extend(chunk)
elif uploaded:
purchased_songs = []
for chunk in self.songs_iter(export_type=2):
purchased_songs.extend(chunk)
song_list = [
song
for chunk in self.songs_iter(export_type=1)
for song in chunk
if song not in purchased_songs
]
return song_list
|
python
|
{
"resource": ""
}
|
q10507
|
MusicManager.songs_iter
|
train
|
def songs_iter(self, *, continuation_token=None, export_type=1):
"""Get a paged iterator of Music Library songs.
Parameters:
continuation_token (str, Optional): The token of the page to return.
Default: Not sent to get first page.
export_type (int, Optional): The type of tracks to return. 1 for all tracks, 2 for promotional and purchased.
Default: ``1``
Yields:
list: Song dicts.
"""
def track_info_to_dict(track_info):
return dict(
(field.name, value)
for field, value in track_info.ListFields()
)
while True:
response = self._call(
mm_calls.ExportIDs,
self.uploader_id,
continuation_token=continuation_token,
export_type=export_type
)
items = [
track_info_to_dict(track_info)
for track_info in response.body.download_track_info
]
if items:
yield items
continuation_token = response.body.continuation_token
if not continuation_token:
break
|
python
|
{
"resource": ""
}
|
q10508
|
GoogleMusicClient.login
|
train
|
def login(self, username, *, token=None):
"""Log in to Google Music.
Parameters:
username (str, Optional): Your Google Music username.
Used for keeping stored OAuth tokens for multiple accounts separate.
token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.
Returns:
bool: ``True`` if successfully authenticated, ``False`` if not.
"""
self._username = username
self._oauth(username, token=token)
return self.is_authenticated
|
python
|
{
"resource": ""
}
|
q10509
|
GoogleMusicClient.switch_user
|
train
|
def switch_user(self, username='', *, token=None):
"""Log in to Google Music with a different user.
Parameters:
username (str, Optional): Your Google Music username.
Used for keeping stored OAuth tokens for multiple accounts separate.
token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.
Returns:
bool: ``True`` if successfully authenticated, ``False`` if not.
"""
if self.logout():
return self.login(username, token=token)
return False
|
python
|
{
"resource": ""
}
|
q10510
|
gen_tau
|
train
|
def gen_tau(S, K, delta):
"""The Robust part of the RSD, we precompute an
array for speed
"""
pivot = floor(K/S)
return [S/K * 1/d for d in range(1, pivot)] \
+ [S/K * log(S/delta)] \
+ [0 for d in range(pivot, K)]
|
python
|
{
"resource": ""
}
|
q10511
|
gen_mu
|
train
|
def gen_mu(K, delta, c):
"""The Robust Soliton Distribution on the degree of
transmitted blocks
"""
S = c * log(K/delta) * sqrt(K)
tau = gen_tau(S, K, delta)
rho = gen_rho(K)
normalizer = sum(rho) + sum(tau)
return [(rho[d] + tau[d])/normalizer for d in range(K)]
|
python
|
{
"resource": ""
}
|
q10512
|
gen_rsd_cdf
|
train
|
def gen_rsd_cdf(K, delta, c):
"""The CDF of the RSD on block degree, precomputed for
sampling speed"""
mu = gen_mu(K, delta, c)
return [sum(mu[:d+1]) for d in range(K)]
|
python
|
{
"resource": ""
}
|
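A quick worked check of the three distribution helpers above for K = 1000 with the commonly used defaults c = 0.1 and delta = 0.5. gen_rho is not shown in this section; the Ideal Soliton Distribution inlined below is only an assumption about that helper.

from math import floor, log, sqrt

K, delta, c = 1000, 0.5, 0.1
S = c * log(K / delta) * sqrt(K)
pivot = floor(K / S)

rho = [1 / K] + [1 / (d * (d - 1)) for d in range(2, K + 1)]  # assumed gen_rho
tau = ([S / K * 1 / d for d in range(1, pivot)]               # robust part, as in gen_tau
       + [S / K * log(S / delta)]
       + [0 for d in range(pivot, K)])
normalizer = sum(rho) + sum(tau)
mu = [(rho[d] + tau[d]) / normalizer for d in range(K)]       # as in gen_mu
cdf = [sum(mu[:d + 1]) for d in range(K)]                     # as in gen_rsd_cdf

assert abs(cdf[-1] - 1.0) < 1e-9  # normalized, so inverse-transform sampling works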
q10513
|
PRNG._get_next
|
train
|
def _get_next(self):
"""Executes the next iteration of the PRNG
evolution process, and returns the result
"""
self.state = PRNG_A * self.state % PRNG_M
return self.state
|
python
|
{
"resource": ""
}
|
q10514
|
PRNG._sample_d
|
train
|
def _sample_d(self):
"""Samples degree given the precomputed
distributions above and the linear PRNG output
"""
p = self._get_next() / PRNG_MAX_RAND
for ix, v in enumerate(self.cdf):
if v > p:
return ix + 1
return ix + 1
|
python
|
{
"resource": ""
}
|
q10515
|
PRNG.get_src_blocks
|
train
|
def get_src_blocks(self, seed=None):
"""Returns the indices of a set of `d` source blocks
sampled uniformly from indices i = 0, ..., K-1, where
`d` is sampled from the RSD described above.
"""
if seed:
self.state = seed
blockseed = self.state
d = self._sample_d()
have = 0
nums = set()
while have < d:
num = self._get_next() % self.K
if num not in nums:
nums.add(num)
have += 1
return blockseed, d, nums
|
python
|
{
"resource": ""
}
|
q10516
|
run
|
train
|
def run(fn, blocksize, seed, c, delta):
"""Run the encoder until the channel is broken, signalling that the
receiver has successfully reconstructed the file
"""
with open(fn, 'rb') as f:
for block in encode.encoder(f, blocksize, seed, c, delta):
sys.stdout.buffer.write(block)
|
python
|
{
"resource": ""
}
|
q10517
|
MobileClient.album
|
train
|
def album(self, album_id, *, include_description=True, include_songs=True):
"""Get information about an album.
Parameters:
album_id (str): An album ID. Album IDs start with a 'B'.
include_description (bool, Optional): Include description of the album in the returned dict.
include_songs (bool, Optional): Include songs from the album in the returned dict.
Default: ``True``.
Returns:
dict: Album information.
"""
response = self._call(
mc_calls.FetchAlbum,
album_id,
include_description=include_description,
include_tracks=include_songs
)
album_info = response.body
return album_info
|
python
|
{
"resource": ""
}
|
q10518
|
MobileClient.artist
|
train
|
def artist(
self, artist_id, *, include_albums=True, num_related_artists=5, num_top_tracks=5
):
"""Get information about an artist.
Parameters:
artist_id (str): An artist ID. Artist IDs start with an 'A'.
include_albums (bool, Optional): Include albums by the artist in returned dict.
Default: ``True``.
num_related_artists (int, Optional): Include up to given number of related artists in returned dict.
Default: ``5``.
num_top_tracks (int, Optional): Include up to given number of top tracks in returned dict.
Default: ``5``.
Returns:
dict: Artist information.
"""
response = self._call(
mc_calls.FetchArtist,
artist_id,
include_albums=include_albums,
num_related_artists=num_related_artists,
num_top_tracks=num_top_tracks
)
artist_info = response.body
return artist_info
|
python
|
{
"resource": ""
}
|
q10519
|
MobileClient.browse_podcasts
|
train
|
def browse_podcasts(self, podcast_genre_id='JZCpodcasttopchartall'):
"""Get the podcasts for a genre from the Podcasts browse tab.
Parameters:
podcast_genre_id (str, Optional): A podcast genre ID as found
in :meth:`browse_podcasts_genres`.
Default: ``'JZCpodcasttopchartall'``.
Returns:
list: Podcast dicts.
"""
response = self._call(
mc_calls.PodcastBrowse,
podcast_genre_id=podcast_genre_id
)
podcast_series_list = response.body.get('series', [])
return podcast_series_list
|
python
|
{
"resource": ""
}
|
q10520
|
MobileClient.browse_podcasts_genres
|
train
|
def browse_podcasts_genres(self):
"""Get the genres from the Podcasts browse tab dropdown.
Returns:
list: Genre groups that contain sub groups.
"""
response = self._call(
mc_calls.PodcastBrowseHierarchy
)
genres = response.body.get('groups', [])
return genres
|
python
|
{
"resource": ""
}
|
q10521
|
MobileClient.browse_stations
|
train
|
def browse_stations(self, station_category_id):
"""Get the stations for a category from Browse Stations.
Parameters:
station_category_id (str): A station category ID as
found with :meth:`browse_stations_categories`.
Returns:
list: Station dicts.
"""
response = self._call(
mc_calls.BrowseStations,
station_category_id
)
stations = response.body.get('stations', [])
return stations
|
python
|
{
"resource": ""
}
|
q10522
|
MobileClient.browse_stations_categories
|
train
|
def browse_stations_categories(self):
"""Get the categories from Browse Stations.
Returns:
list: Station categories that can contain subcategories.
"""
response = self._call(
mc_calls.BrowseStationCategories
)
station_categories = response.body.get('root', {}).get('subcategories', [])
return station_categories
|
python
|
{
"resource": ""
}
|
q10523
|
MobileClient.config
|
train
|
def config(self):
"""Get a listing of mobile client configuration settings."""
response = self._call(
mc_calls.Config
)
config_list = response.body.get('data', {}).get('entries', [])
return config_list
|
python
|
{
"resource": ""
}
|
q10524
|
MobileClient.devices
|
train
|
def devices(self):
"""Get a listing of devices registered to the Google Music account."""
response = self._call(
mc_calls.DeviceManagementInfo
)
registered_devices = response.body.get('data', {}).get('items', [])
return registered_devices
|
python
|
{
"resource": ""
}
|
q10525
|
MobileClient.explore_genres
|
train
|
def explore_genres(self, parent_genre_id=None):
"""Get a listing of song genres.
Parameters:
parent_genre_id (str, Optional): A genre ID.
If given, a listing of this genre's sub-genres is returned.
Returns:
list: Genre dicts.
"""
response = self._call(
mc_calls.ExploreGenres,
parent_genre_id
)
genre_list = response.body.get('genres', [])
return genre_list
|
python
|
{
"resource": ""
}
|
q10526
|
MobileClient.explore_tabs
|
train
|
def explore_tabs(self, *, num_items=100, genre_id=None):
"""Get a listing of explore tabs.
Parameters:
num_items (int, Optional): Number of items per tab to return.
Default: ``100``
genre_id (str, Optional): Genre ID from :meth:`explore_genres` to explore.
Default: ``None``.
Returns:
dict: Explore tabs content.
"""
response = self._call(
mc_calls.ExploreTabs,
num_items=num_items,
genre_id=genre_id
)
tab_list = response.body.get('tabs', [])
explore_tabs = {}
for tab in tab_list:
explore_tabs[tab['tab_type'].lower()] = tab
return explore_tabs
|
python
|
{
"resource": ""
}
|
q10527
|
MobileClient.listen_now_dismissed_items
|
train
|
def listen_now_dismissed_items(self):
"""Get a listing of items dismissed from Listen Now tab."""
response = self._call(
mc_calls.ListenNowGetDismissedItems
)
dismissed_items = response.body.get('items', [])
return dismissed_items
|
python
|
{
"resource": ""
}
|
q10528
|
MobileClient.listen_now_items
|
train
|
def listen_now_items(self):
"""Get a listing of Listen Now items.
Note:
This does not include situations;
use the :meth:`situations` method instead.
Returns:
dict: With ``albums`` and ``stations`` keys of listen now items.
"""
response = self._call(
mc_calls.ListenNowGetListenNowItems
)
listen_now_item_list = response.body.get('listennow_items', [])
listen_now_items = defaultdict(list)
for item in listen_now_item_list:
type_ = f"{ListenNowItemType(item['type']).name}s"
listen_now_items[type_].append(item)
return dict(listen_now_items)
|
python
|
{
"resource": ""
}
|
q10529
|
MobileClient.playlist_song
|
train
|
def playlist_song(self, playlist_song_id):
"""Get information about a playlist song.
Note:
This returns the playlist entry information only.
For full song metadata, use :meth:`song` with
the ``'trackId'`` field.
Parameters:
playlist_song_id (str): A playlist song ID.
Returns:
dict: Playlist song information.
"""
playlist_song_info = next(
(
playlist_song
for playlist in self.playlists(include_songs=True)
for playlist_song in playlist['tracks']
if playlist_song['id'] == playlist_song_id
),
None
)
return playlist_song_info
|
python
|
{
"resource": ""
}
|
q10530
|
MobileClient.playlist_song_add
|
train
|
def playlist_song_add(
self,
song,
playlist,
*,
after=None,
before=None,
index=None,
position=None
):
"""Add a song to a playlist.
Note:
* Provide no optional arguments to add to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to add to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
song (dict): A song dict.
playlist (dict): A playlist dict.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``song``.
position (int, Optional): The one-based position to insert ``song``.
Returns:
dict: Playlist dict including songs.
"""
prev, next_ = get_ple_prev_next(
self.playlist_songs(playlist),
after=after,
before=before,
index=index,
position=position
)
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
mutation = mc_calls.PlaylistEntriesBatch.create(
song_id, playlist['id'],
preceding_entry_id=prev.get('id'),
following_entry_id=next_.get('id')
)
self._call(mc_calls.PlaylistEntriesBatch, mutation)
return self.playlist(playlist['id'], include_songs=True)
|
python
|
{
"resource": ""
}
|
q10531
|
MobileClient.playlist_songs_add
|
train
|
def playlist_songs_add(
self,
songs,
playlist,
*,
after=None,
before=None,
index=None,
position=None
):
"""Add songs to a playlist.
Note:
* Provide no optional arguments to add to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to add to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
songs (list): A list of song dicts.
playlist (dict): A playlist dict.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``songs``.
position (int, Optional): The one-based position to insert ``songs``.
Returns:
dict: Playlist dict including songs.
"""
playlist_songs = self.playlist_songs(playlist)
prev, next_ = get_ple_prev_next(
playlist_songs,
after=after,
before=before,
index=index,
position=position
)
songs_len = len(songs)
for i, song in enumerate(songs):
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
mutation = mc_calls.PlaylistEntriesBatch.create(
song_id, playlist['id'],
preceding_entry_id=prev.get('id'),
following_entry_id=next_.get('id')
)
response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
result = response.body['mutate_response'][0]
# TODO: Proper exception on failure.
if result['response_code'] != 'OK':
break
if i < songs_len - 1:
while True:
prev = self.playlist_song(result['id'])
if prev:
break
return self.playlist(playlist['id'], include_songs=True)
|
python
|
{
"resource": ""
}
|
q10532
|
MobileClient.playlist_song_delete
|
train
|
def playlist_song_delete(self, playlist_song):
"""Delete song from playlist.
Parameters:
playlist_song (dict): A playlist song dict.
Returns:
dict: Playlist dict including songs.
"""
self.playlist_songs_delete([playlist_song])
return self.playlist(playlist_song['playlistId'], include_songs=True)
|
python
|
{
"resource": ""
}
|
q10533
|
MobileClient.playlist_songs_delete
|
train
|
def playlist_songs_delete(self, playlist_songs):
"""Delete songs from playlist.
Parameters:
playlist_songs (list): A list of playlist song dicts.
Returns:
dict: Playlist dict including songs.
"""
if not more_itertools.all_equal(
playlist_song['playlistId']
for playlist_song in playlist_songs
):
raise ValueError(
"All 'playlist_songs' must be from the same playlist."
)
mutations = [mc_calls.PlaylistEntriesBatch.delete(playlist_song['id']) for playlist_song in playlist_songs]
self._call(mc_calls.PlaylistEntriesBatch, mutations)
return self.playlist(playlist_songs[0]['playlistId'], include_songs=True)
|
python
|
{
"resource": ""
}
|
q10534
|
MobileClient.playlist_song_move
|
train
|
def playlist_song_move(
self,
playlist_song,
*,
after=None,
before=None,
index=None,
position=None
):
"""Move a song in a playlist.
Note:
* Provide no optional arguments to move to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to move to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
playlist_song (dict): A playlist song dict.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``song``.
position (int, Optional): The one-based position to insert ``song``.
Returns:
dict: Playlist dict including songs.
"""
playlist_songs = self.playlist(
playlist_song['playlistId'],
include_songs=True
)['tracks']
prev, next_ = get_ple_prev_next(
playlist_songs,
after=after,
before=before,
index=index,
position=position
)
mutation = mc_calls.PlaylistEntriesBatch.update(
playlist_song,
preceding_entry_id=prev.get('id'),
following_entry_id=next_.get('id')
)
self._call(mc_calls.PlaylistEntriesBatch, mutation)
return self.playlist(playlist_song['playlistId'], include_songs=True)
|
python
|
{
"resource": ""
}
|
q10535
|
MobileClient.playlist_songs_move
|
train
|
def playlist_songs_move(
self,
playlist_songs,
*,
after=None,
before=None,
index=None,
position=None
):
"""Move songs in a playlist.
Note:
* Provide no optional arguments to move to end.
* Provide playlist song dicts for ``after`` and/or ``before``.
* Provide a zero-based ``index``.
* Provide a one-based ``position``.
Songs are inserted *at* given index or position.
It's also possible to move to the end by using
``len(songs)`` for index or ``len(songs) + 1`` for position.
Parameters:
playlist_songs (list): A list of playlist song dicts.
after (dict, Optional): A playlist song dict ``songs`` will follow.
before (dict, Optional): A playlist song dict ``songs`` will precede.
index (int, Optional): The zero-based index position to insert ``songs``.
position (int, Optional): The one-based position to insert ``songs``.
Returns:
dict: Playlist dict including songs.
"""
if not more_itertools.all_equal(
playlist_song['playlistId']
for playlist_song in playlist_songs
):
raise ValueError(
"All 'playlist_songs' must be from the same playlist."
)
playlist = self.playlist(
playlist_songs[0]['playlistId'],
include_songs=True
)
prev, next_ = get_ple_prev_next(
playlist['tracks'],
after=after,
before=before,
index=index,
position=position
)
playlist_songs_len = len(playlist_songs)
for i, playlist_song in enumerate(playlist_songs):
mutation = mc_calls.PlaylistEntriesBatch.update(
playlist_song,
preceding_entry_id=prev.get('id'),
following_entry_id=next_.get('id')
)
response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
result = response.body['mutate_response'][0]
# TODO: Proper exception on failure.
if result['response_code'] != 'OK':
break
if i < playlist_songs_len - 1:
while True:
prev = self.playlist_song(result['id'])
if prev:
break
return self.playlist(playlist_songs[0]['playlistId'], include_songs=True)
|
python
|
{
"resource": ""
}
|
q10536
|
MobileClient.playlist_songs
|
train
|
def playlist_songs(self, playlist):
"""Get a listing of songs from a playlist.
Parameters:
playlist (dict): A playlist dict.
Returns:
list: Playlist song dicts.
"""
playlist_type = playlist.get('type')
playlist_song_list = []
if playlist_type in ('USER_GENERATED', None):
start_token = None
playlist_song_list = []
while True:
response = self._call(
mc_calls.PlaylistEntryFeed,
max_results=49995,
start_token=start_token
)
items = response.body.get('data', {}).get('items', [])
if items:
playlist_song_list.extend(items)
start_token = response.body.get('nextPageToken')
if start_token is None:
break
elif playlist_type == 'SHARED':
playlist_share_token = playlist['shareToken']
start_token = None
playlist_song_list = []
while True:
response = self._call(
mc_calls.PlaylistEntriesShared,
playlist_share_token,
max_results=49995,
start_token=start_token
)
entry = response.body['entries'][0]
items = entry.get('playlistEntry', [])
if items:
playlist_song_list.extend(items)
start_token = entry.get('nextPageToken')
if start_token is None:
break
playlist_song_list.sort(key=itemgetter('absolutePosition'))
return playlist_song_list
|
python
|
{
"resource": ""
}
|
q10537
|
MobileClient.playlist
|
train
|
def playlist(self, playlist_id, *, include_songs=False):
"""Get information about a playlist.
Parameters:
playlist_id (str): A playlist ID.
include_songs (bool, Optional): Include songs from
the playlist in the returned dict.
Default: ``False``
Returns:
dict: Playlist information.
"""
playlist_info = next(
(
playlist
for playlist in self.playlists(include_songs=include_songs)
if playlist['id'] == playlist_id
),
None
)
return playlist_info
|
python
|
{
"resource": ""
}
|
q10538
|
MobileClient.playlist_create
|
train
|
def playlist_create(
self,
name,
description='',
*,
make_public=False,
songs=None
):
"""Create a playlist.
Parameters:
name (str): Name to give the playlist.
description (str): Description to give the playlist.
make_public (bool, Optional): If ``True`` and account has a subscription,
make playlist public.
Default: ``False``
songs (list, Optional): A list of song dicts to add to the playlist.
Returns:
dict: Playlist information.
"""
share_state = 'PUBLIC' if make_public else 'PRIVATE'
playlist = self._call(
mc_calls.PlaylistsCreate,
name,
description,
share_state
).body
if songs:
playlist = self.playlist_songs_add(songs, playlist)
return playlist
|
python
|
{
"resource": ""
}
|
q10539
|
MobileClient.playlist_subscribe
|
train
|
def playlist_subscribe(self, playlist):
"""Subscribe to a public playlist.
Parameters:
playlist (dict): A public playlist dict.
Returns:
dict: Playlist information.
"""
mutation = mc_calls.PlaylistBatch.create(
playlist['name'],
playlist['description'],
'SHARED',
owner_name=playlist.get('ownerName', ''),
share_token=playlist['shareToken']
)
response_body = self._call(
mc_calls.PlaylistBatch,
mutation
).body
playlist_id = response_body['mutate_response'][0]['id']
return self.playlist(playlist_id)
|
python
|
{
"resource": ""
}
|
q10540
|
MobileClient.playlists
|
train
|
def playlists(self, *, include_songs=False):
"""Get a listing of library playlists.
Parameters:
include_songs (bool, Optional): Include songs in the returned playlist dicts.
Default: ``False``.
Returns:
list: A list of playlist dicts.
"""
playlist_list = []
for chunk in self.playlists_iter(page_size=49995):
for playlist in chunk:
if include_songs:
playlist['tracks'] = self.playlist_songs(playlist)
playlist_list.append(playlist)
return playlist_list
|
python
|
{
"resource": ""
}
|
q10541
|
MobileClient.playlists_iter
|
train
|
def playlists_iter(self, *, start_token=None, page_size=250):
"""Get a paged iterator of library playlists.
Parameters:
start_token (str): The token of the page to return.
Default: Not sent to get first page.
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Playlist dicts.
"""
while True:
response = self._call(
mc_calls.PlaylistFeed,
max_results=page_size,
start_token=start_token
)
items = response.body.get('data', {}).get('items', [])
if items:
yield items
start_token = response.body.get('nextPageToken')
if start_token is None:
break
|
python
|
{
"resource": ""
}
|
q10542
|
MobileClient.podcast
|
train
|
def podcast(self, podcast_series_id, *, max_episodes=50):
"""Get information about a podcast series.
Parameters:
podcast_series_id (str): A podcast series ID.
max_episodes (int, Optional): Include up to given number of episodes in returned dict.
Default: ``50``
Returns:
dict: Podcast series information.
"""
podcast_info = self._call(
mc_calls.PodcastFetchSeries,
podcast_series_id,
max_episodes=max_episodes
).body
return podcast_info
|
python
|
{
"resource": ""
}
|
q10543
|
MobileClient.podcasts
|
train
|
def podcasts(self, *, device_id=None):
"""Get a listing of subsribed podcast series.
Paramaters:
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
Returns:
list: Podcast series dicts.
"""
if device_id is None:
device_id = self.device_id
podcast_list = []
for chunk in self.podcasts_iter(device_id=device_id, page_size=49995):
podcast_list.extend(chunk)
return podcast_list
|
python
|
{
"resource": ""
}
|
q10544
|
MobileClient.podcasts_iter
|
train
|
def podcasts_iter(self, *, device_id=None, page_size=250):
"""Get a paged iterator of subscribed podcast series.
Parameters:
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Podcast series dicts.
"""
if device_id is None:
device_id = self.device_id
start_token = None
prev_items = None
while True:
response = self._call(
mc_calls.PodcastSeries,
device_id,
max_results=page_size,
start_token=start_token
)
items = response.body.get('data', {}).get('items', [])
# Google does some weird shit.
if items != prev_items:
subscribed_podcasts = [
item
for item in items
if item.get('userPreferences', {}).get('subscribed')
]
yield subscribed_podcasts
prev_items = items
else:
break
start_token = response.body.get('nextPageToken')
if start_token is None:
break
|
python
|
{
"resource": ""
}
|
q10545
|
MobileClient.podcast_episode
|
train
|
def podcast_episode(self, podcast_episode_id):
"""Get information about a podcast_episode.
Parameters:
podcast_episode_id (str): A podcast episode ID.
Returns:
dict: Podcast episode information.
"""
response = self._call(
mc_calls.PodcastFetchEpisode,
podcast_episode_id
)
podcast_episode_info = [
podcast_episode
for podcast_episode in response.body
if not podcast_episode['deleted']
]
return podcast_episode_info
|
python
|
{
"resource": ""
}
|
q10546
|
MobileClient.podcast_episodes
|
train
|
def podcast_episodes(self, *, device_id=None):
"""Get a listing of podcast episodes for all subscribed podcasts.
Parameters:
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
Returns:
list: Podcast episode dicts.
"""
if device_id is None:
device_id = self.device_id
podcast_episode_list = []
for chunk in self.podcast_episodes_iter(
device_id=device_id,
page_size=49995
):
podcast_episode_list.extend(chunk)
return podcast_episode_list
|
python
|
{
"resource": ""
}
|
q10547
|
MobileClient.podcast_episodes_iter
|
train
|
def podcast_episodes_iter(self, *, device_id=None, page_size=250):
"""Get a paged iterator of podcast episode for all subscribed podcasts.
Parameters:
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Podcast episode dicts.
"""
if device_id is None:
device_id = self.device_id
start_token = None
prev_items = None
while True:
response = self._call(
mc_calls.PodcastEpisode,
device_id,
max_results=page_size,
start_token=start_token
)
items = response.body.get('data', {}).get('items', [])
# Google does some weird shit.
if items != prev_items:
yield items
prev_items = items
else:
break
start_token = response.body.get('nextPageToken')
if start_token is None:
break
|
python
|
{
"resource": ""
}
|
q10548
|
MobileClient.search
|
train
|
def search(self, query, *, max_results=100, **kwargs):
"""Search Google Music and library for content.
Parameters:
query (str): Search text.
max_results (int, Optional): Maximum number of results per type per
location to retrieve, i.e. up to 100 Google and 100 library results
for a total of 200 with the default value.
Google only accepts values up to 100.
Default: ``100``
kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``,
``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``,
``videos`` set to ``True`` will include that result type in the
returned dict.
Setting none of them will include all result types in the returned dict.
Returns:
dict: A dict of results separated into keys: ``'albums'``, ``'artists'``,
``'genres'``, ``'playlists'``, ``'podcasts'``, ``'situations'``,
``'songs'``, ``'stations'``, ``'videos'``.
Note:
Free account search is restricted, so it may not contain hits for all result types.
"""
results = defaultdict(list)
for type_, results_ in self.search_library(
query,
max_results=max_results,
**kwargs
).items():
results[type_].extend(results_)
for type_, results_ in self.search_google(
query,
max_results=max_results,
**kwargs
).items():
results[type_].extend(results_)
return dict(results)
|
python
|
{
"resource": ""
}
|
q10549
|
MobileClient.search_suggestion
|
train
|
def search_suggestion(self, query):
"""Get search query suggestions for query.
Parameters:
query (str): Search text.
Returns:
list: Suggested query strings.
"""
response = self._call(
mc_calls.QuerySuggestion,
query
)
suggested_queries = response.body.get('suggested_queries', [])
return [
suggested_query['suggestion_string']
for suggested_query in suggested_queries
]
|
python
|
{
"resource": ""
}
|
q10550
|
MobileClient.situations
|
train
|
def situations(self, *, tz_offset=None):
"""Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds.
"""
response = self._call(
mc_calls.ListenNowSituations,
tz_offset
)
situation_list = response.body.get('situations', [])
return situation_list
|
python
|
{
"resource": ""
}
|
q10551
|
MobileClient.song
|
train
|
def song(self, song_id):
"""Get information about a song.
Parameters:
song_id (str): A song ID.
Returns:
dict: Song information.
"""
if song_id.startswith('T'):
song_info = self._call(
mc_calls.FetchTrack,
song_id
).body
else:
song_info = next(
(
song
for song in self.songs()
if song['id'] == song_id
),
None
)
return song_info
|
python
|
{
"resource": ""
}
|
q10552
|
MobileClient.songs_add
|
train
|
def songs_add(self, songs):
"""Add store songs to your library.
Parameters:
songs (list): A list of store song dicts.
Returns:
list: Songs' library IDs.
"""
mutations = [mc_calls.TrackBatch.add(song) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
return success_ids
|
python
|
{
"resource": ""
}
|
q10553
|
MobileClient.songs_delete
|
train
|
def songs_delete(self, songs):
"""Delete songs from library.
Parameters:
songs (list): A list of song dicts.
Returns:
list: Successfully deleted song IDs.
"""
mutations = [mc_calls.TrackBatch.delete(song['id']) for song in songs]
response = self._call(
mc_calls.TrackBatch,
mutations
)
success_ids = [
res['id']
for res in response.body['mutate_response']
if res['response_code'] == 'OK'
]
# TODO: Report failures.
# failure_ids = [
# res['id']
# for res in response.body['mutate_response']
# if res['response_code'] != 'OK'
# ]
return success_ids
|
python
|
{
"resource": ""
}
|
q10554
|
MobileClient.song_play
|
train
|
def song_play(self, song):
"""Add play to song play count.
Parameters:
song (dict): A song dict.
Returns:
bool: ``True`` if successful, ``False`` if not.
"""
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
song_duration = song['durationMillis']
event = mc_calls.ActivityRecordRealtime.play(song_id, song_duration)
response = self._call(
mc_calls.ActivityRecordRealtime,
event
)
return True if response.body['eventResults'][0]['code'] == 'OK' else False
|
python
|
{
"resource": ""
}
|
q10555
|
MobileClient.song_rate
|
train
|
def song_rate(self, song, rating):
"""Rate song.
Parameters:
song (dict): A song dict.
rating (int): 0 (not rated), 1 (thumbs down), or 5 (thumbs up).
Returns:
bool: ``True`` if successful, ``False`` if not.
"""
if 'storeId' in song:
song_id = song['storeId']
elif 'trackId' in song:
song_id = song['trackId']
else:
song_id = song['id']
event = mc_calls.ActivityRecordRealtime.rate(song_id, rating)
response = self._call(
mc_calls.ActivityRecordRealtime,
event
)
return True if response.body['eventResults'][0]['code'] == 'OK' else False
|
python
|
{
"resource": ""
}
|
q10556
|
MobileClient.songs
|
train
|
def songs(self):
"""Get a listing of library songs.
Returns:
list: Song dicts.
"""
song_list = []
for chunk in self.songs_iter(page_size=49995):
song_list.extend(chunk)
return song_list
|
python
|
{
"resource": ""
}
|
q10557
|
MobileClient.songs_iter
|
train
|
def songs_iter(self, *, page_size=250):
"""Get a paged iterator of library songs.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Song dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.TrackFeed,
max_results=page_size,
start_token=start_token
)
items = response.body.get('data', {}).get('items', [])
if items:
yield items
start_token = response.body.get('nextPageToken')
if start_token is None:
break
|
python
|
{
"resource": ""
}
|
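A brief usage sketch of the paged iterator above; the client construction follows the mobileclient example later in this set, and the username is a placeholder.

import google_music

mc = google_music.mobileclient('username')
song_count = 0
for chunk in mc.songs_iter(page_size=250):  # each chunk is one page of song dicts
    song_count += len(chunk)
print(song_count)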
q10558
|
MobileClient.station
|
train
|
def station(self, station_id, *, num_songs=25, recently_played=None):
"""Get information about a station.
Parameters:
station_id (str): A station ID. Use 'IFL' for I'm Feeling Lucky.
num_songs (int, Optional): The maximum number of songs to return from the station.
Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
dict: Station information.
"""
station_info = {
'station_id': station_id,
'num_entries': num_songs,
'library_content_only': False
}
if recently_played is not None:
station_info['recently_played'] = recently_played
response = self._call(
mc_calls.RadioStationFeed,
station_infos=[station_info]
)
station_feed = response.body.get('data', {}).get('stations', [])
try:
station = station_feed[0]
except IndexError:
station = {}
return station
|
python
|
{
"resource": ""
}
|
q10559
|
MobileClient.station_feed
|
train
|
def station_feed(self, *, num_songs=25, num_stations=4):
"""Generate stations.
Note:
A Google Music subscription is required.
Parameters:
num_songs (int, Optional): The total number of songs to return. Default: ``25``
num_stations (int, Optional): The number of stations to return when no station_infos is provided.
Default: ``4``
Returns:
list: Station information dicts.
"""
response = self._call(
mc_calls.RadioStationFeed,
num_entries=num_songs,
num_stations=num_stations
)
station_feed = response.body.get('data', {}).get('stations', [])
return station_feed
|
python
|
{
"resource": ""
}
|
q10560
|
MobileClient.station_songs
|
train
|
def station_songs(self, station, *, num_songs=25, recently_played=None):
"""Get a listing of songs from a station.
Parameters:
station (dict): A station dict.
num_songs (int, Optional): The maximum number of songs to return from the station. Default: ``25``
recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type': ''}
where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song.
Returns:
list: Station song dicts.
"""
station_id = station['id']
station = self.station(
station_id,
num_songs=num_songs,
recently_played=recently_played
)
return station.get('tracks', [])
|
python
|
{
"resource": ""
}
|
q10561
|
MobileClient.stations
|
train
|
def stations(self, *, generated=True, library=True):
"""Get a listing of library stations.
The listing can contain stations added to the library and generated from the library.
Parameters:
generated (bool, Optional): Include generated stations.
Default: True
library (bool, Optional): Include library stations.
Default: True
Returns:
list: Station information dicts.
"""
station_list = []
for chunk in self.stations_iter(page_size=49995):
for station in chunk:
if (
(generated and not station.get('inLibrary'))
or (library and station.get('inLibrary'))
):
station_list.append(station)
return station_list
|
python
|
{
"resource": ""
}
|
q10562
|
MobileClient.stations_iter
|
train
|
def stations_iter(self, *, page_size=250):
"""Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.RadioStation,
max_results=page_size,
start_token=start_token
)
yield response.body.get('data', {}).get('items', [])
start_token = response.body.get('nextPageToken')
if start_token is None:
break
|
python
|
{
"resource": ""
}
|
q10563
|
MobileClient.stream
|
train
|
def stream(self, item, *, device_id=None, quality='hi', session_token=None):
"""Get MP3 stream of a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
bytes: An MP3 file.
"""
if device_id is None:
device_id = self.device_id
stream_url = self.stream_url(
item,
device_id=device_id,
quality=quality,
session_token=session_token
)
response = self.session.get(stream_url)
audio = response.content
return audio
|
python
|
{
"resource": ""
}
|
q10564
|
MobileClient.stream_url
|
train
|
def stream_url(self, item, *, device_id=None, quality='hi', session_token=None):
"""Get a URL to stream a podcast episode, library song, station_song, or store song.
Note:
Streaming requires a ``device_id`` from a valid, linked mobile device.
Parameters:
item (dict): A podcast episode, library song, station song, or store song dict.
A Google Music subscription is required to stream store songs.
device_id (str, Optional): A mobile device ID.
Default: Use ``device_id`` of the :class:`MobileClient` instance.
quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps), ``'med'`` (160Kbps), or ``'low'`` (128Kbps).
Default: ``'hi'``.
session_token (str): Session token from a station dict required for unsubscribed users to stream a station song.
station['sessionToken'] as returned by :meth:`station` only exists for free accounts.
Returns:
str: A URL to an MP3 file.
"""
if device_id is None:
device_id = self.device_id
if 'episodeId' in item: # Podcast episode.
response = self._call(
mc_calls.PodcastEpisodeStreamURL,
item['episodeId'],
quality=quality,
device_id=device_id
)
elif 'wentryid' in item: # Free account station song.
response = self._call(
mc_calls.RadioStationTrackStreamURL,
item['storeId'],
item['wentryid'],
session_token,
quality=quality,
device_id=device_id
)
elif 'trackId' in item: # Playlist song.
response = self._call(
mc_calls.TrackStreamURL,
item['trackId'],
quality=quality,
device_id=device_id
)
elif 'storeId' in item and self.is_subscribed: # Store song.
response = self._call(
mc_calls.TrackStreamURL,
item['storeId'],
quality=quality,
device_id=device_id
)
elif 'id' in item: # Library song.
response = self._call(
mc_calls.TrackStreamURL,
item['id'],
quality=quality,
device_id=device_id
)
else:
# TODO: Create an exception for not being subscribed or use a better builtin exception for this case.
if 'storeId' in item and not self.is_subscribed:
msg = "Can't stream a store song without a subscription."
else:
msg = "Item does not contain an ID field."
raise ValueError(msg)
try:
stream_url = response.headers['Location']
except KeyError:
stream_url = response.body['url']
return stream_url
|
python
|
{
"resource": ""
}
|
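A sketch tying stream and stream_url to the rest of the client: fetch the MP3 bytes for one library song and write them to disk. The client setup and output filename are illustrative.

import google_music

mc = google_music.mobileclient('username')
songs = mc.songs()
if songs:
    audio = mc.stream(songs[0], quality='hi')  # bytes of an MP3 file
    with open('song.mp3', 'wb') as f:
        f.write(audio)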
q10565
|
MobileClient.thumbs_up_songs
|
train
|
def thumbs_up_songs(self, *, library=True, store=True):
"""Get a listing of 'Thumbs Up' store songs.
Parameters:
library (bool, Optional): Include 'Thumbs Up' songs from library.
Default: True
store (bool, Optional): Include 'Thumbs Up' songs from the store.
Default: True
Returns:
list: Dicts of 'Thumbs Up' songs.
"""
thumbs_up_songs = []
if library is True:
thumbs_up_songs.extend(
song
for song in self.songs()
if song.get('rating', '0') == '5'
)
if store is True:
response = self._call(mc_calls.EphemeralTop)
thumbs_up_songs.extend(response.body.get('data', {}).get('items', []))
return thumbs_up_songs
|
python
|
{
"resource": ""
}
|
q10566
|
MobileClient.top_charts
|
train
|
def top_charts(self):
"""Get a listing of the default top charts."""
response = self._call(mc_calls.BrowseTopChart)
top_charts = response.body
return top_charts
|
python
|
{
"resource": ""
}
|
q10567
|
MobileClient.top_charts_for_genre
|
train
|
def top_charts_for_genre(self, genre_id):
"""Get a listing of top charts for a top chart genre.
Parameters:
genre_id (str): A top chart genre ID as found with :meth:`top_charts_genres`.
"""
response = self._call(mc_calls.BrowseTopChartForGenre, genre_id)
top_chart_for_genre = response.body
return top_chart_for_genre
|
python
|
{
"resource": ""
}
|
q10568
|
MobileClient.top_charts_genres
|
train
|
def top_charts_genres(self):
"""Get a listing of genres from the browse top charts tab."""
response = self._call(mc_calls.BrowseTopChartGenres)
top_chart_genres = response.body.get('genres', [])
return top_chart_genres
|
python
|
{
"resource": ""
}
|
q10569
|
run
|
train
|
def run(stream=sys.stdin.buffer):
"""Reads from stream, applying the LT decoding algorithm
to incoming encoded blocks until sufficiently many blocks
have been received to reconstruct the entire file.
"""
payload = decode.decode(stream)
sys.stdout.write(payload.decode('utf8'))
|
python
|
{
"resource": ""
}
|
q10570
|
_split_file
|
train
|
def _split_file(f, blocksize):
"""Block file byte contents into blocksize chunks, padding last one if necessary
"""
f_bytes = f.read()
blocks = [int.from_bytes(f_bytes[i:i+blocksize].ljust(blocksize, b'0'), sys.byteorder)
for i in range(0, len(f_bytes), blocksize)]
return len(f_bytes), blocks
|
python
|
{
"resource": ""
}
|
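A quick illustration of the padding behaviour of _split_file above, using an in-memory stream; it assumes the function is importable from the surrounding module.

import io

filesize, blocks = _split_file(io.BytesIO(b'hello world'), 4)
print(filesize)     # 11 bytes of input
print(len(blocks))  # 3 blocks; the last block was right-padded with b'0' to 4 bytes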
q10571
|
encoder
|
train
|
def encoder(f, blocksize, seed=None, c=sampler.DEFAULT_C, delta=sampler.DEFAULT_DELTA):
"""Generates an infinite sequence of blocks to transmit
to the receiver
"""
# Generate seed if not provided
if seed is None:
seed = randint(0, (1 << 31) - 1)
# get file blocks
filesize, blocks = _split_file(f, blocksize)
# init stream vars
K = len(blocks)
prng = sampler.PRNG(params=(K, delta, c))
prng.set_seed(seed)
# block generation loop
while True:
blockseed, d, ix_samples = prng.get_src_blocks()
block_data = 0
for ix in ix_samples:
block_data ^= blocks[ix]
# Generate blocks of XORed data in network byte order
block = (filesize, blocksize, blockseed, int.to_bytes(block_data, blocksize, sys.byteorder))
yield pack('!III%ss'%blocksize, *block)
|
python
|
{
"resource": ""
}
|
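A sketch of taking one packet from the generator above and unpacking it with the same '!III%ss' layout; it assumes encoder and an open binary file object f are in scope.

import sys
from struct import unpack

blocksize = 4
packet = next(encoder(f, blocksize, seed=42))
filesize, bs, blockseed, payload = unpack('!III%ss' % blocksize, packet)
block_data = int.from_bytes(payload, sys.byteorder)  # the XORed source blocks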
q10572
|
_read_block
|
train
|
def _read_block(blocksize, stream):
"""Read block data from network into integer type
"""
blockdata = stream.read(blocksize)
return int.from_bytes(blockdata, 'big')
|
python
|
{
"resource": ""
}
|
q10573
|
read_blocks
|
train
|
def read_blocks(stream):
"""Generate parsed blocks from input stream
"""
while True:
header = _read_header(stream)
block = _read_block(header[1], stream)
yield (header, block)
|
python
|
{
"resource": ""
}
|
q10574
|
BlockGraph.add_block
|
train
|
def add_block(self, nodes, data):
"""Adds a new check node and edges between that node and all
source nodes it connects, resolving all message passes that
become possible as a result.
"""
# We can eliminate this source node
if len(nodes) == 1:
to_eliminate = list(self.eliminate(next(iter(nodes)), data))
# Recursively eliminate all nodes that can now be resolved
while len(to_eliminate):
other, check = to_eliminate.pop()
to_eliminate.extend(self.eliminate(other, check))
else:
# Pass messages from already-resolved source nodes
for node in list(nodes):
if node in self.eliminated:
nodes.remove(node)
data ^= self.eliminated[node]
# Resolve if we are left with a single non-resolved source node
if len(nodes) == 1:
return self.add_block(nodes, data)
else:
# Add edges for all remaining nodes to this check
check = CheckNode(nodes, data)
for node in nodes:
self.checks[node].append(check)
# Are we done yet?
return len(self.eliminated) >= self.num_blocks
|
python
|
{
"resource": ""
}
|
q10575
|
BlockGraph.eliminate
|
train
|
def eliminate(self, node, data):
"""Resolves a source node, passing the message to all associated checks
"""
# Cache resolved value
self.eliminated[node] = data
others = self.checks[node]
del self.checks[node]
# Pass messages to all associated checks
for check in others:
check.check ^= data
check.src_nodes.remove(node)
# Yield all nodes that can now be resolved
if len(check.src_nodes) == 1:
yield (next(iter(check.src_nodes)), check.check)
|
python
|
{
"resource": ""
}
|
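A tiny self-contained illustration of the XOR peeling step that add_block and eliminate above implement: a degree-1 check reveals a source block, which is then XORed out of every other check that references it.

b0, b1 = 0b1010, 0b0110        # unknown source blocks
check_a = b0                   # degree-1 check covering {b0}
check_b = b0 ^ b1              # degree-2 check covering {b0, b1}

recovered_b0 = check_a                 # resolve b0 directly
recovered_b1 = check_b ^ recovered_b0  # peel b0 out of the second check
assert (recovered_b0, recovered_b1) == (b0, b1)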
q10576
|
mobileclient
|
train
|
def mobileclient(username=None, device_id=None, *, token=None, locale='en_US'):
"""Create and authenticate a Google Music mobile client.
>>> import google_music
>>> mc = google_music.mobileclient('username')
Parameters:
username (str, Optional): Your Google Music username.
This is used to store OAuth credentials for different accounts separately.
device_id (str, Optional): A mobile device ID. Default: MAC address is used.
token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.
locale (str, Optional): `ICU <http://www.localeplanet.com/icu/>`__ locale used to localize some
responses. This must be a locale supported by Android. Default: ``'en_US'``.
Returns:
MobileClient: An authenticated :class:`~google_music.MobileClient` instance.
"""
return MobileClient(
username,
device_id,
token=token,
locale=locale
)
|
python
|
{
"resource": ""
}
|
q10577
|
_dendropy_to_dataframe
|
train
|
def _dendropy_to_dataframe(
tree,
add_node_labels=True,
use_uids=True):
"""Convert Dendropy tree to Pandas dataframe."""
# Maximum distance from root.
tree.max_distance_from_root()
# Initialize the data object.
idx = []
data = {
'type': [],
'id': [],
'parent': [],
'length': [],
'label': [],
'distance': []}
if use_uids:
data['uid'] = []
# Add labels to internal nodes if set to true.
if add_node_labels:
for i, node in enumerate(tree.internal_nodes()):
node.label = str(i)
for node in tree.nodes():
# Get node type
if node.is_leaf():
type_ = 'leaf'
label = str(node.taxon.label).replace(' ', '_')
elif node.is_internal():
type_ = 'node'
label = str(node.label)
# Set node label and parent.
id_ = label
parent_node = node.parent_node
length = node.edge_length
distance = node.distance_from_root()
# Is this node a root?
if parent_node is None and length is None:
parent_label = None
parent_node = None
length = 0
distance = 0
type_ = 'root'
# Set parent node label
elif parent_node.is_internal():
parent_label = str(parent_node.label)
else:
raise Exception("Subtree is not attached to tree?")
# Add this node to the data.
data['type'].append(type_)
data['id'].append(id_)
data['parent'].append(parent_label)
data['length'].append(length)
data['label'].append(label)
data['distance'].append(distance)
if use_uids:
data['uid'].append(get_random_id(10))
# Construct dataframe.
df = pandas.DataFrame(data)
return df
|
python
|
{
"resource": ""
}
|
q10578
|
_read
|
train
|
def _read(
filename=None,
data=None,
schema=None,
add_node_labels=True,
use_uids=True
):
"""Read a phylogenetic tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
newick file to read into DataFrame.
data: str (default is None)
newick string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame.
"""
if filename is not None:
# Use Dendropy to parse tree.
tree = dendropy.Tree.get(
path=filename,
schema=schema,
preserve_underscores=True)
elif data is not None:
tree = dendropy.Tree.get(
data=data,
schema=schema,
preserve_underscores=True)
else:
raise Exception('No tree given?')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df
|
python
|
{
"resource": ""
}
|
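A usage sketch for _read above, parsing a small newick string; it assumes the function (and its dendropy dependency) is importable from the surrounding module.

newick = '((A:1,B:1):1,C:2);'
df = _read(data=newick, schema='newick')
print(df[['type', 'id', 'parent', 'length']])  # root, one internal node, and three leaves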
q10579
|
pandas_df_to_biopython_seqrecord
|
train
|
def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
"""Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col str:
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
"""
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records
|
python
|
{
"resource": ""
}
|
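A minimal sketch for pandas_df_to_biopython_seqrecord above. It assumes an older Biopython release that still provides Bio.Alphabet (as the code itself does) and that the function is importable from the surrounding module.

import pandas as pd
from Bio.Alphabet import generic_dna

df = pd.DataFrame({
    'uid': ['seq1', 'seq2'],
    'sequence': ['ATGCGT', 'ATGAAA'],
})
records = pandas_df_to_biopython_seqrecord(df, alphabet=generic_dna)
print([r.id for r in records])  # ['seq1', 'seq2']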
q10580
|
pandas_series_to_biopython_seqrecord
|
train
|
def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
"""Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
"""
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records
|
python
|
{
"resource": ""
}
|
q10581
|
_write
|
train
|
def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
"""General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='uid')
ID column name in DataFrame
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records])
|
python
|
{
"resource": ""
}
|
q10582
|
_read
|
train
|
def _read(
filename,
schema,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
"""Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- name
- id
- description
- sequence
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
kwargs.update(alphabet=alphabet)
# Prepare DataFrame fields.
data = {
'id': [],
seq_label: [],
'description': [],
'label': []
}
if use_uids:
data['uid'] = []
# Parse Fasta file.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
data['id'].append(s.id)
data[seq_label].append(str(s.seq))
data['description'].append(s.description)
data['label'].append(s.name)
if use_uids:
data['uid'].append(get_random_id(10))
# Port to DataFrame.
return pd.DataFrame(data)
|
python
|
{
"resource": ""
}
|
q10583
|
read_blast_xml
|
train
|
def read_blast_xml(filename, **kwargs):
"""Read BLAST XML format."""
# Read file.
with open(filename, 'r') as f:
blast_record = NCBIXML.read(f)
# Prepare DataFrame fields.
data = {'accession': [],
'hit_def': [],
'hit_id': [],
'title': [],
'length': [],
'e_value': [],
'sequence': []}
# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
        data['accession'].append(s.accession)
        data['hit_def'].append(s.hit_def)
        data['hit_id'].append(s.hit_id)
        data['title'].append(s.title)
        data['length'].append(s.length)
        data['e_value'].append(s.hsps[0].expect)
        data['sequence'].append(s.hsps[0].sbjct)
# Port to DataFrame.
return pd.DataFrame(data)
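A hedged usage sketch (the XML path is hypothetical; it should be a single-query BLAST result, since NCBIXML.read expects exactly one record):

hits = read_blast_xml('blast_result.xml')
# Rank alignments by significance and inspect the best ones.
best = hits.sort_values('e_value').head(10)
print(best[['accession', 'e_value', 'length']])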
|
python
|
{
"resource": ""
}
|
q10584
|
_pandas_df_to_dendropy_tree
|
train
|
def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
"""Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
    taxon_col : str (optional)
        Column in the dataframe used to label the taxa. Must be a string.
    taxon_annotations : list
        List of columns to annotate in the tree taxa.
    node_col : str (optional)
        Column in the dataframe used to label the nodes. Must be a string.
    node_annotations : list
        List of columns to annotate in the nodes.
    branch_lengths : bool
        If True, includes branch lengths.
"""
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree
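A minimal sketch of the DataFrame layout this function expects: 'id'/'parent' encode the topology, 'type' marks leaf rows, and 'length' holds branch lengths (the concrete values are illustrative):

import pandas as pd
tree_df = pd.DataFrame({
    'uid':    ['root', 'tipA', 'tipB'],
    'id':     ['0', '1', '2'],
    'parent': [None, '0', '0'],
    'type':   ['node', 'leaf', 'leaf'],
    'length': [0.0, 0.1, 0.2],
})
tree = _pandas_df_to_dendropy_tree(tree_df)
print(tree.as_string(schema='newick'))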
|
python
|
{
"resource": ""
}
|
q10585
|
_write
|
train
|
def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
"""Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
    taxon_col : str (optional)
        Column in the dataframe used to label the taxa. Must be a string.
    taxon_annotations : list
        List of columns to annotate in the tree taxa.
    node_col : str (optional)
        Column in the dataframe used to label the nodes. Must be a string.
    node_annotations : list
        List of columns to annotate in the nodes.
    branch_lengths : bool
        If True, includes branch lengths.
"""
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
# Write out format
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema)
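A hedged usage sketch (tree_df follows the layout sketched after _pandas_df_to_dendropy_tree above):

import pandas as pd
tree_df = pd.DataFrame({
    'uid': ['root', 'tipA', 'tipB'], 'id': ['0', '1', '2'],
    'parent': [None, '0', '0'], 'type': ['node', 'leaf', 'leaf'],
    'length': [0.0, 0.1, 0.2],
})
newick_str = _write(tree_df, schema='newick')            # returned as a string
_write(tree_df, filename='tree.nexus', schema='nexus')   # or written to disk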
|
python
|
{
"resource": ""
}
|
q10586
|
get_random_id
|
train
|
def get_random_id(length):
"""Generate a random, alpha-numerical id."""
alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(alphabet) for _ in range(length))
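For example (output differs from run to run):

uid = get_random_id(10)
print(uid)   # e.g. 'aB3xZ9Qw1k' -- ten characters drawn from [A-Za-z0-9]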
|
python
|
{
"resource": ""
}
|
q10587
|
PyGreen.set_folder
|
train
|
def set_folder(self, folder):
"""
Sets the folder where the files to serve are located.
"""
self.folder = folder
self.templates.directories[0] = folder
self.app.root_path = folder
|
python
|
{
"resource": ""
}
|
q10588
|
PyGreen.run
|
train
|
def run(self, host='0.0.0.0', port=8080):
"""
Launch a development web server.
"""
waitress.serve(self.app, host=host, port=port)
|
python
|
{
"resource": ""
}
|
q10589
|
PyGreen.get
|
train
|
def get(self, path):
"""
        Get the content of a file, identified by its path relative to the folder configured
in PyGreen. If the file extension is one of the extensions that should be processed
through Mako, it will be processed.
"""
data = self.app.test_client().get("/%s" % path).data
return data
|
python
|
{
"resource": ""
}
|
q10590
|
PyGreen.gen_static
|
train
|
def gen_static(self, output_folder):
"""
        Generates a complete static version of the web site. It will be stored in
output_folder.
"""
files = []
for l in self.file_listers:
files += l()
for f in files:
_logger.info("generating %s" % f)
content = self.get(f)
loc = os.path.join(output_folder, f)
d = os.path.dirname(loc)
if not os.path.exists(d):
os.makedirs(d)
with open(loc, "wb") as file_:
file_.write(content)
|
python
|
{
"resource": ""
}
|
q10591
|
PyGreen.cli
|
train
|
def cli(self, cmd_args=None):
"""
The command line interface of PyGreen.
"""
logging.basicConfig(level=logging.INFO, format='%(message)s')
parser = argparse.ArgumentParser(description='PyGreen, micro web framework/static web site generator')
subparsers = parser.add_subparsers(dest='action')
parser_serve = subparsers.add_parser('serve', help='serve the web site')
parser_serve.add_argument('-p', '--port', type=int, default=8080, help='port to serve on')
        parser_serve.add_argument('-f', '--folder', default=".", help='folder containing files to serve')
parser_serve.add_argument('-d', '--disable-templates', action='store_true', default=False, help='just serve static files, do not use Mako')
def serve():
if args.disable_templates:
self.template_exts = set([])
self.run(port=args.port)
parser_serve.set_defaults(func=serve)
parser_gen = subparsers.add_parser('gen', help='generate a static version of the site')
parser_gen.add_argument('output', help='folder to store the files')
parser_gen.add_argument('-f', '--folder', default=".", help='folder containing files to generate')
def gen():
self.gen_static(args.output)
parser_gen.set_defaults(func=gen)
args = parser.parse_args(cmd_args)
self.set_folder(args.folder)
print(parser.description)
print("")
args.func()
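A hedged usage sketch (it assumes PyGreen can be instantiated without arguments, which this snippet does not show):

# Equivalent to `<script> gen ./_build -f ./site` on the command line;
# `pg.cli(['serve', '-p', '8000'])` would start the development server instead.
pg = PyGreen()
pg.cli(cmd_args=['gen', './_build', '-f', './site'])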
|
python
|
{
"resource": ""
}
|
q10592
|
WSGIMimeRender
|
train
|
def WSGIMimeRender(*args, **kwargs):
'''
    A wrapper for _WSGIMimeRender that wraps the
    inner callable with wsgi_wrap first.
'''
def wrapper(*args2, **kwargs2):
# take the function
def wrapped(f):
return _WSGIMimeRender(*args, **kwargs)(*args2, **kwargs2)(wsgi_wrap(f))
return wrapped
return wrapper
|
python
|
{
"resource": ""
}
|
q10593
|
URI.relative
|
train
|
def relative(self):
"""Identify if this URI is relative to some "current context".
For example, if the protocol is missing, it's protocol-relative. If the host is missing, it's host-relative, etc.
"""
scheme = self.scheme
if not scheme:
return True
return scheme.is_relative(self)
|
python
|
{
"resource": ""
}
|
q10594
|
URI.resolve
|
train
|
def resolve(self, uri=None, **parts):
"""Attempt to resolve a new URI given an updated URI, partial or complete."""
if uri:
result = self.__class__(urljoin(str(self), str(uri)))
else:
result = self.__class__(self)
for part, value in parts.items():
if part not in self.__all_parts__:
raise TypeError("Unknown URI component: " + part)
setattr(result, part, value)
return result
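A hedged usage sketch (it assumes URI accepts a string, as resolve() itself does when calling self.__class__; the accepted keyword parts depend on __all_parts__, so 'path' here is an assumption):

base = URI('http://example.com/docs/index.html')
print(base.resolve('../img/logo.png'))   # http://example.com/img/logo.png, via urljoin
print(base.resolve(path='/about'))       # a copy of the URI with only the path swapped out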
|
python
|
{
"resource": ""
}
|
q10595
|
JalaliDate.replace
|
train
|
def replace(self, year=None, month=None, day=None):
"""
        Replaces the given arguments on this instance and returns a new instance.
:param year:
:param month:
:param day:
        :return: A :py:class:`khayyam.JalaliDate` with the same attributes, except for those
            attributes given new values by whichever keyword arguments are specified.
"""
return JalaliDate(
year if year else self.year,
month if month else self.month,
day if day else self.day
)
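A hedged usage sketch (it assumes the JalaliDate(year, month, day) constructor used in the method body):

d = JalaliDate(1394, 5, 21)
d2 = d.replace(month=1)   # a new JalaliDate for 1394-01-21; d itself is unchanged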
|
python
|
{
"resource": ""
}
|
q10596
|
JalaliDate.todate
|
train
|
def todate(self):
"""
        Calculates the corresponding day in the Gregorian calendar. This is the main use case of this library.
        :return: Corresponding date in the Gregorian calendar.
:rtype: :py:class:`datetime.date`
"""
arr = get_gregorian_date_from_julian_day(self.tojulianday())
return datetime.date(int(arr[0]), int(arr[1]), int(arr[2]))
|
python
|
{
"resource": ""
}
|
q10597
|
JalaliDatetime.date
|
train
|
def date(self):
"""
Return date object with same year, month and day.
:rtype: :py:class:`khayyam.JalaliDate`
"""
return khayyam.JalaliDate(self.year, self.month, self.day)
|
python
|
{
"resource": ""
}
|
q10598
|
levinson_1d
|
train
|
def levinson_1d(r, order):
"""Levinson-Durbin recursion, to efficiently solve symmetric linear systems
with toeplitz structure.
Parameters
---------
r : array-like
input array to invert (since the matrix is symmetric Toeplitz, the
corresponding pxp matrix is defined by p items only). Generally the
autocorrelation of the signal for linear prediction coefficients
estimation. The first item must be a non zero real.
Notes
----
This implementation is in python, hence unsuitable for any serious
computation. Use it as educational and reference purpose only.
Levinson is a well-known algorithm to solve the Hermitian toeplitz
equation:
_ _
-R[1] = R[0] R[1] ... R[p-1] a[1]
: : : : * :
: : : _ * :
-R[p] = R[p-1] R[p-2] ... R[0] a[p]
_
with respect to a ( is the complex conjugate). Using the special symmetry
in the matrix, the inversion can be done in O(p^2) instead of O(p^3).
"""
r = np.atleast_1d(r)
if r.ndim > 1:
raise ValueError("Only rank 1 are supported for now.")
n = r.size
if n < 1:
raise ValueError("Cannot operate on empty array !")
elif order > n - 1:
raise ValueError("Order should be <= size-1")
if not np.isreal(r[0]):
raise ValueError("First item of input must be real.")
elif not np.isfinite(1 / r[0]):
raise ValueError("First item should be != 0")
# Estimated coefficients
a = np.empty(order + 1, 'float32')
# temporary array
t = np.empty(order + 1, 'float32')
# Reflection coefficients
k = np.empty(order, 'float32')
a[0] = 1.
e = r[0]
for i in range(1, order + 1):
acc = r[i]
for j in range(1, i):
acc += a[j] * r[i - j]
k[i - 1] = -acc / e
a[i] = k[i - 1]
for j in range(order):
t[j] = a[j]
for j in range(1, i):
a[j] += k[i - 1] * np.conj(t[i - j])
e *= 1 - k[i - 1] * np.conj(k[i - 1])
return a, e, k
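A usage sketch that fits an order-2 prediction-error filter to a random signal's biased autocorrelation (NumPy only):

import numpy as np
x = np.random.randn(1024)
# Biased autocorrelation estimate for lags 0..2.
r = np.array([np.dot(x[:x.size - lag], x[lag:]) for lag in range(3)]) / x.size
a, e, k = levinson_1d(r, order=2)
print(a)   # [1., a1, a2], the prediction-error filter coefficients
print(e)   # final prediction error power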
|
python
|
{
"resource": ""
}
|
q10599
|
acorr_lpc
|
train
|
def acorr_lpc(x, axis=-1):
"""Compute autocorrelation of x along the given axis.
    This computes the biased autocorrelation estimator (divided by the size of
    the input signal).
    Notes
    -----
    The reason why we do not use acorr directly is speed."""
if not np.isrealobj(x):
raise ValueError("Complex input not supported yet")
maxlag = x.shape[axis]
nfft = int(2 ** nextpow2(2 * maxlag - 1))
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a
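A hedged usage sketch (each row is treated as an independent signal along the default last axis; it assumes the helpers referenced above, _acorr_last_axis and nextpow2, are available in the same module):

import numpy as np
x = np.random.randn(4, 256)
r = acorr_lpc(x)                       # biased autocorrelation, one row per signal
a, e, k = levinson_1d(r[0], order=10)  # LPC coefficients for the first signal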
|
python
|
{
"resource": ""
}
|