code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
if owner is None:
raise ValueError("Invalid value for `owner`, must not be `None`")
if owner is not None and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', owner):
raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
self._owner = owner
|
def owner(self, owner)
|
Sets the owner of this LinkedDatasetCreateOrUpdateRequest.
User name and unique identifier of the creator of the dataset.
:param owner: The owner of this LinkedDatasetCreateOrUpdateRequest.
:type: str
| 1.755225
| 1.682021
| 1.043521
|
if 'w' == self._mode and isinstance(value, str):
self._queue.put(value.encode('utf-8'))
elif self._mode in {'w', 'wb'}:
if isinstance(value, (bytes, bytearray)):
self._queue.put(value)
else:
raise TypeError(
"a string or bytes object is required, not {}".format(
type(value)))
else:
raise IOError("File not opened in write mode.")
|
def write(self, value)
|
write the given value to the stream - if the object is a bytearray,
write it as-is - otherwise, convert the object to a string with
`str()` and write the UTF-8 bytes
:param value: the value to write
:type value: str or bytearray
:raises TypeError: if the type of the value provided does not match
the mode in which the file was opened.
:raises NotImplementedError: if the mode of the file is not one of the
supported values (currently only "writing" modes for files are
supported - leaving the option to implement "read" modes open for
future work)
| 3.149458
| 3.082875
| 1.021597
|
if 'r' == self._mode:
return self._read_response.text
elif 'rb' == self._mode:
return self._read_response.content
else:
raise IOError("File not opened in read mode.")
|
def read(self)
|
read the contents of the file that's been opened in read mode
| 4.582434
| 3.51614
| 1.303257
|
ownerid, datasetid = parse_dataset_key(self._dataset_key)
response = requests.get(
'{}/file_download/{}/{}/{}'.format(
self._query_host, ownerid, datasetid, self._file_name),
headers={
'User-Agent': self._user_agent,
'Authorization': 'Bearer {}'.format(
self._config.auth_token)
}, stream=True)
try:
response.raise_for_status()
except Exception as e:
raise RestApiError(cause=e)
self._read_response = response
|
def _open_for_read(self)
|
open the file in read mode
| 3.879737
| 3.757161
| 1.032625
|
def put_request(body):
ownerid, datasetid = parse_dataset_key(self._dataset_key)
response = requests.put(
"{}/uploads/{}/{}/files/{}".format(
self._api_host, ownerid, datasetid, self._file_name),
data=body,
headers={
'User-Agent': self._user_agent,
'Authorization': 'Bearer {}'.format(
self._config.auth_token)
})
self._response_queue.put(response)
body = iter(self._queue.get, self._sentinel)
self._thread = Thread(target=put_request, args=(body,))
self._thread.start()
|
def _open_for_write(self)
|
open the file in write mode
| 3.994759
| 3.91046
| 1.021557
|
if self._mode.find('w') >= 0:
self._queue.put(self._sentinel)
self._thread.join(timeout=self._timeout)
if self._thread.is_alive():
raise RemoteFileException("Closing file timed out.")
response = self._response_queue.get_nowait()
try:
response.raise_for_status()
except Exception as e:
raise RestApiError(cause=e)
else:
self._read_response = None
|
def close(self)
|
in write mode, closing the handle adds the sentinel value into the
queue and joins the thread executing the HTTP request. in read mode,
this clears out the read response object so there are no references
to it, and the resources can be reclaimed.
| 4.995241
| 3.826761
| 1.305344
|
json_context = json.dumps(context, cls=self.json_encoder_class)
return HttpResponse(json_context,
content_type=self.get_content_type(),
status=status)
|
def render_json_response(self, context, status=200)
|
Serialize the context dictionary as JSON and return it
as an HTTP Response object. This method only allows
serialization of simple objects (i.e. no model instances)
| 2.762517
| 2.554527
| 1.08142
|
context = self.chart_instance.chartjs_configuration(*args, **kwargs)
return self.render_json_response(context)
|
def get(self, request, *args, **kwargs)
|
Main entry. This View only responds to GET requests.
| 9.015642
| 7.763496
| 1.161286
|
# TODO: handle syntax errors
contents = self._get_contents(contents, filename)
request = ParseRequest(filename=os.path.basename(filename),
content=contents, mode=mode,
language=self._scramble_language(language))
response = self._stub_v2.Parse(request, timeout=timeout)
return ResultContext(response)
|
def parse(self, filename: str, language: Optional[str]=None,
contents: Optional[str]=None, mode: Optional[ModeType]=None,
timeout: Optional[int]=None) -> ResultContext
|
Queries the Babelfish server and receives the UAST response for the specified
file.
:param filename: The path to the file. Can be arbitrary if contents \
is not None.
:param language: The programming language of the file. Refer to \
https://doc.bblf.sh/languages.html for the list of \
currently supported languages. None means autodetect.
:param contents: The contents of the file. If None, it is read from \
filename.
:param mode: UAST transformation mode.
:param timeout: The request timeout in seconds.
:type filename: str
:type language: str
:type contents: str
:type timeout: float
:return: UAST object.
| 4.867379
| 5.685805
| 0.856058
|
self._channel.close()
self._channel = self._stub_v1 = self._stub_v2 = None
|
def close(self) -> None
|
Close the gRPC channel and free the acquired resources. Using a closed client is
not supported.
| 9.533978
| 5.888887
| 1.618978
|
if isinstance(n, CompatNodeIterator):
return CompatNodeIterator(n._nodeit.iterate(order), only_nodes=True)
elif isinstance(n, Node):
nat_it = native_iterator(n.internal_node, order)
return CompatNodeIterator(NodeIterator(nat_it), only_nodes=True)
elif isinstance(n, dict):
nat_it = native_iterator(n, order)
return CompatNodeIterator(NodeIterator(nat_it, uast()), only_nodes=True)
else:
raise WrongTypeException(
"iterator on non node or iterator type (%s)" % str(type(n))
)
|
def iterator(n: Union[Node, CompatNodeIterator, dict],
order: TreeOrder = TreeOrder.PRE_ORDER) -> CompatNodeIterator
|
This function has the same signature as the pre-v3 iterator()
call returning a compatibility CompatNodeIterator.
| 4.083013
| 4.028321
| 1.013577
|
ctx = uast()
return CompatNodeIterator(NodeIterator(ctx.filter(query, n.internal_node), ctx))
|
def filter(n: Node, query: str) -> CompatNodeIterator
|
This function has the same signature as the pre-v3 filter() returning a
compatibility CompatNodeIterator.
| 19.997358
| 22.930704
| 0.872078
|
return CompatNodeIterator(filter(n, query)._nodeit, only_nodes=True)
|
def filter_nodes(n: Node, query: str) -> CompatNodeIterator
|
Utility function. Same as filter() but will only filter for nodes (i.e.
it will exclude scalars and positions).
| 21.777618
| 18.209805
| 1.195928
|
return _scalariter2item(n, query, str)
|
def filter_string(n: Node, query: str) -> str
|
Filter and ensure that the returned value is of string type.
| 146.273392
| 62.296604
| 2.348015
|
return _scalariter2item(n, query, bool)
|
def filter_bool(n: Node, query: str) -> bool
|
Filter and ensure that the returned value is of type bool.
| 119.410202
| 53.784325
| 2.220167
|
return _scalariter2item(n, query, int)
|
def filter_int(n: Node, query: str) -> int
|
Filter and ensure that the returned value is of type int.
| 100.402451
| 47.749447
| 2.102693
|
return _scalariter2item(n, query, float)
|
def filter_float(n: Node, query: str) -> float
|
Filter and ensure that the returned value is of type float.
| 84.237885
| 45.310345
| 1.859131
|
return self._parse(filename, language, contents, timeout,
Mode.Value('ANNOTATED'))
|
def parse(self, filename: str, language: str = None, contents: str = None,
timeout: float = None) -> CompatParseResponse
|
Parse the specified filename or contents and return a CompatParseResponse.
| 20.478344
| 16.521963
| 1.239462
|
if not self._last_node:
return None
return filter(self._last_node, query)
|
def filter(self, query: str) -> Optional['CompatNodeIterator']
|
Further filter the results using this iterator as base.
| 6.383351
| 5.073153
| 1.258261
|
if isinstance(self._last_node, dict):
return self._last_node.keys()
else:
return {}
|
def properties(self) -> dict
|
Returns the properties of the current node in the iteration.
| 6.6644
| 4.124389
| 1.615852
|
xml = XmlWriter(f, indentAmount=' ')
xml.prolog()
xml.start('playlist', { 'xmlns': 'http://xspf.org/ns/0/', 'version': '1' })
xml.start('trackList')
for tupe in tuples:
xml.start('track')
xml.elem('creator',tupe[0])
xml.elem('title',tupe[1])
xml.elem('location', tupe[2])
xml.end()
xml.end()
xml.end()
f.close()
|
def write_xspf(f, tuples)
|
send me a list of (artist,title,mp3_url)
| 2.742068
| 2.667195
| 1.028072
|
try:
param_dict['api_key'] = config.ECHO_NEST_API_KEY
param_list = []
if not socket_timeout:
socket_timeout = config.CALL_TIMEOUT
for key,val in param_dict.iteritems():
if isinstance(val, list):
param_list.extend( [(key,subval) for subval in val] )
elif val is not None:
if isinstance(val, unicode):
val = val.encode('utf-8')
param_list.append( (key,val) )
params = urllib.urlencode(param_list)
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(socket_timeout)
if(POST):
if (not method == 'track/upload') or ((method == 'track/upload') and 'url' in param_dict):
url = 'http://%s/%s/%s/%s' % (config.API_HOST, config.API_SELECTOR,
config.API_VERSION, method)
if data is None:
data = ''
data = urllib.urlencode(data)
data = "&".join([data, params])
f = opener.open(url, data=data)
else:
url = '/%s/%s/%s?%s' % (config.API_SELECTOR, config.API_VERSION,
method, params)
if ':' in config.API_HOST:
host, port = config.API_HOST.split(':')
else:
host = config.API_HOST
port = 80
if config.TRACE_API_CALLS:
logger.info("%s/%s" % (host+':'+str(port), url,))
conn = httplib.HTTPConnection(host, port = port)
conn.request('POST', url, body = data, headers = dict([('Content-Type', 'application/octet-stream')]+headers))
f = conn.getresponse()
else:
url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
method, params)
f = opener.open(url)
socket.setdefaulttimeout(orig_timeout)
# try/except
response_dict = get_successful_response(f)
return response_dict
except IOError, e:
if hasattr(e, 'reason'):
raise EchoNestIOError(error=e.reason)
elif hasattr(e, 'code'):
raise EchoNestIOError(code=e.code)
else:
raise
|
def callm(method, param_dict, POST=False, socket_timeout=None, data=None)
|
Call the api!
Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params
put them in a list.
** note, if we require 2.6, we can get rid of this timeout munging.
| 2.643242
| 2.641408
| 1.000694
|
try:
import oauth2 # lazy import this so oauth2 is not a hard dep
except ImportError:
raise Exception("You must install the python-oauth2 library to use this method.")
def build_request(url):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': int(time.time())
}
consumer = oauth2.Consumer(key=config.ECHO_NEST_CONSUMER_KEY, secret=config.ECHO_NEST_SHARED_SECRET)
params['oauth_consumer_key'] = config.ECHO_NEST_CONSUMER_KEY
req = oauth2.Request(method='GET', url=url, parameters=params)
signature_method = oauth2.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, consumer, None)
return req
param_dict['api_key'] = config.ECHO_NEST_API_KEY
param_list = []
if not socket_timeout:
socket_timeout = config.CALL_TIMEOUT
for key,val in param_dict.iteritems():
if isinstance(val, list):
param_list.extend( [(key,subval) for subval in val] )
elif val is not None:
if isinstance(val, unicode):
val = val.encode('utf-8')
param_list.append( (key,val) )
params = urllib.urlencode(param_list)
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(socket_timeout)
url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
method, params)
req = build_request(url)
f = opener.open(req.to_url())
socket.setdefaulttimeout(orig_timeout)
# try/except
response_dict = get_successful_response(f)
return response_dict
|
def oauthgetm(method, param_dict, socket_timeout=None)
|
Call the api! With Oauth!
Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params
put them in a list.
** note, if we require 2.6, we can get rid of this timeout munging.
| 2.713415
| 2.714102
| 0.999747
|
params = urllib.urlencode(fields)
url = 'http://%s%s?%s' % (host, selector, params)
u = urllib2.urlopen(url, files)
result = u.read()
[fp.close() for (key, fp) in files]
return result
|
def postChunked(host, selector, fields, files)
|
Attempt to replace postMultipart() with nearly-identical interface.
(The files tuple no longer requires the filename, and we only return
the response body.)
Uses the urllib2_file.py originally from
http://fabien.seisen.org which was also drawn heavily from
http://code.activestate.com/recipes/146306/ .
This urllib2_file.py is more desirable because of the chunked
uploading from a file pointer (no need to read entire file into
memory) and the ability to work from behind a proxy (due to its
basis on urllib2).
| 2.612786
| 2.720033
| 0.960572
|
result = util.callm("catalog/create", {}, POST=True,
data={"name":name, "type":T})
result = result['response']
return Catalog(result['id'], **dict( (k,result[k]) for k in ('name', 'type')))
|
def create_catalog_by_name(name, T="general")
|
Creates a catalog object, with a given name. Does not check to see if the catalog already exists.
Create a catalog object like
| 6.557281
| 8.475001
| 0.77372
|
kwargs = {
'name' : name,
}
result = util.callm("%s/%s" % ('catalog', 'profile'), kwargs)
return Catalog(**util.fix(result['response']['catalog']))
|
def get_catalog_by_name(name)
|
Grabs a catalog by name, if it's there on the API key.
Otherwise, an error is thrown (mirroring the API)
| 11.570127
| 10.473521
| 1.104703
|
result = util.callm("%s/%s" % ('catalog', 'list'), {'results': results, 'start': start})
cats = [Catalog(**util.fix(d)) for d in result['response']['catalogs']]
start = result['response']['start']
total = result['response']['total']
return ResultList(cats, start, total)
|
def list_catalogs(results=30, start=0)
|
Returns list of all catalogs created on this API key
Args:
Kwargs:
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of catalog objects
Example:
>>> catalog.list_catalogs()
[<catalog - test_artist_catalog>, <catalog - test_song_catalog>, <catalog - my_songs>]
>>>
| 5.108976
| 6.900051
| 0.740426
|
post_data = {}
items_json = json.dumps(items, default=dthandler)
post_data['data'] = items_json
response = self.post_attribute("update", data=post_data)
return response['ticket']
|
def update(self, items)
|
Update a catalog object
Args:
items (list): A list of dicts describing update data and action codes (see api docs)
Kwargs:
Returns:
A ticket id
Example:
>>> c = catalog.Catalog('my_songs', type='song')
>>> items
[{'action': 'update',
'item': {'artist_name': 'dAn ThE aUtOmAtOr',
'disc_number': 1,
'genre': 'Instrumental',
'item_id': '38937DDF04BC7FC4',
'play_count': 5,
'release': 'Bombay the Hard Way: Guns, Cars & Sitars',
'song_name': 'Inspector Jay From Dehli',
'track_number': 9,
'url': 'file://localhost/Users/tylerw/Music/iTunes/iTunes%20Media/Music/Dan%20the%20Automator/Bombay%20the%20Hard%20Way_%20Guns,%20Cars%20&%20Sitars/09%20Inspector%20Jay%20From%20Dehli.m4a'}}]
>>> ticket = c.update(items)
>>> ticket
u'7dcad583f2a38e6689d48a792b2e4c96'
>>> c.status(ticket)
{u'ticket_status': u'complete', u'update_info': []}
>>>
| 5.913669
| 6.857282
| 0.862393
|
warnings.warn("catalog.read_items() is depreciated. Please use catalog.get_item_dicts() instead.")
kwargs = {}
kwargs['bucket'] = buckets or []
kwargs['item_id'] = item_ids or []
response = self.get_attribute("read", results=results, start=start, **kwargs)
rval = ResultList([])
if item_ids:
rval.start=0;
rval.total=len(response['catalog']['items'])
else:
rval.start = response['catalog']['start']
rval.total = response['catalog']['total']
for item in response['catalog']['items']:
new_item = None
# song items
if 'song_id' in item:
item['id'] = item.pop('song_id')
item['title'] = item.pop('song_name')
request = item['request']
new_item = song.Song(**util.fix(item))
new_item.request = request
# artist item
elif 'artist_id' in item:
item['id'] = item.pop('artist_id')
item['name'] = item.pop('artist_name')
request = item['request']
new_item = artist.Artist(**util.fix(item))
new_item.request = request
# unresolved item
else:
new_item = item
rval.append(new_item)
return rval
|
def read_items(self, buckets=None, results=15, start=0,item_ids=None)
|
Returns data from the catalog; also expanded for the requested buckets.
This method is provided for backwards-compatibility
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of objects in the catalog; list contains additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.read_items(results=1)
[<song - Harmonice Mundi II>]
>>>
| 2.778522
| 2.855634
| 0.972997
|
kwargs = {}
kwargs['bucket'] = buckets or []
kwargs['item_id'] = item_ids or []
response = self.get_attribute("read", results=results, start=start, **kwargs)
rval = ResultList(response['catalog']['items'])
if item_ids:
rval.start=0;
rval.total=len(response['catalog']['items'])
else:
rval.start = response['catalog']['start']
rval.total = response['catalog']['total']
return rval
|
def get_item_dicts(self, buckets=None, results=15, start=0,item_ids=None)
|
Returns data from the catalog; also expanded for the requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of dicts representing objects in the catalog; list has additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.get_item_dicts(results=1)
[
{
"artist_id": "AR78KRI1187B98E6F2",
"artist_name": "Art of Noise",
"date_added": "2012-04-02T16:50:02",
"foreign_id": "CAHLYLR13674D1CF83:song:1000",
"request": {
"artist_name": "The Art Of Noise",
"item_id": "1000",
"song_name": "Love"
},
"song_id": "SOSBCTO1311AFE7AE0",
"song_name": "Love"
}
]
| 3.648356
| 3.827244
| 0.953259
|
kwargs = {}
kwargs['bucket'] = buckets or []
if since:
kwargs['since']=since
response = self.get_attribute("feed", results=results, start=start, **kwargs)
rval = ResultList(response['feed'])
return rval
|
def get_feed(self, buckets=None, since=None, results=15, start=0)
|
Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which feed items to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of news, blogs, reviews, audio or video document dicts;
Example:
>>> c
<catalog - my_artists>
>>> c.get_feed(results=15)
{u'date_found': u'2011-02-06T07:50:25',
u'date_posted': u'2011-02-06T07:50:23',
u'id': u'caec686c0dff361e4c53dceb58fb9d2f',
u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL',
u'references': [{u'artist_id': u'ARQUMH41187B9AF699',
u'artist_name': u'Linkin Park'}],
u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ',
u'type': u'blogs',
u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'}
>>>
| 5.698831
| 7.300504
| 0.780608
|
"given an audio file, print out the artist, title and some audio attributes of the song"
print 'File: ', audio_file
pytrack = track.track_from_filename(audio_file)
print 'Artist: ', pytrack.artist if hasattr(pytrack, 'artist') else 'Unknown'
print 'Title: ', pytrack.title if hasattr(pytrack, 'title') else 'Unknown'
print 'Track ID: ', pytrack.id
print 'Tempo: ', pytrack.tempo
print 'Energy: %1.3f %s' % (pytrack.energy, _bar(pytrack.energy))
if not pytrack.valence:
# Track hasn't had latest attributes computed. Force an upload.
pytrack = track.track_from_filename(audio_file, force_upload=True)
print 'Valence: %1.3f %s' % (pytrack.valence, _bar(pytrack.valence))
print 'Acousticness: %1.3f %s' % (pytrack.acousticness, _bar(pytrack.acousticness))
print
|
def _show_one(audio_file)
|
given an audio file, print out the artist, title and some audio attributes of the song
| 3.216148
| 2.779665
| 1.157027
|
"print out the tempo for each audio file in the given directory"
for f in os.listdir(directory):
if _is_audio(f):
path = os.path.join(directory, f)
_show_one(path)
|
def show_attrs(directory)
|
print out the tempo for each audio file in the given directory
| 5.511551
| 3.353715
| 1.643417
|
kwargs = {}
if ids:
if not isinstance(ids, list):
ids = [ids]
kwargs['id'] = ids
if track_ids:
if not isinstance(track_ids, list):
track_ids = [track_ids]
kwargs['track_id'] = track_ids
buckets = buckets or []
if buckets:
kwargs['bucket'] = buckets
if limit:
kwargs['limit'] = 'true'
result = util.callm("%s/%s" % ('song', 'profile'), kwargs)
return [Song(**util.fix(s_dict)) for s_dict in result['response']['songs']]
|
def profile(ids=None, track_ids=None, buckets=None, limit=False)
|
get the profiles for multiple songs at once
Args:
ids (str or list): a song ID or list of song IDs
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
Returns:
A list of term document dicts
Example:
>>> song_ids = ['SOBSLVH12A8C131F38', 'SOXMSGY1338A5D5873', 'SOJPHZO1376210AFE5', 'SOBHNKR12AB0186218', 'SOSJAHD13770F4D40C']
>>> songs = song.profile(song_ids, buckets=['audio_summary'])
[<song - Say It Ain't So>,
<song - Island In The Sun>,
<song - My Name Is Jonas>,
<song - Buddy Holly>]
>>> songs[0].audio_summary
{u'analysis_url': u'https://echonest-analysis.s3.amazonaws.com/TR/7VRBNguufpHAQQ4ZjJ0eWsIQWl2S2_lrK-7Bp2azHOvPN4VFV-YnU7uO0dXgYtOKT-MTEa/3/full.json?Signature=hmNghHwfEsA4JKWFXnRi7mVP6T8%3D&Expires=1349809918&AWSAccessKeyId=AKIAJRDFEY23UEVW42BQ',
u'audio_md5': u'b6079b2b88f8265be8bdd5fe9702e05c',
u'danceability': 0.64540643050283253,
u'duration': 255.92117999999999,
u'energy': 0.30711665772260549,
u'key': 8,
u'liveness': 0.088994423525370583,
u'loudness': -9.7799999999999994,
u'mode': 1,
u'speechiness': 0.031970700260699259,
u'tempo': 76.049999999999997,
u'time_signature': 4}
>>>
| 2.725005
| 3.304687
| 0.824588
|
if not (cache and ('audio_summary' in self.cache)):
response = self.get_attribute('profile', bucket='audio_summary')
if response['songs'] and 'audio_summary' in response['songs'][0]:
self.cache['audio_summary'] = response['songs'][0]['audio_summary']
else:
self.cache['audio_summary'] = {}
return self.cache['audio_summary']
|
def get_audio_summary(self, cache=True)
|
Get an audio summary of a song containing mode, tempo, key, duration, time signature, loudness, danceability, energy, and analysis_url.
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A dictionary containing mode, tempo, key, duration, time signature, loudness, danceability, energy and analysis_url keys.
Example:
>>> s = song.Song('SOGNMKX12B0B806320')
>>> s.audio_summary
{u'analysis_url': u'https://echonest-analysis.s3.amazonaws.com/TR/RnMKCg47J5LgQZr0SISyoPuRxKVQx3Z_YSuhVa/3/full.json?Signature=KBUbewLiP3sZ2X6rRZzXhrgh8fw%3D&Expires=1349809604&AWSAccessKeyId=AKIAJRDFEY23UEVW42BQ',
u'audio_md5': u'ca3fdfa72eed23d5ad89872c38cecc0e',
u'danceability': 0.33712086491871546,
u'duration': 470.70666999999997,
u'energy': 0.58186979146361684,
u'key': 0,
u'liveness': 0.08676759933615498,
u'loudness': -9.5960000000000001,
u'mode': 1,
u'speechiness': 0.036938896635994867,
u'tempo': 126.949,
u'time_signature': 4}
>>>
| 2.985229
| 3.263863
| 0.914631
|
if not (cache and ('song_hotttnesss' in self.cache)):
response = self.get_attribute('profile', bucket='song_hotttnesss')
self.cache['song_hotttnesss'] = response['songs'][0]['song_hotttnesss']
return self.cache['song_hotttnesss']
|
def get_song_hotttnesss(self, cache=True)
|
Get our numerical description of how hottt a song currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song('SOLUHKP129F0698D49')
>>> s.get_song_hotttnesss()
0.57344379999999995
>>> s.song_hotttnesss
0.57344379999999995
>>>
| 2.96751
| 3.46508
| 0.856405
|
if not (cache and ('song_type' in self.cache)):
response = self.get_attribute('profile', bucket='song_type')
if response['songs'][0].has_key('song_type'):
self.cache['song_type'] = response['songs'][0]['song_type']
else:
self.cache['song_type'] = []
return self.cache['song_type']
|
def get_song_type(self, cache=True)
|
Get the types of a song.
Args:
cache (boolean): A boolean indicating whether or not the cached value should be used
(if available). Defaults to True.
Returns:
A list of strings, each representing a song type: 'christmas', for example.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.song_type
[u'christmas']
>>>
| 3.108064
| 3.320465
| 0.936033
|
if not (cache and ('artist_hotttnesss' in self.cache)):
response = self.get_attribute('profile', bucket='artist_hotttnesss')
self.cache['artist_hotttnesss'] = response['songs'][0]['artist_hotttnesss']
return self.cache['artist_hotttnesss']
|
def get_artist_hotttnesss(self, cache=True)
|
Get our numerical description of how hottt a song's artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song('SOOLGAZ127F3E1B87C')
>>> s.artist_hotttnesss
0.45645633000000002
>>> s.get_artist_hotttnesss()
0.45645633000000002
>>>
| 2.983986
| 3.350185
| 0.890693
|
if not (cache and ('artist_familiarity' in self.cache)):
response = self.get_attribute('profile', bucket='artist_familiarity')
self.cache['artist_familiarity'] = response['songs'][0]['artist_familiarity']
return self.cache['artist_familiarity']
|
def get_artist_familiarity(self, cache=True)
|
Get our numerical estimation of how familiar a song's artist currently is to the world
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing familiarity.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.get_artist_familiarity()
0.639626025843539
>>> s.artist_familiarity
0.639626025843539
>>>
| 3.32111
| 3.894886
| 0.852685
|
if not (cache and ('artist_location' in self.cache)):
response = self.get_attribute('profile', bucket='artist_location')
self.cache['artist_location'] = response['songs'][0]['artist_location']
return self.cache['artist_location']
|
def get_artist_location(self, cache=True)
|
Get the location of a song's artist.
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
An artist location object.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.artist_location
{u'latitude': 34.053489999999996, u'location': u'Los Angeles, CA', u'longitude': -118.24532000000001}
>>>
| 4.096179
| 5.025835
| 0.815025
|
if not (cache and ('song_discovery' in self.cache)):
response = self.get_attribute('profile', bucket='song_discovery')
self.cache['song_discovery'] = response['songs'][0]['song_discovery']
return self.cache['song_discovery']
|
def get_song_discovery(self, cache=True)
|
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing a song's discovery rank.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.get_song_discovery()
0.639626025843539
>>> s.song_discovery
0.639626025843539
>>>
| 4.374842
| 5.050946
| 0.866143
|
if not (cache and ('song_currency' in self.cache)):
response = self.get_attribute('profile', bucket='song_currency')
self.cache['song_currency'] = response['songs'][0]['song_currency']
return self.cache['song_currency']
|
def get_song_currency(self, cache=True)
|
Args:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing a song's currency rank.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.get_song_currency()
0.639626025843539
>>> s.song_currency
0.639626025843539
>>>
| 4.118402
| 4.941402
| 0.833448
|
if not (cache and ('tracks' in self.cache) and (catalog in [td['catalog'] for td in self.cache['tracks']])):
kwargs = {
'bucket':['tracks', 'id:%s' % catalog],
}
response = self.get_attribute('profile', **kwargs)
if not 'tracks' in self.cache:
self.cache['tracks'] = []
# don't blow away the cache for other catalogs
potential_tracks = response['songs'][0].get('tracks', [])
existing_track_ids = [tr['foreign_id'] for tr in self.cache['tracks']]
new_tds = filter(lambda tr: tr['foreign_id'] not in existing_track_ids, potential_tracks)
self.cache['tracks'].extend(new_tds)
return filter(lambda tr: tr['catalog']==util.map_idspace(catalog), self.cache['tracks'])
|
def get_tracks(self, catalog, cache=True)
|
Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song('SOWDASQ12A6310F24F')
>>> s.get_tracks('7digital')[0]
{u'catalog': u'7digital',
u'foreign_id': u'7digital:track:8445818',
u'id': u'TRJGNNY12903CC625C',
u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3',
u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'}
>>>
| 4.538798
| 4.323358
| 1.049832
|
limit = str(limit).lower()
dmca = str(dmca).lower()
kwargs = locals()
kwargs['bucket'] = kwargs['buckets']
del kwargs['buckets']
kwargs['genre'] = kwargs['genres']
del kwargs['genres']
result = util.callm("%s/%s" % ('playlist', 'basic'), kwargs)
return [Song(**util.fix(s_dict)) for s_dict in result['response']['songs']]
|
def basic(type='artist-radio', artist_id=None, artist=None, song_id=None, song=None, track_id=None, dmca=False,
results=15, buckets=None, limit=False,genres=None,)
|
Get a basic playlist
Args:
Kwargs:
type (str): a string representing the playlist type ('artist-radio' or 'song-radio')
artist_id (str): the artist_id to seed the playlist
artist (str): the name of an artist to seed the playlist
song_id (str): a song_id to seed the playlist
song (str): the name of a song to seed the playlist
track_id (str): the name of a track to seed the playlist
dmca (bool): make the playlist dmca-compliant
results (int): desired length of the playlist
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): Whether results should be restricted to any idspaces given in the buckets parameter
| 4.445827
| 5.335792
| 0.833208
|
def _track_from_response(result, timeout):
    """Build a Track object from a track API response.

    If the track's status is 'pending', polls until the asynchronous
    upload/analyze call finishes or `timeout` seconds elapse.

    Args:
        result: parsed JSON response dict from an upload/profile call.
        timeout: seconds to wait for a pending analysis to complete.

    Returns:
        A Track instance with audio_summary fields flattened onto it.

    Raises:
        Exception: if the analysis times out or ends in an error status.
    """
    response = result['response']
    status = response['track']['status'].lower()
    if status == 'pending':
        # Need to wait for async upload or analyze call to finish.
        result = _wait_for_pending_track(response['track']['id'], timeout)
        response = result['response']
        status = response['track']['status'].lower()
    if not status == 'complete':
        track_id = response['track']['id']
        if status == 'pending':
            raise Exception('%s: the operation didn\'t complete before the timeout (%d secs)' %
                            (track_id, timeout))
        else:
            raise Exception('%s: there was an error analyzing the track, status: %s' % (track_id, status))
    else:
        # track_properties starts as the response dictionary.
        track_properties = response['track']
        # 'id' and 'md5' are separated out to construct the Track object.
        identifier = track_properties.pop('id')
        md5 = track_properties.pop('md5', None)  # tracks from song api calls will not have an md5
        # Flatten the audio_summary dict (tempo, energy, loudness, ...)
        # into top-level attributes of the Track.
        track_properties.update(track_properties.pop('audio_summary'))
        return Track(identifier, md5, track_properties)
| 4.900472
| 4.783424
| 1.024469
|
def _upload(param_dict, timeout, data):
    """Call the track/upload endpoint with either local audio bytes
    (`data`) or a url already placed in `param_dict`.

    Returns:
        A Track object built from the upload response.
    """
    param_dict['format'] = 'json'
    param_dict['wait'] = 'true'
    param_dict['bucket'] = 'audio_summary'
    # Uploads of raw audio can be slow; use a generous socket timeout.
    result = util.callm('track/upload', param_dict, POST=True, socket_timeout=300, data=data)
    return _track_from_response(result, timeout)
| 7.396581
| 6.113484
| 1.20988
|
def track_from_file(file_object, filetype, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
    """Create a track object from a file-like object.

    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.

    Args:
        file_object: a file-like Python object opened in binary mode.
        filetype: the file type (mp3, ogg, wav, m4a, mp4, au).
        timeout: seconds to wait for an asynchronous analysis.
        force_upload: skip the MD5 shortcut path, force an upload+analysis.

    Example:
        >>> f = open("Miaow-01-Tempered-song.mp3")
        >>> t = track.track_from_file(f, 'mp3')
        >>> t
        < Track >
    """
    if not force_upload:
        try:
            # Check if this file has already been uploaded: an md5 profile
            # lookup is much faster than re-uploading the audio.
            md5 = hashlib.md5(file_object.read()).hexdigest()
            return track_from_md5(md5)
        except util.EchoNestAPIError:
            # Unknown md5 — fall through to do a fresh upload.
            pass
    # The md5 pass (if taken) consumed the stream; rewind before uploading.
    file_object.seek(0)
    return _track_from_data(file_object.read(), filetype, timeout)
| 4.152387
| 4.836733
| 0.858511
|
def track_from_filename(filename, filetype=None, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
    """Create a track object from a filename.

    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.

    Args:
        filename: path to the input audio file.
        filetype: the file type; defaults to the filename's extension.
        timeout: seconds to wait for an asynchronous analysis.
        force_upload: skip the MD5 shortcut path, force an upload+analysis.

    Example:
        >>> t = track.track_from_filename("Miaow-01-Tempered-song.mp3")
        >>> t
        < Track >
    """
    filetype = filetype or filename.split('.')[-1]
    file_object = open(filename, 'rb')
    # try/finally so the handle is closed even when the upload/profile
    # call raises (the original leaked the handle on error).
    try:
        return track_from_file(file_object, filetype, timeout, force_upload)
    finally:
        file_object.close()
| 2.121013
| 3.527555
| 0.60127
|
def track_from_url(url, timeout=DEFAULT_ASYNC_TIMEOUT):
    """Create a track object from a public http URL.

    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.

    Args:
        url: a URL on a public machine accessible by HTTP.

    Example:
        >>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
        >>> t
        < Track >
    """
    param_dict = dict(url=url)
    # No local data payload: the service fetches the audio itself.
    return _upload(param_dict, timeout, data=None)
| 10.989295
| 29.199274
| 0.376355
|
def track_from_id(identifier, timeout=DEFAULT_ASYNC_TIMEOUT):
    """Create a track object from an Echo Nest track ID.

    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.

    Args:
        identifier: the ID of a previously analyzed track.

    Example:
        >>> t = track.track_from_id("TRWFIDS128F92CC4CA")
        >>> t
        <track - Let The Spirit>
    """
    param_dict = dict(id=identifier)
    return _profile(param_dict, timeout)
| 12.811315
| 40.868458
| 0.313477
|
def track_from_md5(md5, timeout=DEFAULT_ASYNC_TIMEOUT):
    """Create a track object from an md5 hash.

    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.

    Args:
        md5: 32-character md5 checksum of a track already analyzed.

    Example:
        >>> t = track.track_from_md5('b8abf85746ab3416adabca63141d8c2d')
        >>> t
        <track - Neverwas Restored (from Neverwas Soundtrack)>
    """
    param_dict = dict(md5=md5)
    return _profile(param_dict, timeout)
| 9.633066
| 29.483824
| 0.326724
|
def get_analysis(self):
    """Retrieve the detailed analysis for the track, if available.

    Fetches the JSON document at self.analysis_url and merges its keys
    (and the nested 'track' document) into this object's __dict__.
    The analysis_url expires shortly after creation, so an HTTP failure
    triggers one profile refresh to obtain a fresh URL.

    Raises:
        Exception: if no analysis_url exists or the analysis cannot be
            fetched/parsed.
    """
    if not self.analysis_url:
        raise Exception("Failed to create track analysis.")
    try:
        # Try the existing analysis_url first. This expires shortly
        # after creation.
        try:
            json_string = urllib2.urlopen(self.analysis_url).read()
        except urllib2.HTTPError:
            # Probably the analysis_url link has expired. Refresh it
            # via a profile call and retry once.
            param_dict = dict(id=self.id)
            new_track = _profile(param_dict, DEFAULT_ASYNC_TIMEOUT)
            if new_track and new_track.analysis_url:
                self.analysis_url = new_track.analysis_url
                json_string = urllib2.urlopen(self.analysis_url).read()
            else:
                raise Exception("Failed to create track analysis.")
        analysis = json.loads(json_string)
        analysis_track = analysis.pop('track', {})
        self.__dict__.update(analysis)
        self.__dict__.update(analysis_track)
    except Exception as err:  # pylint: disable=W0702
        # No detailed analysis found; include the cause for debuggability.
        raise Exception("Failed to create track analysis: %s" % err)
| 3.75821
| 3.371435
| 1.114721
|
def get_tempo(artist, title):
    """Return the tempo (BPM) for the best-matching song, or None.

    Args:
        artist: artist name to search for.
        title: song title to search for.
    """
    results = song.search(artist=artist, title=title, results=1, buckets=['audio_summary'])
    if results:
        return results[0].audio_summary['tempo']
    return None
| 4.671271
| 4.65591
| 1.003299
|
def list(sandbox_name, results=15, start=0):
    """Return a list of all assets available in this sandbox.

    Args:
        sandbox_name (str): the name of the sandbox.

    Kwargs:
        results (int): number of results to return.
        start (int): starting offset for the result set.

    Returns:
        A ResultList of asset dictionaries (with .start and .total).

    Example:
        >>> sandbox.list('bluenote')
        [{}, {}]
    """
    result = util.callm("%s/%s" % ('sandbox', 'list'),
                        {'sandbox': sandbox_name, 'results': results, 'start': start})
    assets = result['response']['assets']
    start = result['response']['start']
    total = result['response']['total']
    return ResultList(assets, start, total)
| 4.949996
| 6.364689
| 0.777728
|
def search(name=None, description=None, style=None, mood=None, start=0,
           results=15, buckets=None, limit=False,
           fuzzy_match=False, sort=None, max_familiarity=None, min_familiarity=None,
           max_hotttnesss=None, min_hotttnesss=None, test_new_things=None, rank_type=None,
           artist_start_year_after=None, artist_start_year_before=None,
           artist_end_year_after=None, artist_end_year_before=None):
    """Search for artists by name, description, or constraint.

    Kwargs:
        name (str): the name of an artist.
        description (str): a string describing the artist.
        style (str): style/genre of the artist.
        mood (str): mood of the artist.
        start (int): starting offset for the result set.
        results (int): number of results to return.
        buckets (list): which buckets to retrieve.
        limit (bool): limit results to the id spaces given in buckets.
        fuzzy_match (bool): search for similar-sounding matches (name only).
        max_familiarity / min_familiarity (float): familiarity bounds.
        max_hotttnesss / min_hotttnesss (float): hotttnesss bounds.
        artist_start_year_before / _after (int): recording-start year bounds.
        artist_end_year_before / _after (int): recording-end year bounds.
        rank_type (str): ranking for description searches,
            'relevance' or 'familiarity'.

    Returns:
        A list of Artist objects.

    Example:
        >>> results = artist.search(name='t-pain')
    """
    limit = str(limit).lower()
    fuzzy_match = str(fuzzy_match).lower()
    # NOTE: locals() snapshots every parameter as an API kwarg — do not
    # introduce new local variables above this line.
    kwargs = locals()
    kwargs['bucket'] = buckets or []
    del kwargs['buckets']
    result = util.callm("%s/%s" % ('artist', 'search'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
| 5.123016
| 7.419118
| 0.690516
|
def top_hottt(start=0, results=15, buckets=None, limit=False):
    """Get the top hotttest artists, according to The Echo Nest.

    Kwargs:
        results (int): number of results to return.
        start (int): starting offset for the result set.
        buckets (list): which buckets to retrieve.
        limit (bool): limit results to the id spaces given in buckets.

    Returns:
        A list of hottt Artist objects.

    Example:
        >>> hot_stuff = artist.top_hottt()
    """
    buckets = buckets or []
    kwargs = {}
    # Only send parameters that were actually requested.
    if start:
        kwargs['start'] = start
    if results:
        kwargs['results'] = results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    result = util.callm("%s/%s" % ('artist', 'top_hottt'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
| 4.061638
| 4.492228
| 0.904148
|
def top_terms(results=15):
    """Get a list of the top overall terms.

    Kwargs:
        results (int): number of results to return.

    Returns:
        A list of term document dicts.

    Example:
        >>> terms = artist.top_terms(results=5)
        >>> terms
        [{u'frequency': 1.0, u'name': u'rock'}, ...]
    """
    kwargs = {}
    if results:
        kwargs['results'] = results
    result = util.callm("%s/%s" % ('artist', 'top_terms'), kwargs)
    return result['response']['terms']
| 10.789482
| 10.956903
| 0.98472
|
def similar(names=None, ids=None, start=0, results=15, buckets=None, limit=False,
            max_familiarity=None, min_familiarity=None,
            max_hotttnesss=None, min_hotttnesss=None, seed_catalog=None,
            artist_start_year_before=None, artist_start_year_after=None,
            artist_end_year_before=None, artist_end_year_after=None):
    """Return artists similar to the given seed artists.

    Kwargs:
        ids (str/list): an artist id or list of ids.
        names (str/list): an artist name or list of names.
        results (int): number of results to return.
        buckets (list): which buckets to retrieve.
        limit (bool): limit results to the id spaces given in buckets.
        start (int): starting offset for the result set.
        max_familiarity / min_familiarity (float): familiarity bounds.
        max_hotttnesss / min_hotttnesss (float): hotttnesss bounds.
        seed_catalog (str): catalog similar artists are restricted to.
        artist_start_year_before / _after (int): recording-start year bounds.
        artist_end_year_before / _after (int): recording-end year bounds.

    Returns:
        A list of similar Artist objects.

    Example:
        >>> sims = artist.similar(ids=[art.id for art in some_dudes], results=5)
    """
    buckets = buckets or []
    kwargs = {}
    # Seeds may be a single value or a list; normalize to lists.
    if ids:
        if not isinstance(ids, list):
            ids = [ids]
        kwargs['id'] = ids
    if names:
        if not isinstance(names, list):
            names = [names]
        kwargs['name'] = names
    # Familiarity/hotttnesss bounds: 0.0 is a legal bound, so test
    # against None rather than truthiness.
    if max_familiarity is not None:
        kwargs['max_familiarity'] = max_familiarity
    if min_familiarity is not None:
        kwargs['min_familiarity'] = min_familiarity
    if max_hotttnesss is not None:
        kwargs['max_hotttnesss'] = max_hotttnesss
    if min_hotttnesss is not None:
        kwargs['min_hotttnesss'] = min_hotttnesss
    if seed_catalog is not None:
        kwargs['seed_catalog'] = seed_catalog
    if start:
        kwargs['start'] = start
    if results:
        kwargs['results'] = results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    if artist_start_year_before:
        kwargs['artist_start_year_before'] = artist_start_year_before
    if artist_start_year_after:
        kwargs['artist_start_year_after'] = artist_start_year_after
    if artist_end_year_before:
        kwargs['artist_end_year_before'] = artist_end_year_before
    if artist_end_year_after:
        kwargs['artist_end_year_after'] = artist_end_year_after
    result = util.callm("%s/%s" % ('artist', 'similar'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
| 1.477974
| 1.591486
| 0.928675
|
def extract(text='', start=0, results=15, buckets=None, limit=False,
            max_familiarity=None, min_familiarity=None,
            max_hotttnesss=None, min_hotttnesss=None):
    """Extract artist names from a block of text.

    Kwargs:
        text (str): the text to extract artists from.
        start (int): starting offset for the result set.
        results (int): number of results to return.
        buckets (list): which buckets to retrieve.
        limit (bool): limit results to the id spaces given in buckets.
        max_familiarity / min_familiarity (float): familiarity bounds.
        max_hotttnesss / min_hotttnesss (float): hotttnesss bounds.

    Returns:
        A list of Artist objects.

    Example:
        >>> results = artist.extract(text='i saw beyonce at burger king')
    """
    buckets = buckets or []
    kwargs = {'text': text}
    # 0.0 is a legal bound, so test against None rather than truthiness.
    if max_familiarity is not None:
        kwargs['max_familiarity'] = max_familiarity
    if min_familiarity is not None:
        kwargs['min_familiarity'] = min_familiarity
    if max_hotttnesss is not None:
        kwargs['max_hotttnesss'] = max_hotttnesss
    if min_hotttnesss is not None:
        kwargs['min_hotttnesss'] = min_hotttnesss
    if start:
        kwargs['start'] = start
    if results:
        kwargs['results'] = results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    result = util.callm("%s/%s" % ('artist', 'extract'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
| 2.081919
| 2.193518
| 0.949123
|
def suggest(q='', results=15, buckets=None, limit=False,
            max_familiarity=None, min_familiarity=None,
            max_hotttnesss=None, min_hotttnesss=None):
    """Suggest artists based upon partial names.

    Kwargs:
        q (str): the text to suggest artists from.
        results (int): number of results to return.
        buckets (list): which buckets to retrieve.
        limit (bool): limit results to the id spaces given in buckets.
        max_familiarity / min_familiarity (float): familiarity bounds.
        max_hotttnesss / min_hotttnesss (float): hotttnesss bounds.

    Returns:
        A list of Artist objects.

    Example:
        >>> results = artist.suggest(q='rad')
    """
    buckets = buckets or []
    kwargs = {'q': q}
    # 0.0 is a legal bound, so test against None rather than truthiness.
    if max_familiarity is not None:
        kwargs['max_familiarity'] = max_familiarity
    if min_familiarity is not None:
        kwargs['min_familiarity'] = min_familiarity
    if max_hotttnesss is not None:
        kwargs['max_hotttnesss'] = max_hotttnesss
    if min_hotttnesss is not None:
        kwargs['min_hotttnesss'] = min_hotttnesss
    if results:
        kwargs['results'] = results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    result = util.callm("%s/%s" % ('artist', 'suggest'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
| 2.034309
| 2.256215
| 0.901647
|
def get_biographies(self, results=15, start=0, license=None, cache=True):
    """Get a list of artist biographies.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.
        results (int): number of results to return.
        start (int): starting offset for the result set.
        license (str): the desired license type.

    Returns:
        A ResultList of biography document dicts (with .start and .total).

    Example:
        >>> bio = artist.Artist('britney spears').get_biographies(results=1)[0]
    """
    # Only the default query (no paging, no license filter) is cached.
    is_default_query = results == 15 and start == 0 and license is None
    if cache and is_default_query and ('biographies' in self.cache):
        return self.cache['biographies']
    response = self.get_attribute('biographies', results=results, start=start, license=license)
    if is_default_query:
        self.cache['biographies'] = ResultList(response['biographies'], 0, response['total'])
    return ResultList(response['biographies'], start, response['total'])
| 2.43325
| 2.797423
| 0.869818
|
def get_familiarity(self, cache=True):
    """Get our numerical estimate of how familiar an artist currently
    is to the world.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.

    Returns:
        A float representing familiarity.

    Example:
        >>> artist.Artist('frank sinatra').get_familiarity()
        0.65142555825947457
    """
    if not (cache and ('familiarity' in self.cache)):
        response = self.get_attribute('familiarity')
        self.cache['familiarity'] = response['artist']['familiarity']
    return self.cache['familiarity']
| 3.284745
| 4.447724
| 0.738523
|
def get_foreign_id(self, idspace='musicbrainz', cache=True):
    """Get the foreign id for this artist for a specific id space.

    Kwargs:
        idspace (str): the idspace to fetch a foreign id for.
        cache (bool): use the cached value if available. Defaults to True.

    Returns:
        A foreign ID string, or None if the artist has none in that idspace.

    Example:
        >>> artist.Artist('fabulous').get_foreign_id('7digital')
        u'7digital:artist:186042'
    """
    # Map the idspace ONCE and use the mapped value for both the cache
    # check and the final lookup (the original compared the raw idspace
    # when checking the cache but the mapped one when reading it, so a
    # cache hit could be missed or stale for aliased idspaces).
    mapped_idspace = util.map_idspace(idspace)
    matches = [d for d in self.cache.get('foreign_ids', [])
               if d.get('catalog') == mapped_idspace]
    if not (cache and matches):
        response = self.get_attribute('profile', bucket=['id:' + idspace])
        foreign_ids = response['artist'].get("foreign_ids", [])
        self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids
        matches = [d for d in self.cache['foreign_ids']
                   if d.get('catalog') == mapped_idspace]
    return matches[0].get('foreign_id') if matches else None
| 4.102266
| 4.244253
| 0.966546
|
def get_twitter_id(self, cache=True):
    """Get the twitter id for this artist if it exists.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.

    Returns:
        A twitter ID string, or None if the artist has none.

    Example:
        >>> artist.Artist('big boi').get_twitter_id()
        u'BigBoi'
    """
    if not (cache and ('twitter' in self.cache)):
        response = self.get_attribute('twitter')
        # .get(): the response may legitimately lack a twitter handle.
        self.cache['twitter'] = response['artist'].get('twitter')
    return self.cache['twitter']
| 5.222138
| 6.200173
| 0.842257
|
def get_hotttnesss(self, cache=True):
    """Get our numerical description of how hottt an artist currently is.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.

    Returns:
        float: the hotttnesss value.

    Example:
        >>> artist.Artist('hannah montana').get_hotttnesss()
        0.59906022155998995
    """
    if not (cache and ('hotttnesss' in self.cache)):
        response = self.get_attribute('hotttnesss')
        self.cache['hotttnesss'] = response['artist']['hotttnesss']
    return self.cache['hotttnesss']
| 2.890868
| 3.412634
| 0.847107
|
def get_images(self, results=15, start=0, license=None, cache=True):
    """Get a list of artist images.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.
        results (int): number of results to return.
        start (int): starting offset for the result set.
        license (str): the desired license type.

    Returns:
        A ResultList of image document dicts (with .start and .total).

    Example:
        >>> images = artist.Artist('Captain Beefheart').get_images(results=1)
        >>> images.total
        49
    """
    # Only the default query (no paging, no license filter) is cached.
    is_default_query = results == 15 and start == 0 and license is None
    if cache and is_default_query and ('images' in self.cache):
        return self.cache['images']
    response = self.get_attribute('images', results=results, start=start, license=license)
    # 'total' may be absent or None in the response; treat as zero.
    total = response.get('total') or 0
    if is_default_query:
        self.cache['images'] = ResultList(response['images'], 0, total)
    return ResultList(response['images'], start, total)
| 2.939605
| 3.239257
| 0.907494
|
def get_news(self, results=15, start=0, cache=True, high_relevance=False):
    """Get a list of news articles found on the web related to an artist.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.
        results (int): number of results to return.
        start (int): starting offset for the result set.
        high_relevance (bool): restrict to high-relevance articles.

    Returns:
        A ResultList of news document dicts (with .start and .total).

    Example:
        >>> news = artist.Artist('Henry Threadgill').get_news()
        >>> news.total
        41
    """
    if cache and ('news' in self.cache) and results == 15 and start == 0 and not high_relevance:
        return self.cache['news']
    response = self.get_attribute('news', results=results, start=start,
                                  high_relevance='true' if high_relevance else 'false')
    # Only cache the default, non-high-relevance query. (The original
    # rebound high_relevance to the strings 'true'/'false' — both truthy —
    # before this check, so high-relevance results were cached and later
    # served to default requests.)
    if results == 15 and start == 0 and not high_relevance:
        self.cache['news'] = ResultList(response['news'], 0, response['total'])
    return ResultList(response['news'], start, response['total'])
| 2.650637
| 2.997481
| 0.884288
|
def get_similar(self, results=15, start=0, buckets=None, limit=False, cache=True,
                max_familiarity=None, min_familiarity=None,
                max_hotttnesss=None, min_hotttnesss=None, min_results=None, reverse=False,
                artist_start_year_before=None, artist_start_year_after=None,
                artist_end_year_before=None, artist_end_year_after=None):
    """Return similar artists to this one.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.
        results (int): number of results to return.
        start (int): starting offset for the result set.
        max_familiarity / min_familiarity (float): familiarity bounds.
        max_hotttnesss / min_hotttnesss (float): hotttnesss bounds.
        reverse (bool): return dissimilar artists instead. Defaults to False.

    Returns:
        A list of similar Artist objects.

    Example:
        >>> artist.Artist('Sleater Kinney').get_similar()[:5]
    """
    buckets = buckets or []
    kwargs = {}
    if max_familiarity:
        kwargs['max_familiarity'] = max_familiarity
    if min_familiarity:
        kwargs['min_familiarity'] = min_familiarity
    if max_hotttnesss:
        kwargs['max_hotttnesss'] = max_hotttnesss
    if min_hotttnesss:
        kwargs['min_hotttnesss'] = min_hotttnesss
    if min_results:
        kwargs['min_results'] = min_results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    if reverse:
        kwargs['reverse'] = 'true'
    if artist_start_year_before:
        kwargs['artist_start_year_before'] = artist_start_year_before
    if artist_start_year_after:
        kwargs['artist_start_year_after'] = artist_start_year_after
    if artist_end_year_before:
        kwargs['artist_end_year_before'] = artist_end_year_before
    if artist_end_year_after:
        kwargs['artist_end_year_after'] = artist_end_year_after
    # Only the fully-default query (no filters, no paging) is cached.
    if cache and ('similar' in self.cache) and results == 15 and start == 0 and (not kwargs):
        return [Artist(**util.fix(a)) for a in self.cache['similar']]
    response = self.get_attribute('similar', results=results, start=start, **kwargs)
    if results == 15 and start == 0 and (not kwargs):
        self.cache['similar'] = response['artists']
    return [Artist(**util.fix(a)) for a in response['artists']]
| 1.564819
| 1.647794
| 0.949645
|
def get_songs(self, cache=True, results=15, start=0):
    """Get the songs associated with an artist.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.
        results (int): number of results to return.
        start (int): starting offset for the result set.

    Returns:
        A list of Song objects (ResultList with .start and .total).

    Example:
        >>> artist.Artist('Strokes').get_songs(results=5)
    """
    if cache and ('songs' in self.cache) and results == 15 and start == 0:
        # Lazily upgrade cached raw dicts to Song objects. Guard against
        # an empty cached list: the original indexed [0] unconditionally
        # and raised IndexError for artists with no songs.
        if self.cache['songs'] and not isinstance(self.cache['songs'][0], Song):
            song_objects = [Song(id=s['id'],
                                 title=s['title'],
                                 artist_name=self.name,
                                 artist_id=self.id)
                            for s in self.cache['songs']]
            self.cache['songs'] = song_objects
        return self.cache['songs']
    response = self.get_attribute('songs', results=results, start=start)
    for s in response['songs']:
        s.update({'artist_id': self.id, 'artist_name': self.name})
    songs = [Song(**util.fix(s)) for s in response['songs']]
    if results == 15 and start == 0:
        self.cache['songs'] = ResultList(songs, 0, response['total'])
    return ResultList(songs, start, response['total'])
| 2.70971
| 2.736457
| 0.990226
|
def get_terms(self, sort='weight', cache=True):
    """Get the terms associated with an artist.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.
        sort (str): desired sorting type ('weight' or 'frequency').

    Returns:
        A list of term document dicts.

    Example:
        >>> artist.Artist('tom petty').get_terms()
        [{u'frequency': 1.0, u'name': u'heartland rock', u'weight': 1.0}, ...]
    """
    # Only the default sort order is cached.
    if cache and ('terms' in self.cache) and sort == 'weight':
        return self.cache['terms']
    response = self.get_attribute('terms', sort=sort)
    if sort == 'weight':
        self.cache['terms'] = response['terms']
    return response['terms']
| 2.79619
| 3.280112
| 0.852468
|
def get_urls(self, cache=True):
    """Get the urls for an artist.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.

    Returns:
        A url document dict (amazon_url, itunes_url, lastfm_url, ...).

    Example:
        >>> artist.Artist('the unicorns').get_urls()
        {u'amazon_url': u'http://www.amazon.com/...', ...}
    """
    if not (cache and ('urls' in self.cache)):
        response = self.get_attribute('urls')
        self.cache['urls'] = response['urls']
    return self.cache['urls']
| 3.557356
| 5.000115
| 0.711455
|
def get_video(self, results=15, start=0, cache=True):
    """Get a list of video documents found on the web related to an artist.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.
        results (int): number of results to return.
        start (int): starting offset for the result set.

    Returns:
        A ResultList of video document dicts (with .start and .total).

    Example:
        >>> artist.Artist('the vapors').get_video(results=1, start=2)
    """
    # Only the default query (no paging) is cached.
    is_default_query = results == 15 and start == 0
    if cache and is_default_query and ('video' in self.cache):
        return self.cache['video']
    response = self.get_attribute('video', results=results, start=start)
    if is_default_query:
        self.cache['video'] = ResultList(response['video'], 0, response['total'])
    return ResultList(response['video'], start, response['total'])
| 3.135012
| 3.569282
| 0.878331
|
def get_years_active(self, cache=True):
    """Get a list of years-active dictionaries for an artist.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.

    Returns:
        A list of years-active dictionaries.

    Example:
        >>> artist.Artist('yelle').get_years_active()
        [{ start: 2005 }]
    """
    if not (cache and ('years_active' in self.cache)):
        response = self.get_attribute('profile', bucket=['years_active'])
        self.cache['years_active'] = response['artist']['years_active']
    return self.cache['years_active']
| 3.745977
| 4.0955
| 0.914657
|
def get_doc_counts(self, cache=True):
    """Get the number of related documents of various types for the artist.

    The types include audio, biographies, blogs, images, news, reviews,
    songs, videos. The documents themselves can be retrieved by calling
    artist.<document type>, e.g. artist.biographies.

    Kwargs:
        cache (bool): use the cached value if available. Defaults to True.

    Returns:
        A dict mapping each document type to an integer count.

    Example:
        >>> artist.Artist("The Kinks").get_doc_counts()
        {u'audio': 194, u'biographies': 9, ...}
    """
    if not (cache and 'doc_counts' in self.cache):
        response = self.get_attribute("profile", bucket='doc_counts')
        self.cache['doc_counts'] = response['artist']['doc_counts']
    return self.cache['doc_counts']
| 5.574224
| 5.659121
| 0.984998
|
def run(self, bundle,
        container_id=None,
        log_path=None,
        pid_file=None,
        log_format="kubernetes"):
    '''run is a wrapper to create, start, attach, and delete a container.

    Equivalent command line example:
        singularity oci run -b ~/bundle mycontainer

    Parameters
    ==========
    bundle: the full path to the bundle folder
    container_id: an optional container_id. If not provided, use same
                  container_id used to generate OciImage instance
    log_path: the path to store the log.
    pid_file: specify the pid file path to use
    log_format: defaults to kubernetes. Can also be "basic" or "json"
    '''
    # Delegate to the shared _run helper with command="run".
    return self._run(bundle,
                     container_id=container_id,
                     log_path=log_path,
                     pid_file=pid_file,
                     command="run",
                     log_format=log_format)
| 6.085235
| 1.439508
| 4.227303
|
def create(self, bundle,
           container_id=None,
           empty_process=False,
           log_path=None,
           pid_file=None,
           sync_socket=None,
           log_format="kubernetes"):
    '''Create (but do not start) a container from a bundle directory.
    The bundle directory should have a config.json, and you must be the
    root user to create a runtime.

    Equivalent command line example:
       singularity oci create [create options...] <container_ID>

    Parameters
    ==========
    bundle: the full path to the bundle folder
    container_id: an optional container_id. If not provided, use same
                  container_id used to generate OciImage instance
    empty_process: run container without executing container process (for
                   example, for a pod container waiting for signals). This
                   is a specific use case for tools like Kubernetes
    log_path: the path to store the log.
    pid_file: specify the pid file path to use
    sync_socket: the path to the unix socket for state synchronization.
    log_format: defaults to kubernetes. Can also be "basic" or "json"
    '''
    # Delegate to the shared run/create implementation with command="create"
    create_options = {"container_id": container_id,
                      "empty_process": empty_process,
                      "log_path": log_path,
                      "pid_file": pid_file,
                      "sync_socket": sync_socket,
                      "command": "create",
                      "log_format": log_format}
    return self._run(bundle, **create_options)
|
use the client to create a container from a bundle directory. The bundle
directory should have a config.json. You must be the root user to
create a runtime.
Equivalent command line example:
singularity oci create [create options...] <container_ID>
Parameters
==========
bundle: the full path to the bundle folder
container_id: an optional container_id. If not provided, use same
container_id used to generate OciImage instance
empty_process: run container without executing container process (for
example, for a pod container waiting for signals). This
is a specific use case for tools like Kubernetes
log_path: the path to store the log.
pid_file: specify the pid file path to use
sync_socket: the path to the unix socket for state synchronization.
log_format: defaults to kubernetes. Can also be "basic" or "json"
| null | null | null |
|
def _run(self, bundle,
         container_id=None,
         empty_process=False,
         log_path=None,
         pid_file=None,
         sync_socket=None,
         command="run",
         log_format="kubernetes"):
    '''Base implementation for run and create; the only difference between
    the two is that run does not have a sync_socket option.

    Equivalent command line example:
       singularity oci create [create options...] <container_ID>

    Parameters
    ==========
    bundle: the full path to the bundle folder
    container_id: an optional container_id. If not provided, use same
                  container_id used to generate OciImage instance
    empty_process: run container without executing container process (for
                   example, for a pod container waiting for signals). This
                   is a specific use case for tools like Kubernetes
    log_path: the path to store the log.
    pid_file: specify the pid file path to use
    sync_socket: the path to the unix socket for state synchronization.
    command: the command (run or create) to use (default is run)
    log_format: defaults to kubernetes. Can also be "basic" or "json"
    '''
    container_id = self.get_container_id(container_id)

    # singularity oci run/create
    cmd = self._init_command(command)

    # The bundle must exist before we compose the command
    if not os.path.exists(bundle):
        bot.exit('Bundle not found at %s' % bundle)

    # Add the bundle
    cmd = cmd + ['--bundle', bundle]

    # Additional logging configuration
    cmd = cmd + ['--log-format', log_format]
    if log_path is not None:
        cmd = cmd + ['--log-path', log_path]
    if pid_file is not None:
        cmd = cmd + ['--pid-file', pid_file]
    if sync_socket is not None:
        cmd = cmd + ['--sync-socket', sync_socket]
    if empty_process:
        cmd.append('--empty-process')

    # Finally, add the container_id as the positional argument
    cmd.append(container_id)

    # Generate the instance (creation requires sudo)
    self._send_command(cmd, sudo=True)

    # Get the status to report to the user!
    # TODO: Singularity seems to create even with error, can we check and
    # delete for the user if this happens?
    return self.state(container_id, sudo=True, sync_socket=sync_socket)
|
_run is the base function for run and create, the only difference
between the two being that run does not have an option for sync_socket.
Equivalent command line example:
singularity oci create [create options...] <container_ID>
Parameters
==========
bundle: the full path to the bundle folder
container_id: an optional container_id. If not provided, use same
container_id used to generate OciImage instance
empty_process: run container without executing container process (for
example, for a pod container waiting for signals). This
is a specific use case for tools like Kubernetes
log_path: the path to store the log.
pid_file: specify the pid file path to use
sync_socket: the path to the unix socket for state synchronization.
command: the command (run or create) to use (default is run)
log_format: defaults to kubernetes. Can also be "basic" or "json"
| 3.855765
| 4.060985
| 0.949466
|
def delete(self, container_id=None, sudo=None):
    '''Delete a container instance by container_id.

    Parameters
    ==========
    container_id: the container_id to delete
    sudo: whether to issue the command with sudo (or not).
          A container started with sudo belongs to the root user; one
          started by a user must be deleted by that user. When None,
          the client default (self.sudo) is used.

    Returns
    =======
    return_code: the return code from the delete command. 0 indicates a
                 successful delete, 255 indicates not.
    '''
    use_sudo = self._get_sudo(sudo)
    target = self.get_container_id(container_id)

    # singularity oci delete <container_id>
    cmd = self._init_command('delete')
    cmd.append(target)

    # Return code goes to the caller (message to screen)
    return self._run_and_return(cmd, sudo=use_sudo)
|
delete an instance based on container_id.
Parameters
==========
container_id: the container_id to delete
sudo: whether to issue the command with sudo (or not)
a container started with sudo will belong to the root user
If started by a user, the user needs to control deleting it
if the user doesn't set to True/False, we use client self.sudo
Returns
=======
return_code: the return code from the delete command. 0 indicates a
successful delete, 255 indicates not.
| 8.901788
| 2.65102
| 3.357873
|
def attach(self, container_id=None, sudo=False):
    '''Attach to a container instance by container_id.

    Parameters
    ==========
    container_id: the container_id to attach to
    sudo: whether to issue the command with sudo (or not).
          A container started with sudo belongs to the root user, and
          the starting user must control it.

    Returns
    =======
    return_code: the return code from the attach command. 0 indicates
                 success, 255 indicates failure.
    '''
    use_sudo = self._get_sudo(sudo)
    target = self.get_container_id(container_id)

    # singularity oci attach <container_id>
    cmd = self._init_command('attach')
    cmd.append(target)

    # Return code goes to the caller (message to screen)
    return self._run_and_return(cmd, use_sudo)
|
attach to a container instance based on container_id
Parameters
==========
container_id: the container_id to attach to
sudo: whether to issue the command with sudo (or not)
a container started with sudo will belong to the root user
If started by a user, the user needs to control deleting it
Returns
=======
return_code: the return code from the attach command. 0 indicates a
successful attach, 255 indicates not.
| 8.850534
| 3.028266
| 2.922641
|
def execute(self, command=None, container_id=None, sudo=False, stream=False):
    '''Execute a command in a container instance by container_id.

    Parameters
    ==========
    command: the command (string or list) to execute in the container
    container_id: the container_id to execute the command in
    sudo: whether to issue the command with sudo (or not).
          A container started with sudo belongs to the root user, and
          the starting user must control it.
    stream: if True, return an iterator over the results of exec;
            default False returns the full output as a string.

    Returns
    =======
    the streamed or captured output of the exec command.
    '''
    use_sudo = self._get_sudo(sudo)
    target = self.get_container_id(container_id)

    # singularity oci exec <container_id> <command...>
    cmd = self._init_command('exec')
    cmd.append(target)

    if command is not None:
        # Normalize a single string into a one-element list
        if not isinstance(command, list):
            command = [command]
        cmd += command

    # Stream or capture, per the caller's preference
    if stream:
        return stream_command(cmd, sudo=use_sudo)
    return self._run_command(cmd, sudo=use_sudo, quiet=True)
|
execute a command to a container instance based on container_id
Parameters
==========
container_id: the container_id to execute the command in
command: the command to execute to the container
sudo: whether to issue the command with sudo (or not)
a container started with sudo will belong to the root user
If started by a user, the user needs to control deleting it
stream: if True, return an iterator to iterate over results of exec.
default is False, will return full output as string.
Returns
=======
return_code: the return code from the delete command. 0 indicates a
successful delete, 255 indicates not.
| 6.691752
| 2.273421
| 2.943472
|
def update(self, container_id=None, from_file=None, sudo=None):
    '''update container cgroup resources for a specific container_id.
    The container must have state "running" or "created."

    Singularity Example:
       singularity oci update [update options...] <container_ID>
       singularity oci update --from-file cgroups-update.json mycontainer

    Parameters
    ==========
    container_id: the container_id to update cgroups for
    from_file: a path to an OCI JSON resource file to update from.
    sudo: whether to issue the command with sudo; falls back to the
          client default when None. (The body previously referenced a
          ``sudo`` name missing from the signature — a NameError.)
    '''
    sudo = self._get_sudo(sudo)
    container_id = self.get_container_id(container_id)

    # singularity oci update
    cmd = self._init_command('update')

    if from_file is not None:
        cmd = cmd + ['--from-file', from_file]

    # Add the container_id
    cmd.append(container_id)

    # Return code goes to user (message to screen)
    return self._run_and_return(cmd, sudo)
|
update container cgroup resources for a specific container_id,
The container must have state "running" or "created."
Singularity Example:
singularity oci update [update options...] <container_ID>
singularity oci update --from-file cgroups-update.json mycontainer
Parameters
==========
container_id: the container_id to update cgroups for
from_file: a path to an OCI JSON resource file to update from.
| 7.845707
| 2.747705
| 2.855368
|
def get_client(quiet=False, debug=False):
    '''Assemble and return a spython Client, deferring imports so that any
    initialization or import errors surface here rather than at module load.

    Parameters
    ==========
    quiet: if True, suppress most output about the client
    debug: turn on debugging mode
    '''
    from spython.utils import get_singularity_version
    from .base import Client

    Client.quiet = quiet
    Client.debug = debug

    # Deferred action imports (can be customized)
    from .apps import apps
    from .build import build
    from .execute import execute
    from .help import help
    from .inspect import inspect
    from .instances import instances, stopall  # global instance commands
    from .run import run
    from .pull import pull

    # Attach each action to the client class
    for attr, action in (("apps", apps),
                         ("build", build),
                         ("execute", execute),
                         ("help", help),
                         ("inspect", inspect),
                         ("instances", instances),
                         ("run", run),
                         ("pull", pull)):
        setattr(Client, attr, action)

    # Command group: images (deprecated)
    from spython.image.cmd import generate_image_commands
    Client.image = generate_image_commands()

    # Command group: instances
    from spython.instance.cmd import generate_instance_commands
    Client.instance = generate_instance_commands()
    Client.instance_stopall = stopall
    Client.instance.version = Client.version

    # Command group: OCI (Singularity version 3 and up)
    if "version 3" in get_singularity_version():
        from spython.oci.cmd import generate_oci_commands
        # first () runs the factory function, second initializes OciImage
        Client.oci = generate_oci_commands()()
        Client.oci.debug = Client.debug
        Client.oci.quiet = Client.quiet
        Client.oci.OciImage.quiet = Client.quiet
        Client.oci.OciImage.debug = Client.debug

    # Initialize and propagate verbosity to the command groups
    client = Client()
    client.image.debug = client.debug
    client.image.quiet = client.quiet
    client.instance.debug = client.debug
    client.instance.quiet = client.quiet
    return client
|
get the client and perform imports not on init, in case there are any
initialization or import errors.
Parameters
==========
quiet: if True, suppress most output about the client
debug: turn on debugging mode
| 5.360266
| 4.202589
| 1.275468
|
def build(self, recipe=None,
          image=None,
          isolated=False,
          sandbox=False,
          writable=False,
          build_folder=None,
          robot_name=False,
          ext='simg',
          sudo=True,
          stream=False):
    '''build a singularity image, optionally for an isolated build
    (requires sudo). If you specify to stream, expect the image name
    and an iterator to be returned.

       image, builder = Client.build(...)

    Parameters
    ==========
    recipe: the path to the recipe file (or source to build from). If not
            defined, we look for "Singularity" file in $PWD
    image: the image to build (if None, an arbitrary name will be used)
    isolated: if True, run build with --isolated flag
    sandbox: if True, create a writable sandbox
    writable: if True, use writable ext3 (sandbox takes preference)
    build_folder: where the container should be built.
    ext: the image extension to use.
    robot_name: boolean, default False. if you don't give your image a
                name (with "image") then a fun robot name will be generated
                instead. Highly recommended :)
    sudo: give sudo to the command (or not) default is True for build
    stream: if True, return (image, iterator) instead of blocking
    '''
    from spython.utils import check_install
    check_install()

    cmd = self._init_command('build')

    # Singularity 3.x produces SIF images
    if 'version 3' in self.version():
        ext = 'sif'

    # No recipe provided, default to use the client's loaded image
    if recipe is None:
        recipe = self._get_uri()

    # If it's still None, try default build recipe
    if recipe is None:
        recipe = 'Singularity'

    if not os.path.exists(recipe):
        # Bug fix: previously interpolated `image` (often None at this
        # point) into the message instead of the missing recipe
        bot.exit('Cannot find %s, exiting.' % recipe)

    if image is None:
        if re.search('(docker|shub)://', recipe) and robot_name is False:
            image = self._get_filename(recipe, ext)
        else:
            image = "%s.%s" % (self.RobotNamer.generate(), ext)

    # Does the user want a custom build folder?
    if build_folder is not None:
        if not os.path.exists(build_folder):
            bot.exit('%s does not exist!' % build_folder)
        image = os.path.join(build_folder, image)

    # The user wants to run an isolated build
    if isolated is True:
        cmd.append('--isolated')

    if sandbox is True:
        cmd.append('--sandbox')
    # Bug fix: this branch re-tested `sandbox`, so --writable was
    # unreachable and the `writable` parameter was dead
    elif writable is True:
        cmd.append('--writable')

    cmd = cmd + [image, recipe]

    if stream is False:
        self._run_command(cmd, sudo=sudo, capture=False)
    else:
        # Return the expected image name, and an iterator the caller
        # must consume to drive the build
        return image, stream_command(cmd, sudo=sudo)

    if os.path.exists(image):
        return image
|
build a singularity image, optionally for an isolated build
(requires sudo). If you specify to stream, expect the image name
and an iterator to be returned.
image, builder = Client.build(...)
Parameters
==========
recipe: the path to the recipe file (or source to build from). If not
defined, we look for "Singularity" file in $PWD
image: the image to build (if None, an arbitrary name will be used)
isolated: if True, run build with --isolated flag
sandbox: if True, create a writable sandbox
writable: if True, use writable ext3 (sandbox takes preference)
build_folder: where the container should be built.
ext: the image extension to use.
robot_name: boolean, default False. if you don't give your image a
name (with "image") then a fun robot name will be generated
instead. Highly recommended :)
sudo: give sudo to the command (or not) default is True for build
| 6.458857
| 2.732858
| 2.363408
|
def start(self, image=None, name=None, args=None, sudo=False, options=None, capture=False):
    '''start an instance. This is done by default when an instance is created.

    Parameters
    ==========
    image: optionally, an image uri (if called as a command from Client)
    name: a name for the instance
    args: arguments to provide to the instance (supported Singularity 3.1+)
    sudo: if the user wants to run the command with sudo
    options: a list of option strings for the start command, e.g.
             ["--bind", "/tmp"]; a single string is split on spaces.
             (default None — previously a mutable [] default, a classic
             Python pitfall since the list is shared across calls)
    capture: capture output, default is False. With True likely to hang.

    USAGE:
    singularity [...] instance.start [...] <container path> <instance name>
    '''
    from spython.utils import run_command, check_install
    check_install()

    # If name provided, overwrite robot (default)
    if name is not None:
        self.name = name

    # If an image isn't provided, we have an initialized instance
    if image is None:
        # Not having this means it was called as a command, without an image
        if not hasattr(self, "_image"):
            bot.exit('Please provide an image, or create an Instance first.')
        image = self._image

    # Derive subgroup command based on singularity version
    subgroup = 'instance.start'
    if 'version 3' in self.version():
        subgroup = ["instance", "start"]

    cmd = self._init_command(subgroup)

    # Normalize options into a list
    if options is None:
        options = []
    elif not isinstance(options, list):
        options = options.split(' ')

    # Assemble the command!
    cmd = cmd + options + [image, self.name]

    # If arguments are provided
    if args is not None:
        if not isinstance(args, list):
            args = [args]
        cmd = cmd + args

    # Save the options and cmd, if the user wants to see them later
    self.options = options
    self.args = args
    self.cmd = cmd

    output = run_command(cmd, sudo=sudo, quiet=True, capture=capture)
    if output['return_code'] == 0:
        self._update_metadata()
    else:
        bot.error('%s : return code %s' % (output['message'],
                                           output['return_code']))
    return self
|
start an instance. This is done by default when an instance is created.
Parameters
==========
image: optionally, an image uri (if called as a command from Client)
name: a name for the instance
sudo: if the user wants to run the command with sudo
capture: capture output, default is False. With True likely to hang.
args: arguments to provide to the instance (supported Singularity 3.1+)
options: a list of tuples, each an option to give to the start command
[("--bind", "/tmp"),...]
USAGE:
singularity [...] instance.start [...] <container path> <instance name>
| 7.304107
| 3.667905
| 1.991356
|
def importcmd(self, image_path, input_source):
    '''Import (stdin) into an image.

    Parameters
    ==========
    image_path: path to the image to import to
    input_source: input source or file

    Returns
    =======
    image_path: the path of the image imported to
    '''
    from spython.utils import check_install
    check_install()

    # singularity image.import <image> <source>
    cmd = ['singularity', 'image.import', image_path, input_source]
    result = self.run_command(cmd, sudo=False)
    self.println(result)
    return image_path
|
import will import (stdin) to the image
Parameters
==========
image_path: path to image to import to.
input_source: input source or file
import_type: if not specified, imports whatever function is given
| 11.528694
| 4.345716
| 2.652887
|
def inspect(self, image=None, json=True, app=None, quiet=True):
    '''Show labels, definition file, runscript, and tests for an image.

    Parameters
    ==========
    image: path of image to inspect
    json: print json instead of raw text (default True)
    quiet: don't print result to the screen (default True)
    app: if defined, return help in context of an app
    '''
    check_install()

    # Fall back to the client's loaded image when none is given
    if image is None:
        image = self._get_uri()

    cmd = self._init_command('inspect')
    if app is not None:
        cmd += ['--app', app]

    # Singularity 3.x renamed the helpfile flag from 'hf' to 'H'
    if "version 3" in self.version():
        flags = ['e', 'd', 'l', 'r', 'H', 't']
    else:
        flags = ['e', 'd', 'l', 'r', 'hf', 't']
    for flag in flags:
        cmd.append('-%s' % flag)

    if json is True:
        cmd.append('--json')
    cmd.append(image)

    result = run_command(cmd, quiet=False)
    if result['return_code'] == 0:
        result = jsonp.loads(result['message'][0])
        # parse_labels updates the labels entry of result in place
        parse_labels(result)
        if not quiet:
            print(jsonp.dumps(result, indent=4))
    return result
|
inspect will show labels, defile, runscript, and tests for an image
Parameters
==========
image: path of image to inspect
json: print json instead of raw text (default True)
quiet: Don't print result to the screen (default True)
app: if defined, return help in context of an app
| 7.010474
| 4.356123
| 1.609338
|
def parse_labels(result):
    '''Normalize the labels entry of an inspect result in place, decoding a
    JSON-encoded labels string when possible, and return the updated object.

    Parameters
    ==========
    result: the json object (dict) from inspect to fix up

    Returns
    =======
    result: the same object, with its labels entry normalized to a dict
    '''
    # Default to empty so labels is always bound (previously a NameError
    # when neither key was present)
    labels = {}
    if "data" in result:
        labels = result['data']['attributes'].get('labels') or {}
    elif 'attributes' in result:
        labels = result['attributes'].get('labels') or {}

    # Labels may be a JSON-encoded string; decode when possible.
    # Narrowed from a bare except: loads(dict) raises TypeError, and a
    # non-JSON string raises ValueError (JSONDecodeError).
    try:
        labels = jsonp.loads(labels)
    except (TypeError, ValueError):
        pass

    if "data" in result:
        result['data']['attributes']['labels'] = labels
    elif 'attributes' in result:
        # Previously an unconditional else, a KeyError for inputs
        # without an 'attributes' entry
        result['attributes']['labels'] = labels
    return result
|
fix up the labels, meaning parse to json if needed, and return
original updated object
Parameters
==========
result: the json object to parse from inspect
| 6.293994
| 2.865786
| 2.196254
|
def ipython(image):
    '''Drop the user into an IPython shell with a client pre-loaded
    with the given image.
    '''
    # get_client announces itself (backend/database) unless quiet
    from spython.main import get_client
    from spython.main.parse import DockerRecipe, SingularityRecipe

    client = get_client()
    client.load(image)

    # Expose the recipe parsers on the client for convenience
    client.DockerRecipe = DockerRecipe
    client.SingularityRecipe = SingularityRecipe

    from IPython import embed
    embed()
|
give the user an ipython shell
| 15.395097
| 14.058626
| 1.095064
|
def parse_env(envlist):
    '''Parse one or more ENV lines (prefix like ENV already removed) into a
    list of KEY=VALUE strings. For example:

       ENV PYTHONBUFFER 1 --> [PYTHONBUFFER=1]

    Quoted values are kept intact (including their quotes). A key with no
    value yields KEY= (previously an IndexError).

    Parameters
    ==========
    envlist: a string or list of strings, each an ENV line body

    ::Notes
    Docker: https://docs.docker.com/engine/reference/builder/#env
    '''
    if not isinstance(envlist, list):
        envlist = [envlist]

    exports = []
    for env in envlist:
        # Split on spaces, but keep quoted substrings as single tokens
        pieces = re.split("( |\\\".*?\\\"|'.*?')", env)
        pieces = [p for p in pieces if p.strip()]

        while pieces:
            token = pieces.pop(0)
            if token.endswith('='):
                # Case 1: ['A='] --> A=   /  ['A=', '"1 2"'] --> A="1 2"
                value = pieces.pop(0) if pieces else ""
                exports.append("%s%s" % (token, value))
            elif '=' in token:
                # Case 2: ['A=B'] --> A=B
                exports.append(token)
            elif token.endswith('\\'):
                # Case 3: a line continuation marker, skip it
                continue
            else:
                # Case 4: ['A', 'B'] --> A=B; tolerate a missing value
                value = pieces.pop(0) if pieces else ""
                exports.append("%s=%s" % (token, value))
    return exports
|
parse_env will parse a single line (with prefix like ENV removed) to
a list of commands in the format KEY=VALUE For example:
ENV PYTHONBUFFER 1 --> [PYTHONBUFFER=1]
::Notes
Docker: https://docs.docker.com/engine/reference/builder/#env
| 5.162757
| 2.847352
| 1.813178
|
def _setup(self, lines):
    '''Convert %setup lines, which add content from the host to the rootfs,
    into file ADDs (for cp/mv lines) or install commands.

    Bug fix: the original called re.sub(pattern, '', '$SINGULARITY_ROOTFS'),
    substituting on the literal string instead of the line — every line
    became empty and the whole section was silently dropped.

    Parameters
    ==========
    lines: the %setup section lines from the recipe
    '''
    bot.warning('SETUP is error prone, please check output.')

    for line in lines:
        # Strip any $SINGULARITY_ROOTFS / ${SINGULARITY_ROOTFS} reference
        line = re.sub('[$]{?SINGULARITY_ROOTFS}?', '', line)

        # If we have nothing left, don't continue
        if line in ['', None]:
            continue

        # If the line starts with copy or move, assume a file from host
        if re.search('(^cp|^mv)', line):
            line = re.sub('(^cp|^mv)', '', line)
            self.files.append(line)
        # If it's a general command, add to install routine
        else:
            self.install.append(line)
|
setup required adding content from the host to the rootfs,
so we try to capture it with ADD.
| 12.855629
| 7.167491
| 1.793602
|
def _from(self, line):
    '''Record the FROM container image name from a FROM line.

    Parameters
    ==========
    line: the line from the recipe file to parse for FROM
    '''
    # Keep the raw line as the from header for later assembly
    self.fromHeader = line
    bot.debug('FROM %s' % line)
|
get the FROM container image name from a FROM line!
Parameters
==========
line: the line from the recipe file to parse for FROM
| 21.110697
| 5.590123
| 3.776428
|
'''env will parse a list of environment lines and simply remove any
blank lines, or those with export. Dockerfiles don't usually
have exports.
Parameters
==========
lines: A list of environment pair lines.
'''
environ = [x for x in lines if not x.startswith('export')]
self.environ += environ
|
def _env(self, lines)
|
env will parse a list of environment lines and simply remove any
blank lines, or those with export. Dockerfiles don't usually
have exports.
Parameters
==========
lines: A list of environment pair lines.
| 22.041903
| 3.550969
| 6.207293
|
''' comments is a wrapper for comment, intended to be given a list
of comments.
Parameters
==========
lines: the list of lines to parse
'''
for line in lines:
comment = self._comment(line)
self.comments.append(comment)
|
def _comments(self, lines)
|
comments is a wrapper for comment, intended to be given a list
of comments.
Parameters
==========
lines: the list of lines to parse
| 10.42958
| 3.352263
| 3.111205
|
'''_parse the runscript to be the Docker CMD. If we have one line,
call it directly. If not, write the entrypoint into a script.
Parameters
==========
lines: the line from the recipe file to parse for CMD
'''
lines = [x for x in lines if x not in ['', None]]
# Default runscript is first index
runscript = lines[0]
# Multiple line runscript needs multiple lines written to script
if len(lines) > 1:
bot.warning('More than one line detected for runscript!')
bot.warning('These will be echoed into a single script to call.')
self._write_script('/entrypoint.sh', lines)
runscript = "/bin/bash /entrypoint.sh"
self.cmd = runscript
|
def _run(self, lines)
|
_parse the runscript to be the Docker CMD. If we have one line,
call it directly. If not, write the entrypoint into a script.
Parameters
==========
lines: the line from the recipe file to parse for CMD
| 11.298234
| 4.463459
| 2.531273
|
'''mapping will take the section name from a Singularity recipe
and return a map function to add it to the appropriate place.
Any lines that don't cleanly map are assumed to be comments.
Parameters
==========
section: the name of the Singularity recipe section
Returns
=======
function: to map a line to its command group (e.g., install)
'''
# Ensure section is lowercase
section = section.lower()
mapping = {"environment": self._env,
"comments": self._comments,
"runscript": self._run,
"labels": self._labels,
"setup": self._setup,
"files": self._files,
"from": self._from,
"post": self._post,
"test": self._test,
"help": self._comments}
if section in mapping:
return mapping[section]
return self._comments
|
def _get_mapping(self, section)
|
mapping will take the section name from a Singularity recipe
and return a map function to add it to the appropriate place.
Any lines that don't cleanly map are assumed to be comments.
Parameters
==========
section: the name of the Singularity recipe section
Returns
=======
function: to map a line to its command group (e.g., install)
| 7.315881
| 2.440545
| 2.997642
|
def _parse(self):
    '''Walk every section of the loaded recipe and dispatch its lines to
    the matching parser, filling the instance data structures (lists and
    dictionaries that can be assembled again on demand).

    Singularity: we parse files/labels first, then install;
    cd first in a line is parsed as WORKDIR.
    '''
    # Load the recipe lazily if that hasn't happened yet
    if not hasattr(self, 'config'):
        self.load_recipe()

    for name, content in self.config.items():
        bot.debug(name)
        # Dispatch to the section's parser, when one applies
        handler = self._get_mapping(name)
        if handler:
            handler(content)
|
parse is the base function for parsing the recipe, and extracting
elements into the correct data structures. Everything is parsed into
lists or dictionaries that can be assembled again on demand.
Singularity: we parse files/labels first, then install.
cd first in a line is parsed as WORKDIR
| 17.562183
| 3.44835
| 5.092924
|
def _load_from(self, line):
    '''Load the From section of the recipe into config['from'] for the
    Dockerfile.

    Parameters
    ==========
    line: the recipe line holding the From: image
    '''
    # Drop any trailing comment, then the case-insensitive 'From:' prefix
    remainder = line.split('#', 1)[0]
    image = re.sub('(F|f)(R|r)(O|o)(M|m):', '', remainder).strip()
    bot.info('FROM %s' % image)
    self.config['from'] = image
|
load the From section of the recipe for the Dockerfile.
| 9.04002
| 5.804985
| 1.557286
|
'''read in a section to a list, and stop when we hit the next section
'''
members = []
while True:
if len(lines) == 0:
break
next_line = lines[0]
# The end of a section
if next_line.strip().startswith("%"):
break
# Still in current section!
else:
new_member = lines.pop(0).strip()
if new_member not in ['', None]:
members.append(new_member)
# Add the list to the config
if len(members) > 0:
if section is not None:
self.config[section] += members
|
def _load_section(self, lines, section)
|
read in a section to a list, and stop when we hit the next section
| 5.100369
| 4.035373
| 1.263915
|
def load_recipe(self):
    '''Load the Singularity recipe into self.config, keyed by section,
    so the sections can be parsed into a Dockerfile or printed back in
    their original form.

    Returns
    =======
    config: a parsed Singularity recipe
    '''
    # Comments between sections are collected and added at the top
    remaining = self.lines.copy()
    header_comments = []

    # Start with a fresh config!
    self.config = dict()
    section = None

    while remaining:
        raw = remaining.pop(0)
        # Clean up white trailing/leading space
        stripped = raw.strip()

        # Bootstrap line (case-insensitive match)
        if re.search('(b|B)(o|O){2}(t|T)(s|S)(t|T)(r|R)(a|A)(p|P)', raw):
            self._load_bootstrap(stripped)

        # From line (case-insensitive match)
        if re.search('(f|F)(r|R)(O|o)(m|M)', stripped):
            self._load_from(stripped)

        # Comment between sections
        if stripped.startswith("#"):
            header_comments.append(stripped)
            continue

        # Section header, e.g. %post
        elif stripped.startswith('%'):
            section = self._add_section(stripped)
            bot.debug("Adding section title %s" % section)

        # Inside a section: push the line back and consume the section
        elif section is not None:
            remaining.insert(0, raw)
            self._load_section(lines=remaining, section=section)

    self.config['comments'] = header_comments
|
load will return a loaded in singularity recipe. The idea
is that these sections can then be parsed into a Dockerfile,
or printed back into their original form.
Returns
=======
config: a parsed recipe Singularity recipe
| 7.266346
| 4.454597
| 1.631202
|
def _add_section(self, line, section=None):
    '''Parse a line for a section header (e.g. "%post" or "%appinstall app")
    and register the section in self.config.

    Bug fix: only multi-word headers (len(parts) > 1) were handled, so a
    bare header like "%post" was silently dropped and the previous
    section was returned instead.

    Parameters
    ==========
    line: the line to parse
    section: the current (or previous) section, returned unchanged when
             the line holds no section header

    Resulting data structure is:
        config['post'] (in lowercase)
    '''
    # Remove any comments
    line = line.split('#', 1)[0].strip()

    # Is there a section name?
    parts = line.split(' ')
    if parts and parts[0].startswith('%'):
        section = re.sub(r'[%]|(\s+)', '', parts[0]).lower()
        if section not in self.config:
            self.config[section] = []
            bot.debug("Adding section %s" % section)
    return section
|
parse a line for a section, and return the parsed section (if not
None)
Parameters
==========
line: the line to parse
section: the current (or previous) section
Resulting data structure is:
config['post'] (in lowercase)
| 7.10908
| 3.284501
| 2.164432
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.