_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q12700
|
RemoteFile.close
|
train
|
def close(self):
    """Finish use of the remote-file handle.

    Write mode: push the sentinel through the queue so the worker thread
    finishes its HTTP request, then join it (bounded by the timeout).
    Read mode: drop the reference to the response so its resources can
    be reclaimed.
    """
    if 'w' in self._mode:
        self._queue.put(self._sentinel)
        self._thread.join(timeout=self._timeout)
        if self._thread.is_alive():
            raise RemoteFileException("Closing file timed out.")
        # NOTE(review): get_nowait assumes the worker already queued a
        # response -- confirm the thread always does so before exiting.
        response = self._response_queue.get_nowait()
        try:
            response.raise_for_status()
        except Exception as err:
            raise RestApiError(cause=err)
    else:
        self._read_response = None
|
python
|
{
"resource": ""
}
|
q12701
|
ChartView.get
|
train
|
def get(self, request, *args, **kwargs):
    """Handle GET: build the chart.js configuration and return it as JSON.

    This view intentionally responds only to GET requests.
    """
    config = self.chart_instance.chartjs_configuration(*args, **kwargs)
    return self.render_json_response(config)
|
python
|
{
"resource": ""
}
|
q12702
|
BblfshClient.parse
|
train
|
def parse(self, filename: str, language: Optional[str]=None,
          contents: Optional[str]=None, mode: Optional[ModeType]=None,
          timeout: Optional[int]=None) -> ResultContext:
    """
    Query the Babelfish server for the UAST of a single file.

    :param filename: Path to the file; may be arbitrary when *contents* \
                     is given.
    :param language: Programming language of the file; see \
                     https://doc.bblf.sh/languages.html for the supported \
                     list. None triggers autodetection.
    :param contents: File contents; read from *filename* when None.
    :param mode: UAST transformation mode.
    :param timeout: Request timeout in seconds.
    :return: UAST object wrapped in a ResultContext.
    """
    # TODO: handle syntax errors
    source = self._get_contents(contents, filename)
    req = ParseRequest(filename=os.path.basename(filename),
                       content=source, mode=mode,
                       language=self._scramble_language(language))
    reply = self._stub_v2.Parse(req, timeout=timeout)
    return ResultContext(reply)
|
python
|
{
"resource": ""
}
|
q12703
|
BblfshClient.close
|
train
|
def close(self) -> None:
    """
    Shut down the gRPC channel and drop every stub reference.

    The client must not be used after this call.
    """
    self._channel.close()
    self._channel = None
    self._stub_v1 = None
    self._stub_v2 = None
|
python
|
{
"resource": ""
}
|
q12704
|
filter_string
|
train
|
def filter_string(n: Node, query: str) -> str:
    """
    Run *query* over *n* and return the single scalar result as ``str``.
    """
    value = _scalariter2item(n, query, str)
    return value
|
python
|
{
"resource": ""
}
|
q12705
|
filter_bool
|
train
|
def filter_bool(n: Node, query: str) -> bool:
    """
    Run *query* over *n* and return the single scalar result as ``bool``.
    """
    value = _scalariter2item(n, query, bool)
    return value
|
python
|
{
"resource": ""
}
|
q12706
|
CompatBblfshClient.parse
|
train
|
def parse(self, filename: str, language: str = None, contents: str = None,
          timeout: float = None) -> CompatParseResponse:
    """
    Parse *filename* (or *contents*) in ANNOTATED mode and return the
    v1-compatible response object.
    """
    annotated = Mode.Value('ANNOTATED')
    return self._parse(filename, language, contents, timeout, annotated)
|
python
|
{
"resource": ""
}
|
q12707
|
CompatNodeIterator.filter
|
train
|
def filter(self, query: str) -> Optional['CompatNodeIterator']:
    """
    Apply *query* on top of the node this iterator last produced.

    Returns None when no node has been visited yet.
    NOTE(review): the unqualified ``filter`` call resolves at module
    scope -- confirm it targets the module-level helper, not the builtin.
    """
    last = self._last_node
    if not last:
        return None
    return filter(last, query)
|
python
|
{
"resource": ""
}
|
q12708
|
CompatNodeIterator.properties
|
train
|
def properties(self) -> dict:
    """
    Return the property names of the current node; empty for non-dict nodes.

    NOTE(review): for dict nodes this returns ``dict.keys()`` (a view),
    not a dict, despite the annotation -- kept as-is for compatibility.
    """
    node = self._last_node
    return node.keys() if isinstance(node, dict) else {}
|
python
|
{
"resource": ""
}
|
q12709
|
create_catalog_by_name
|
train
|
def create_catalog_by_name(name, T="general"):
    """
    Create a catalog with the given name and type, unconditionally.

    No check is made for an existing catalog with the same name.
    """
    payload = {"name": name, "type": T}
    raw = util.callm("catalog/create", {}, POST=True, data=payload)
    info = raw['response']
    kept = dict((key, info[key]) for key in ('name', 'type'))
    return Catalog(info['id'], **kept)
|
python
|
{
"resource": ""
}
|
q12710
|
list_catalogs
|
train
|
def list_catalogs(results=30, start=0):
    """
    Return every catalog created under this API key.
    Args:
    Kwargs:
        results (int): An integer number of results to return
        start (int): An integer starting value for the result set
    Returns:
        A list of catalog objects
    Example:
        >>> catalog.list_catalogs()
        [<catalog - test_artist_catalog>, <catalog - test_song_catalog>, <catalog - my_songs>]
        >>>
    """
    raw = util.callm("%s/%s" % ('catalog', 'list'),
                     {'results': results, 'start': start})
    body = raw['response']
    catalogs = [Catalog(**util.fix(entry)) for entry in body['catalogs']]
    return ResultList(catalogs, body['start'], body['total'])
|
python
|
{
"resource": ""
}
|
q12711
|
Catalog.update
|
train
|
def update(self, items):
    """
    Submit a batch of update actions against this catalog.
    Args:
        items (list): A list of dicts describing update data and action codes (see api docs)
    Kwargs:
    Returns:
        A ticket id string that can be polled via ``status()``.
    Example:
        >>> c = catalog.Catalog('my_songs', type='song')
        >>> ticket = c.update([{'action': 'update', 'item': {...}}])
        >>> c.status(ticket)
        {u'ticket_status': u'complete', u'update_info': []}
        >>>
    """
    # dthandler serializes values (e.g. datetimes) json can't handle natively.
    serialized = json.dumps(items, default=dthandler)
    response = self.post_attribute("update", data={'data': serialized})
    return response['ticket']
|
python
|
{
"resource": ""
}
|
q12712
|
Catalog.read_items
|
train
|
def read_items(self, buckets=None, results=15, start=0,item_ids=None):
    """
    Returns data from the catalog; also expanded for the requested buckets.
    This method is provided for backwards-compatibility; new code should
    call get_item_dicts() instead.
    Args:
    Kwargs:
        buckets (list): A list of strings specifying which buckets to retrieve
        results (int): An integer number of results to return
        start (int): An integer starting value for the result set
        item_ids (list): Specific item ids to read; overrides paging
    Returns:
        A list of objects in the catalog; list contains additional attributes 'start' and 'total'
    Example:
        >>> c
        <catalog - my_songs>
        >>> c.read_items(results=1)
        [<song - Harmonice Mundi II>]
        >>>
    """
    # NOTE: "depreciated" [sic] is a typo but is runtime output, kept as-is.
    warnings.warn("catalog.read_items() is depreciated. Please use catalog.get_item_dicts() instead.")
    kwargs = {}
    kwargs['bucket'] = buckets or []
    kwargs['item_id'] = item_ids or []
    response = self.get_attribute("read", results=results, start=start, **kwargs)
    rval = ResultList([])
    # When explicit item_ids were requested the API does not page, so
    # start/total are synthesized from the returned list.
    if item_ids:
        rval.start=0;
        rval.total=len(response['catalog']['items'])
    else:
        rval.start = response['catalog']['start']
        rval.total = response['catalog']['total']
    # Each raw dict is rewritten in place (keys popped/renamed) before being
    # turned into a Song/Artist object -- the pop order matters.
    for item in response['catalog']['items']:
        new_item = None
        # song items
        if 'song_id' in item:
            item['id'] = item.pop('song_id')
            item['title'] = item.pop('song_name')
            # 'request' must be captured before util.fix mangles the dict.
            request = item['request']
            new_item = song.Song(**util.fix(item))
            new_item.request = request
        # artist item
        elif 'artist_id' in item:
            item['id'] = item.pop('artist_id')
            item['name'] = item.pop('artist_name')
            request = item['request']
            new_item = artist.Artist(**util.fix(item))
            new_item.request = request
        # unresolved item
        else:
            new_item = item
        rval.append(new_item)
    return rval
|
python
|
{
"resource": ""
}
|
q12713
|
Catalog.get_item_dicts
|
train
|
def get_item_dicts(self, buckets=None, results=15, start=0,item_ids=None):
    """
    Returns raw dicts from the catalog, expanded for the requested buckets.
    Args:
    Kwargs:
        buckets (list): A list of strings specifying which buckets to retrieve
        results (int): An integer number of results to return
        start (int): An integer starting value for the result set
        item_ids (list): Specific item ids to read; overrides paging
    Returns:
        A list of dicts representing objects in the catalog; the list has
        additional attributes 'start' and 'total'.
    """
    params = {'bucket': buckets or [], 'item_id': item_ids or []}
    response = self.get_attribute("read", results=results, start=start, **params)
    catalog = response['catalog']
    rval = ResultList(catalog['items'])
    if item_ids:
        # Explicit-id reads are not paged; synthesize the counters.
        rval.start = 0
        rval.total = len(catalog['items'])
    else:
        rval.start = catalog['start']
        rval.total = catalog['total']
    return rval
|
python
|
{
"resource": ""
}
|
q12714
|
_show_one
|
train
|
def _show_one(audio_file):
    "given an audio file, print out the artist, title and some audio attributes of the song"
    # NOTE: this module uses Python 2 print statements throughout.
    print 'File: ', audio_file
    pytrack = track.track_from_filename(audio_file)
    # artist/title may be absent for unrecognized tracks.
    print 'Artist: ', pytrack.artist if hasattr(pytrack, 'artist') else 'Unknown'
    print 'Title: ', pytrack.title if hasattr(pytrack, 'title') else 'Unknown'
    print 'Track ID: ', pytrack.id
    print 'Tempo: ', pytrack.tempo
    print 'Energy: %1.3f %s' % (pytrack.energy, _bar(pytrack.energy))
    if not pytrack.valence:
        # Track hasn't had latest attributes computed. Force an upload.
        pytrack = track.track_from_filename(audio_file, force_upload=True)
    print 'Valence: %1.3f %s' % (pytrack.valence, _bar(pytrack.valence))
    print 'Acousticness: %1.3f %s' % (pytrack.acousticness, _bar(pytrack.acousticness))
    print
|
python
|
{
"resource": ""
}
|
q12715
|
show_attrs
|
train
|
def show_attrs(directory):
    "print the audio attributes for every audio file found in the directory"
    for entry in os.listdir(directory):
        if not _is_audio(entry):
            continue
        _show_one(os.path.join(directory, entry))
|
python
|
{
"resource": ""
}
|
q12716
|
search
|
train
|
def search(title=None, artist=None, artist_id=None, combined=None, description=None, style=None, mood=None,
           results=None, start=None, max_tempo=None, min_tempo=None,
           max_duration=None, min_duration=None, max_loudness=None, min_loudness=None,
           artist_max_familiarity=None, artist_min_familiarity=None, artist_max_hotttnesss=None,
           artist_min_hotttnesss=None, song_max_hotttnesss=None, song_min_hotttnesss=None, mode=None,
           min_energy=None, max_energy=None, min_danceability=None, max_danceability=None,
           key=None, max_latitude=None, min_latitude=None, max_longitude=None, min_longitude=None,
           sort=None, buckets=None, limit=False, test_new_things=None, rank_type=None,
           artist_start_year_after=None, artist_start_year_before=None, artist_end_year_after=None,
           artist_end_year_before=None,song_type=None,min_song_currency=None,max_song_currency=None,
           min_song_discovery=None, max_song_discovery=None, max_acousticness=None, min_acousticness=None,
           max_liveness=None, min_liveness=None, max_speechiness=None, min_speechiness=None,
           max_valence=None, min_valence=None):
    """Search for songs by name, description, or constraint.
    Args:
    Kwargs:
        title (str): the name of a song
        artist (str): the name of an artist
        artist_id (str): the artist_id
        combined (str): the artist name and song title
        description (str): A string describing the artist and song
        style (str): A string describing the style/genre of the artist and song
        mood (str): A string describing the mood of the artist and song
        results (int): An integer number of results to return
        max_acousticness (float): The max acousticness of song results
        min_acousticness (float): The min acousticness of song results
        max_tempo (float): The max tempo of song results
        min_tempo (float): The min tempo of song results
        max_duration (float): The max duration of song results
        min_duration (float): The min duration of song results
        max_liveness (float): The max liveness of song results
        min_liveness (float): The min liveness of song results
        max_loudness (float): The max loudness of song results
        min_loudness (float): The min loudness of song results
        max_speechiness (float): The max speechiness of song results
        min_speechiness (float): The min speechiness of song results
        max_valence (float): The max valence of song results
        min_valence (float): The min valence of song results
        artist_max_familiarity (float): A float specifying the max familiarity of artists to search for
        artist_min_familiarity (float): A float specifying the min familiarity of artists to search for
        artist_max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
        artist_min_hotttnesss (float): A float specifying the min hotttnesss of artists to search for
        song_max_hotttnesss (float): A float specifying the max hotttnesss of songs to search for
        song_min_hotttnesss (float): A float specifying the min hotttnesss of songs to search for
        max_energy (float): The max energy of song results
        min_energy (float): The min energy of song results
        max_danceability (float): The max danceability of song results
        min_danceability (float): The min danceability of song results
        mode (int): 0 or 1 (minor or major)
        key (int): 0-11 (c, c-sharp, d, e-flat, e, f, f-sharp, g, a-flat, a, b-flat, b)
        max_latitude (float): A float specifying the max latitude of artists to search for
        min_latitude (float): A float specifying the min latitude of artists to search for
        max_longitude (float): A float specifying the max longitude of artists to search for
        min_longitude (float): A float specifying the min longitude of artists to search for
        sort (str): A string indicating an attribute and order for sorting the results
        buckets (list): A list of strings specifying which buckets to retrieve
        limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
        rank_type (str): A string denoting the desired ranking for description searches, either 'relevance' or 'familiarity'
        artist_start_year_before (int): Returned songs's artists will have started recording music before this year.
        artist_start_year_after (int): Returned songs's artists will have started recording music after this year.
        artist_end_year_before (int): Returned songs's artists will have stopped recording music before this year.
        artist_end_year_after (int): Returned songs's artists will have stopped recording music after this year.
        song_type (string): A string or list of strings specifiying the type of song to search for.
    Returns:
        A list of Song objects
    Example:
        >>> results = song.search(artist='shakira', title='she wolf', buckets=['id:7digital', 'tracks'], limit=True, results=1)
        >>> results
        [<song - She Wolf>]
        >>> results[0].get_tracks('7digital')[0]
        {u'catalog': u'7digital',
         u'foreign_id': u'7digital:track:7854109',
         u'id': u'TRTOBSE12903CACEC4',
         u'preview_url': u'http://previews.7digital.com/clips/34/7854109.clip.mp3',
         u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/081/0000708184_200.jpg'}
        >>>
    """
    # The API wants the string 'true'/'false'; rebinding the parameter is
    # deliberate so the converted value is what locals() captures below.
    limit = str(limit).lower()
    # locals() snapshots EVERY parameter as the API kwargs. Do not introduce
    # new local variables before this line or they will leak into the call.
    kwargs = locals()
    # The API parameter is 'bucket' (singular), not 'buckets'.
    kwargs['bucket'] = buckets
    del kwargs['buckets']
    result = util.callm("%s/%s" % ('song', 'search'), kwargs)
    return [Song(**util.fix(s_dict)) for s_dict in result['response']['songs']]
|
python
|
{
"resource": ""
}
|
q12717
|
_track_from_response
|
train
|
def _track_from_response(result, timeout):
"""
This is the function that actually creates the track object
"""
response = result['response']
status = response['track']['status'].lower()
if status == 'pending':
# Need to wait for async upload or analyze call to finish.
result = _wait_for_pending_track(response['track']['id'], timeout)
response = result['response']
status = response['track']['status'].lower()
if not status == 'complete':
track_id = response['track']['id']
if status == 'pending':
raise Exception('%s: the operation didn\'t complete before the timeout (%d secs)' %
(track_id, timeout))
else:
raise Exception('%s: there was an error analyzing the track, status: %s' % (track_id, status))
else:
# track_properties starts as the response dictionary.
track_properties = response['track']
# 'id' and 'md5' are separated to construct the Track object.
identifier = track_properties.pop('id')
md5 = track_properties.pop('md5', None) # tracks from song api calls will not have an md5
# Pop off the audio_summary dict and make those keys attributes
# of the Track. This includes things like tempo, energy, and loudness.
track_properties.update(track_properties.pop('audio_summary'))
return Track(identifier, md5, track_properties)
|
python
|
{
"resource": ""
}
|
q12718
|
_upload
|
train
|
def _upload(param_dict, timeout, data):
    """
    Upload either a local audio payload or a URL for analysis.

    Mutates param_dict with the fixed upload parameters, then returns the
    resulting Track object.
    """
    param_dict.update({'format': 'json', 'wait': 'true',
                       'bucket': 'audio_summary'})
    result = util.callm('track/upload', param_dict, POST = True,
                        socket_timeout = 300, data = data)
    return _track_from_response(result, timeout)
|
python
|
{
"resource": ""
}
|
q12719
|
track_from_file
|
train
|
def track_from_file(file_object, filetype, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
    """
    Create a track object from a file-like object.
    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.
    Args:
        file_object: a file-like Python object
        filetype: the file type. Supported types include mp3, ogg, wav, m4a, mp4, au
        force_upload: skip the MD5 shortcut path, force an upload+analysis
    Example:
        >>> f = open("Miaow-01-Tempered-song.mp3")
        >>> t = track.track_from_file(f, 'mp3')
        >>> t
        < Track >
        >>>
    """
    if not force_upload:
        # An md5 profile lookup is far cheaper than an upload; try it first.
        try:
            digest = hashlib.md5(file_object.read()).hexdigest()
            return track_from_md5(digest)
        except util.EchoNestAPIError:
            # Unknown md5 -- fall through to a fresh upload.
            pass
    file_object.seek(0)
    return _track_from_data(file_object.read(), filetype, timeout)
|
python
|
{
"resource": ""
}
|
q12720
|
track_from_filename
|
train
|
def track_from_filename(filename, filetype = None, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
    """
    Create a track object from a filename.
    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.
    Args:
        filename: A string containing the path to the input file.
        filetype: A string indicating the filetype; Defaults to None (type determined by file extension).
        force_upload: skip the MD5 shortcut path, force an upload+analysis
    Example:
        >>> t = track.track_from_filename("Miaow-01-Tempered-song.mp3")
        >>> t
        < Track >
        >>>
    """
    filetype = filetype or filename.split('.')[-1]
    # 'with' guarantees the handle closes even if track_from_file raises
    # (the previous open/close pair leaked the descriptor on error).
    with open(filename, 'rb') as file_object:
        return track_from_file(file_object, filetype, timeout, force_upload)
|
python
|
{
"resource": ""
}
|
q12721
|
track_from_url
|
train
|
def track_from_url(url, timeout=DEFAULT_ASYNC_TIMEOUT):
    """
    Create a track object from a public http URL.
    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.
    Args:
        url: A string giving the URL to read from. This must be on a public machine accessible by HTTP.
    Example:
        >>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
        >>> t
        < Track >
        >>>
    """
    return _upload({'url': url}, timeout, data=None)
|
python
|
{
"resource": ""
}
|
q12722
|
track_from_id
|
train
|
def track_from_id(identifier, timeout=DEFAULT_ASYNC_TIMEOUT):
    """
    Create a track object from an Echo Nest track ID.
    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.
    Args:
        identifier: A string containing the ID of a previously analyzed track.
    Example:
        >>> t = track.track_from_id("TRWFIDS128F92CC4CA")
        >>> t
        <track - Let The Spirit>
        >>>
    """
    return _profile({'id': identifier}, timeout)
|
python
|
{
"resource": ""
}
|
q12723
|
track_from_md5
|
train
|
def track_from_md5(md5, timeout=DEFAULT_ASYNC_TIMEOUT):
    """
    Create a track object from an md5 hash.
    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.
    Args:
        md5: A string 32 characters long giving the md5 checksum of a track already analyzed.
    Example:
        >>> t = track.track_from_md5('b8abf85746ab3416adabca63141d8c2d')
        >>> t
        <track - Neverwas Restored (from Neverwas Soundtrack)>
        >>>
    """
    return _profile({'md5': md5}, timeout)
|
python
|
{
"resource": ""
}
|
q12724
|
Track.get_analysis
|
train
|
def get_analysis(self):
    """ Retrieve the detailed analysis for the track, if available.

    Fetches the JSON document behind self.analysis_url, refreshing the URL
    via a profile call when it has expired, and merges the parsed fields
    into this object's __dict__.
    Raises Exception if unable to create the detailed analysis. """
    if self.analysis_url:
        try:
            # Try the existing analysis_url first. This expires shortly
            # after creation.
            try:
                json_string = urllib2.urlopen(self.analysis_url).read()
            except urllib2.HTTPError:
                # Probably the analysis_url link has expired. Refresh it.
                param_dict = dict(id = self.id)
                new_track = _profile(param_dict, DEFAULT_ASYNC_TIMEOUT)
                if new_track and new_track.analysis_url:
                    self.analysis_url = new_track.analysis_url
                    json_string = urllib2.urlopen(self.analysis_url).read()
                else:
                    raise Exception("Failed to create track analysis.")
            analysis = json.loads(json_string)
            # Promote both the top-level analysis fields and the nested
            # 'track' section to attributes of this object.
            analysis_track = analysis.pop('track', {})
            self.__dict__.update(analysis)
            self.__dict__.update(analysis_track)
        except Exception: #pylint: disable=W0702
            # No detailed analysis found.
            # NOTE(review): this broad except masks the original error
            # (network, JSON decode, ...) behind a generic message.
            raise Exception("Failed to create track analysis.")
    else:
        raise Exception("Failed to create track analysis.")
|
python
|
{
"resource": ""
}
|
q12725
|
get_tempo
|
train
|
def get_tempo(artist, title):
    "Look up a song's tempo; returns None when no match is found."
    matches = song.search(artist=artist, title=title, results=1, buckets=['audio_summary'])
    if not matches:
        return None
    return matches[0].audio_summary['tempo']
|
python
|
{
"resource": ""
}
|
q12726
|
suggest
|
train
|
def suggest(q='', results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None,
            max_hotttnesss=None, min_hotttnesss=None):
    """Suggest artists based upon partial names.
    Args:
    Kwargs:
        q (str): The text to suggest artists from
        results (int): An integer number of results to return
        buckets (list): A list of strings specifying which buckets to retrieve
        limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
        max_familiarity (float): A float specifying the max familiarity of artists to search for
        min_familiarity (float): A float specifying the min familiarity of artists to search for
        max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
        min_hotttnesss (float): A float specifying the min hotttnesss of artists to search for
    Returns:
        A list of Artist objects
    Example:
        >>> results = artist.suggest(q='rad')
        >>> results
        >>>
    """
    kwargs = {'q': q}
    bounds = {'max_familiarity': max_familiarity,
              'min_familiarity': min_familiarity,
              'max_hotttnesss': max_hotttnesss,
              'min_hotttnesss': min_hotttnesss}
    # Only send the constraints the caller actually set.
    for key, value in bounds.items():
        if value is not None:
            kwargs[key] = value
    if results:
        kwargs['results'] = results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    result = util.callm("%s/%s" % ('artist', 'suggest'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
|
python
|
{
"resource": ""
}
|
q12727
|
Artist.get_twitter_id
|
train
|
def get_twitter_id(self, cache=True):
    """Get the twitter id for this artist if it exists
    Args:
    Kwargs:
        cache (bool): serve from the local cache when possible
    Returns:
        A twitter ID string (or None when the artist has no handle)
    Example:
        >>> a = artist.Artist('big boi')
        >>> a.get_twitter_id()
        u'BigBoi'
        >>>
    """
    if cache and 'twitter' in self.cache:
        return self.cache['twitter']
    response = self.get_attribute('twitter')
    self.cache['twitter'] = response['artist'].get('twitter')
    return self.cache['twitter']
|
python
|
{
"resource": ""
}
|
q12728
|
run
|
train
|
def run(self, bundle,
        container_id=None,
        log_path=None,
        pid_file=None,
        log_format="kubernetes"):
    '''Create, start, attach to, and delete a container in a single step.
    Equivalent command line example:
        singularity oci run -b ~/bundle mycontainer
    Parameters
    ==========
    bundle: the full path to the bundle folder
    container_id: an optional container_id. If not provided, use same
                  container_id used to generate OciImage instance
    log_path: the path to store the log.
    pid_file: specify the pid file path to use
    log_format: defaults to kubernetes. Can also be "basic" or "json"
    '''
    # Thin wrapper over _run with the command fixed to "run".
    return self._run(bundle,
                     container_id=container_id,
                     command="run",
                     log_path=log_path,
                     pid_file=pid_file,
                     log_format=log_format)
|
python
|
{
"resource": ""
}
|
q12729
|
create
|
train
|
def create(self, bundle,
           container_id=None,
           empty_process=False,
           log_path=None,
           pid_file=None,
           sync_socket=None,
           log_format="kubernetes"):
    '''Create (but do not start) a container from a bundle directory.
    The bundle directory should have a config.json; you must be root to
    create a runtime.
    Equivalent command line example:
        singularity oci create [create options...] <container_ID>
    Parameters
    ==========
    bundle: the full path to the bundle folder
    container_id: an optional container_id. If not provided, use same
                  container_id used to generate OciImage instance
    empty_process: run container without executing container process (for
                   example, for a pod container waiting for signals). This
                   is a specific use case for tools like Kubernetes
    log_path: the path to store the log.
    pid_file: specify the pid file path to use
    sync_socket: the path to the unix socket for state synchronization.
    log_format: defaults to kubernetes. Can also be "basic" or "json"
    '''
    # Same machinery as run(), with the command fixed to "create".
    return self._run(bundle,
                     command="create",
                     container_id=container_id,
                     empty_process=empty_process,
                     log_path=log_path,
                     pid_file=pid_file,
                     sync_socket=sync_socket,
                     log_format=log_format)
|
python
|
{
"resource": ""
}
|
q12730
|
_run
|
train
|
def _run(self, bundle,
container_id=None,
empty_process=False,
log_path=None,
pid_file=None,
sync_socket=None,
command="run",
log_format="kubernetes"):
''' _run is the base function for run and create, the only difference
between the two being that run does not have an option for sync_socket.
Equivalent command line example:
singularity oci create [create options...] <container_ID>
Parameters
==========
bundle: the full path to the bundle folder
container_id: an optional container_id. If not provided, use same
container_id used to generate OciImage instance
empty_process: run container without executing container process (for
example, for a pod container waiting for signals). This
is a specific use case for tools like Kubernetes
log_path: the path to store the log.
pid_file: specify the pid file path to use
sync_socket: the path to the unix socket for state synchronization.
command: the command (run or create) to use (default is run)
log_format: defaults to kubernetes. Can also be "basic" or "json"
'''
container_id = self.get_container_id(container_id)
# singularity oci create
cmd = self._init_command(command)
# Check that the bundle exists
if not os.path.exists(bundle):
bot.exit('Bundle not found at %s' % bundle)
# Add the bundle
cmd = cmd + ['--bundle', bundle]
# Additional Logging Files
cmd = cmd + ['--log-format', log_format]
if log_path != None:
cmd = cmd + ['--log-path', log_path]
if pid_file != None:
cmd = cmd + ['--pid-file', pid_file]
if sync_socket != None:
cmd = cmd + ['--sync-socket', sync_socket]
if empty_process:
cmd.append('--empty-process')
# Finally, add the container_id
cmd.append(container_id)
# Generate the instance
result = self._send_command(cmd, sudo=True)
# Get the status to report to the user!
# TODO: Singularity seems to create even with error, can we check and
# delete for the user if this happens?
return self.state(container_id, sudo=True, sync_socket=sync_socket)
|
python
|
{
"resource": ""
}
|
q12731
|
delete
|
train
|
def delete(self, container_id=None, sudo=None):
    '''Delete a container instance by container_id.
    Parameters
    ==========
    container_id: the container_id to delete
    sudo: whether to issue the command with sudo (or not)
          a container started with sudo will belong to the root user;
          if started by a user, the user needs to control deleting it.
          When left unset (None) the client-level self.sudo is used.
    Returns
    =======
    return_code: the return code from the delete command. 0 indicates a
                 successful delete, 255 indicates not.
    '''
    sudo = self._get_sudo(sudo)
    target = self.get_container_id(container_id)
    # singularity oci delete <container_id>
    cmd = self._init_command('delete')
    cmd.append(target)
    return self._run_and_return(cmd, sudo=sudo)
|
python
|
{
"resource": ""
}
|
q12732
|
attach
|
train
|
def attach(self, container_id=None, sudo=False):
    '''Attach to a running container instance by container_id.
    Parameters
    ==========
    container_id: the container_id to attach to
    sudo: whether to issue the command with sudo (or not)
          a container started with sudo will belong to the root user;
          if started by a user, the user needs to control it.
    Returns
    =======
    return_code: the return code from the attach command. 0 indicates
                 success, 255 indicates not.
    '''
    sudo = self._get_sudo(sudo)
    target = self.get_container_id(container_id)
    # singularity oci attach <container_id>
    cmd = self._init_command('attach')
    cmd.append(target)
    return self._run_and_return(cmd, sudo)
|
python
|
{
"resource": ""
}
|
q12733
|
execute
|
train
|
def execute(self, command=None, container_id=None, sudo=False, stream=False):
    '''Execute a command inside a container instance by container_id.
    Parameters
    ==========
    container_id: the container_id to execute in
    command: the command to execute in the container (string or list)
    sudo: whether to issue the command with sudo (or not)
          a container started with sudo will belong to the root user;
          if started by a user, the user needs to control it.
    stream: if True, return an iterator over the exec output;
            default is False, which returns the full output as a string.
    Returns
    =======
    the command output (string), or an iterator when stream=True.
    '''
    sudo = self._get_sudo(sudo)
    target = self.get_container_id(container_id)
    # singularity oci exec <container_id> <command...>
    cmd = self._init_command('exec')
    cmd.append(target)
    if command is not None:
        extra = command if isinstance(command, list) else [command]
        cmd = cmd + extra
    if stream:
        return stream_command(cmd, sudo=sudo)
    return self._run_command(cmd, sudo=sudo, quiet=True)
|
python
|
{
"resource": ""
}
|
q12734
|
update
|
train
|
def update(self, container_id, from_file=None, sudo=None):
    '''Update container cgroup resources for a specific container_id.
    The container must have state "running" or "created."
    Singularity Example:
        singularity oci update [update options...] <container_ID>
        singularity oci update --from-file cgroups-update.json mycontainer
    Parameters
    ==========
    container_id: the container_id to update cgroups for
    from_file: a path to an OCI JSON resource file to update from.
    sudo: whether to issue the command with sudo; when unset (None) the
          client-level setting is used via self._get_sudo, matching the
          sibling delete()/attach() methods.
    '''
    # BUG FIX: the original read an undefined local ``sudo`` here, so every
    # call raised UnboundLocalError. A backward-compatible keyword
    # parameter (default None) supplies it, mirroring delete().
    sudo = self._get_sudo(sudo)
    container_id = self.get_container_id(container_id)
    # singularity oci update
    cmd = self._init_command('update')
    if from_file is not None:
        cmd = cmd + ['--from-file', from_file]
    # Add the container_id as the final argument
    cmd.append(container_id)
    # Return code goes to user (message to screen)
    return self._run_and_return(cmd, sudo)
|
python
|
{
"resource": ""
}
|
q12735
|
get_client
|
train
|
def get_client(quiet=False, debug=False):
    '''
    get the client and perform imports not on init, in case there are any
    initialization or import errors.

    Builds the Client class by attaching command functions as attributes,
    instantiates it, and propagates the quiet/debug flags down to the
    image and instance command groups.
    Parameters
    ==========
    quiet: if True, suppress most output about the client
    debug: turn on debugging mode
    '''
    from spython.utils import get_singularity_version
    from .base import Client
    # Flags are set on the class before commands are attached so the
    # command functions see them.
    Client.quiet = quiet
    Client.debug = debug
    # Do imports here, can be customized
    from .apps import apps
    from .build import build
    from .execute import execute
    from .help import help
    from .inspect import inspect
    from .instances import ( instances, stopall ) # global instance commands
    from .run import run
    from .pull import pull
    # Actions
    Client.apps = apps
    Client.build = build
    Client.execute = execute
    Client.help = help
    Client.inspect = inspect
    Client.instances = instances
    Client.run = run
    Client.pull = pull
    # Command Groups, Images
    from spython.image.cmd import generate_image_commands # deprecated
    Client.image = generate_image_commands()
    # Commands Groups, Instances
    from spython.instance.cmd import generate_instance_commands # instance level commands
    Client.instance = generate_instance_commands()
    Client.instance_stopall = stopall
    Client.instance.version = Client.version
    # Commands Groups, OCI (Singularity version 3 and up)
    # NOTE(review): substring check assumes the version string contains
    # "version 3" -- confirm against get_singularity_version's format.
    if "version 3" in get_singularity_version():
        from spython.oci.cmd import generate_oci_commands
        Client.oci = generate_oci_commands()() # first () runs function, second
        # initializes OciImage class
        Client.oci.debug = Client.debug
        Client.oci.quiet = Client.quiet
        Client.oci.OciImage.quiet = Client.quiet
        Client.oci.OciImage.debug = Client.debug
    # Initialize
    cli = Client()
    # Pass on verbosity
    cli.image.debug = cli.debug
    cli.image.quiet = cli.quiet
    cli.instance.debug = cli.debug
    cli.instance.quiet = cli.quiet
    return cli
|
python
|
{
"resource": ""
}
|
q12736
|
start
|
train
|
def start(self, image=None, name=None, args=None, sudo=False, options=None, capture=False):
    '''start an instance. This is done by default when an instance is created.

       BUG FIX: ``options`` previously defaulted to a mutable list (``[]``),
       which is shared across calls; it now defaults to None and is
       normalized inside the function.

       Parameters
       ==========
       image: optionally, an image uri (if called as a command from Client)
       name: a name for the instance
       sudo: if the user wants to run the command with sudo
       capture: capture output, default is False. With True likely to hang.
       args: arguments to provide to the instance (supported Singularity 3.1+)
       options: a list of tuples, each an option to give to the start command
                [("--bind", "/tmp"),...]

       USAGE: 
       singularity [...] instance.start [...] <container path> <instance name>
    '''
    from spython.utils import ( run_command, 
                                check_install )
    check_install()

    # If name provided, over write robot (default)
    if name is not None:
        self.name = name

    # If an image isn't provided, we have an initialized instance
    if image is None:

        # Not having this means it was called as a command, without an image
        if not hasattr(self, "_image"):
            bot.exit('Please provide an image, or create an Instance first.')
        image = self._image

    # Derive subgroup command based on singularity version
    subgroup = 'instance.start'

    if 'version 3' in self.version():
        subgroup = ["instance", "start"]
 
    cmd = self._init_command(subgroup)

    # Normalize options: None becomes an empty list, a string is split
    if options is None:
        options = []
    elif not isinstance(options, list):
        options = options.split(' ')

    # Assemble the command!
    cmd = cmd + options + [image, self.name]

    # If arguments are provided
    if args is not None:
        if not isinstance(args, list):
            args = [args]
        cmd = cmd + args

    # Save the options and cmd, if the user wants to see them later
    self.options = options
    self.args = args
    self.cmd = cmd

    output = run_command(cmd, sudo=sudo, quiet=True, capture=capture)

    if output['return_code'] == 0:
        self._update_metadata()

    else:
        message = '%s : return code %s' %(output['message'],
                                          output['return_code'])
        bot.error(message)

    return self
|
python
|
{
"resource": ""
}
|
q12737
|
parse_labels
|
train
|
def parse_labels(result):
    '''fix up the labels, meaning parse to json if needed, and return
       original updated object.

       BUG FIX: ``labels`` is now initialized up front; previously, a
       result with neither "data" nor "attributes" raised NameError.
       The bare except around the json parse is narrowed to the errors
       json.loads can raise for non-json input.

       Parameters
       ==========
       result: the json object to parse from inspect
    '''
    labels = {}

    if "data" in result:
        labels = result['data']['attributes'].get('labels') or {}
    elif 'attributes' in result:
        labels = result['attributes'].get('labels') or {}

    # If labels are a json string, parse them to a dictionary
    try:
        labels = jsonp.loads(labels)
    except (TypeError, ValueError):
        # Already a dict, or not valid json - keep as is
        pass

    # Write the (possibly parsed) labels back where they came from
    if "data" in result:
        result['data']['attributes']['labels'] = labels
    elif 'attributes' in result:
        result['attributes']['labels'] = labels

    return result
|
python
|
{
"resource": ""
}
|
q12738
|
ipython
|
train
|
def ipython(image):
    '''give the user an ipython shell with a Singularity client
    pre-loaded with ``image``.

    Parameters
    ==========
    image: the image uri or path to load into the client

    NOTE: embed() exposes the locals defined here (e.g. ``client``) to
    the interactive session, so the local names are user-facing.
    '''
    # The client will announce itself (backend/database) unless it's get
    from spython.main import get_client
    from spython.main.parse import ( DockerRecipe, SingularityRecipe )

    client = get_client()
    client.load(image)

    # Add recipe parsers so the user can parse recipes interactively
    client.DockerRecipe = DockerRecipe
    client.SingularityRecipe = SingularityRecipe

    # IPython is imported lazily: it is only needed for this command
    from IPython import embed
    embed()
|
python
|
{
"resource": ""
}
|
q12739
|
SingularityRecipe._setup
|
train
|
def _setup(self, lines):
    '''setup required adding content from the host to the rootfs,
       so we try to capture with with ADD.

       BUG FIX: the original re.sub call had its arguments in the wrong
       order -- it substituted inside the literal string
       '$SINGULARITY_ROOTFS' instead of ``line``, so the prefix was
       never actually stripped.

       Parameters
       ==========
       lines: the %setup section lines to parse
    '''
    bot.warning('SETUP is error prone, please check output.')

    for line in lines:

        # For all lines, replace rootfs ($SINGULARITY_ROOTFS, braced
        # or not) with actual root /
        line = re.sub('[$]{?SINGULARITY_ROOTFS}?', '', line)

        # If we have nothing left, don't continue
        if line in ['', None]:
            continue

        # If the line starts with copy or move, assume is file from host
        if re.search('(^cp|^mv)', line):
            line = re.sub('(^cp|^mv)', '', line)
            self.files.append(line)

        # If it's a general command, add to install routine
        else:
            self.install.append(line)
|
python
|
{
"resource": ""
}
|
q12740
|
SingularityRecipe._comments
|
train
|
def _comments(self, lines):
    '''wrapper around _comment for a whole list of comment lines; each
    parsed comment is appended to self.comments in order.

    Parameters
    ==========
    lines: the list of lines to parse
    '''
    self.comments.extend(self._comment(entry) for entry in lines)
|
python
|
{
"resource": ""
}
|
q12741
|
SingularityRecipe._run
|
train
|
def _run(self, lines):
    '''translate the %runscript section into the Docker CMD. A single
    line becomes the CMD directly; several lines are echoed into a
    helper script that is invoked instead.

    Parameters
    ==========
    lines: the lines from the recipe file to parse for CMD
    '''
    # Drop empty entries first
    content = [entry for entry in lines if entry not in ['', None]]

    # A single-line runscript maps straight onto CMD
    runscript = content[0]

    # Multiple lines must be written out as a script to call
    if len(content) > 1:
        bot.warning('More than one line detected for runscript!')
        bot.warning('These will be echoed into a single script to call.')
        self._write_script('/entrypoint.sh', content)
        runscript = "/bin/bash /entrypoint.sh"

    self.cmd = runscript
|
python
|
{
"resource": ""
}
|
q12742
|
SingularityRecipe._get_mapping
|
train
|
def _get_mapping(self, section):
    '''return the handler function for a Singularity recipe section
    name. Sections without a dedicated handler are treated as comments.

    Parameters
    ==========
    section: the name of the Singularity recipe section

    Returns
    =======
    function: to map a line to its command group (e.g., install)
    '''
    handlers = {
        "environment": self._env,
        "comments": self._comments,
        "runscript": self._run,
        "labels": self._labels,
        "setup": self._setup,
        "files": self._files,
        "from": self._from,
        "post": self._post,
        "test": self._test,
        "help": self._comments,
    }

    # Lookup is case insensitive; unknown sections fall back to comments
    return handlers.get(section.lower(), self._comments)
|
python
|
{
"resource": ""
}
|
q12743
|
SingularityRecipe._load_from
|
train
|
def _load_from(self, line):
    '''load the From section of the recipe for the Dockerfile.

    Parameters
    ==========
    line: the recipe line holding the From: definition
    '''
    # Anything after a '#' is a comment - drop it
    line = line.partition('#')[0]

    # Remove the (case-insensitive) "From:" keyword and whitespace
    line = re.sub('from:', '', line, flags=re.IGNORECASE).strip()
    bot.info('FROM %s' %line)
    self.config['from'] = line
|
python
|
{
"resource": ""
}
|
q12744
|
SingularityRecipe._load_section
|
train
|
def _load_section(self, lines, section):
    '''consume lines from the front of ``lines`` into a member list,
    stopping at the next section header (a line starting with %).

    Parameters
    ==========
    lines: the remaining recipe lines, consumed in place
    section: the config key to extend with the collected members
    '''
    members = []

    # Keep consuming until we run out of lines or hit the next section
    while lines and not lines[0].strip().startswith("%"):
        member = lines.pop(0).strip()
        if member not in ['', None]:
            members.append(member)

    # Record the members under the section key in the config
    if members and section is not None:
        self.config[section] += members
|
python
|
{
"resource": ""
}
|
q12745
|
SingularityRecipe.load_recipe
|
train
|
def load_recipe(self):
    '''load will return a loaded in singularity recipe. The idea
       is that these sections can then be parsed into a Dockerfile,
       or printed back into their original form.

       Walks self.lines front-to-back, dispatching Bootstrap/From
       lines, collecting free comments, and delegating the body of
       each %section to _load_section (which consumes lines itself).

       Returns
       =======
       config: a parsed recipe Singularity recipe
    '''
    # Comments between sections, add to top of file
    lines = self.lines.copy()
    comments = []
 
    # Start with a fresh config!
    self.config = dict()

    section = None
    name = None  # NOTE(review): unused; kept for byte-compatibility

    while len(lines) > 0:

        # Clean up white trailing/leading space
        line = lines.pop(0)
        stripped = line.strip()

        # Bootstrap Line (case-insensitive match anywhere in the line)
        if re.search('(b|B)(o|O){2}(t|T)(s|S)(t|T)(r|R)(a|A)(p|P)', line):
            self._load_bootstrap(stripped)

        # From Line (NOTE(review): matched on the stripped line, while
        # Bootstrap is matched on the raw line -- confirm intent)
        if re.search('(f|F)(r|R)(O|o)(m|M)', stripped):
            self._load_from(stripped)

        # Comment
        if stripped.startswith("#"):
            comments.append(stripped)
            continue

        # Section header: remember it so following lines attach to it
        elif stripped.startswith('%'):
            section = self._add_section(stripped)
            bot.debug("Adding section title %s" %section)

        # If we have a section, and are adding it: push the line back
        # so _load_section can consume it together with what follows
        elif section is not None:
            lines = [line] + lines
            self._load_section(lines=lines,
                               section=section)
 
    self.config['comments'] = comments
|
python
|
{
"resource": ""
}
|
q12746
|
OciImage.get_container_id
|
train
|
def get_container_id(self, container_id=None):
    '''helper shared between functions that resolve a container_id.
    A container_id given at call time wins; otherwise fall back to the
    one stored on the client; having neither is an error.

    Parameters
    ==========
    container_id: the container_id provided at runtime (optional)
    '''
    # Having no id anywhere is an error
    if container_id is None and self.container_id is None:
        bot.exit('You must provide a container_id.')

    # Runtime argument takes preference over the client attribute
    return container_id or self.container_id
|
python
|
{
"resource": ""
}
|
q12747
|
OciImage._init_command
|
train
|
def _init_command(self, action, flags=None):
    '''wrap the base init_command, prefixing every command with "oci".

    Parameters
    ==========
    action: the main action to perform (e.g., build), string or list
    flags: one or more additional flags (e.g, volumes)
           not implemented yet.
    '''
    from spython.main.base.command import init_command

    # Normalize to a list so the oci group can be prefixed
    actions = action if isinstance(action, list) else [action]
    return init_command(self, ['oci'] + actions, flags)
|
python
|
{
"resource": ""
}
|
q12748
|
export
|
train
|
def export(self, image_path, tmptar=None):
    '''export will export an image to a tar file, sudo must be used.

       BUG FIX: the original formatted "/%s/tmptar.tar" against the
       absolute path returned by mkdtemp(), producing a double leading
       slash; the unused ``output`` variable is also removed.

       Parameters
       ==========
       image_path: full path to image
       tmptar: if defined, use custom temporary path for tar export

       Returns
       =======
       tmptar: the path of the exported tar file
    '''
    from spython.utils import check_install
    check_install()

    if tmptar is None:
        # mkdtemp already returns an absolute path
        tmptar = "%s/tmptar.tar" %(tempfile.mkdtemp())

    cmd = ['singularity', 'image.export', '-f', tmptar, image_path]
    self.run_command(cmd, sudo=False)
    return tmptar
|
python
|
{
"resource": ""
}
|
q12749
|
parse_table
|
train
|
def parse_table(table_string, header, remove_rows=1):
    '''parse a table to json from a string, where a header is expected
    by default. Returns a list of dicts, one per row.

    Parameters
    ==========
    table_string: the string table, ideally with a header
    header: header of expected table, must match dimension (number columns)
    remove_rows: an integer to indicate a number of rows to remove from top
                 the default is 1 assuming we don't want the header
    '''
    # Drop blank lines, then the requested number of leading rows
    rows = [row for row in table_string.split('\n') if row]
    rows = rows[remove_rows:]

    # Pair each whitespace-separated column with its header name.
    # This assumes no white spaces in each entry, which should be the case
    parsed = []
    for row in rows:
        fields = [field for field in row.split(' ') if field]
        parsed.append({header[i]: value for i, value in enumerate(fields)})

    return parsed
|
python
|
{
"resource": ""
}
|
q12750
|
get
|
train
|
def get(self, name, return_json=False, quiet=False):
    '''get is a list for a single instance. It is assumed to be running,
       and we need to look up the PID, etc.

       BUG FIX: ``instances`` is now initialized before the branch on
       return_code; previously the lookup at the bottom raised NameError
       whenever the list command failed.

       Parameters
       ==========
       name: the instance name to look up
       return_json: if True, return the parsed table rows instead of
                    Instance objects
       quiet: if True, don't print the table to the screen

       Returns
       =======
       a single match (Instance or json row) when exactly one is found,
       otherwise the (possibly empty) list of matches
    '''
    from spython.utils import check_install
    check_install()

    # Initialized so the final lookup is safe on every branch
    instances = []

    # Ensure compatible for singularity prior to 3.0, and after 3.0
    subgroup = "instance.list"

    if 'version 3' in self.version():
        subgroup = ["instance", "list"]

    cmd = self._init_command(subgroup)
    cmd.append(name)
    output = run_command(cmd, quiet=True)

    # Success, we have instances
    if output['return_code'] == 0:

        # Only print the table if we are returning json
        if quiet is False:
            print(''.join(output['message']))

        # Prepare json result from table
        header = ['daemon_name', 'pid', 'container_image']
        instances = parse_table(output['message'][0], header)

        # Does the user want instance objects instead?
        if return_json is False:
            listing = []
            for i in instances:
                new_instance = Instance(pid=i['pid'],
                                        name=i['daemon_name'],
                                        image=i['container_image'],
                                        start=False)
                listing.append(new_instance)
            instances = listing

    # Couldn't get UID
    elif output['return_code'] == 255:
        bot.error("Couldn't get UID")

    # Return code of 0
    else:
        bot.info('No instances found.')

    # If we are given a name, return just one
    if name is not None and len(instances) == 1:
        instances = instances[0]

    return instances
|
python
|
{
"resource": ""
}
|
q12751
|
set_verbosity
|
train
|
def set_verbosity(args):
    '''determine the message level in the environment to set based on args.

    Parameters
    ==========
    args: parsed arguments with boolean ``debug`` and ``quiet`` attributes
    '''
    # Debug wins over quiet; neither means the default INFO
    if args.debug is True:
        level = "DEBUG"
    elif args.quiet is True:
        level = "QUIET"
    else:
        level = "INFO"

    # Export for this process and any child processes
    for variable in ('MESSAGELEVEL', 'SINGULARITY_MESSAGELEVEL'):
        os.environ[variable] = level
        os.putenv(variable, level)

    # Import logger to set
    from spython.logger import bot
    bot.debug('Logging level %s' %level)
    import spython
    bot.debug("Singularity Python Version: %s" % spython.__version__)
|
python
|
{
"resource": ""
}
|
q12752
|
run_command
|
train
|
def run_command(self, cmd,
                sudo=False,
                capture=True,
                quiet=None,
                return_result=False):
    '''wrapper for the global run_command, checking first for sudo and
    exiting on error if needed. On success only the message (as a list
    of lines, or a single line) is returned; on failure the full result
    dictionary is returned so the caller can inspect the return code.

    Parameters
    ==========
    cmd: the command to run
    sudo: does the command require sudo?
    capture: capture the output (passed through to run_cmd)
    quiet: if set by the caller, overrides the client setting.
    return_result: return the raw result dictionary unconditionally
                   (default False).
    '''
    # The call-level quiet takes precedence over the client setting
    if quiet is None:
        quiet = self.quiet

    result = run_cmd(cmd, sudo=sudo, capture=capture, quiet=quiet)

    # A single-line message is unwrapped from its list
    if len(result['message']) == 1:
        result['message'] = result['message'][0]

    # Raw result requested: hand it back as is
    if return_result:
        return result

    # Otherwise unwrap the message only for successful runs
    if result['return_code'] == 0:
        return result['message']
    return result
|
python
|
{
"resource": ""
}
|
q12753
|
Recipe.load
|
train
|
def load(self, recipe):
    '''load a recipe file into the client: record the path, check it
    exists, then parse it.

    Parameters
    ==========
    recipe: the original recipe file, parsed by the subclass either
            DockerRecipe or SingularityRecipe
    '''
    # Keep a handle on the recipe file
    self.recipe = recipe

    # Does the recipe file exist?
    self._run_checks()

    # Hand off to the (subclass-specific) parser
    self.parse()
|
python
|
{
"resource": ""
}
|
q12754
|
Recipe.parse
|
train
|
def parse(self):
    '''base function for parsing the recipe, whether it be a Dockerfile
    or Singularity recipe. All parsed sections are reset to empty
    defaults, the recipe is read into lines, and - when the subclass
    defines one - the recipe-specific _parse function is called.

    Instructions for making a client subparser: it should have a main
    function _parse that parses a list of lines from some recipe text
    file into the appropriate sections, e.g.,

    self.fromHeader
    self.environ
    self.labels
    self.install
    self.files
    self.test
    self.entrypoint
    '''
    # Scalar sections default to None
    self.cmd = None
    self.entrypoint = None
    self.test = None

    # List sections start out empty
    self.comments = []
    self.environ = []
    self.files = []
    self.install = []
    self.labels = []
    self.ports = []
    self.volumes = []

    if self.recipe:

        # Read in the raw lines of the file
        self.lines = read_file(self.recipe)

        # If properly instantiated by Docker or Singularity Recipe, parse
        if hasattr(self, '_parse'):
            self._parse()
|
python
|
{
"resource": ""
}
|
q12755
|
Recipe.convert
|
train
|
def convert(self, convert_to=None,
            runscript="/bin/bash",
            force=False):
    '''convenience function to convert to the most likely desired
    format: a DockerRecipe converts to Singularity and vice versa,
    unless ``convert_to`` explicitly names a target.

    Parameters
    ==========
    convert_to: can be manually forced (docker or singularity)
    runscript: default runscript (entrypoint) to use
    force: if True, override discovery from Dockerfile
    '''
    # Pick the right converter for the target, then run it
    converter = self._get_converter(convert_to)
    return converter(runscript=runscript, force=force)
|
python
|
{
"resource": ""
}
|
q12756
|
Recipe._get_converter
|
train
|
def _get_converter(self, convert_to=None):
    '''helper that returns the proper conversion function without
    calling it: convert() uses the returned string, save() writes it
    to file for the user.

    Parameters
    ==========
    convert_to: a string either docker or singularity, if a different
                target than the default is wanted

    Returns
    =======
    converter: the function to do the conversion
    '''
    # Converting *to* singularity means the input was a Dockerfile
    if self._get_conversion_type(convert_to) == "singularity":
        return self.docker2singularity
    return self.singularity2docker
|
python
|
{
"resource": ""
}
|
q12757
|
Recipe._get_conversion_outfile
|
train
|
def _get_conversion_outfile(self, convert_to=None):
    '''helper to return a temporary output filename for a conversion,
       based on the kind of conversion.

       FIX: the original used tempfile._get_candidate_names, a private
       API that can change or disappear between Python versions; the
       random suffix is now derived from uuid instead.

       Parameters
       ==========
       convert_to: a string either docker or singularity, if a different
                   target than the default is wanted
    '''
    import uuid

    prefix = "Singularity"
    if self._get_conversion_type(convert_to) == "docker":
        prefix = "Dockerfile"

    # A short random suffix keeps repeated conversions from colliding
    suffix = uuid.uuid4().hex[:8]
    return "%s.%s" %(prefix, suffix)
|
python
|
{
"resource": ""
}
|
q12758
|
Recipe._get_conversion_type
|
train
|
def _get_conversion_type(self, convert_to=None):
    '''helper to decide the conversion target based on user preference
    and the input recipe type.

    Parameters
    ==========
    convert_to: a string either docker or singularity (default None)
    '''
    # An explicit, valid user choice always wins
    if convert_to in ('singularity', 'docker'):
        return convert_to

    # Otherwise convert to the opposite of the input recipe kind
    if self.name == "singularity":
        return "docker"
    return "singularity"
|
python
|
{
"resource": ""
}
|
q12759
|
Recipe._write_script
|
train
|
def _write_script(self, path, lines, chmod=True):
    '''write a script with some lines content to path in the image. This
       is done by way of adding echo statements to the install section.

       BUG FIXES: (1) the original signature was missing ``self`` even
       though the function is called as a method (self._write_script),
       so the path argument received the instance; (2) the echo format
       string had two %s placeholders but was given only ``path``,
       raising TypeError and dropping the line content; (3) the input
       list is no longer mutated via pop().

       Parameters
       ==========
       path: the path to the file to write
       lines: the lines to echo to the file
       chmod: If true, change permission to make u+x
    '''
    if len(lines) > 0:

        # Echo each line into the target script at install time
        for line in lines:
            self.install.append('echo "%s" >> %s' %(line, path))

        # Make the generated script executable
        if chmod is True:
            self.install.append('chmod u+x %s' %path)
|
python
|
{
"resource": ""
}
|
q12760
|
resume
|
train
|
def resume(self, container_id=None, sudo=None):
    '''resume a stopped OciImage container, if it exists

       Equivalent command line example:
          singularity oci resume <container_ID>

       Parameters
       ==========
       container_id: the id to resume.
       sudo: Add sudo to the command. If the container was created by root,
             you need sudo to interact and get its state.

       Returns
       =======
       return_code: the return code to indicate if the container was resumed.
    '''
    return self._state_command(container_id, sudo=sudo, command='resume')
|
python
|
{
"resource": ""
}
|
q12761
|
pause
|
train
|
def pause(self, container_id=None, sudo=None):
    '''pause a running OciImage container, if it exists

       Equivalent command line example:
          singularity oci pause <container_ID>

       Parameters
       ==========
       container_id: the id to pause.
       sudo: Add sudo to the command. If the container was created by root,
             you need sudo to interact and get its state.

       Returns
       =======
       return_code: the return code to indicate if the container was paused.
    '''
    return self._state_command(container_id, sudo=sudo, command='pause')
|
python
|
{
"resource": ""
}
|
q12762
|
stopall
|
train
|
def stopall(self, sudo=False, quiet=True):
    '''stop ALL instances. This command is only added to the command group
    as it doesn't make sense to call from a single instance

    Parameters
    ==========
    sudo: if the command should be done with sudo (exposes different set of
          instances)
    quiet: passed through to run_command
    '''
    from spython.utils import run_command, check_install
    check_install()

    # Singularity 3.x split the command group into two tokens
    subgroup = 'instance.stop'
    if 'version 3' in self.version():
        subgroup = ["instance", "stop"]

    cmd = self._init_command(subgroup) + ['--all']
    output = run_command(cmd, sudo=sudo, quiet=quiet)

    # Report failures; the return code goes back to the caller either way
    if output['return_code'] != 0:
        bot.error('%s : return code %s' %(output['message'],
                                          output['return_code']))

    return output['return_code']
|
python
|
{
"resource": ""
}
|
q12763
|
println
|
train
|
def println(self, output, quiet=False):
    '''print the output unless the client or the caller asked for quiet;
    bytes are decoded to utf-8 first.

    Parameters
    ==========
    output: the string to print
    quiet: a runtime variable to over-ride the default.
    '''
    # Normalize bytes to text before printing
    text = output.decode('utf-8') if isinstance(output, bytes) else output

    # Both the client-level and call-level flags must be exactly False
    if self.quiet is False and quiet is False:
        print(text)
|
python
|
{
"resource": ""
}
|
q12764
|
help
|
train
|
def help(self, command=None):
    '''print the general singularity help, or help for a specific command.

       IDIOM FIX: the local result no longer shadows the builtin
       ``help``, and the None comparison uses ``is not None``.

       Parameters
       ==========
       command: the command to get help for, if none, prints general help
    '''
    from spython.utils import check_install
    check_install()

    cmd = ['singularity', '--help']
    if command is not None:
        cmd.append(command)

    result = self._run_command(cmd)
    return result
|
python
|
{
"resource": ""
}
|
q12765
|
Instance.generate_name
|
train
|
def generate_name(self, name=None):
    '''set self.name for the instance, generating a Robot Name when the
    user doesn't supply one.

    Parameters
    ==========
    name: an optional instance name; hyphens become underscores
    '''
    # Fall back to a generated robot name
    if name is None:
        name = self.RobotNamer.generate()

    # Instance names use underscores rather than hyphens
    self.name = name.replace('-', '_')
|
python
|
{
"resource": ""
}
|
q12766
|
Instance._update_metadata
|
train
|
def _update_metadata(self, kwargs=None):
    '''Extract any additional attributes to hold with the instance
       from kwargs.

       BUG FIX: if kwargs is None (no name attribute, or a failed
       lookup) the membership tests below previously raised TypeError;
       non-dict metadata is now treated as empty.

       Parameters
       ==========
       kwargs: a dictionary of instance metadata; when None, we try to
               look the container up with instance.list
    '''
    # If not given metadata, use instance.list to get it for container
    if kwargs is None and hasattr(self, 'name'):
        kwargs = self._list(self.name, quiet=True, return_json=True)

    # Guard: nothing usable came back, treat as empty metadata
    if not isinstance(kwargs, dict):
        kwargs = {}

    # Add acceptable arguments
    for arg in ['pid', 'name']:
        if arg in kwargs:
            setattr(self, arg, kwargs[arg])

    # The image can arrive under either key depending on the source
    if "image" in kwargs:
        self._image = kwargs['image']
    elif "container_image" in kwargs:
        self._image = kwargs['container_image']
|
python
|
{
"resource": ""
}
|
q12767
|
mount
|
train
|
def mount(self, image, sudo=None):
    '''create an OCI bundle from a SIF image.

    Parameters
    ==========
    image: the container (sif) to mount
    sudo: whether to run the command with sudo
    '''
    return self._state_command(image, sudo=sudo, command="mount")
|
python
|
{
"resource": ""
}
|
q12768
|
main
|
train
|
def main(args, options, parser):
    '''This function serves as a wrapper around the DockerRecipe and
       SingularityRecipe converters. We can either save to file if
       an output file is given as the second positional file, or print
       to the console if not.

       BUG FIXES: (1) the singularity branch previously instantiated
       SingularityRecipe and then called the *instance* again, raising
       TypeError; the class is now kept until the single instantiation
       below. (2) the console branch hard-coded force=True instead of
       the discovered ``force`` flag. (3) the local no longer shadows
       the argparse ``parser`` argument.

       Parameters
       ==========
       args: parsed command line arguments (files, input, entrypoint)
       options: remaining unparsed options (unused here)
       parser: the argparse parser, used to print help on bad input
    '''
    from spython.main.parse import ( DockerRecipe, SingularityRecipe )

    # We need something to work with
    if not args.files:
        parser.print_help()
        sys.exit(1)

    # Get the user specified input and output files
    outfile = None
    if len(args.files) > 1:
        outfile = args.files[1]

    # Choose the recipe parser class (instantiated exactly once, below)
    recipe_class = SingularityRecipe
    if args.input == "docker":
        recipe_class = DockerRecipe
    elif args.input != "singularity":
        # Auto-detect from the filename when no explicit input type
        if "dockerfile" in args.files[0].lower():
            recipe_class = DockerRecipe

    # Initialize the chosen parser
    recipe_parser = recipe_class(args.files[0])

    # By default, discover entrypoint / cmd from Dockerfile
    entrypoint = "/bin/bash"
    force = False

    if args.entrypoint is not None:
        entrypoint = args.entrypoint
        force = True

    # If the user specifies an output file, save to it
    if outfile is not None:
        recipe_parser.save(outfile, runscript=entrypoint, force=force)

    # Otherwise, convert and print to screen
    else:
        recipe = recipe_parser.convert(runscript=entrypoint, force=force)
        print(recipe)
|
python
|
{
"resource": ""
}
|
q12769
|
DockerRecipe._setup
|
train
|
def _setup(self, action, line):
    '''strip the leading instruction (action) name from the line, pad it
    as a continuation, and split it into non-empty components.

    Parameters
    ==========
    action: the Dockerfile instruction name (e.g., RUN)
    line: the raw recipe line
    '''
    bot.debug('[in] %s' % line)

    # Drop the instruction keyword at the start; the leading space keeps
    # continuation lines (without an ACTION) parsing consistently
    remainder = " " + re.sub('^%s' % action, '', line)

    # Split into components, dropping empty entries
    return [part for part in self._split_line(remainder) if part not in ['', None]]
|
python
|
{
"resource": ""
}
|
q12770
|
DockerRecipe._run
|
train
|
def _run(self, line):
    '''everything from RUN goes into the install list

    Parameters
    ==========
    line: the line from the recipe file to parse for RUN
    '''
    # Strip the RUN keyword, then accumulate into the install section
    self.install.extend(self._setup('RUN', line))
|
python
|
{
"resource": ""
}
|
q12771
|
DockerRecipe._add
|
train
|
def _add(self, lines):
    '''Add can also handle https, and compressed files.

    Parameters
    ==========
    lines: the line(s) from the recipe file to parse for ADD
    '''
    for entry in self._setup('ADD', lines):

        # First token is the source, the rest are destinations
        frompath, *topaths = entry.split(" ")

        # A web address is fetched at install time
        if frompath.startswith('http'):
            for topath in topaths:
                self._parse_http(frompath, topath)

        # An archive is added and decompressed at install time
        elif re.search("[.](gz|gzip|bz2|xz)$", frompath.strip()):
            for topath in topaths:
                self._parse_archive(frompath, topath)

        # Plain files are just added
        else:
            for topath in topaths:
                self._add_files(frompath, topath)
|
python
|
{
"resource": ""
}
|
q12772
|
DockerRecipe._parse_http
|
train
|
def _parse_http(self, url, dest):
    '''emit an install step that downloads a url into the image.

    Parameters
    ==========
    url: the source url to retrieve with curl
    dest: the destination folder to put it in the image
    '''
    # Download to <dest>/<url basename> with curl
    target = "%s/%s" %(dest, os.path.basename(url))
    self.install.append("curl %s -o %s" %(url, target))
|
python
|
{
"resource": ""
}
|
q12773
|
DockerRecipe._parse_archive
|
train
|
def _parse_archive(self, targz, dest):
    '''add a line to the install script to extract an archive to a
       location, and also add it to the files.

       BUG FIX: the original emitted "tar -zvf <archive> <dest>", which
       has no operation mode (x/c/t) and is not a valid tar invocation;
       we now extract (-x) into the destination directory (-C).

       Parameters
       ==========
       targz: the targz to extract
       dest: the location to extract it to
    '''
    # Add command to extract it into the destination
    self.install.append("tar -xzvf %s -C %s" %(targz, dest))

    # Ensure added to container files
    return self._add_files(targz, dest)
|
python
|
{
"resource": ""
}
|
q12774
|
DockerRecipe._workdir
|
train
|
def _workdir(self, line):
    '''A Docker WORKDIR command simply implies to cd to that location

    Parameters
    ==========
    line: the line from the recipe file to parse for WORKDIR
    '''
    # Strip the WORKDIR keyword, then emit a cd into the install section
    target = ''.join(self._setup('WORKDIR', line))
    self.install.append("cd %s" % target)
|
python
|
{
"resource": ""
}
|
q12775
|
DockerRecipe._get_mapping
|
train
|
def _get_mapping(self, line, parser=None, previous=None):
    '''mapping will take the command from a Dockerfile and return a map
       function to add it to the appropriate place. Any lines that don't
       cleanly map are assumed to be comments.

       Parameters
       ==========
       line: the list that has been parsed into parts with _split_line
       parser: the previously used parser, for context
       previous: the previous raw line, used to detect continuations

       Returns
       =======
       function: to map a line to its command group
    '''
    # Split the command into cleanly the command and rest
    if not isinstance(line, list):
        line = self._split_line(line)

    # No line we will give function to handle empty line
    if len(line) == 0:
        return None

    # Dockerfile instructions are case insensitive
    cmd = line[0].upper()

    mapping = {"ADD": self._add,
               "ARG": self._arg,
               "COPY": self._copy,
               "CMD": self._cmd,
               "ENTRYPOINT": self._entry,
               "ENV": self._env,
               "EXPOSE": self._expose,
               "FROM": self._from,
               "HEALTHCHECK": self._test,
               "RUN": self._run,
               "WORKDIR": self._workdir,
               "MAINTAINER": self._label,
               "VOLUME": self._volume,
               "LABEL": self._label}

    # If it's a command line, return correct function
    if cmd in mapping:
        return mapping[cmd]

    # If it's a continued line, return previous
    cleaned = self._clean_line(line[-1])
    previous = self._clean_line(previous)

    # NOTE(review): by operator precedence this reads as
    # (cleaned.endswith('\\') and parser) or previous.endswith('\\'),
    # so a continuation signaled only by ``previous`` returns ``parser``
    # even when it is None -- likely intended as
    # (cleaned... or previous...) and parser; confirm before changing.
    if cleaned.endswith('\\') and parser or previous.endswith('\\'):
        return parser

    return self._default
|
python
|
{
"resource": ""
}
|
q12776
|
DockerRecipe._parse
|
train
|
def _parse(self):
    '''base function for parsing the Dockerfile, extracting elements
    into the correct data structures. Everything is parsed into lists
    or dictionaries that can be assembled again on demand.

    Environment: since Docker also exports environment as we go, we add
                 environment to the environment section and install
    Labels: include anything that is a LABEL, ARG, or (deprecated)
            maintainer.
    Add/Copy: are treated the same
    '''
    active_parser = None
    previous_line = None

    for current in self.lines:

        # Decide the handler; continuations may keep the previous one
        active_parser = self._get_mapping(current, active_parser, previous_line)

        # Parse it, if appropriate
        if active_parser:
            active_parser(current)

        previous_line = current
|
python
|
{
"resource": ""
}
|
q12777
|
ImageBase.remove_uri
|
train
|
def remove_uri(self, image):
    '''return just the image name, with any uri prefix (e.g. docker://),
    leading/trailing dashes, and a trailing slash removed.

    Parameters
    ==========
    image: the image uri (or name) to clean; None becomes ""
    '''
    image = image or ''
    uri = self.get_uri(image) or ''

    # Strip the scheme prefix once, then stray dashes / trailing slash
    stripped = image.replace('%s://' %uri, '', 1)
    return stripped.strip('-').rstrip('/')
|
python
|
{
"resource": ""
}
|
q12778
|
create
|
train
|
def create(self, image_path, size=1024, sudo=False):
    '''create a new, empty image at image_path.

    Parameters
    ==========
    image_path: full path to image
    size: image size in MiB, default is 1024 MiB
    sudo: whether to run the command with sudo

    Returns
    =======
    image_path: the path of the created image
    '''
    from spython.utils import check_install
    check_install()

    # singularity image.create --size <size> <path>
    cmd = self.init_command('image.create') + ['--size', str(size), image_path]
    self.println(self.run_command(cmd, sudo=sudo))

    # The command only succeeded if the image exists afterwards
    if not os.path.exists(image_path):
        bot.exit("Could not create image %s" %image_path)

    return image_path
|
python
|
{
"resource": ""
}
|
q12779
|
check_install
|
train
|
def check_install(software='singularity', quiet=True):
    '''check_install will attempt to run the singularity command, and
       return True if installed. The command line utils will not run
       without this check.

       BUG FIX: the bare except (which also hid programming errors) is
       narrowed to OSError, matching the FileNotFoundError noted in the
       original comment.

       Parameters
       ==========
       software: the executable to check for (default singularity)
       quiet: if False, also report the version that was found

       Returns
       =======
       found: True if the software responded to --version
    '''
    cmd = [software, '--version']
    found = False

    try:
        version = run_command(cmd, quiet=True)
    except OSError:  # e.g. FileNotFoundError when not on PATH
        return found

    if version is not None:
        if version['return_code'] == 0:
            found = True

        if quiet is False:
            version = version['message']
            bot.info("Found %s version %s" % (software.upper(), version))

    return found
|
python
|
{
"resource": ""
}
|
q12780
|
get_singularity_version
|
train
|
def get_singularity_version():
    '''get the singularity client version as a string. Useful in the
       case that functionality has changed, etc. Can be "hacked" if
       needed by exporting SPYTHON_SINGULARITY_VERSION, which is checked
       before checking on the command line.

       BUG FIX: when the version command failed, the original returned
       the whole result dictionary instead of a string; we now always
       return a string ("" when the version cannot be determined).

       Returns
       =======
       version: the version string, or "" if it cannot be determined
    '''
    version = os.environ.get('SPYTHON_SINGULARITY_VERSION', "")

    if version == "":
        try:
            result = run_command(["singularity", '--version'], quiet=True)
        except OSError:  # e.g. FileNotFoundError when not installed
            return version

        # Only unpack the message on a successful, non-empty run
        if result['return_code'] == 0 and len(result['message']) > 0:
            version = result['message'][0].strip('\n')

    return version
|
python
|
{
"resource": ""
}
|
q12781
|
get_requirements
|
train
|
def get_requirements(lookup=None):
    '''Build pip-style requirement strings from a setup lookup dict.

    Reads requirements and versions from the lookup obtained with
    get_lookup(); each entry of lookup['INSTALL_REQUIRES'] is a
    (module_name, module_meta) pair.

    Parameters
    ==========
    lookup: a dict with an INSTALL_REQUIRES list; defaults to get_lookup()

    Returns
    =======
    install_requires: list of strings like "name==1.0", "name>=1.0", "name"
    '''
    if lookup is None:
        lookup = get_lookup()
    install_requires = []
    for module_name, module_meta in lookup['INSTALL_REQUIRES']:
        if "exact_version" in module_meta:
            dependency = "%s==%s" % (module_name, module_meta['exact_version'])
        elif module_meta.get('min_version') is not None:
            dependency = "%s>=%s" % (module_name, module_meta['min_version'])
        else:
            # Fix: the original left ``dependency`` undefined (NameError or a
            # stale value from the previous iteration) when neither version
            # key was present; fall back to an unpinned requirement.
            dependency = module_name
        install_requires.append(dependency)
    return install_requires
|
python
|
{
"resource": ""
}
|
q12782
|
load
|
train
|
def load(self, image=None):
    '''Load an image onto the client, from a path or a uri.

    Parameters
    ==========
    image: the image path or uri to load (e.g., docker://ubuntu,
           instance://myinstance)
    '''
    from spython.image import Image
    from spython.instance import Instance

    # Every input starts life as an Image; an instance:// uri is then
    # promoted to an Instance (matching the original construction order).
    self.simage = Image(image)
    if image is not None and image.startswith('instance://'):
        self.simage = Instance(image)
    bot.info(self.simage)
|
python
|
{
"resource": ""
}
|
q12783
|
generate_oci_commands
|
train
|
def generate_oci_commands():
    '''Attach the oci command group functions to OciImage and return it,
    allowing interaction with an image using OCI commands.
    '''
    from spython.oci import OciImage
    from spython.main.base.logger import println
    # run_command uses run_cmd, but wraps to catch error
    from spython.main.base.command import ( run_command, send_command )
    from spython.main.base.generate import RobotNamer

    # Oci Command Groups
    from .mounts import ( mount, umount )
    from .states import ( kill, state, start, pause, resume, _state_command )
    from .actions import ( attach, create, delete, execute, run, _run, update )

    # Public and private oci commands, attached by name
    commands = {'start': start, 'mount': mount, 'umount': umount,
                'state': state, 'resume': resume, 'pause': pause,
                'attach': attach, 'create': create, 'delete': delete,
                'execute': execute, 'update': update, 'kill': kill,
                'run': run, '_run': _run, '_state_command': _state_command}
    for attr_name, func in commands.items():
        setattr(OciImage, attr_name, func)

    # Shared helpers for the command functions above
    OciImage.RobotNamer = RobotNamer()
    OciImage._send_command = send_command # send and disregard stderr, stdout
    OciImage._run_command = run_command
    OciImage._println = println
    OciImage.OciImage = OciImage
    return OciImage
|
python
|
{
"resource": ""
}
|
q12784
|
create_runscript
|
train
|
def create_runscript(self, default="/bin/bash", force=False):
    '''Build a Singularity runscript from Docker metadata.

    The Docker ENTRYPOINT is used when defined, falling back to CMD,
    and finally to ``default`` when neither exists.

    Parameters
    ==========
    default: entrypoint used when the container defines none
    force: if True, use default and ignore Dockerfile settings
    '''
    entrypoint = default

    # Respect Docker metadata unless the caller forces the default
    if force is False:
        source = self.entrypoint if self.entrypoint is not None else self.cmd
        if source is not None:
            entrypoint = ''.join(source)

    # The runscript should exec its command...
    if not entrypoint.startswith('exec'):
        entrypoint = "exec %s" % entrypoint

    # ...and pass through any runtime arguments
    if not re.search('"?[$]@"?', entrypoint):
        entrypoint = '%s "$@"' % entrypoint
    return entrypoint
|
python
|
{
"resource": ""
}
|
q12785
|
finish_section
|
train
|
def finish_section(section, name):
    '''Prepend the "%name" header to a section's content to finish the
    recipe section.

    Parameters
    ==========
    section: the section content (a string or a list of lines), no header
    name: the name of the section for the header (e.g., "post")
    '''
    lines = section if isinstance(section, list) else [section]
    return ['%' + name] + lines
|
python
|
{
"resource": ""
}
|
q12786
|
create_env_section
|
train
|
def create_env_section(pairs, name):
    '''Render environment key=value pairs as an exported recipe section.

    Parameters
    ==========
    pairs: list of "KEY=VALUE" strings, each exported on its own line
    name: the name of the section to write (e.g., "environment")
    '''
    return ['%' + name] + ["export %s" % pair for pair in pairs]
|
python
|
{
"resource": ""
}
|
q12787
|
abbreviate_str
|
train
|
def abbreviate_str(string, max_len=80, indicator="..."):
    """
    Truncate a string to at most max_len characters, replacing the tail
    with an indicator (e.g. an ellipsis) when there is room for it.
    """
    # Nothing to do for empty/None input, unlimited length, or short strings.
    if not string or not max_len or len(string) <= max_len:
        return string
    # Too short to fit the indicator: plain truncation.
    if max_len <= len(indicator):
        return string[:max_len]
    return string[:max_len - len(indicator)] + indicator
|
python
|
{
"resource": ""
}
|
q12788
|
abbreviate_list
|
train
|
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
    """
    Join a list into a string, truncating each element to item_max_len
    (set to None or 0 to disable) and keeping at most max_items elements,
    with the indicator appended when the whole list was cut short.
    """
    if not items:
        return items
    kept = [abbreviate_str("%s" % item, max_len=item_max_len)
            for item in items[:max_items]]
    if len(items) > max_items:
        kept.append(indicator)
    return joiner.join(kept)
|
python
|
{
"resource": ""
}
|
q12789
|
make_parent_dirs
|
train
|
def make_parent_dirs(path, mode=0o777):
    """
    Ensure the parent directories of a file exist, creating them as
    needed, and return the path unchanged.
    """
    parent_dir = os.path.dirname(path)
    # A bare filename has no parent component; nothing to create.
    if parent_dir:
        make_all_dirs(parent_dir, mode)
    return path
|
python
|
{
"resource": ""
}
|
q12790
|
atomic_output_file
|
train
|
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
    """
    A context manager for convenience in writing a file or directory in an atomic way. Set up
    a temporary name, then rename it after the operation is done, optionally making a backup of
    the previous file or directory, if present.

    Yields a temporary path for the caller to write to; the rename onto
    dest_path happens only if the managed block completes without raising.

    Parameters:
      dest_path: final destination path for the file or directory
      make_parents: if True, create missing parent directories of the temp path
      backup_suffix: if set, keep any previous dest_path under this suffix
      suffix: template for the temporary name; "%s" is filled with a unique id
        (from new_uid(), defined elsewhere in this module)
    """
    if dest_path == os.devnull:
        # Handle the (probably rare) case of writing to /dev/null.
        yield dest_path
    else:
        # Unique temp name alongside the destination, e.g. "out.txt.partial.<uid>"
        tmp_path = ("%s" + suffix) % (dest_path, new_uid())
        if make_parents:
            make_parent_dirs(tmp_path)
        yield tmp_path
        # Note this is not in a finally block, so that result won't be renamed to final location
        # in case of abnormal exit.
        if not os.path.exists(tmp_path):
            raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
        if backup_suffix:
            move_to_backup(dest_path, backup_suffix=backup_suffix)
        # If the target already exists, and is a directory, it has to be removed.
        if os.path.isdir(dest_path):
            shutil.rmtree(dest_path)
        shutil.move(tmp_path, dest_path)
|
python
|
{
"resource": ""
}
|
q12791
|
temp_output_file
|
train
|
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    A context manager for convenience in creating a temporary file,
    which is deleted when exiting the context.
    Usage:
      with temp_output_file() as (fd, path):
          ...
    """
    # Delegate to the shared helper; False selects a file (not a directory).
    options = dict(prefix=prefix, suffix=suffix, dir=dir,
                   make_parents=make_parents, always_clean=always_clean)
    return _temp_output(False, **options)
|
python
|
{
"resource": ""
}
|
q12792
|
temp_output_dir
|
train
|
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    A context manager for convenience in creating a temporary directory,
    which is deleted when exiting the context.
    Usage:
      with temp_output_dir() as dirname:
          ...
    """
    # Delegate to the shared helper; True selects a directory (not a file).
    options = dict(prefix=prefix, suffix=suffix, dir=dir,
                   make_parents=make_parents, always_clean=always_clean)
    return _temp_output(True, **options)
|
python
|
{
"resource": ""
}
|
q12793
|
read_string_from_file
|
train
|
def read_string_from_file(path, encoding="utf8"):
    """
    Return the entire contents of a file as a single decoded string.
    """
    with codecs.open(path, "rb", encoding=encoding) as infile:
        return infile.read()
|
python
|
{
"resource": ""
}
|
q12794
|
write_string_to_file
|
train
|
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
    """
    Atomically replace the file at ``path`` with ``string`` as its entire
    contents. Keeps a backup of any previous file by default.
    """
    with atomic_output_file(path, make_parents=make_parents,
                            backup_suffix=backup_suffix) as staging_path:
        with codecs.open(staging_path, "wb", encoding=encoding) as outfile:
            outfile.write(string)
|
python
|
{
"resource": ""
}
|
q12795
|
set_file_mtime
|
train
|
def set_file_mtime(path, mtime, atime=None):
    """
    Set access and modification times on a file; atime defaults to mtime.
    """
    atime = atime or mtime
    # Open in append mode while setting times (also touches the file into
    # existence, matching the original behavior); closed on block exit.
    with open(path, 'a'):
        os.utime(path, (atime, mtime))
|
python
|
{
"resource": ""
}
|
q12796
|
copyfile_atomic
|
train
|
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Atomically copy a file on the local filesystem, so partial copies are
    never visible at dest_path. The source's mtime is preserved.
    """
    with atomic_output_file(dest_path, make_parents=make_parents,
                            backup_suffix=backup_suffix) as staging_path:
        shutil.copyfile(source_path, staging_path)
        # Carry over the source timestamp before the atomic rename.
        set_file_mtime(staging_path, os.path.getmtime(source_path))
|
python
|
{
"resource": ""
}
|
q12797
|
copytree_atomic
|
train
|
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
    """
    Copy a file or directory recursively, and atomically, renaming the file
    or top-level dir into place when done. Unlike shutil.copytree, this
    will not fail on a file.
    """
    # Plain files take the single-file atomic path.
    if not os.path.isdir(source_path):
        copyfile_atomic(source_path, dest_path, make_parents=make_parents,
                        backup_suffix=backup_suffix)
        return
    with atomic_output_file(dest_path, make_parents=make_parents,
                            backup_suffix=backup_suffix) as staging_path:
        shutil.copytree(source_path, staging_path, symlinks=symlinks)
|
python
|
{
"resource": ""
}
|
q12798
|
movefile
|
train
|
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Move file. With a few extra options.

    Parameters:
      source_path: the file to move
      dest_path: where to move it
      make_parents: if True, create missing parent directories of dest_path
      backup_suffix: passed through to move_to_backup (defined elsewhere in
        this module), presumably to keep any existing dest_path as a backup
    """
    if make_parents:
        make_parent_dirs(dest_path)
    # Handle any pre-existing destination before the move overwrites it.
    move_to_backup(dest_path, backup_suffix=backup_suffix)
    shutil.move(source_path, dest_path)
|
python
|
{
"resource": ""
}
|
q12799
|
rmtree_or_file
|
train
|
def rmtree_or_file(path, ignore_errors=False, onerror=None):
    """
    Remove the target whether it is a directory tree, a regular file, or a
    symlink (shutil.rmtree by itself fails on files and symlinks).
    """
    # TODO: Could add an rsync-based delete, as in
    # https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
    if ignore_errors and not os.path.exists(path):
        return
    # Symlinks to directories must be unlinked, not recursed into.
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    if is_real_dir:
        shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
    else:
        os.unlink(path)
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.