_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q41700
DynamoDBManager._update_metadata
train
def _update_metadata(self, archive_name, archive_metadata): """ Appends the updated_metada dict to the Metadata Attribute list Parameters ---------- archive_name: str ID of archive to update updated_metadata: dict dictionary of metadata keys and values to update. If the value for a particular key is `None`, the key is removed. """ archive_metadata_current = self._get_archive_metadata(archive_name) archive_metadata_current.update(archive_metadata) for k, v in archive_metadata_current.items(): if v is None: del archive_metadata_current[k] # add the updated archive_metadata object to Dynamo self._table.update_item( Key={'_id': archive_name}, UpdateExpression="SET archive_metadata = :v", ExpressionAttributeValues={':v': archive_metadata_current}, ReturnValues='ALL_NEW')
python
{ "resource": "" }
q41701
DynamoDBManager._create_archive
train
def _create_archive( self, archive_name, metadata): ''' This adds an item in a DynamoDB table corresponding to a S3 object Args ---- arhive_name: str corresponds to the name of the Archive (e.g. ) Returns ------- Dictionary with confirmation of upload ''' archive_exists = False try: self.get_archive(archive_name) archive_exists = True except KeyError: pass if archive_exists: raise KeyError( "{} already exists. Use get_archive() to view".format( archive_name)) self._table.put_item(Item=metadata)
python
{ "resource": "" }
q41702
load_backend
train
def load_backend(backend_name):
    """Load a pool backend module given its (possibly dotted) name."""
    # A dotted name is imported as-is; a bare name is resolved inside
    # the spamc.backend_* namespace.
    if "." in backend_name:
        module_path = backend_name
    else:
        module_path = "spamc.backend_%s" % backend_name
    try:
        return import_module(module_path)
    except ImportError:
        raise ImportError("%s isn't a spamc backend" % backend_name)
python
{ "resource": "" }
q41703
Voevent
train
def Voevent(stream, stream_id, role):
    """Create a new VOEvent element tree, with specified IVORN and role.

    Args:
        stream (str): used to construct the IVORN like so::

            ivorn = 'ivo://' + stream + '#' + stream_id

        (N.B. ``stream_id`` is converted to string if required.)
        So, e.g. we might set::

            stream='voevent.soton.ac.uk/super_exciting_events'
            stream_id=77

        stream_id (str): See above.
        role (str): role as defined in VOEvent spec.
            (See also :py:class:`.definitions.roles`)

    Returns:
        Root-node of the VOEvent, as represented by an lxml.objectify
        element tree ('etree'). See also
        http://lxml.de/objectify.html#the-lxml-objectify-api
    """
    # remove_blank_text lets lxml pretty-print the output later.
    parser = objectify.makeparser(remove_blank_text=True)
    # Start from the packaged v2.0 skeleton rather than building from scratch.
    v = objectify.fromstring(voeventparse.definitions.v2_0_skeleton_str,
                             parser=parser)
    # Strip the namespace prefix from the root so attribute-style access works.
    _remove_root_tag_prefix(v)
    if not isinstance(stream_id, string_types):
        stream_id = repr(stream_id)
    v.attrib['ivorn'] = ''.join(('ivo://', stream, '#', stream_id))
    v.attrib['role'] = role
    # Presumably we'll always want the following children:
    # (NB, valid to then leave them empty)
    etree.SubElement(v, 'Who')
    etree.SubElement(v, 'What')
    etree.SubElement(v, 'WhereWhen')
    v.Who.Description = (
        'VOEvent created with voevent-parse, version {}. '
        'See https://github.com/timstaley/voevent-parse for details.').format(
        __version__
    )
    return v
python
{ "resource": "" }
q41704
loads
train
def loads(s, check_version=True): """ Load VOEvent from bytes. This parses a VOEvent XML packet string, taking care of some subtleties. For Python 3 users, ``s`` should be a bytes object - see also http://lxml.de/FAQ.html, "Why can't lxml parse my XML from unicode strings?" (Python 2 users can stick with old-school ``str`` type if preferred) By default, will raise an exception if the VOEvent is not of version 2.0. This can be disabled but voevent-parse routines are untested with other versions. Args: s (bytes): Bytes containing raw XML. check_version (bool): (Default=True) Checks that the VOEvent is of a supported schema version - currently only v2.0 is supported. Returns: :py:class:`Voevent`: Root-node of the etree. Raises: ValueError: If passed a VOEvent of wrong schema version (i.e. schema 1.1) """ # .. note:: # # The namespace is removed from the root element tag to make # objectify access work as expected, # (see :py:func:`._remove_root_tag_prefix`) # so we must re-insert it when we want to conform to schema. v = objectify.fromstring(s) _remove_root_tag_prefix(v) if check_version: version = v.attrib['version'] if not version == '2.0': raise ValueError('Unsupported VOEvent schema version:' + version) return v
python
{ "resource": "" }
q41705
load
train
def load(file, check_version=True):
    """Load VOEvent from file object.

    A simple wrapper to read a file before passing the contents to
    :py:func:`.loads`.

    Use with an open file object, e.g.::

        with open('/path/to/voevent.xml', 'rb') as f:
            v = vp.load(f)

    Args:
        file (io.IOBase): An open file object (binary mode preferred),
            see also http://lxml.de/FAQ.html :
            "Can lxml parse from file objects opened in unicode/text mode?"
        check_version (bool): (Default=True) Checks that the VOEvent is of a
            supported schema version - currently only v2.0 is supported.

    Returns:
        :py:class:`Voevent`: Root-node of the etree.
    """
    return loads(file.read(), check_version)
python
{ "resource": "" }
q41706
dumps
train
def dumps(voevent, pretty_print=False, xml_declaration=True,
          encoding='UTF-8'):
    """Converts voevent to string.

    .. note:: Default encoding is UTF-8, in line with VOE2.0 schema.
        Declaring the encoding can cause diffs with the original loaded
        VOEvent, but I think it's probably the right thing to do (and lxml
        doesn't really give you a choice anyway).

    Args:
        voevent (:class:`Voevent`): Root node of the VOevent etree.
        pretty_print (bool): indent the output for improved human-legibility
            when possible. See also:
            http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
        xml_declaration (bool): Prepends a doctype tag to the string output,
            i.e. something like ``<?xml version='1.0' encoding='UTF-8'?>``

    Returns:
        bytes: Bytestring containing raw XML representation of VOEvent.
    """
    # Serialize a deep copy so the caller's tree keeps its stripped-prefix
    # (objectify-friendly) form; the copy gets the namespace restored.
    vcopy = copy.deepcopy(voevent)
    _return_to_standard_xml(vcopy)
    s = etree.tostring(vcopy, pretty_print=pretty_print,
                       xml_declaration=xml_declaration,
                       encoding=encoding)
    return s
python
{ "resource": "" }
q41707
dump
train
def dump(voevent, file, pretty_print=True, xml_declaration=True):
    """Writes the voevent to the file object.

    e.g.::

        with open('/tmp/myvoevent.xml','wb') as f:
            voeventparse.dump(v, f)

    Args:
        voevent(:class:`Voevent`): Root node of the VOevent etree.
        file (io.IOBase): An open (binary mode) file object for writing.
        pretty_print(bool): See :func:`dumps`
        xml_declaration(bool): See :func:`dumps`
    """
    serialized = dumps(voevent, pretty_print, xml_declaration)
    file.write(serialized)
python
{ "resource": "" }
q41708
valid_as_v2_0
train
def valid_as_v2_0(voevent):
    """Tests if a voevent conforms to the schema.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.

    Returns:
        bool: Whether VOEvent is valid
    """
    # Validation requires the standard namespaced form, so temporarily
    # restore the prefix (mutates ``voevent`` in place), validate, then
    # strip it again so objectify-style access keeps working for the caller.
    _return_to_standard_xml(voevent)
    valid_bool = voevent_v2_0_schema.validate(voevent)
    _remove_root_tag_prefix(voevent)
    return valid_bool
python
{ "resource": "" }
q41709
set_author
train
def set_author(voevent, title=None, shortName=None, logoURL=None, contactName=None, contactEmail=None, contactPhone=None, contributor=None): """For setting fields in the detailed author description. This can optionally be neglected if a well defined AuthorIVORN is supplied. .. note:: Unusually for this library, the args here use CamelCase naming convention, since there's a direct mapping to the ``Author.*`` attributes to which they will be assigned. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. The rest of the arguments are strings corresponding to child elements. """ # We inspect all local variables except the voevent packet, # Cycling through and assigning them on the Who.Author element. AuthChildren = locals() AuthChildren.pop('voevent') if not voevent.xpath('Who/Author'): etree.SubElement(voevent.Who, 'Author') for k, v in AuthChildren.items(): if v is not None: voevent.Who.Author[k] = v
python
{ "resource": "" }
q41710
add_where_when
train
def add_where_when(voevent, coords, obs_time, observatory_location,
                   allow_tz_naive_datetime=False):
    """
    Add details of an observation to the WhereWhen section.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        coords(:class:`.Position2D`): Sky co-ordinates of event.
        obs_time(datetime.datetime): Nominal DateTime of the observation.
            Must either be timezone-aware, or should be carefully verified
            as representing UTC and then set parameter
            ``allow_tz_naive_datetime=True``.
        observatory_location(str): Telescope locale, e.g. 'La Palma'.
            May be a generic location as listed under
            :class:`voeventparse.definitions.observatory_location`.
        allow_tz_naive_datetime (bool): (Default False). Accept
            timezone-naive datetime-timestamps. See comments for
            ``obs_time``.
    """
    # .. todo:: Implement TimeError using datetime.timedelta
    if obs_time.tzinfo is not None:
        # Normalise aware timestamps to naive UTC for serialization.
        utc_naive_obs_time = obs_time.astimezone(pytz.utc).replace(tzinfo=None)
    elif not allow_tz_naive_datetime:
        # BUG FIX: the message previously named a non-existent parameter
        # ('allow_tz_naive_obstime') and contained a garbled newline entity.
        raise ValueError(
            "Datetime passed without tzinfo, cannot be sure if it is really a "
            "UTC timestamp. Please verify function call and either add tzinfo "
            "or pass parameter 'allow_tz_naive_datetime=True', as appropriate",
        )
    else:
        utc_naive_obs_time = obs_time

    obs_data = etree.SubElement(voevent.WhereWhen, 'ObsDataLocation')
    etree.SubElement(obs_data, 'ObservatoryLocation', id=observatory_location)
    ol = etree.SubElement(obs_data, 'ObservationLocation')
    etree.SubElement(ol, 'AstroCoordSystem', id=coords.system)
    ac = etree.SubElement(ol, 'AstroCoords', coord_system_id=coords.system)
    time = etree.SubElement(ac, 'Time', unit='s')
    instant = etree.SubElement(time, 'TimeInstant')
    instant.ISOTime = utc_naive_obs_time.isoformat()
    # iso_time = etree.SubElement(instant, 'ISOTime') = obs_time.isoformat()
    pos2d = etree.SubElement(ac, 'Position2D', unit=coords.units)
    pos2d.Name1 = 'RA'
    pos2d.Name2 = 'Dec'
    pos2d_val = etree.SubElement(pos2d, 'Value2')
    pos2d_val.C1 = coords.ra
    pos2d_val.C2 = coords.dec
    pos2d.Error2Radius = coords.err
python
{ "resource": "" }
q41711
add_how
train
def add_how(voevent, descriptions=None, references=None):
    """Add descriptions or references to the How section.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        descriptions(str): Description string, or list of description
            strings.
        references(:py:class:`voeventparse.misc.Reference`): A reference
            element (or list thereof).
    """
    # Create the How element lazily on first use.
    if not voevent.xpath('How'):
        etree.SubElement(voevent, 'How')
    if descriptions is not None:
        for desc in _listify(descriptions):
            # d = etree.SubElement(voevent.How, 'Description')
            # voevent.How.Description[voevent.How.index(d)] = desc
            ##Simpler:
            # Append an empty Description, then assign text to the newest one.
            etree.SubElement(voevent.How, 'Description')
            voevent.How.Description[-1] = desc
    if references is not None:
        voevent.How.extend(_listify(references))
python
{ "resource": "" }
q41712
add_citations
train
def add_citations(voevent, event_ivorns):
    """Add citations to other voevents.

    The schema mandates that the 'Citations' section must either be
    entirely absent, or non-empty - hence we require this wrapper function
    for its creation prior to listing the first citation.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        event_ivorns (:class:`voeventparse.misc.EventIvorn`): List of
            EventIvorn elements to add to citation list.
    """
    # Create the (schema-mandated non-empty) Citations section on first use.
    if not voevent.xpath('Citations'):
        etree.SubElement(voevent, 'Citations')
    voevent.Citations.extend(_listify(event_ivorns))
python
{ "resource": "" }
q41713
_remove_root_tag_prefix
train
def _remove_root_tag_prefix(v):
    """
    Removes 'voe' namespace prefix from root tag.

    When we load in a VOEvent, the root element has a tag prefixed by
    the VOE namespace, e.g. {http://www.ivoa.net/xml/VOEvent/v2.0}VOEvent
    Because objectify expects child elements to have the same namespace as
    their parent, this breaks the python-attribute style access mechanism.
    We can get around it without altering root, via e.g
     who = v['{}Who']

    Alternatively, we can temporarily ditch the namespace altogether.
    This makes access to elements easier, but requires care to reinsert
    the namespace upon output.

    I've gone for the latter option.
    """
    if v.prefix:
        # Create subelement without a prefix via etree.SubElement
        # (stashes the original prefix so _reinsert_root_tag_prefix can
        # restore it later).
        etree.SubElement(v, 'original_prefix')
        # Now carefully access said named subelement (without prefix cascade)
        # and alter the first value in the list of children with this name...
        # LXML syntax is a minefield!
        v['{}original_prefix'][0] = v.prefix
        v.tag = v.tag.replace(''.join(('{', v.nsmap[v.prefix], '}')), '')
        # Now v.tag = '{}VOEvent', v.prefix = None
    return
python
{ "resource": "" }
q41714
_reinsert_root_tag_prefix
train
def _reinsert_root_tag_prefix(v):
    """
    Returns namespace prefix to root tag, if it had one.
    """
    if hasattr(v, 'original_prefix'):
        # Retrieve the prefix stashed by _remove_root_tag_prefix, drop the
        # bookkeeping child element, then re-qualify the root tag.
        original_prefix = v.original_prefix
        del v.original_prefix
        v.tag = ''.join(('{', v.nsmap[original_prefix], '}VOEvent'))
    return
python
{ "resource": "" }
q41715
_listify
train
def _listify(x):
    """Ensure x is iterable; if not then enclose it in a list and return it.

    Strings are treated as scalars (wrapped in a list), not as iterables
    of characters.
    """
    if isinstance(x, string_types):
        return [x]
    # ``collections.Iterable`` was removed in Python 3.10; the ABC lives in
    # ``collections.abc``. Fall back for very old interpreters without it.
    iterable_abc = getattr(collections, 'abc', collections)
    if isinstance(x, iterable_abc.Iterable):
        return x
    return [x]
python
{ "resource": "" }
q41716
Identity.list
train
def list(self, label=None, per_page=20, page=1):
    """ Get a list of identities that have been created

        :param per_page: The number of results per page returned
        :type per_page: int
        :param page: The page number of the results
        :type page: int
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    query = dict(per_page=per_page, page=page)
    # Only include the label filter when one was actually supplied.
    if label:
        query.update(label=label)
    return self.request.get('', query)
python
{ "resource": "" }
q41717
Identity.update
train
def update(self, id, label=None, status=None, master=None):
    """ Update an Identity

        :param label: The label to give this new identity
        :param status: The status of this identity. Default: 'active'
        :param master: Represents whether this identity is a master.
            Default: False
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    # Only truthy fields are sent; falsy values keep server-side defaults.
    payload = {}
    for key, value in (('label', label), ('status', status),
                       ('master', master)):
        if value:
            payload[key] = value
    return self.request.put(str(id), payload)
python
{ "resource": "" }
q41718
PylonTask.get
train
def get(self, id, service='facebook', type='analysis'):
    """ Get a given Pylon task

        :param id: The ID of the task
        :type id: str
        :param service: The PYLON service (facebook)
        :type service: str
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    endpoint = '/'.join((service, 'task', type, id))
    return self.request.get(endpoint)
python
{ "resource": "" }
q41719
PylonTask.list
train
def list(self, per_page=None, page=None, status=None, service='facebook'):
    """ Get a list of Pylon tasks

        :param per_page: How many tasks to display per page
        :type per_page: int
        :param page: Which page of tasks to display
        :type page: int
        :param status: The status of the tasks to list
        :type status: string
        :param service: The PYLON service (facebook)
        :type service: str
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    # Pagination params are sent when given (0 is legal); status only
    # when truthy.
    query = {key: value
             for key, value in (('per_page', per_page), ('page', page))
             if value is not None}
    if status:
        query['status'] = status
    return self.request.get('%s/task' % service, query)
python
{ "resource": "" }
q41720
PylonTask.create
train
def create(self, subscription_id, name, parameters, type='analysis',
           service='facebook'):
    """ Create a PYLON task

        :param subscription_id: The ID of the recording to create the
            task for
        :type subscription_id: str
        :param name: The name of the new task
        :type name: str
        :param parameters: The parameters for this task
        :type parameters: dict
        :param type: The type of analysis to create, currently only
            'analysis' is accepted
        :type type: str
        :param service: The PYLON service (facebook)
        :type service: str
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    payload = dict(
        subscription_id=subscription_id,
        name=name,
        parameters=parameters,
        type=type,
    )
    return self.request.post(service + '/task/', payload)
python
{ "resource": "" }
q41721
StickyUploadWidget.value_from_datadict
train
def value_from_datadict(self, data, files, name):
    """Returns uploaded file from serialized value."""
    upload = super(StickyUploadWidget, self).value_from_datadict(data, files, name)
    if upload is not None:
        # File was posted or cleared as normal
        return upload
    else:
        # Try the hidden input
        hidden_name = self.get_hidden_name(name)
        value = data.get(hidden_name, None)
        if value is not None:
            upload = open_stored_file(value, self.url)
            if upload is not None:
                # NOTE: '_seralized_location' (sic) — the misspelling is
                # read back by render(), so it must stay consistent.
                setattr(upload, '_seralized_location', value)
        # ``upload`` may still be None if nothing was stored.
        return upload
python
{ "resource": "" }
q41722
StickyUploadWidget.render
train
def render(self, name, value, attrs=None, renderer=None):
    """Include a hidden input to store the serialized upload value."""
    # '_seralized_location' (sic) is set by value_from_datadict; the
    # shared misspelling is what makes the value round-trip.
    location = getattr(value, '_seralized_location', '')
    if location and not hasattr(value, 'url'):
        # Give the stored (not-yet-saved) upload a placeholder URL so the
        # parent widget's "initial" template can render it.
        value.url = '#'
    if hasattr(self, 'get_template_substitution_values'):
        # Django 1.8-1.10
        self.template_with_initial = (
            '%(initial_text)s: %(initial)s %(clear_template)s'
            '<br />%(input_text)s: %(input)s')
    attrs = attrs or {}
    attrs.update({'data-upload-url': self.url})
    hidden_name = self.get_hidden_name(name)
    kwargs = {}
    if django_version >= (1, 11):
        # ``renderer`` keyword only exists on Django >= 1.11.
        kwargs['renderer'] = renderer
    parent = super(StickyUploadWidget, self).render(name, value, attrs=attrs, **kwargs)
    hidden = forms.HiddenInput().render(hidden_name, location, **kwargs)
    return mark_safe(parent + '\n' + hidden)
python
{ "resource": "" }
q41723
configure
train
def configure(config=None, datastore=None, nested=False):
    """
    Useful for when you need to control Switchboard's setup

    :param config: mapping of settings to apply (default: empty)
    :param datastore: optional datastore to install on ``Switch.ds``
    :param nested: if True, ``config`` is first flattened via
        ``nested_config``
    """
    # ``config={}`` (the original default) is a shared mutable default
    # argument; use None as the sentinel instead.
    if config is None:
        config = {}
    if nested:
        config = nested_config(config)
    # Re-read settings to make sure we have everything.
    # XXX It would be really nice if we didn't need to do this.
    Settings.init(**config)
    if datastore:
        Switch.ds = datastore
    # Register the builtins
    __import__('switchboard.builtins')
python
{ "resource": "" }
q41724
DupePredictor.get_dupe_prob
train
def get_dupe_prob(self, url):
    """ A probability of given url being a duplicate of some content that
    has already been seen.
    """
    path, query = _parse_url(url)
    dupestats = []
    # Collect every non-None DupeStat found under the given
    # (stats-dict, key) pairs.
    extend_ds = lambda x: dupestats.extend(filter(None, (
        ds_dict.get(key) for ds_dict, key in x)))
    if self.urls_by_path.get(path):
        extend_ds([(self.path_dupstats, path)])
    # If param is in the query
    for param, value in query.items():
        qwp_key = _q_key(_without_key(query, param))
        # Have we seen the query with param changed or removed?
        has_changed = self.urls_by_path_qwp.get((path, param, qwp_key))
        has_removed = self.urls_by_path_q.get((path, qwp_key))
        if has_changed or has_removed:
            extend_ds(self._param_dupstats(path, param, qwp_key))
            if has_removed:
                extend_ds(self._param_value_dupstats(path, param, value))
    # If param is not in the query, but we've crawled a page when it is
    q_key = _q_key(query)
    for param in (self.params_by_path.get(path, set()) - set(query)):
        if self.urls_by_path_qwp.get((path, param, q_key)):
            extend_ds(self._param_dupstats(path, param, q_key))
            # FIXME - this could be a long list of param values,
            # it's better to somehow store only high-probability values?
            for value in self.param_values.get((path, param), set()):
                extend_ds(self._param_value_dupstats(path, param, value))
    # The most pessimistic (highest) probability wins; no evidence => 0.
    return max(ds.get_prob() for ds in dupestats) if dupestats else 0.
python
{ "resource": "" }
q41725
DupePredictor._nodup_filter
train
def _nodup_filter(self, min_hash, all_urls, max_sample=200): """ This filters results that are considered not duplicates. But we really need to check that, because lsh.query does not always return ALL duplicates, esp. when there are a lot of them, so here we double-check and return only urls that are NOT duplicates. Return estimated number of not duplicates. """ if not all_urls: return 0 urls = random.sample(all_urls, max_sample) \ if len(all_urls) > max_sample else all_urls filtered = [ url for url in urls if min_hash.jaccard(self.seen_urls[url].min_hash) < self.jaccard_threshold] return int(len(filtered) / len(urls) * len(all_urls))
python
{ "resource": "" }
q41726
UnrenderedAdmin.get_queryset
train
def get_queryset(self, request):
    """
    Remove ``show_rendered`` from the context, if it's there.
    """
    qs = super(UnrenderedAdmin, self).get_queryset(request)
    # NOTE(review): relies on the queryset's Query exposing a mutable
    # ``context`` dict (a non-public Django API) — verify against the
    # Django version in use.
    if 'show_rendered' in qs.query.context:
        del qs.query.context['show_rendered']
    return qs
python
{ "resource": "" }
q41727
Cases.get_one
train
def get_one(self, cls=None, **kwargs):
    """Returns a one case.

    :param cls: optional class to instantiate; falls back to
        ``self._CasesClass``.
    :param kwargs: attribute name/value pairs set on the case.
    :return: the constructed case object.
    """
    case = cls() if cls else self._CasesClass()
    # ``dict.items()`` replaces Python-2-only ``iteritems``; iteration
    # behaviour is identical on both major versions.
    for attr, value in kwargs.items():
        setattr(case, attr, value)
    return case
python
{ "resource": "" }
q41728
Cases.get_each_choice
train
def get_each_choice(self, cls=None, **kwargs):
    """Returns a generator that generates positive cases by "each
    choice" algorithm.
    """
    # The first value of each attribute serves as the fill-in when the
    # value lists have unequal lengths (izip_longest pads with None).
    defaults = {attr: kwargs[attr][0] for attr in kwargs}
    # NOTE: izip_longest/izip are Python-2 itertools names; this module
    # targets Python 2 (iteritems/xrange appear elsewhere in the file).
    for set_of_values in izip_longest(*kwargs.values()):
        case = cls() if cls else self._CasesClass()
        for attr, value in izip(kwargs.keys(), set_of_values):
            if value is None:
                value = defaults[attr]
            setattr(case, attr, value)
        yield case
python
{ "resource": "" }
q41729
Cases.get_pairwise
train
def get_pairwise(self, cls=None, **kwargs):
    """Returns a generator that generates positive cases by "pairwise"
    algorithm.
    """
    # ``allpairs`` (external helper) yields value combinations covering
    # every pair of attribute values at least once.
    for set_of_values in allpairs(kwargs.values()):
        case = cls() if cls else self._CasesClass()
        for attr, value in izip(kwargs.keys(), set_of_values):
            setattr(case, attr, value)
        yield case
python
{ "resource": "" }
q41730
Cases.get_negative
train
def get_negative(self, cls=None, **kwargs):
    """Returns a generator that generates negative cases by "each
    negative value in separate case" algorithm.

    Each attribute's value list ends with a ``{"default": ...}`` entry;
    every preceding entry is a negative value that gets its own case,
    with all other attributes held at their defaults.
    """
    # ``dict.items()`` replaces Python-2-only ``iteritems``; iteration
    # behaviour is identical on both major versions.
    for attr, set_of_values in kwargs.items():
        defaults = {key: kwargs[key][-1]["default"] for key in kwargs}
        defaults.pop(attr)
        for value in set_of_values[:-1]:
            case = cls() if cls else self._CasesClass()
            setattr(case, attr, value)
            for key in defaults:
                setattr(case, key, defaults[key])
            yield case
python
{ "resource": "" }
q41731
Cases.get_mix_gen
train
def get_mix_gen(self, sample):
    """Returns function that returns sequence of characters of a given
    length from a given sample.
    """
    def mix(length):
        # Retry until the stripped result keeps the requested length
        # (leading/trailing whitespace drawn from ``sample`` would
        # otherwise shorten it). A loop replaces the original unbounded
        # recursion, avoiding RecursionError on whitespace-heavy samples.
        # ``range`` replaces Python-2-only ``xrange``.
        # NOTE: still loops forever if ``sample`` cannot produce such a
        # string (e.g. whitespace-only sample with length > 0).
        while True:
            result = "".join(
                random.choice(sample) for _ in range(length)).strip()
            if len(result) == length:
                return result
    return mix
python
{ "resource": "" }
q41732
set_mysql_connection
train
def set_mysql_connection(host='localhost', user='pyctd_user',
                         password='pyctd_passwd', db='pyctd',
                         charset='utf8'):
    """Sets the connection using MySQL Parameters"""
    # Assemble the SQLAlchemy URL, then delegate persistence to
    # set_connection.
    connection_string = (
        'mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'
        .format(host=host, user=user, passwd=password, db=db,
                charset=charset))
    set_connection(connection_string)
python
{ "resource": "" }
q41733
set_connection
train
def set_connection(connection=defaults.sqlalchemy_connection_string_default):
    """Set the connection string for SQLAlchemy

    :param str connection: SQLAlchemy connection string
    """
    cfp = defaults.config_file_path
    config = RawConfigParser()

    if not os.path.exists(cfp):
        # No config file yet: create it with the database section.
        with open(cfp, 'w') as config_file:
            config['database'] = {'sqlalchemy_connection_string': connection}
            config.write(config_file)
            log.info('create configuration file %s', cfp)
    else:
        # Existing file: update the stored connection string in place.
        # NOTE(review): config.set raises NoSectionError if the file lacks
        # a [database] section — presumed always present; verify.
        config.read(cfp)
        config.set('database', 'sqlalchemy_connection_string', connection)
        with open(cfp, 'w') as configfile:
            config.write(configfile)
python
{ "resource": "" }
q41734
BaseDbManager.set_connection_string_by_user_input
train
def set_connection_string_by_user_input(self):
    """Prompts the user to input a connection string.

    An empty (or whitespace-only) response falls back to the standard
    default connection string.
    """
    user_connection = input(
        bcolors.WARNING + "\nFor any reason connection to " + bcolors.ENDC +
        bcolors.FAIL + "{}".format(self.connection) + bcolors.ENDC +
        bcolors.WARNING + " is not possible.\n\n" + bcolors.ENDC +
        "For more information about SQLAlchemy connection strings go to:\n" +
        "http://docs.sqlalchemy.org/en/latest/core/engines.html\n\n"
        "Please insert a valid connection string:\n" +
        bcolors.UNDERLINE + "Examples:\n\n" + bcolors.ENDC +
        "MySQL (recommended):\n" +
        bcolors.OKGREEN + "\tmysql+pymysql://user:passwd@localhost/database?charset=utf8\n" + bcolors.ENDC +
        "PostgreSQL:\n" +
        bcolors.OKGREEN + "\tpostgresql://scott:tiger@localhost/mydatabase\n" + bcolors.ENDC +
        "MsSQL (pyodbc have to be installed):\n" +
        bcolors.OKGREEN + "\tmssql+pyodbc://user:passwd@database\n" + bcolors.ENDC +
        "SQLite (always works):\n" +
        " - Linux:\n" +
        bcolors.OKGREEN + "\tsqlite:////absolute/path/to/database.db\n" + bcolors.ENDC +
        " - Windows:\n" +
        bcolors.OKGREEN + "\tsqlite:///C:\\path\\to\\database.db\n" + bcolors.ENDC +
        "Oracle:\n" +
        bcolors.OKGREEN + "\toracle://user:passwd@127.0.0.1:1521/database\n\n" + bcolors.ENDC +
        "[RETURN] for standard connection {}:\n".format(defaults.sqlalchemy_connection_string_default)
    )
    # BUG FIX: the original test ``not (user_connection or
    # user_connection.strip())`` let whitespace-only input through and
    # stored an empty connection string; empty-or-whitespace should fall
    # back to the default.
    if not user_connection.strip():
        user_connection = defaults.sqlalchemy_connection_string_default

    set_connection(user_connection.strip())
python
{ "resource": "" }
q41735
BaseDbManager.drop_all
train
def drop_all(self):
    """Drops all tables in the database"""
    log.info('dropping tables in %s', self.engine.url)
    # Commit before and after the DDL so the session does not hold a
    # transaction open across the drop.
    self.session.commit()
    models.Base.metadata.drop_all(self.engine)
    self.session.commit()
python
{ "resource": "" }
q41736
DbManager.import_tables
train
def import_tables(self, only_tables=None, exclude_tables=None):
    """Imports all data in database tables

    :param set[str] only_tables: names of tables to be imported
    :param set[str] exclude_tables: names of tables to be excluded
    """
    for table in self.tables:
        # A table is imported when it passes both filters: present in
        # ``only_tables`` (if given) and absent from ``exclude_tables``.
        included = only_tables is None or table.name in only_tables
        excluded = exclude_tables is not None and table.name in exclude_tables
        if included and not excluded:
            self.import_table(table)
python
{ "resource": "" }
q41737
DbManager.get_column_names_from_file
train
def get_column_names_from_file(file_path):
    """returns column names from CTD download file

    :param str file_path: path to CTD download file
    """
    # Transparently handle gzip-compressed downloads.
    if file_path.endswith('.gz'):
        file_handler = io.TextIOWrapper(io.BufferedReader(gzip.open(file_path)))
    else:
        file_handler = open(file_path, 'r')

    with file_handler as file:
        fields_section = False
        for raw_line in file:
            line = raw_line.strip()
            if not fields_section:
                # Look for the "# Fields:" marker line.
                if re.search('#\s*Fields\s*:$', line):
                    fields_section = True
            elif line not in ('', '#'):
                # First non-empty comment after the marker lists the
                # tab-separated column names (leading '#' dropped).
                return [column.strip() for column in line[1:].split("\t")]
python
{ "resource": "" }
q41738
comma_join
train
def comma_join(fields, oxford=True):
    """
    Join together words, quoting each and inserting 'and' before the last.
    """
    quoted = ["'%s'" % field for field in fields]
    if not quoted:
        return "nothing"
    if len(quoted) == 1:
        return quoted[0]
    if len(quoted) == 2:
        return " and ".join(quoted)
    # Three or more: comma-separate, optionally with an Oxford comma.
    tail = ", and %s" if oxford else " and %s"
    return ", ".join(quoted[:-1]) + (tail % quoted[-1])
python
{ "resource": "" }
q41739
ThreadHandler.run
train
def run(self, target, args=()):
    """
    Run a function in a separate thread.

    :param target: the function to run.
    :param args: the parameters to pass to the function. The target
        additionally receives a set ``threading.Event`` as its final
        argument; clearing it (see ``stop``) signals shutdown.
    """
    stop_signal = threading.Event()
    stop_signal.set()
    worker = threading.Thread(target=target, args=args + (stop_signal,))
    self.thread_pool.append(worker)
    self.run_events.append(stop_signal)
    worker.start()
python
{ "resource": "" }
q41740
ThreadHandler.stop
train
def stop(self):
    """ Stop all functions running in the thread handler."""
    # Signal every worker first, then wait for each one to finish.
    for event in self.run_events:
        event.clear()
    for worker in self.thread_pool:
        worker.join()
python
{ "resource": "" }
q41741
NoiseGenerator.generate
train
def generate(self, labels, split_idx):
    """Generate peak-specific noise abstract method, must be reimplemented
    in a subclass.

    :param tuple labels: Dimension labels of a peak.
    :param int split_idx: Index specifying which peak list split
        parameters to use.
    :return: List of noise values for dimensions ordered as they appear
        in a peak.
    :rtype: :py:class:`list`
    """
    # Only the atom type (first character of each label) selects the
    # noise parameters.
    atom_labels = [label[0] for label in labels]
    noise = []
    distribution_function = distributions[self.distribution_name]["function"]
    for label in atom_labels:
        params = [self.parameters["{}_{}".format(label, param)][split_idx]
                  for param in self.distribution_parameter_names]
        if None in params:
            # Missing parameters mean "no noise" for this dimension.
            dim_noise = 0.0
        else:
            # BUG FIX: the original ``except ValueError: raise ValueError``
            # replaced an informative error with a message-less one; let
            # the distribution function's ValueError propagate unchanged.
            dim_noise = distribution_function(*params)
        noise.append(dim_noise)
    return noise
python
{ "resource": "" }
q41742
version_diff
train
def version_diff(version1, version2):
    """Return string representing the diff between package versions.

    We're interested in whether this is a major, minor, patch or 'other'
    update. This method will compare the two versions and return None if
    they are the same, else it will return a string value indicating the
    type of diff - 'major', 'minor', 'patch', 'other'.

    Args:
        version1: the Version object we are interested in (e.g. current)
        version2: the Version object to compare against (e.g. latest)

    Returns a string - 'major', 'minor', 'patch', 'other', or None
        if the two are identical.
    """
    if version1 is None or version2 is None:
        return 'unknown'
    if version1 == version2:
        return 'none'
    # First differing component wins; equal components but unequal
    # versions (e.g. differing pre-release tags) fall through to 'other'.
    changed = (part for part in ('major', 'minor', 'patch')
               if getattr(version1, part) != getattr(version2, part))
    return next(changed, 'other')
python
{ "resource": "" }
q41743
Package.data
train
def data(self):
    """Fetch latest data from PyPI, and cache for 30s."""
    key = cache_key(self.name)
    data = cache.get(key)
    if data is None:
        # Cache miss: hit PyPI and repopulate.
        logger.debug("Updating package info for %s from PyPI.", self.name)
        data = requests.get(self.url).json()
        # Expiry governed by PYPI_CACHE_EXPIRY (docstring says 30s —
        # confirm against the constant's definition).
        cache.set(key, data, PYPI_CACHE_EXPIRY)
    return data
python
{ "resource": "" }
q41744
_query_wrap
train
def _query_wrap(fun, *args, **kwargs):
    """Wait until at least QUERY_WAIT_TIME seconds have passed since the
    last invocation of this function, then call the given function with
    the given arguments.
    """
    # Serialise callers under one lock so the rate limit holds across
    # threads; the call itself also happens inside the lock.
    with _query_lock:
        global _last_query_time
        since_last_query = time.time() - _last_query_time
        if since_last_query < QUERY_WAIT_TIME:
            # Sleep off the remainder of the rate-limit window.
            time.sleep(QUERY_WAIT_TIME - since_last_query)
        _last_query_time = time.time()
        return fun(*args, **kwargs)
python
{ "resource": "" }
q41745
extract
train
def extract(pcmiter, samplerate, channels, duration = -1):
    """Given a PCM data stream, extract fingerprint data from the audio.

    Returns a byte string of fingerprint data. Raises an ExtractionError
    if fingerprinting fails.
    """
    extractor = _fplib.Extractor(samplerate, channels, duration)

    # Get first block.
    try:
        next_block = next(pcmiter)
    except StopIteration:
        # Empty stream: nothing to fingerprint.
        raise ExtractionError()

    # Get and process subsequent blocks.
    # One block of lookahead is kept so the final block can be flagged
    # with done=True for the extractor.
    while True:
        # Shift over blocks.
        cur_block = next_block
        try:
            next_block = next(pcmiter)
        except StopIteration:
            next_block = None
        done = next_block is None

        # Process the block.
        try:
            if extractor.process(cur_block, done):
                # Success!
                break
        except RuntimeError as exc:
            # Exception from fplib. Most likely the file is too short.
            raise ExtractionError(exc.args[0])

        # End of file but processor never became ready?
        if done:
            raise ExtractionError()

    # Get resulting fingerprint data.
    out = extractor.result()
    if out is None:
        raise ExtractionError()

    # Free extractor memory.
    extractor.free()
    return out
python
{ "resource": "" }
q41746
match_file
train
def match_file(apikey, path, metadata=None):
    """Uses the audioread library to decode an audio file and match it.

    Decodes *path* with audioread and delegates to match() with the
    stream's sample rate, truncated duration, and channel count.
    """
    # Local import: audioread is only required when matching from a file.
    import audioread
    with audioread.audio_open(path) as f:
        return match(apikey, iter(f), f.samplerate, int(f.duration),
                     f.channels, metadata)
python
{ "resource": "" }
q41747
update_constants
train
def update_constants(nmrstar2cfg="", nmrstar3cfg="", resonance_classes_cfg="", spectrum_descriptions_cfg=""):
    """Update module-level constant dictionaries.

    Loads the default JSON configuration shipped with the package, then
    overlays any user-supplied configuration files on top, and finally
    updates the shared module-level dicts in place.

    :param str nmrstar2cfg: Path to a user NMR-STAR 2 constants JSON file.
    :param str nmrstar3cfg: Path to a user NMR-STAR 3 constants JSON file.
    :param str resonance_classes_cfg: Path to a user resonance classes JSON file.
    :param str spectrum_descriptions_cfg: Path to a user spectrum descriptions JSON file.
    :return: None
    :rtype: :py:obj:`None`
    """
    nmrstar_constants = {}
    resonance_classes = {}
    spectrum_descriptions = {}

    this_directory = os.path.dirname(__file__)
    nmrstar2_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar2.json")
    nmrstar3_config_filepath = os.path.join(this_directory, "conf/constants_nmrstar3.json")
    resonance_classes_config_filepath = os.path.join(this_directory, "conf/resonance_classes.json")
    spectrum_descriptions_config_filepath = os.path.join(this_directory, "conf/spectrum_descriptions.json")

    # Packaged defaults first ...
    with open(nmrstar2_config_filepath, "r") as nmrstar2config, open(nmrstar3_config_filepath, "r") as nmrstar3config:
        nmrstar_constants["2"] = json.load(nmrstar2config)
        nmrstar_constants["3"] = json.load(nmrstar3config)

    with open(resonance_classes_config_filepath, "r") as config:
        resonance_classes.update(json.load(config))

    with open(spectrum_descriptions_config_filepath, "r") as config:
        spectrum_descriptions.update(json.load(config))

    # ... then user overrides.
    if nmrstar2cfg:
        with open(nmrstar2cfg, "r") as nmrstar2config:
            nmrstar_constants["2"].update(json.load(nmrstar2config))

    if nmrstar3cfg:
        # BUG FIX: previously opened nmrstar2cfg here, so a user-supplied
        # NMR-STAR 3 config was silently read from the wrong file.
        with open(nmrstar3cfg, "r") as nmrstar3config:
            nmrstar_constants["3"].update(json.load(nmrstar3config))

    if resonance_classes_cfg:
        # BUG FIX: previously opened nmrstar2cfg here as well.
        with open(resonance_classes_cfg, "r") as config:
            resonance_classes.update(json.load(config))

    if spectrum_descriptions_cfg:
        with open(spectrum_descriptions_cfg, "r") as config:
            spectrum_descriptions.update(json.load(config))

    # Mutate the shared module-level dicts in place so existing references
    # elsewhere in the package see the updates.
    NMRSTAR_CONSTANTS.update(nmrstar_constants)
    RESONANCE_CLASSES.update(resonance_classes)
    SPECTRUM_DESCRIPTIONS.update(spectrum_descriptions)
python
{ "resource": "" }
q41748
list_spectrum_descriptions
train
def list_spectrum_descriptions(*args):
    """List available spectrum descriptions usable for peak list simulation.

    :param str args: Spectrum name(s), e.g.
        list_spectrum_descriptions("HNCO", "HNcoCACB");
        leave empty to list every known description.
    :return: None
    :rtype: :py:obj:`None`
    """
    if not args:
        # No names given: dump the whole catalogue.
        pprint.pprint(SPECTRUM_DESCRIPTIONS, width=120)
        return
    for name in args:
        # Unknown names print as {name: None} rather than raising.
        pprint.pprint({name: SPECTRUM_DESCRIPTIONS.get(name, None)}, width=120)
python
{ "resource": "" }
q41749
StarFile._is_nmrstar
train
def _is_nmrstar(string): """Test if input string is in NMR-STAR format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in NMR-STAR format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False` """ if (string[0:5] == u"data_" and u"save_" in string) or (string[0:5] == b"data_" and b"save_" in string): return string return False
python
{ "resource": "" }
q41750
NMRStarFile._build_saveframe
train
def _build_saveframe(self, lexer):
    """Build NMR-STAR file saveframe.

    :param lexer: instance of the lexical analyzer.
    :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
    :return: Saveframe dictionary, or None when the saveframe was skipped
        (its category is not in ``self._frame_categories``).
    :rtype: :py:class:`collections.OrderedDict` or :py:obj:`None`
    """
    odict = OrderedDict()
    loop_count = 0
    token = next(lexer)
    while token != u"save_":
        try:
            if token[0] == u"_":
                # This strips off the leading underscore of tagnames for readability
                odict[token[1:]] = next(lexer)
                # Skip the saveframe if it's not in the list of wanted categories
                if self._frame_categories:
                    if token == "_Saveframe_category" and odict[token[1:]] not in self._frame_categories:
                        raise SkipSaveFrame()
            elif token == u"loop_":
                odict[u"loop_{}".format(loop_count)] = self._build_loop(lexer)
                loop_count += 1
            elif token.lstrip().startswith(u"#"):
                # Comment token: ignored. The finally-clause below still
                # advances to the next token.
                continue
            else:
                print("Error: Invalid token {}".format(token), file=sys.stderr)
                print("In _build_saveframe try block", file=sys.stderr)
                raise InvalidToken("{}".format(token))
        except IndexError:
            # token was an empty string; report and re-raise.
            print("Error: Invalid token {}".format(token), file=sys.stderr)
            print("In _build_saveframe except block", file=sys.stderr)
            raise
        except SkipSaveFrame:
            # Consume the rest of the frame; odict=None marks it skipped.
            self._skip_saveframe(lexer)
            odict = None
        finally:
            # Advance the token (runs even on `continue`); a skipped frame
            # forces loop exit by faking the closing "save_" token.
            if odict is None:
                token = u"save_"
            else:
                token = next(lexer)
    return odict
python
{ "resource": "" }
q41751
NMRStarFile._build_loop
train
def _build_loop(self, lexer): """Build saveframe loop. :param lexer: instance of lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Fields and values of the loop. :rtype: :py:class:`tuple` """ fields = [] values = [] token = next(lexer) while token[0] == u"_": fields.append(token[1:]) token = next(lexer) while token != u"stop_": values.append(token) token = next(lexer) assert float(len(values) / len(fields)).is_integer(), \ "Error in loop construction: number of fields must be equal to number of values." values = [OrderedDict(zip(fields, values[i:i + len(fields)])) for i in range(0, len(values), len(fields))] return fields, values
python
{ "resource": "" }
q41752
NMRStarFile._skip_saveframe
train
def _skip_saveframe(self, lexer): """Skip entire saveframe - keep emitting tokens until the end of saveframe. :param lexer: instance of the lexical analyzer class. :type lexer: :class:`~nmrstarlib.bmrblex.bmrblex` :return: None :rtype: :py:obj:`None` """ token = u"" while token != u"save_": token = next(lexer)
python
{ "resource": "" }
q41753
NMRStarFile.print_saveframe
train
def print_saveframe(self, sf, f=sys.stdout, file_format="nmrstar", tw=3):
    """Print saveframe into a file or stdout.

    We need to keep track of how far over everything is tabbed. The "tab
    width" variable tw does this for us.

    :param str sf: Saveframe name.
    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `nmrstar` or `json`.
    :param int tw: Tab width (number of spaces of indentation).
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "nmrstar":
        for sftag in self[sf].keys():
            # handle loops (delegated, with doubled indentation)
            if sftag[:5] == "loop_":
                print(u"\n{}loop_".format(tw * u" "), file=f)
                self.print_loop(sf, sftag, f, file_format, tw * 2)
                print(u"\n{}stop_".format(tw * u" "), file=f)
            # handle the NMR-Star "multiline string" (semicolon-delimited)
            elif self[sf][sftag].endswith(u"\n"):
                print(u"{}_{}".format(tw * u" ", sftag), file=f)
                print(u";\n{};".format(self[sf][sftag]), file=f)
            elif len(self[sf][sftag].split()) > 1:
                # need to escape value with quotes (i.e. u"'{}'".format())
                # if value consists of two or more words
                print(u"{}_{}\t {}".format(tw * u" ", sftag, u"'{}'".format(self[sf][sftag])), file=f)
            else:
                print(u"{}_{}\t {}".format(tw * u" ", sftag, self[sf][sftag]), file=f)
    elif file_format == "json":
        print(json.dumps(self[sf], sort_keys=False, indent=4), file=f)
python
{ "resource": "" }
q41754
NMRStarFile.print_loop
train
def print_loop(self, sf, sftag, f=sys.stdout, file_format="nmrstar", tw=3):
    """Print a saveframe loop into a file or stdout.

    :param str sf: Saveframe name.
    :param str sftag: Saveframe tag (field name) holding the loop.
    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `nmrstar` or `json`.
    :param int tw: Tab width (number of spaces of indentation).
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "nmrstar":
        pad = tw * u" "
        loop_fields = self[sf][sftag][0]
        loop_rows = self[sf][sftag][1]
        # Field names first, each with a leading underscore restored.
        for field_name in loop_fields:
            print(u"{}_{}".format(pad, field_name), file=f)
        # Blank separator line between fields and values.
        print(u"", file=f)
        # Then one line per row; multi-word values get quoted.
        for row in loop_rows:
            rendered = [u"'{}'".format(value) if len(value.split()) > 1 else value
                        for value in row.values()]
            print(u"{}{}".format(pad, u" ".join(rendered)), file=f)
    elif file_format == "json":
        print(json.dumps(self[sf][sftag], sort_keys=False, indent=4), file=f)
python
{ "resource": "" }
q41755
NMRStarFile.chem_shifts_by_residue
train
def chem_shifts_by_residue(self, amino_acids=None, atoms=None, amino_acids_and_atoms=None, nmrstar_version="3"):
    """Organize chemical shifts by amino acid residue.

    :param list amino_acids: List of amino acids three-letter codes.
    :param list atoms: List of BMRB atom type codes.
    :param dict amino_acids_and_atoms: Amino acid and its atoms key-value pairs
        (mutually exclusive with the two parameters above).
    :param str nmrstar_version: Version of NMR-STAR format to use for look up
        chemical shifts loop.
    :return: List of OrderedDict per each chain.
    :rtype: :py:class:`list` of :py:class:`collections.OrderedDict`
    """
    if (amino_acids_and_atoms and amino_acids) or (amino_acids_and_atoms and atoms):
        raise ValueError('"amino_acids_and_atoms" parameter cannot be used simultaneously with '
                         '"amino_acids" and "atoms" parameters, one or another must be provided.')

    # Version-specific tag names for the chemical-shifts loop.
    chemshifts_loop = NMRSTAR_CONSTANTS[nmrstar_version]["chemshifts_loop"]
    aminoacid_seq_id = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_seq_id"]
    aminoacid_code = NMRSTAR_CONSTANTS[nmrstar_version]["aminoacid_code"]
    atom_code = NMRSTAR_CONSTANTS[nmrstar_version]["atom_code"]
    chemshift_value = NMRSTAR_CONSTANTS[nmrstar_version]["chemshift_value"]

    chains = []
    # Collect one residue->shifts mapping per chemical-shifts loop found.
    for saveframe in self:
        if saveframe == u"data" or saveframe.startswith(u"comment"):
            continue
        else:
            for ind in self[saveframe].keys():
                if ind.startswith(u"loop_"):
                    if list(self[saveframe][ind][0]) == chemshifts_loop:
                        chem_shifts_dict = OrderedDict()
                        for entry in self[saveframe][ind][1]:
                            residue_id = entry[aminoacid_seq_id]
                            chem_shifts_dict.setdefault(residue_id, OrderedDict())
                            chem_shifts_dict[residue_id][u"AA3Code"] = entry[aminoacid_code]
                            chem_shifts_dict[residue_id][u"Seq_ID"] = residue_id
                            chem_shifts_dict[residue_id][entry[atom_code]] = entry[chemshift_value]
                        chains.append(chem_shifts_dict)

    if amino_acids_and_atoms:
        # Keep only listed residues, and within them only listed atoms
        # (plus the bookkeeping keys AA3Code / Seq_ID).
        for chem_shifts_dict in chains:
            for aa_dict in list(chem_shifts_dict.values()):
                if aa_dict[u"AA3Code"].upper() not in list(amino_acids_and_atoms.keys()):
                    chem_shifts_dict.pop(aa_dict[u"Seq_ID"])
                else:
                    for resonance in list(aa_dict.keys()):
                        if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in amino_acids_and_atoms[aa_dict[u"AA3Code"]]:
                            continue
                        else:
                            aa_dict.pop(resonance)
    else:
        if amino_acids:
            # Drop residues whose three-letter code is not requested.
            for chem_shifts_dict in chains:
                for aa_dict in list(chem_shifts_dict.values()):
                    if aa_dict[u"AA3Code"].upper() not in amino_acids:
                        chem_shifts_dict.pop(aa_dict[u"Seq_ID"])
        if atoms:
            # Drop resonances not in the requested atom list.
            for chem_shifts_dict in chains:
                for aa_dict in chem_shifts_dict.values():
                    for resonance in list(aa_dict.keys()):
                        if resonance in (u"AA3Code", u"Seq_ID") or resonance.upper() in atoms:
                            continue
                        else:
                            aa_dict.pop(resonance)
    return chains
python
{ "resource": "" }
q41756
Ndrive.GET
train
def GET(self, func, data):
    """Send GET request to execute Ndrive API

    :param func: The function name you want to execute in Ndrive API.
    :param data: Parameter data for the HTTP request.
    :returns: (True, resultvalue) on success or (False, message) on failure.
    """
    # Every call except the registration lookup requires a valid account.
    if func not in ['getRegisterUserInfo']:
        s, message = self.checkAccount()
        if s is False:
            return False, message

    url = nurls[func]
    r = self.session.get(url, params = data)
    r.encoding = 'utf-8'

    if self.debug:
        print r.text

    try:
        try:
            metadata = json.loads(r.text)
        except:
            # Some endpoints wrap the JSON (e.g. JSONP); strip down to the
            # first '{' and drop the trailing character before parsing.
            metadata = json.loads(r.text[r.text.find('{'):-1])
        message = metadata['message']

        if message == 'success':
            return True, metadata['resultvalue']
        else:
            return False, message
    except:
        # NOTE(review): bare except plus sys.exit(1) makes the final
        # return unreachable; a library call terminating the process is
        # questionable - consider raising instead.
        for e in sys.exc_info():
            print e
        sys.exit(1)
        return False, "Error %s: Failed to send GET request" %func
python
{ "resource": "" }
q41757
Ndrive.POST
train
def POST(self, func, data):
    """Send POST request to execute Ndrive API

    :param func: The function name you want to execute in Ndrive API.
    :param data: Parameter data for the HTTP request.
    :returns: (True, resultvalue-or-resultcode) on success or
        (False, error message) on failure.
    """
    s, message = self.checkAccount()
    if s is False:
        return False, message

    url = nurls[func]
    r = self.session.post(url, data = data)
    r.encoding = 'utf-8'

    if self.debug:
        print r.text.encode("utf-8")

    try:
        metadata = json.loads(r.text)
        message = metadata['message']

        if message == 'success':
            # Some endpoints return 'resultvalue', others only 'resultcode'.
            try:
                return True, metadata['resultvalue']
            except:
                return True, metadata['resultcode']
        else:
            return False, "Error %s: %s" %(func, message)
    except:
        # Response was not valid JSON (or lacked expected keys).
        return False, "Error %s: Failed to send POST request" %func
python
{ "resource": "" }
q41758
Ndrive.getRegisterUserInfo
train
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0):
    """Retrieve information about useridx

    :param svctype: Information about the platform you are using right now.
    :param auth: Authentication type
    :return: (True, metadata) on success or (False, error message) on failure.
        On success ``self.useridx`` is set from the response as a side effect.
    """
    data = {'userid': self.user_id,
            'svctype': svctype,
            'auth': auth
            }
    s, metadata = self.GET('getRegisterUserInfo', data)

    if s is True:
        # Cache the user index for subsequent API calls.
        self.useridx = metadata['useridx']
        return True, metadata
    else:
        return False, metadata
python
{ "resource": "" }
q41759
Ndrive.uploadFile
train
def uploadFile(self, file_obj, full_path, overwrite = False):
    """Upload a file as Ndrive really do.

    >>> nd.uploadFile('~/flower.png','/Picture/flower.png',True)

    Imitates the client's upload sequence: status check, disk-space check,
    upload permission check, then the actual PUT.

    :param file_obj: A file-like object or a path string to upload.
    :param full_path: The full destination path, *including the file name*.
    :param overwrite: Whether to overwrite an existing file at the given
        path. (Default ``False``.)
    """
    # NOTE(review): the first two results are overwritten and only the
    # checkUpload() result gates the PUT - status/disk-space failures are
    # effectively ignored; confirm this is intentional.
    s = self.checkStatus()
    s = self.getDiskSpace()
    s = self.checkUpload(file_obj, full_path, overwrite)

    if s is True:
        self.put(file_obj, full_path, overwrite)
python
{ "resource": "" }
q41760
Ndrive.getDiskSpace
train
def getDiskSpace(self): """Get disk space information. >>> disk_info = nd.getDiskSpace() :return: ``metadata`` if success or ``error message`` :metadata: - expandablespace - filemaxsize - largefileminsize - largefileunusedspace - largefileusedspace - paymentspace - totallargespace - totalspace - unusedspace - usedspace """ data = {'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.POST('getDiskSpace',data) if s is True: usedspace = byte_readable(metadata['usedspace']) totalspace = byte_readable(metadata['totalspace']) print "Capacity: %s / %s" % (usedspace, totalspace) return metadata else: print message
python
{ "resource": "" }
q41761
Ndrive.checkUpload
train
def checkUpload(self, file_obj, full_path = '/', overwrite = False):
    """Check whether it is possible to upload a file.

    >>> s = nd.checkUpload('~/flower.png','/Picture/flower.png')

    :param file_obj: A file-like object or a path string.
    :param str full_path: The full destination path, *including the file
        name*.
    :param overwrite: Whether to overwrite an existing file at the given
        path. (Default ``False``.)
    :return: ``True`` if possible to upload or ``False`` if impossible.
    """
    # Accept either an open file object (use its .name) or a path string.
    try:
        file_obj = file_obj.name
    except:
        file_obj = file_obj # do nothing
    file_size = os.stat(file_obj).st_size
    now = datetime.datetime.now().isoformat()

    data = {'uploadsize': file_size,
            'overwrite': 'T' if overwrite else 'F',
            'getlastmodified': now,
            'dstresource': full_path,
            'userid': self.user_id,
            'useridx': self.useridx,
            }
    s, metadata = self.POST('checkUpload', data)
    if not s:
        print metadata
    return s
python
{ "resource": "" }
q41762
Ndrive.put
train
def put(self, file_obj, full_path, overwrite = False):
    """Upload a file.

    >>> nd.put('./flower.png','/Picture/flower.png')
    >>> nd.put(open('./flower.png','r'),'/Picture/flower.png')

    :param file_obj: A path string or an already-open file object.
    :param full_path: The full destination path, *including the file name*.
    :param overwrite: Whether to overwrite an existing file.
    :return: ``True`` on success, ``False`` otherwise.
    """
    # Accept either a path (open it) or an already-open file object.
    try:
        file_obj = open(file_obj, 'r')
    except:
        file_obj = file_obj # do nothing
    content = file_obj.read()
    # NOTE(review): file_name is computed but never used.
    file_name = os.path.basename(full_path)
    now = datetime.datetime.now().isoformat()
    url = nurls['put'] + full_path

    if overwrite:
        overwrite = 'T'
    else:
        overwrite = 'F'

    headers = {'userid': self.user_id,
               'useridx': self.useridx,
               'MODIFYDATE': now,
               # Sniff the MIME type from the file on disk (python-magic).
               'Content-Type': magic.from_file(file_obj.name, mime=True),
               'charset': 'UTF-8',
               'Origin': 'http://ndrive2.naver.com',
               'OVERWRITE': overwrite,
               'X-Requested-With': 'XMLHttpRequest',
               'NDriveSvcType': 'NHN/DRAGDROP Ver',
               }

    r = self.session.put(url = url, data = content, headers = headers)
    r.encoding = 'utf-8'
    message = json.loads(r.text)['message']

    if message != 'success':
        print "Error put: " + message
        return False
    else:
        print "Success put: " + file_obj.name
        return True
python
{ "resource": "" }
q41763
Ndrive.delete
train
def delete(self, full_path):
    """Delete a file in full_path

    >>> nd.delete('/Picture/flower.png')

    :param full_path: The full path of the file to delete, *including the
        file name*.
    :return: ``True`` if the file was deleted, ``False`` otherwise.
    """
    # NOTE(review): `now` is computed but never used.
    now = datetime.datetime.now().isoformat()
    url = nurls['delete'] + full_path

    headers = {'userid': self.user_id,
               'useridx': self.useridx,
               'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
               'charset': 'UTF-8',
               'Origin': 'http://ndrive2.naver.com',
               }
    try:
        r = self.session.delete(url = url, headers = headers)
        r.encoding = 'utf-8'
    except:
        # Request failed before reaching the server (e.g. malformed path).
        print "Error delete: wrong full_path"
        return False

    message = json.loads(r.text)['message']
    if message != 'success':
        print "Error delete: " + message
        return False
    else:
        return True
python
{ "resource": "" }
q41764
Ndrive.getList
train
def getList(self, full_path, type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000, dummy = 56184):
    """Get a list of files

    >>> nd_list = nd.getList('/', type=3)

    ``type`` values:
        1 - only directories with idxfolder property
        2 - only files
        3 - directories and files with thumbnail info
        4 - only directories except idxfolder
        5 - directories and files without thumbnail info

    ``sort`` values: file (kind), length (size), date (modified),
    credate (uploaded), protect (important flag), name.

    :param full_path: The full path to get the file list for.
    :param type: 1, 2, 3, 4 or 5 (see above).
    :param dept: Depth for the file listing.
    :param sort: Sort key (see above).
    :param order: Sort order ('asc' or 'desc').
    :param startnum: Offset of the first entry (for paging).
    :param pagingrow: Maximum number of entries per page.
    :return: List of metadata dicts (creationdate, getcontentlength,
        getlastmodified, href, resourceno, resourcetype, ...) or ``False``
        when the listing failed.
    """
    if type not in range(1, 6):
        print "Error getList: `type` should be between 1 to 5"
        return False

    data = {'orgresource': full_path,
            'type': type,
            'dept': dept,
            'sort': sort,
            'order': order,
            'startnum': startnum,
            'pagingrow': pagingrow,
            'userid': self.user_id,
            'useridx': self.useridx,
            'dummy': dummy,
            }
    s, metadata = self.POST('getList', data)
    if s is True:
        return metadata
    else:
        print metadata
        return False
python
{ "resource": "" }
q41765
Ndrive.makeDirectory
train
def makeDirectory(self, full_path, dummy = 40841):
    """Make a directory.

    >>> nd.makeDirectory('/test')

    :param full_path: Full path of the directory to create. A trailing
        '/' is appended if missing.
    :param dummy: Opaque request parameter sent to the API as-is.
    :return: ``True`` when the directory was created, ``False`` otherwise.
    """
    # BUG FIX: the trailing-slash check used `is not '/'` (identity
    # comparison with a literal, a SyntaxWarning on modern Python that
    # only works by CPython string-interning accident); use `!=`.
    if full_path[-1] != '/':
        full_path += '/'

    data = {'dstresource': full_path,
            'userid': self.user_id,
            'useridx': self.useridx,
            'dummy': dummy,
            }
    s, metadata = self.POST('makeDirectory', data)
    return s
python
{ "resource": "" }
q41766
Ndrive.makeShareUrl
train
def makeShareUrl(self, full_path, passwd): """Make a share url of directory >>> nd.makeShareUrl('/Picture/flower.png', PASSWORD) Args: full_path: The full path of directory to get share url. Should be end with '/'. ex) /folder/ passwd: Access password for shared directory Returns: URL: share url for a directory False: Failed to share a directory """ if full_path[-1] is not '/': full_path += '/' data = {'_callback': 'window.__jindo_callback._347', 'path': full_path, 'passwd': passwd, 'userid': self.user_id, 'useridx': self.useridx, } s, metadata = self.GET('shareUrl', data) if s: print "URL: %s" % (metadata['href']) return metadata['href'] else: print "Error makeShareUrl: %s" % (metadata) return False
python
{ "resource": "" }
q41767
Ndrive.getFileLink
train
def getFileLink(self, full_path):
    """Get a link of file

    >>> file_link = nd.getFileLink('/Picture/flower.png')

    :param full_path: The full path of the file; should start and end
        with '/'.
    :return: Shared URL string, or ``False`` when the file could not be
        shared.
    """
    prop = self.getProperty(full_path)
    if not prop:
        print "Error getFileLink: wrong full_path"
        return False
    else:
        # Reuse an existing share link if one already exists ...
        prop_url = prop['filelinkurl']
        if prop_url:
            print "URL: " + prop_url
            return prop_url
        else:
            # ... otherwise create a new one from the resource number.
            resourceno = prop["resourceno"]
            url = self.createFileLink(resourceno)
            if url:
                return url
            else:
                return False
python
{ "resource": "" }
q41768
Ndrive.createFileLink
train
def createFileLink(self, resourceno):
    """Make a link of file

    If you don't know ``resourceno``, you'd better use ``getFileLink``.

    :param resourceno: Resource number of the file to create a link for.
    :return: Shared short URL string, or ``False`` on failure.
    """
    data = {'_callback': 'window.__jindo_callback._8920',
            'resourceno': resourceno,
            'userid': self.user_id,
            'useridx': self.useridx,
            }
    s, metadata = self.GET('createFileLink', data)

    if s:
        print "URL: %s" % (metadata['short_url'])
        return metadata['short_url']
    else:
        print "Error createFileLink: %s" % (metadata)
        return False
python
{ "resource": "" }
q41769
Ndrive.getProperty
train
def getProperty(self, full_path, dummy = 56184):
    """Get a file property.

    :param full_path: The full path of the file or directory.
    :return: Property ``metadata`` dict on success (keys include
        creationdate, filetype, getcontentlength, getlastmodified, href,
        protect, resourceno, resourcetype, virusstatus, ...);
        ``False`` on failure.
    """
    payload = {'orgresource': full_path,
               'userid': self.user_id,
               'useridx': self.useridx,
               'dummy': dummy,
               }
    ok, metadata = self.POST('getProperty', payload)
    return metadata if ok is True else False
python
{ "resource": "" }
q41770
Ndrive.getVersionList
train
def getVersionList(self, full_path, startnum = 0, pagingrow = 50, dummy = 54213):
    """Get a version list of a file or dierectory.

    :param full_path: The full path of the file or directory; should
        start with '/'.
    :param startnum: Start version index.
    :param pagingrow: Max # of version list in one page.
    :returns: ``metadata`` list on success (createuser, filesize,
        getlastmodified, href, versioninfo, versionkey), or ``False``
        when there is no history or the request failed.
    """
    data = {'orgresource': full_path,
            'startnum': startnum,
            'pagingrow': pagingrow,
            'userid': self.user_id,
            'useridx': self.useridx,
            'dummy': dummy,
            }
    s, metadata = self.POST('getVersionList', data)
    if s is True:
        return metadata
    else:
        print "Error getVersionList: Cannot get version list"
        return False
python
{ "resource": "" }
q41771
Ndrive.setProperty
train
def setProperty(self, full_path, protect, dummy = 7046):
    """Set the 'protect' (important) flag of a file.

    :param full_path: The full path of the file or directory.
    :param protect: 'Y' or 'N' - mark/unmark as important.
    :return: ``True`` when the property was set, ``False`` otherwise.
    """
    payload = {'orgresource': full_path,
               'protect': protect,
               'userid': self.user_id,
               'useridx': self.useridx,
               'dummy': dummy,
               }
    ok, _metadata = self.POST('setProperty', payload)
    return ok is True
python
{ "resource": "" }
q41772
_choose_read_fs
train
def _choose_read_fs(authority, cache, read_path, version_check, hasher):
    '''
    Context manager returning the appropriate up-to-date readable filesystem

    Use ``cache`` if it is a valid filesystem and has a file at
    ``read_path``, otherwise use ``authority``. If the file at ``read_path``
    is out of date, update the file in ``cache`` before returning it.

    NOTE(review): this is a generator - presumably wrapped with
    ``@contextmanager`` at the definition site; confirm the decorator is
    applied where this chunk was cut from.
    '''
    if cache and cache.fs.isfile(read_path):
        if version_check(hasher(cache.fs.open(read_path, 'rb'))):
            # Cached copy is current - serve it directly.
            yield cache.fs
        elif authority.fs.isfile(read_path):
            # Cached copy is stale - refresh it from the authority first.
            fs.utils.copyfile(
                authority.fs, read_path, cache.fs, read_path)
            yield cache.fs
        else:
            # File exists only in cache and authority lacks it: make sure
            # both parent directories exist, then serve the cache.
            _makedirs(authority.fs, fs.path.dirname(read_path))
            _makedirs(cache.fs, fs.path.dirname(read_path))
            yield cache.fs
    else:
        # No usable cache - fall back to the authority filesystem.
        if not authority.fs.isfile(read_path):
            _makedirs(authority.fs, fs.path.dirname(read_path))
        yield authority.fs
python
{ "resource": "" }
q41773
_get_write_fs
train
def _get_write_fs():
    '''
    Context manager returning a writable filesystem

    Use a temporary directory and clean on exit.

    .. todo:: Evaluate options for using a cached memoryFS or streaming
        object instead of an OSFS(tmp). This could offer significant
        performance improvements. Writing to the cache is less of a
        problem since this would be done in any case, though performance
        could be improved by writing to an in-memory filesystem and then
        writing to both cache and auth.
    '''
    tmp = tempfile.mkdtemp()
    try:
        # Create a writeFS and path to the directory containing the archive
        write_fs = OSFS(tmp)
        try:
            yield write_fs
        finally:
            # Close the FS handle before removing the backing directory.
            _close(write_fs)
    finally:
        shutil.rmtree(tmp)
python
{ "resource": "" }
q41774
_prepare_write_fs
train
def _prepare_write_fs(read_fs, cache, read_path, readwrite_mode=True):
    '''
    Prepare a temporary filesystem for writing to read_path

    The file will be moved to write_path on close if modified.

    NOTE(review): the ``cache`` parameter is unused in this body - confirm
    whether it is vestigial or needed by callers for signature parity.
    '''
    with _get_write_fs() as write_fs:
        # If opening in read/write or append mode, make sure file data is
        # accessible: start from the existing contents when present.
        if readwrite_mode:
            if not write_fs.isfile(read_path):
                _touch(write_fs, read_path)
            if read_fs.isfile(read_path):
                fs.utils.copyfile(
                    read_fs, read_path, write_fs, read_path)
        else:
            # Write-only mode: start from an empty file.
            _touch(write_fs, read_path)
        yield write_fs
python
{ "resource": "" }
q41775
text_cleanup
train
def text_cleanup(data, key, last_type):
    """ I strip extra whitespace off multi-line strings if they are ready to be stripped!"""
    # Only string-typed fields that actually exist are stripped;
    # everything else passes through untouched.
    ready = key in data and last_type == STRING_TYPE
    if ready:
        data[key] = data[key].strip()
    return data
python
{ "resource": "" }
q41776
rst_to_json
train
def rst_to_json(text):
    """ I convert Restructured Text with field lists into Dictionaries!
    TODO: Convert to text node approach.

    Each underlined title starts a new record; ``:key: value`` field
    lines populate it; indented continuation lines extend multi-line
    string fields. Returns the records serialized as a JSON array.
    """
    records = []
    last_type = None
    key = None
    data = {}
    directive = False
    lines = text.splitlines()
    for index, line in enumerate(lines):
        # check for directives (".." lines) - their indented bodies are skipped
        if len(line) and line.strip().startswith(".."):
            directive = True
            continue
        # set the title: a text line followed by a divider underline
        if len(line) and (line[0] in string.ascii_letters or line[0].isdigit()):
            directive = False
            try:
                if lines[index + 1][0] not in DIVIDERS:
                    continue
            except IndexError:
                continue
            # Finalize the previous record's pending string field,
            # then start a new record.
            data = text_cleanup(data, key, last_type)
            data = {"title": line.strip()}
            records.append( data )
            continue
        # Grab standard fields (int, string, float)
        if len(line) and line[0].startswith(":"):
            data = text_cleanup(data, key, last_type)
            # NOTE(review): this rebinds the loop variable `index` to the
            # position of the closing ':' - harmless here since it is
            # reassigned by enumerate on the next iteration, but fragile.
            index = line.index(":", 1)
            key = line[1:index]
            value = line[index + 1:].strip()
            data[key], last_type = type_converter(value)
            directive = False
            continue
        # Work on multi-line strings (indented continuation lines)
        if len(line) and line[0].startswith(" ") and directive == False:
            if not isinstance(data[key], str):
                # Not a string so continue on
                continue
            value = line.strip()
            if not len(value):
                # empty string, continue on
                continue
            # add next line
            data[key] += "\n{}".format(value)
            continue
        # Blank line inside a string field: preserve it as a newline.
        if last_type == STRING_TYPE and not len(line):
            if key in data.keys():
                data[key] += "\n"
    return json.dumps(records)
python
{ "resource": "" }
q41777
type_converter
train
def type_converter(text):
    """ I convert strings into integers, floats, and strings! """
    # Pure digit strings become ints; anything float() accepts becomes a
    # float (note: negative integers like "-3" fall through to float);
    # everything else stays a string.
    if text.isdigit():
        return int(text), int
    try:
        number = float(text)
    except ValueError:
        return text, STRING_TYPE
    return number, float
python
{ "resource": "" }
q41778
command_line_runner
train
def command_line_runner():
    """ I run functions from the command-line! """
    # The .rst filename is expected as the last CLI argument.
    filename = sys.argv[-1]
    if not filename.endswith(".rst"):
        print("ERROR! Please enter a ReStructuredText filename!")
        sys.exit()
    print(rst_to_json(file_opener(filename)))
python
{ "resource": "" }
q41779
packb
train
def packb(obj, **kwargs):
    """Serialize obj via msgpack.packb, defaulting use_bin_type to True.

    Callers may still pass use_bin_type explicitly to override the default.
    """
    if 'use_bin_type' not in kwargs:
        kwargs['use_bin_type'] = True
    return msgpack.packb(obj, **kwargs)
python
{ "resource": "" }
q41780
AudioPlayer.play
train
def play(cls, file_path, on_done=None, logger=None):
    """
    Play an audio file (blocking until playback finishes).

    :param file_path: the path to the file to play.
    :param on_done: callback when audio playback completes.
    :param logger: optional logger; load failures are logged as warnings.
    """
    pygame.mixer.init()
    try:
        pygame.mixer.music.load(file_path)
    except pygame.error as e:
        # Unloadable file: warn (if possible) and return without
        # invoking on_done.
        if logger is not None:
            logger.warning(str(e))
        return
    pygame.mixer.music.play()
    # Poll every 100ms until playback finishes.
    while pygame.mixer.music.get_busy():
        time.sleep(0.1)
        continue
    if on_done:
        on_done()
python
{ "resource": "" }
q41781
AudioPlayer.play_async
train
def play_async(cls, file_path, on_done=None):
    """
    Play an audio file on a background thread (non-blocking).

    :param file_path: the path to the file to play.
    :param on_done: callback when audio playback completes.
    """
    # Delegate to the blocking play() on a worker thread.
    worker = threading.Thread(
        target=AudioPlayer.play,
        args=(file_path, on_done))
    worker.start()
python
{ "resource": "" }
q41782
LiquidCrystal.left_to_right
train
def left_to_right(self):
    """Set left-to-right text flow by enabling increment mode."""
    updated_mode = self._entry_mode | Command.MODE_INCREMENT
    self._entry_mode = updated_mode
    self.command(updated_mode)
python
{ "resource": "" }
q41783
LiquidCrystal.right_to_left
train
def right_to_left(self):
    """Set right-to-left text flow by clearing increment mode."""
    updated_mode = self._entry_mode & ~Command.MODE_INCREMENT
    self._entry_mode = updated_mode
    self.command(updated_mode)
python
{ "resource": "" }
q41784
TempFileSystemStorage.get_available_name
train
def get_available_name(self, name, max_length=None):
    """Return relative path to name placed in a fresh random directory.

    A unique temporary directory is created under the storage root so the
    stored name can never collide; the base-class implementation then
    applies its own availability/length rules.
    """
    tempdir = tempfile.mkdtemp(dir=self.base_location)
    # Keep only the final path components: "<random-dir>/<filename>".
    name = os.path.join(
        os.path.basename(tempdir),
        os.path.basename(name),
    )
    method = super(TempFileSystemStorage, self).get_available_name
    return method(name, max_length=max_length)
python
{ "resource": "" }
q41785
yn_prompt
train
def yn_prompt(text):
    '''
    Takes the text prompt, and presents it, takes only "y" or "n" for
    answers, and returns True or False. Repeats itself on bad input.
    '''
    prompt = "\n" + text + "\n('y' or 'n'): "
    while True:
        reply = input(prompt).strip()
        if reply == 'y':
            return True
        if reply == 'n':
            return False
        # Anything else: re-prompt.
python
{ "resource": "" }
q41786
underline
train
def underline(text):
    '''Takes a string, and returns it underscored.

    The result is the text, a newline, a ruler of "=" characters the same
    length as the text, and a trailing newline.
    '''
    # Replaces the original quadratic character-by-character string
    # concatenation with a single multiplication; output is identical.
    return text + "\n" + "=" * len(text) + "\n"
python
{ "resource": "" }
q41787
get_event_time_as_utc
train
def get_event_time_as_utc(voevent, index=0):
    """
    Extracts the event time from a given `WhereWhen.ObsDataLocation`.

    Returns a datetime (timezone-aware, UTC).

    Accesses a `WhereWhere.ObsDataLocation.ObservationLocation`
    element and returns the AstroCoords.Time.TimeInstant.ISOTime element,
    converted to a (UTC-timezoned) datetime.

    Note that a packet may include multiple 'ObsDataLocation' entries
    under the 'WhereWhen' section, for example giving locations of an object
    moving over time. Most packets will have only one, however, so the
    default is to access the first.

    This function now implements conversion from the
    TDB (Barycentric Dynamical Time) time scale in ISOTime format,
    since this is the format used by GAIA VOEvents. (See also
    http://docs.astropy.org/en/stable/time/#time-scale )

    Other timescales (i.e. TT, GPS) will presumably be formatted as a
    TimeOffset; parsing this format is not yet implemented.

    Args:
        voevent (:class:`voeventparse.voevent.Voevent`): Root node of the
            VOevent etree.
        index (int): Index of the ObsDataLocation to extract an ISOtime from.

    Returns:
        :class:`datetime.datetime`: Datetime representing the event-timestamp,
        converted to UTC (timezone aware), or None if the relevant elements
        are missing (AttributeError is swallowed).
    """
    try:
        od = voevent.WhereWhen.ObsDataLocation[index]
        ol = od.ObservationLocation
        coord_sys = ol.AstroCoords.attrib['coord_system_id']
        # Time-system is the first dash-separated token, e.g. 'UTC-FK5-GEO'.
        timesys_identifier = coord_sys.split('-')[0]

        if timesys_identifier == 'UTC':
            isotime_str = str(ol.AstroCoords.Time.TimeInstant.ISOTime)
            return iso8601.parse_date(isotime_str)
        elif timesys_identifier == 'TDB':
            isotime_str = str(ol.AstroCoords.Time.TimeInstant.ISOTime)
            isotime_dtime = iso8601.parse_date(isotime_str)
            tdb_time = astropy.time.Time(isotime_dtime, scale='tdb')
            return tdb_time.utc.to_datetime().replace(tzinfo=pytz.UTC)
        elif timesys_identifier == 'TT' or timesys_identifier == 'GPS':
            # BUGFIX: `.format(...)` was never called on this message, so the
            # literal '{}' placeholder reached the user.
            raise NotImplementedError(
                "Conversion from time-system '{}' to UTC not yet "
                "implemented".format(timesys_identifier)
            )
        else:
            raise ValueError(
                'Unrecognised time-system: {} (badly formatted VOEvent?)'.format(
                    timesys_identifier
                )
            )
    except AttributeError:
        # Missing WhereWhen structure: treat as "no event time available".
        return None
python
{ "resource": "" }
q41788
get_event_position
train
def get_event_position(voevent, index=0):
    """Extracts the `AstroCoords` from a given `WhereWhen.ObsDataLocation`.

    Note that a packet may include multiple 'ObsDataLocation' entries
    under the 'WhereWhen' section, for example giving locations of an object
    moving over time. Most packets will have only one, however, so the
    default is to just return co-ords extracted from the first.

    Args:
        voevent (:class:`voeventparse.voevent.Voevent`): Root node of the
            VOEvent etree.
        index (int): Index of the ObsDataLocation to extract AstroCoords from.

    Returns:
        Position (:py:class:`.Position2D`): The sky position defined in the
        ObsDataLocation. NOTE(review): if the Position2D element has no
        Name1 child, the function falls through without an explicit return
        and implicitly yields None — confirm callers handle that.
    """
    od = voevent.WhereWhen.ObsDataLocation[index]
    ac = od.ObservationLocation.AstroCoords
    # NOTE(review): the coord-system id is read from the *first*
    # ObsDataLocation regardless of `index` — confirm this is intentional.
    ac_sys = voevent.WhereWhen.ObsDataLocation.ObservationLocation.AstroCoordSystem
    sys = ac_sys.attrib['id']
    if hasattr(ac.Position2D, "Name1"):
        # NOTE(review): `assert` is stripped under `python -O`, so this
        # RA/Dec validation vanishes in optimized runs.
        assert ac.Position2D.Name1 == 'RA' and ac.Position2D.Name2 == 'Dec'
        posn = Position2D(ra=float(ac.Position2D.Value2.C1),
                          dec=float(ac.Position2D.Value2.C2),
                          err=float(ac.Position2D.Error2Radius),
                          units=ac.Position2D.attrib['unit'],
                          system=sys)
        return posn
python
{ "resource": "" }
q41789
get_grouped_params
train
def get_grouped_params(voevent):
    """
    Fetch grouped Params from the `What` section of a voevent as an omdict.

    This fetches 'grouped' Params, i.e. those enclosed in a Group element,
    and returns them as a nested dict-like structure, keyed by
    GroupName->ParamName->AttribName. Because multiple Groups may share a
    name, the returned structure is an ``orderedmultidict.omdict``; use
    ``getlist`` to retrieve every entry for a repeated name.

    Args:
        voevent (:class:`voeventparse.voevent.Voevent`): Root node of the
            VOevent etree.

    Returns (orderedmultidict.omdict):
        Mapping of ``GroupName->ParamName->Attribs``.
    """
    # Work on a copy so deannotation does not mutate the caller's tree.
    what = deepcopy(voevent.What)
    lxml.objectify.deannotate(what)
    result = OMDict()
    if what.find('Group') is not None:
        for group in what.Group:
            name = group.attrib.get('name')
            result.add(name, _get_param_children_as_omdict(group))
    return result
python
{ "resource": "" }
q41790
get_toplevel_params
train
def get_toplevel_params(voevent):
    """
    Fetch ungrouped Params from the `What` section of a voevent as an omdict.

    This fetches 'toplevel' Params, i.e. those not enclosed in a Group
    element, keyed ParamName->AttribName. Because multiple Params may share
    a name, the returned structure is an ``orderedmultidict.omdict``; use
    ``getlist`` to retrieve every entry for a repeated name. Params with no
    defined name are returned under the key ``None``.

    Args:
        voevent (:class:`voeventparse.voevent.Voevent`): Root node of the
            VOevent etree.

    Returns (orderedmultidict.omdict):
        Mapping of ``ParamName->Attribs``. Typical access like so::

            foo_val = top_params['foo']['value']
            # If there are multiple Param entries named 'foo':
            all_foo_vals = [atts['value'] for atts in top_params.getlist('foo')]
    """
    # Removed an unused `result = OrderedDict()` local (dead code).
    # Work on a copy so deannotation does not mutate the caller's tree.
    w = deepcopy(voevent.What)
    lxml.objectify.deannotate(w)
    return _get_param_children_as_omdict(w)
python
{ "resource": "" }
q41791
prettystr
train
def prettystr(subtree):
    """Render a VOEvent subtree as a nicely indented XML string.

    Prettyprinting a whole VOEvent often fails due to whitespace issues
    (cf. http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output),
    so this helper prettyprints just a subsection for easier desk-checking.

    Args:
        subtree(:class`lxml.etree.ElementTree`): A node in the VOEvent
            element tree.
    Returns:
        str: Prettyprinted string representation of the raw XML.
    """
    # Copy first: deannotation/namespace cleanup mutate the tree.
    node = deepcopy(subtree)
    lxml.objectify.deannotate(node)
    lxml.etree.cleanup_namespaces(node)
    raw = lxml.etree.tostring(node, pretty_print=True)
    return raw.decode(encoding="utf-8")
python
{ "resource": "" }
q41792
Server.start
train
def start(self):
    """Start the MQTT client on the handler's background run loop."""
    handler = self.thread_handler
    handler.run(target=self.start_blocking)
    handler.start_run_loop()
python
{ "resource": "" }
q41793
Server.start_blocking
train
def start_blocking(self, run_event):
    """
    Start the MQTT client, as a blocking method.

    Connects (with retry/backoff) to the configured broker, subscribes to
    the Snips/hermes topics, then services the client loop until
    *run_event* is cleared.

    :param run_event: a run event object provided by the thread handler.
    """
    # Removed a dead initial `topics` list that was unconditionally
    # shadowed by the assignment below, and the redundant `True and`
    # in the connect-retry condition.
    self.log_info("Connecting to {} on port {}".format(self.mqtt_hostname,
                                                       str(self.mqtt_port)))
    retry = 0
    # Retry with a slowly growing backoff until connected or stopped.
    while run_event.is_set():
        try:
            self.log_info("Trying to connect to {}".format(self.mqtt_hostname))
            self.client.connect(self.mqtt_hostname, self.mqtt_port, 60)
            break
        except (socket_error, Exception) as e:
            # NOTE(review): `Exception` already subsumes `socket_error`;
            # the tuple is kept as-is to preserve behavior exactly.
            self.log_info("MQTT error {}".format(e))
            time.sleep(5 + int(retry / 5))
            retry = retry + 1
    topics = [
        (MQTT_TOPIC_INTENT + '#', 0),
        (MQTT_TOPIC_HOTWORD + '#', 0),
        (MQTT_TOPIC_ASR + '#', 0),
        (MQTT_TOPIC_SNIPSFILE, 0),
        (MQTT_TOPIC_DIALOG_MANAGER + '#', 0),
        ("snipsmanager/#", 0)
    ]
    self.client.subscribe(topics)
    while run_event.is_set():
        try:
            self.client.loop()
        except AttributeError as e:
            self.log_info("Error in mqtt run loop {}".format(e))
            time.sleep(1)
python
{ "resource": "" }
q41794
Server.on_connect
train
def on_connect(self, client, userdata, flags, result_code):
    """
    Callback fired when the MQTT client connects.

    :param client: the client being connected.
    :param userdata: unused.
    :param flags: unused.
    :param result_code: result code.
    """
    message = "Connected with result code {}".format(result_code)
    self.log_info(message)
    self.state_handler.set_state(State.welcome)
python
{ "resource": "" }
q41795
Server.on_disconnect
train
def on_disconnect(self, client, userdata, result_code):
    """
    Callback fired when the MQTT client disconnects; waits five seconds
    and then schedules a reconnection attempt.

    :param client: the client being disconnected.
    :param userdata: unused.
    :param result_code: result code.
    """
    message = "Disconnected with result code " + str(result_code)
    self.log_info(message)
    self.state_handler.set_state(State.goodbye)
    # Give the broker a moment before attempting to reconnect.
    time.sleep(5)
    self.thread_handler.run(target=self.start_blocking)
python
{ "resource": "" }
q41796
Token.list
train
def list(self, identity_id, per_page=20, page=1):
    """Get a page of tokens for an identity.

    :param identity_id: The ID of the identity to retrieve tokens for
    :param per_page: The number of results per page returned
    :param page: The page number of the results
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    query = {'per_page': per_page, 'page': page}
    endpoint = str(identity_id) + '/token'
    return self.request.get(endpoint, query)
python
{ "resource": "" }
q41797
Token.create
train
def create(self, identity_id, service, token):
    """Create a token linking *service* to the identity.

    :param identity_id: The ID of the identity the token belongs to
    :param service: The service that the token is linked to
    :param token: The token provided by the service
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {'service': service, 'token': token}
    endpoint = str(identity_id) + '/token'
    return self.request.post(endpoint, payload)
python
{ "resource": "" }
q41798
Token.update
train
def update(self, identity_id, service, token=None):
    """Update the token held for *service* on an identity.

    :param identity_id: The ID of the identity to update
    :param service: The service whose token is being updated
    :param token: The replacement token; omitted from the payload if falsy
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {'token': token} if token else {}
    endpoint = str(identity_id) + '/token/' + service
    return self.request.put(endpoint, payload)
python
{ "resource": "" }
q41799
AutoCloudProcessor._func_router
train
def _func_router(self, msg, fname, **config): """ This method routes the messages based on the params and calls the appropriate method to process the message. The utility of the method is to cope up with the major message change during different releases. """ FNAME = 'handle_%s_autocloud_%s' if ('compose_id' in msg['msg'] or 'compose_job_id' in msg['msg'] or 'autocloud.compose' in msg['topic']): return getattr(self, FNAME % ('v2', fname))(msg, **config) else: return getattr(self, FNAME % ('v1', fname))(msg, **config)
python
{ "resource": "" }