_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q37400
pretty_time
train
def pretty_time(s, granularity=3):
    """Pretty print time in seconds.

    Converts the input time in seconds into a string with interval names,
    such as days, hours and minutes.
    From: http://stackoverflow.com/a/24542445/1144479
    """
    intervals = (
        ('weeks', 604800),   # 60 * 60 * 24 * 7
        ('days', 86400),     # 60 * 60 * 24
        ('hours', 3600),     # 60 * 60
        ('minutes', 60),
        ('seconds', 1),
    )

    parts = []
    remaining = s
    for unit, length in intervals:
        amount = remaining // length
        if amount:
            remaining -= amount * length
            # Singularize the unit name for a count of one.
            label = unit.rstrip('s') if amount == 1 else unit
            parts.append('{} {}'.format(int(amount), label))
    # Only report the `granularity` largest non-zero units.
    return ', '.join(parts[:granularity])
python
{ "resource": "" }
q37401
Proxy._create_class_proxy
train
def _create_class_proxy(cls, theclass):
    """creates a proxy for the given class"""

    def make_method(name):
        # Forward the special method to the wrapped object stored in _obj.
        # object.__getattribute__ bypasses the proxy's own attribute hooks.
        def method(self, *args, **kw):
            target = object.__getattribute__(self, "_obj")
            return getattr(target, name)(*args, **kw)
        return method

    namespace = {}
    # Only forward special names the proxied class actually defines.
    for special in cls._special_names:
        if hasattr(theclass, special):
            namespace[special] = make_method(special)

    return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
python
{ "resource": "" }
q37402
Cli.description
train
def description(self, argv0='manage.py', command=None):
    '''Description outputed to console'''
    import inspect
    command = command or self.__class__.__name__.lower()

    pieces = [u'{}\n'.format(command)]
    if self.__doc__:
        pieces.append(self._fix_docstring(self.__doc__) + '\n')
    else:
        pieces.append(u'{}\n'.format(command))

    funcs = self.get_funcs()
    # Present the commands in source-definition order.
    funcs.sort(key=lambda pair: six.get_function_code(pair[1]).co_firstlineno)

    for attr, func in funcs:
        func = getattr(self, attr)
        comm = attr.replace('command_', '', 1)
        # NOTE(review): inspect.getargspec is deprecated on Python 3 —
        # kept for parity with the rest of this six-based codebase.
        args = inspect.getargspec(func).args[1:]
        args = (' [' + '] ['.join(args) + ']') if args else ''
        pieces.append("\t{} {}:{}{}\n".format(argv0, command, comm, args))
        if func.__doc__:
            pieces.append(self._fix_docstring(func.__doc__, 2))

    return u''.join(pieces)
python
{ "resource": "" }
q37403
AdditionRealm.requestAvatar
train
def requestAvatar(self, avatarId, mind, *interfaces):
    """
    Create Adder avatars for any IBoxReceiver request.
    """
    if IBoxReceiver not in interfaces:
        raise NotImplementedError()
    # The third element is the logout callable; nothing to clean up here.
    return (IBoxReceiver, Adder(avatarId), lambda: None)
python
{ "resource": "" }
q37404
Application.handle_error
train
def handle_error(self, env):
    '''Unhandled exception handler.
    You can put any logging, error warning, etc here.'''
    request = env.request
    # logger.exception also records the active traceback.
    logger.exception('Exception for %s %s :', request.method, request.url)
python
{ "resource": "" }
q37405
Exposer.get
train
def get(self, obj, key):
    """
    Retrieve 'key' from an instance of a class which previously exposed it.

    @param key: a hashable object, previously passed to L{Exposer.expose}.

    @return: the object which was exposed with the given name on obj's key.

    @raise MethodNotExposed: when the key in question was not exposed with
        this exposer.
    """
    if key not in self._exposed:
        raise MethodNotExposed()
    rightFuncs = self._exposed[key]
    klass = obj.__class__
    seen = {}
    # Walk the MRO; the first class that defines one of the exposed
    # functions wins, unless a subclass already overrode that name.
    for base in inspect.getmro(klass):
        for name, value in base.__dict__.items():
            for candidate in rightFuncs:
                if value is candidate:
                    if name in seen:
                        # The name was overridden closer to the instance;
                        # the exposed version is shadowed.
                        raise MethodNotExposed()
                    return value.__get__(obj, klass)
            seen[name] = True
    raise MethodNotExposed()
python
{ "resource": "" }
q37406
repercent_broken_unicode
train
def repercent_broken_unicode(path):
    """
    As per section 3.2 of RFC 3987, step three of converting a URI into
    an IRI, we need to re-percent-encode any octet produced that is not
    part of a strictly legal UTF-8 octet sequence.
    """
    # originally from django.utils.encoding
    while True:
        try:
            return path.decode('utf-8')
        except UnicodeDecodeError as exc:
            # Percent-encode only the offending byte range, then retry the
            # decode on the patched byte string.
            bad = path[exc.start:exc.end]
            repercent = quote(bad, safe=b"/#%[]=:;$&()+,!?*@'~")
            path = path[:exc.start] + repercent.encode('ascii') + path[exc.end:]
python
{ "resource": "" }
q37407
uri_to_iri_parts
train
def uri_to_iri_parts(path, query, fragment):
    r"""
    Converts a URI parts to corresponding IRI parts in a given charset.

    Examples for URI versus IRI:

    :param path: The path of URI to convert.
    :param query: The query string of URI to convert.
    :param fragment: The fragment of URI to convert.
    """
    # Characters in the second argument stay percent-encoded.
    safe_query_fragment = '%;/?:@&=+,$#'
    return (
        url_unquote(path, '%/;?'),
        url_unquote(query, safe_query_fragment),
        url_unquote(fragment, safe_query_fragment),
    )
python
{ "resource": "" }
q37408
default_bundle_config
train
def default_bundle_config():
    """Return the default bundle config file as an AttrDict."""
    import os
    from ambry.util import AttrDict

    config = AttrDict()
    # bundle.yaml lives next to this module.
    bundle_yaml = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'bundle.yaml')
    config.update_yaml(bundle_yaml)
    return config
python
{ "resource": "" }
q37409
find_package_data
train
def find_package_data():
    """
    Returns package_data, because setuptools is too stupid to handle nested
    directories.

    Returns:
        dict: key is "ambry", value is list of paths.
    """
    paths = []
    for start in ('ambry/support', 'ambry/bundle/default_files'):
        for root, dirs, files in os.walk(start):
            # Skip compiled files; store paths relative to the package root.
            paths.extend(
                os.path.join(root, fname).replace('ambry/', '')
                for fname in files
                if not fname.endswith('.pyc'))
    return {'ambry': paths}
python
{ "resource": "" }
q37410
File.update
train
def update(self, of):
    """Update a file from another file, for copying"""
    # The other values should be set when the file object is created
    # with dataset.bsfile()
    copied_fields = ('mime_type', 'preference', 'state', 'hash', 'modified',
                     'size', 'contents', 'source_hash', 'data')
    for field in copied_fields:
        setattr(self, field, getattr(of, field))
    return self
python
{ "resource": "" }
q37411
File.dict_row_reader
train
def dict_row_reader(self):
    """ Unpacks message pack rows into a stream of dicts. """
    rows = self.unpacked_contents
    if not rows:
        return
    # First row is the header; every following row is a value list.
    header = rows.pop(0)
    for values in rows:
        yield dict(zip(header, values))
python
{ "resource": "" }
q37412
File.update_contents
train
def update_contents(self, contents, mime_type):
    """Update the contents and set the hash and modification time"""
    import hashlib
    import time

    new_size = len(contents)
    self.mime_type = mime_type

    # Plain text is stored UTF-8 encoded; everything else is stored as-is.
    if mime_type == 'text/plain':
        self.contents = contents.encode('utf-8')
    else:
        self.contents = contents

    previous_hash = self.hash
    self.hash = hashlib.md5(self.contents).hexdigest()

    # Only bump the modification time when pre-existing content changed.
    if self.size and (previous_hash != self.hash):
        self.modified = int(time.time())

    self.size = new_size
python
{ "resource": "" }
q37413
ApiClient.__deserialize_datetime
train
def __deserialize_datetime(self, string):
    """
    Deserializes string to datetime.

    The string should be in iso8601 datetime format.

    :param string: str.
    :return: datetime.
    """
    try:
        from dateutil.parser import parse
        # NOTE(review): despite the name, `string` is divided by 1000 and
        # fed to fromtimestamp, i.e. treated as a millisecond epoch value —
        # confirm against callers.
        iso_like = str(datetime.fromtimestamp(string / 1000))
        return parse(iso_like)
    except ImportError:
        # Without dateutil, hand back the raw input unchanged.
        return string
    except ValueError:
        raise ApiException(
            status=0,
            reason=(
                "Failed to parse `{0}` into a datetime object"
                .format(string)
            )
        )
python
{ "resource": "" }
q37414
un
train
def un(source, wrapper=list, error_bad_lines=True):
    """Parse a text stream to TSV

    If the source is a string, it is converted to a line-iterable stream.
    Otherwise we assume we can iterate over its lines directly.

    The result is a generator. With the default wrapper, list, each element
    is a list of strings. With a class generated by namedtuple(), each
    element is an instance of that class, or None if there were too many or
    too few fields.

    Newline separated input is preferred, but carriage-return-newline is
    accepted on every platform. Plain dictionary parsing is deliberately
    omitted: dict fields have no definite order, so dicts cannot be
    round-tripped symmetrically.
    """
    if isinstance(source, six.string_types):
        source = six.StringIO(source)

    # Prepare source lines for reading
    rows = parse_lines(source)

    # Determine columns: from the namedtuple's fields, or from the first row
    # (which is then also yielded as the header).
    if is_namedtuple(wrapper):
        columns = wrapper._fields
        wrapper = wrapper._make
    else:
        columns = next(rows, None)
        if columns is not None:
            _, columns = columns
            yield wrapper(columns)

    # Yield wrapped values for every consistent line.
    for lineno, values in rows:
        if check_line_consistency(columns, values, lineno, error_bad_lines):
            yield wrapper(values)
python
{ "resource": "" }
q37415
to
train
def to(items, output=None):
    """Present a collection of items as TSV

    The items in the collection can themselves be any iterable collection.
    (Single field structures should be represented as one tuples.) With no
    output parameter, a generator of strings is returned. If an output
    parameter is passed, it should be a file-like object. Output is always
    newline separated.
    """
    if output is None:
        # Lazy: let the caller drive formatting.
        return (format_collection(item) for item in items)
    for item in items:
        output.write(format_collection(item) + '\n')
python
{ "resource": "" }
q37416
get_bundle_ref
train
def get_bundle_ref(args, l, use_history=False):
    """ Use a variety of methods to determine which bundle to use

    :param args:
    :return:
    """
    if not use_history:
        # Explicit arguments win, then the environment, then the cwd.
        if args.id:
            return (args.id, '-i argument')

        if hasattr(args, 'bundle_ref') and args.bundle_ref:
            return (args.bundle_ref, 'bundle_ref argument')

        if 'AMBRY_BUNDLE' in os.environ:
            return (os.environ['AMBRY_BUNDLE'], 'environment')

        cwd_bundle = os.path.join(os.getcwd(), 'bundle.yaml')
        if os.path.exists(cwd_bundle):
            with open(cwd_bundle) as f:
                from ambry.identity import Identity
                # NOTE(review): yaml.load without an explicit Loader;
                # consider yaml.safe_load for local config files.
                config = yaml.load(f)
                try:
                    ident = Identity.from_dict(config['identity'])
                    return (ident.vid, 'directory')
                except KeyError:
                    pass

    history = l.edit_history()
    if history:
        return (history[0].d_vid, 'history')

    return None, None
python
{ "resource": "" }
q37417
bundle_variant
train
def bundle_variant(args, l, rc):
    """Create a new bundle as a variant of an existing bundle"""
    from ambry.orm.exc import ConflictError

    ob = l.bundle(args.ref)

    # Fall back to the original bundle's identity for any unset argument.
    d = dict(
        dataset=args.dataset or ob.identity.dataset,
        revision=args.revision,
        source=args.source or ob.identity.source,
        bspace=args.space or ob.identity.bspace,
        subset=args.subset or ob.identity.subset,
        btime=args.time or ob.identity.btime,
        variation=args.variation or ob.identity.variation)

    try:
        ambry_account = rc.accounts.get('ambry', {})
    except:  # noqa  NOTE(review): bare except kept from original; narrow if possible
        ambry_account = None

    if not ambry_account:
        fatal("Failed to get an accounts.ambry entry from the configuration. ")

    if not ambry_account.get('name') or not ambry_account.get('email'):
        fatal('Must set accounts.ambry.email and accounts.ambry.name n account config file')

    try:
        b = l.new_bundle(assignment_class=args.key, **d)
        b.metadata.contacts.wrangler.name = ambry_account.get('name')
        b.metadata.contacts.wrangler.email = ambry_account.get('email')
        b.commit()
    except ConflictError:
        fatal("Can't create dataset; one with a conflicting name already exists")

    # Now, need to copy over all of the partitions into the new bundle.
    for p in ob.partitions:
        ds = b.dataset.new_source(p.name, ref=p.name, reftype='partition')
        # BUG FIX: original used the Python 2 statement form `print ds`,
        # which is a syntax error on Python 3 and inconsistent with the
        # print() calls used elsewhere in this module (see bundle_new).
        print(ds)

    b.build_source_files.sources.objects_to_record()

    b.commit()
python
{ "resource": "" }
q37418
bundle_new
train
def bundle_new(args, l, rc):
    """Create a new bundle"""
    from ambry.orm.exc import ConflictError

    d = dict(
        dataset=args.dataset,
        revision=args.revision,
        source=args.source,
        bspace=args.space,
        subset=args.subset,
        btime=args.time,
        variation=args.variation)

    try:
        ambry_account = rc.accounts.get('ambry', {})
    except:  # noqa  NOTE(review): bare except kept from original
        ambry_account = None

    if not ambry_account:
        fatal("Failed to get an accounts.ambry entry from the configuration. ")

    if not ambry_account.get('name') or not ambry_account.get('email'):
        fatal('Must set accounts.ambry.email and accounts.ambry.name n account config file')

    if args.dryrun:
        from ..identity import Identity
        # Show what identity would be created without touching the library.
        d['revision'] = 1
        d['id'] = 'dXXX'
        print(str(Identity.from_dict(d)))
        return

    try:
        b = l.new_bundle(assignment_class=args.key, **d)
        if ambry_account:
            b.metadata.contacts.wrangler = ambry_account
            b.build_source_files.bundle_meta.objects_to_record()
            b.commit()
    except ConflictError:
        fatal("Can't create dataset; one with a conflicting name already exists")

    print(b.identity.fqname)
python
{ "resource": "" }
q37419
InitialDataUpdater.handle_deletions
train
def handle_deletions(self):
    """
    Manages handling deletions of objects that were previously managed by
    the initial data process but no longer managed. It does so by
    mantaining a list of receipts for model objects that are registered
    for deletion on each round of initial data processing. Any receipts
    that are from previous rounds and not the current round will be
    deleted.
    """
    # Deduplicate registered objects by (content type, pk) so each model
    # object yields at most one receipt.
    deduplicated_objs = {}
    for model in self.model_objs_registered_for_deletion:
        key = '{0}:{1}'.format(
            ContentType.objects.get_for_model(model, for_concrete_model=False), model.id
        )
        deduplicated_objs[key] = model

    # Create receipts for every object registered for deletion
    now = timezone.now()
    registered_for_deletion_receipts = [
        RegisteredForDeletionReceipt(
            model_obj_type=ContentType.objects.get_for_model(model_obj, for_concrete_model=False),
            model_obj_id=model_obj.id,
            register_time=now)
        for model_obj in deduplicated_objs.values()
    ]

    # Do a bulk upsert on all of the receipts, updating their registration time.
    RegisteredForDeletionReceipt.objects.bulk_upsert(
        registered_for_deletion_receipts,
        ['model_obj_type_id', 'model_obj_id'],
        update_fields=['register_time'])

    # Delete all receipts and their associated model objects that weren't updated
    # in this round (i.e. stale entries from previous rounds).
    for receipt in RegisteredForDeletionReceipt.objects.exclude(register_time=now):
        try:
            receipt.model_obj.delete()
        except:  # noqa
            # The model object may no longer be there, its ctype may be invalid,
            # or it might be protected. Regardless, the model object cannot be
            # deleted, so go ahead and delete its receipt.
            pass
        receipt.delete()
python
{ "resource": "" }
q37420
InitialDataUpdater.update_all_apps
train
def update_all_apps(self):
    """
    Loops through all app names contained in settings.INSTALLED_APPS and
    calls `update_app` on each one. Handles any object deletions that
    happened after all apps have been initialized.
    """
    for app_config in apps.get_app_configs():
        self.update_app(app_config.name)

    # During update_app, all apps added model objects that were registered
    # for deletion. Delete everything previously managed by the initial
    # data process but no longer registered.
    self.handle_deletions()
python
{ "resource": "" }
q37421
BaseSearchBackend._and_join
train
def _and_join(self, terms):
    """
    Joins terms using AND operator.

    Args:
        terms (list): terms to join

    Examples:
        self._and_join(['term1']) -> 'term1'
        self._and_join(['term1', 'term2']) -> 'term1 AND term2'
        self._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3'

    Returns:
        str
    """
    if len(terms) > 1:
        # Each element may itself be an OR-group; join those with AND.
        return ' AND '.join(self._or_join(t) for t in terms)
    return self._or_join(terms[0])
python
{ "resource": "" }
q37422
BaseIndex.index_one
train
def index_one(self, instance, force=False):
    """ Indexes exactly one object of the Ambry system.

    Args:
        instance (any): instance to index.
        force (boolean): if True replace document in the index.

    Returns:
        boolean: True if document added to index, False if document already
            exists in the index.
    """
    # BUG FIX: the original condition was
    #   `if not self.is_indexed(instance) and not force:`
    # which made force=True skip indexing entirely — the opposite of the
    # documented "force replaces the document" behavior. `force` must
    # bypass the already-indexed check instead.
    if force or not self.is_indexed(instance):
        doc = self._as_document(instance)
        self._index_document(doc, force=force)
        logger.debug('{} indexed as\n {}'.format(instance.__class__, pformat(doc)))
        return True
    logger.debug('{} already indexed.'.format(instance.__class__))
    return False
python
{ "resource": "" }
q37423
BaseDatasetIndex._expand_terms
train
def _expand_terms(self, terms):
    """ Expands terms of the dataset to the appropriate fields. It will
    parse the search phrase and return only the search term components
    that are applicable to a Dataset query.

    Args:
        terms (dict or str):

    Returns:
        dict: keys are field names, values are query strings
    """
    expanded = {'keywords': [], 'doc': []}

    # Raw strings are first run through the search-term parser.
    if not isinstance(terms, dict):
        parser = SearchTermParser()
        terms = parser.parse(terms, term_join=self.backend._and_join)

    if 'about' in terms:
        expanded['doc'].append(terms['about'])
    if 'source' in terms:
        expanded['keywords'].append(terms['source'])

    return expanded
python
{ "resource": "" }
q37424
BasePartitionIndex._as_document
train
def _as_document(self, partition):
    """ Converts given partition to the document indexed by FTS backend.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict with structure matches to BasePartitionIndex._schema.
    """

    # Flatten all column metadata of the partition's table into one string.
    schema = ' '.join(
        u'{} {} {} {} {}'.format(
            c.id,
            c.vid,
            c.name,
            c.altname,
            c.description) for c in partition.table.columns)

    values = ''

    for stat in partition.stats:
        if stat.uvalues:
            # Some geometry values are super long. They should not be in
            # uvalues, but when they are, need to cut them down.
            values += ' '.join(e[:200] for e in stat.uvalues) + '\n'

    # Re-calculate the summarization of grains, since the geoid 0.0.7 package
    # had a bug where state level summaries had the same value as state-level
    # allvals
    def resum(g):
        try:
            return str(GVid.parse(g).summarize())
        except KeyError:
            return g
        except ValueError:
            logger.debug("Failed to parse gvid '{}' from partition '{}' grain coverage"
                         .format(g, partition.identity.vname))
            return g

    keywords = (
        ' '.join(partition.space_coverage) +
        ' ' +
        ' '.join([resum(g) for g in partition.grain_coverage if resum(g)]) +
        ' ' +
        ' '.join(str(x) for x in partition.time_coverage)
    )

    # NOTE(review): the format string has 6 placeholders but receives 8
    # arguments — the last two display fields are silently dropped by
    # str.format; confirm whether that is intended.
    doc_field = u('{} {} {} {} {} {}').format(
        values,
        schema,
        ' '.join([
            u('{}').format(partition.identity.vid),
            u('{}').format(partition.identity.id_),
            u('{}').format(partition.identity.name),
            u('{}').format(partition.identity.vname)]),
        partition.display.title,
        partition.display.description,
        partition.display.sub_description,
        partition.display.time_description,
        partition.display.geo_description
    )

    document = dict(
        vid=u('{}').format(partition.identity.vid),
        dataset_vid=u('{}').format(partition.identity.as_dataset().vid),
        title=u('{}').format(partition.table.description),
        keywords=u('{}').format(keywords),
        doc=doc_field)

    return document
python
{ "resource": "" }
q37425
BasePartitionIndex._expand_terms
train
def _expand_terms(self, terms):
    """ Expands partition terms to the appropriate fields.

    Args:
        terms (dict or str):

    Returns:
        dict: keys are field names, values are query strings
    """
    expanded = {
        'keywords': [],
        'doc': [],
        'from': None,
        'to': None}

    # Raw strings go through the search-term parser first.
    if not isinstance(terms, dict):
        parser = SearchTermParser()
        terms = parser.parse(terms, term_join=self.backend._and_join)

    if 'about' in terms:
        expanded['doc'].append(terms['about'])
    if 'with' in terms:
        expanded['doc'].append(terms['with'])
    if 'in' in terms:
        # Place names are resolved to gvid identifiers.
        expanded['keywords'].append(self._expand_place_ids(terms['in']))
    if 'by' in terms:
        expanded['keywords'].append(terms['by'])

    expanded['from'] = terms.get('from', None)
    expanded['to'] = terms.get('to', None)

    return expanded
python
{ "resource": "" }
q37426
BasePartitionIndex._expand_place_ids
train
def _expand_place_ids(self, terms):
    """ Lookups all of the place identifiers to get gvids

    Args:
        terms (str or unicode): terms to lookup

    Returns:
        str or list: given terms if no identifiers found, otherwise list
            of identifiers.
    """
    place_vids = []
    first_type = None

    for result in self.backend.identifier_index.search(terms):
        if not first_type:
            first_type = result.type
        # Ignore results that aren't the same type as the best match.
        if result.type != first_type:
            continue
        place_vids.append(result.vid)

    if not place_vids:
        return terms

    # Add the 'all region' gvids for the higher levels.
    all_set = set(
        itertools.chain.from_iterable(iallval(GVid.parse(x)) for x in place_vids))
    place_vids += [str(x) for x in all_set]
    return place_vids
python
{ "resource": "" }
q37427
BaseIdentifierIndex._as_document
train
def _as_document(self, identifier):
    """ Converts given identifier to the document indexed by FTS backend.

    Args:
        identifier (dict): identifier to convert. Dict contains at
            least 'identifier', 'type' and 'name' keys.

    Returns:
        dict with structure matches to BaseIdentifierIndex._schema.
    """
    # Copy the three schema fields, coercing each to text.
    return {
        field: u('{}').format(identifier[field])
        for field in ('identifier', 'type', 'name')
    }
python
{ "resource": "" }
q37428
SearchTermParser._geograins
train
def _geograins(self):
    """Create a map geographic area terms to the geo grain GVid values """
    from geoid.civick import GVid

    # Composite levels (containing '_') are skipped; each remaining level
    # maps its stemmed name to the summarized null gvid string.
    return {
        self.stem(cls.level): str(cls.nullval().summarize())
        for sl, cls in GVid.sl_map.items()
        if '_' not in cls.level
    }
python
{ "resource": "" }
q37429
SearchTermParser.parse
train
def parse(self, s, term_join=None):
    """ Parses search term to

    Args:
        s (str): string with search term.
        or_join (callable): function to join 'OR' terms.

    Returns:
        dict: all of the terms grouped by marker. Key is a marker, value is
            a term.

    Example:
        >>> SearchTermParser().parse('table2 from 1978 to 1979 in california')
        {'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}
    """
    if not term_join:
        term_join = lambda x: '(' + ' OR '.join(x) + ')'

    toks = self.scan(s)

    # Examples: starting with this query:
    # diabetes from 2014 to 2016 source healthindicators.gov

    # Assume the first term is ABOUT, if it is not marked with a marker.
    if toks and toks[0] and (toks[0][0] == self.TERM or toks[0][0] == self.QUOTEDTERM):
        toks = [(self.MARKER, 'about')] + toks

    # The example query produces this list of tokens:
    #[(3, 'about'),
    # (0, 'diabetes'),
    # (3, 'from'),
    # (4, 2014),
    # (3, 'to'),
    # (4, 2016),
    # (3, 'source'),
    # (0, 'healthindicators.gov')]

    # Group the terms by their marker.
    bymarker = []
    for t in toks:
        if t[0] == self.MARKER:
            bymarker.append((t[1], []))
        else:
            bymarker[-1][1].append(t)

    # After grouping tokens by their markers
    # [('about', [(0, 'diabetes')]),
    #  ('from', [(4, 2014)]),
    #  ('to', [(4, 2016)]),
    #  ('source', [(0, 'healthindicators.gov')])
    # ]

    # Convert some of the markers based on their contents. This just changes
    # the marker type for keywords we'll do more adjustments later.
    comps = []
    for t in bymarker:
        t = list(t)
        # A lone 'in' term naming a known geograin is really a 'by' grain.
        if t[0] == 'in' and len(t[1]) == 1 and isinstance(t[1][0][1], string_types) and self.stem(
                t[1][0][1]) in self.geograins.keys():
            t[0] = 'by'

        # If the from term isn't an integer, then it is really a source.
        if t[0] == 'from' and len(t[1]) == 1 and t[1][0][0] != self.YEAR:
            t[0] = 'source'

        comps.append(t)

    # After conversions
    # [['about', [(0, 'diabetes')]],
    #  ['from', [(4, 2014)]],
    #  ['to', [(4, 2016)]],
    #  ['source', [(0, 'healthindicators.gov')]]]

    # Join all of the terms into single marker groups
    groups = {marker: [] for marker, _ in comps}

    for marker, terms in comps:
        groups[marker] += [term for marker, term in terms]

    # At this point, the groups dict is formed, but it will have a list
    # for each marker that has multiple terms.

    # Only a few of the markers should have more than one term, so move
    # extras to the about group
    for marker, group in groups.items():
        if marker == 'about':
            continue

        if len(group) > 1 and marker not in self.multiterms:
            groups[marker], extras = [group[0]], group[1:]
            if not 'about' in groups:
                groups['about'] = extras
            else:
                groups['about'] += extras

        if marker == 'by':
            # Replace geograin terms with their gvid values.
            groups['by'] = [self.geograins.get(self.stem(e)) for e in group]

    for marker, terms in iteritems(groups):
        if len(terms) > 1:
            # NOTE(review): `marker in 'in'` is a substring test, so it is
            # also true for marker == 'i' or 'n' — confirm this was meant
            # to be `marker == 'in'`.
            if marker in 'in':
                groups[marker] = ' '.join(terms)
            else:
                groups[marker] = term_join(terms)
        elif len(terms) == 1:
            groups[marker] = terms[0]
        else:
            pass

    # After grouping:
    # {'to': 2016,
    #  'about': 'diabetes',
    #  'from': 2014,
    #  'source': 'healthindicators.gov'}

    # If there were any markers with multiple terms, they would be cast in
    # the or_join form.

    return groups
python
{ "resource": "" }
q37430
Sqla.command_create_tables
train
def command_create_tables(self, meta_name=None, verbose=False):
    '''
    Create tables according sqlalchemy data model. Is not a complex
    migration tool like alembic, just creates tables that does not exist::

        ./manage.py sqla:create_tables [--verbose] [meta_name]
    '''
    def _create_tables_for(metadata):
        # Create each table on the engine its metadata is bound to.
        for table in metadata.sorted_tables:
            if verbose:
                print(self._schema(table))
            else:
                print(' '+table.name)
            engine = self.session.get_bind(clause=table)
            metadata.create_all(bind=engine, tables=[table])

    if isinstance(self.metadata, MetaData):
        # Single-metadata configuration.
        print('Creating tables...')
        _create_tables_for(self.metadata)
    else:
        # Mapping of names to metadata; optionally filter by meta_name.
        for current_meta_name, metadata in self.metadata.items():
            if meta_name not in (current_meta_name, None):
                continue
            print('Creating tables for {}...'.format(current_meta_name))
            _create_tables_for(metadata)
python
{ "resource": "" }
q37431
Sqla.command_gen
train
def command_gen(self, *names):
    '''
    Runs generator functions.

    Run `docs` generator function::

        ./manage.py sqla:gen docs

    Run `docs` generator function with `count=10`::

        ./manage.py sqla:gen docs:10
    '''
    if not names:
        sys.exit('Please provide generator names')
    for spec in names:
        # A spec may carry an optional count suffix: "name:count".
        name, count = spec, 0
        if ':' in spec:
            name, count = spec.split(':', 1)
            count = int(count)
        create = self.generators[name]
        print('Generating `{0}` count={1}'.format(name, count))
        create(self.session, count)
        self.session.commit()
python
{ "resource": "" }
q37432
_get_table_names
train
def _get_table_names(statement):
    """ Returns table names found in the query.

    NOTE. This routine would use the sqlparse parse tree, but vnames don't
    parse very well.

    Args:
        statement (sqlparse.sql.Statement): parsed by sqlparse sql statement.

    Returns:
        list of str
    """
    tokens = statement.to_unicode().split()
    found = set()
    for pos, tok in enumerate(tokens):
        lowered = tok.lower()
        # A table name follows FROM or any flavor of JOIN.
        if lowered == 'from' or lowered.endswith('join'):
            found.add(tokens[pos + 1].rstrip(';'))
    return list(found)
python
{ "resource": "" }
q37433
DatabaseBackend.install
train
def install(self, connection, partition, table_name=None, index_columns=None,
            materialize=False, logger=None):
    """ Installs partition's mpr to the database to allow to execute sql
    queries over mpr.

    Args:
        connection:
        partition (orm.Partition):
        materialize (boolean): if True, create generic table. If False
            create MED over mpr.

    Returns:
        str: name of the created table.
    """
    # Abstract: concrete backends must provide the installation logic.
    raise NotImplementedError
python
{ "resource": "" }
q37434
DatabaseBackend.install_table
train
def install_table(self, connection, table, logger=None):
    """ Installs all partitions of the table and creates a view with the
    union of all partitions.

    Args:
        connection: connection to database who stores mpr data.
        table (orm.Table):
        logger: optional logger; debug output is skipped when omitted.
    """
    # First install all partitions of the table.
    queries = []
    query_tmpl = 'SELECT * FROM {}'
    for partition in table.partitions:
        partition.localize()
        installed_name = self.install(connection, partition)
        queries.append(query_tmpl.format(installed_name))

    # Now create a view with the union of all partitions.
    query = 'CREATE VIEW {} AS {} '.format(
        table.vid, '\nUNION ALL\n'.join(queries))
    # BUG FIX: `logger` defaults to None but was dereferenced
    # unconditionally, crashing every call that omitted it.
    if logger:
        logger.debug('Creating view for table.\n table: {}\n query: {}'.format(table.vid, query))
    self._execute(connection, query, fetch=False)
python
{ "resource": "" }
q37435
DatabaseBackend.query
train
def query(self, connection, query, fetch=True):
    """ Creates virtual tables for all partitions found in the query and
    executes query.

    Args:
        query (str): sql query
        fetch (bool): fetch result from database if True, do not fetch
            overwise.
    """
    self.install_module(connection)

    statements = sqlparse.parse(sqlparse.format(query, strip_comments=True))

    # install all partitions and replace table names in the query.
    # logger.debug('Finding and installing all partitions from query. \n query: {}'.format(query))
    # NOTE(review): new_query is never populated below — the partition
    # install step is commented out, so the statement executes unmodified.
    new_query = []

    if len(statements) > 1:
        raise BadSQLError("Can only query a single statement")

    if len(statements) == 0:
        raise BadSQLError("DIdn't get any statements in '{}'".format(query))

    statement = statements[0]

    logger.debug(
        'Searching statement for partition ref.\n statement: {}'.format(statement.to_unicode()))

    #statement = self.install_statement(connection, statement.to_unicode())

    logger.debug(
        'Executing updated query after partition install.'
        '\n query before update: {}\n query to execute (updated query): {}'
        .format(statement, new_query))

    return self._execute(connection, statement.to_unicode(), fetch=fetch)
python
{ "resource": "" }
q37436
build
train
def build(id=None, name=None, revision=None, temporary_build=False,
          timestamp_alignment=False, no_build_dependencies=False,
          keep_pod_on_failure=False, force_rebuild=False,
          rebuild_mode=common.REBUILD_MODES_DEFAULT):
    """
    Trigger a BuildConfiguration by name or ID
    """
    response = build_raw(id, name, revision, temporary_build,
                         timestamp_alignment, no_build_dependencies,
                         keep_pod_on_failure, force_rebuild, rebuild_mode)
    if response:
        return utils.format_json(response)
python
{ "resource": "" }
q37437
get_build_configuration
train
def get_build_configuration(id=None, name=None):
    """
    Retrieve a specific BuildConfiguration
    """
    response = get_build_configuration_raw(id, name)
    if response:
        return utils.format_json(response)
python
{ "resource": "" }
q37438
update_build_configuration
train
def update_build_configuration(id, **kwargs):
    """
    Update an existing BuildConfiguration with new information

    :param id: ID of BuildConfiguration to update
    :param name: Name of BuildConfiguration to update
    :return:
    """
    response = update_build_configuration_raw(id, **kwargs)
    if response:
        return utils.format_json(response)
python
{ "resource": "" }
q37439
list_build_configurations_for_product
train
def list_build_configurations_for_product(id=None, name=None, page_size=200,
                                          page_index=0, sort="", q=""):
    """
    List all BuildConfigurations associated with the given Product.
    """
    response = list_build_configurations_for_product_raw(
        id, name, page_size, page_index, sort, q)
    if response:
        return utils.format_json_list(response)
python
{ "resource": "" }
q37440
list_build_configurations_for_project
train
def list_build_configurations_for_project(id=None, name=None, page_size=200,
                                          page_index=0, sort="", q=""):
    """
    List all BuildConfigurations associated with the given Project.
    """
    response = list_build_configurations_for_project_raw(
        id, name, page_size, page_index, sort, q)
    if response:
        return utils.format_json_list(response)
python
{ "resource": "" }
q37441
list_build_configurations_for_product_version
train
def list_build_configurations_for_product_version(product_id, version_id,
                                                  page_size=200, page_index=0,
                                                  sort="", q=""):
    """
    List all BuildConfigurations associated with the given ProductVersion

    :param product_id: ID of the Product
    :param version_id: ID of the ProductVersion within that Product
    """
    # BUG FIX: original called list_build_configurations_for_project_raw
    # (a copy-paste from the project listing above), passing product/version
    # IDs into an endpoint that expects a project id/name. Call the
    # product-version raw endpoint instead.
    data = list_build_configurations_for_product_version_raw(
        product_id, version_id, page_size, page_index, sort, q)
    if data:
        return utils.format_json_list(data)
python
{ "resource": "" }
q37442
add_dependency
train
def add_dependency(id=None, name=None, dependency_id=None, dependency_name=None):
    """
    Add an existing BuildConfiguration as a dependency to another
    BuildConfiguration.
    """
    response = add_dependency_raw(id, name, dependency_id, dependency_name)
    if response:
        return utils.format_json_list(response)
python
{ "resource": "" }
q37443
remove_dependency
train
def remove_dependency(id=None, name=None, dependency_id=None, dependency_name=None):
    """
    Remove a BuildConfiguration from the dependency list of another
    BuildConfiguration
    """
    response = remove_dependency_raw(id, name, dependency_id, dependency_name)
    if response:
        return utils.format_json_list(response)
python
{ "resource": "" }
q37444
list_product_versions_for_build_configuration
train
def list_product_versions_for_build_configuration(id=None, name=None,
                                                  page_size=200, page_index=0,
                                                  sort="", q=""):
    """
    List all ProductVersions associated with a BuildConfiguration
    """
    response = list_product_versions_for_build_configuration_raw(
        id, name, page_size, page_index, sort, q)
    if response:
        return utils.format_json_list(response)
python
{ "resource": "" }
q37445
add_product_version_to_build_configuration
train
def add_product_version_to_build_configuration(id=None, name=None,
                                               product_version_id=None):
    """
    Associate an existing ProductVersion with a BuildConfiguration
    """
    # BUG FIX: original called remove_product_version_from_build_configuration_raw
    # — a copy-paste from the sibling remove function — so "add" actually
    # removed the association. Call the add endpoint instead.
    data = add_product_version_to_build_configuration_raw(
        id, name, product_version_id)
    if data:
        return utils.format_json_list(data)
python
{ "resource": "" }
q37446
remove_product_version_from_build_configuration
train
def remove_product_version_from_build_configuration(id=None, name=None, product_version_id=None):
    """ Remove a ProductVersion from association with a BuildConfiguration. """
    response = remove_product_version_from_build_configuration_raw(id, name, product_version_id)
    return utils.format_json_list(response) if response else None
python
{ "resource": "" }
q37447
list_revisions_of_build_configuration
train
def list_revisions_of_build_configuration(id=None, name=None, page_size=200, page_index=0, sort=""):
    """ List audited revisions of a BuildConfiguration. """
    response = list_revisions_of_build_configuration_raw(id, name, page_size, page_index, sort)
    return utils.format_json_list(response) if response else None
python
{ "resource": "" }
q37448
get_revision_of_build_configuration
train
def get_revision_of_build_configuration(revision_id, id=None, name=None):
    """ Get a specific audited revision of a BuildConfiguration. """
    response = get_revision_of_build_configuration_raw(revision_id, id, name)
    return utils.format_json_list(response) if response else None
python
{ "resource": "" }
q37449
list_build_configurations
train
def list_build_configurations(page_size=200, page_index=0, sort="", q=""):
    """ List all BuildConfigurations. """
    response = list_build_configurations_raw(page_size, page_index, sort, q)
    return utils.format_json_list(response) if response else None
python
{ "resource": "" }
q37450
tzabbr_register
train
def tzabbr_register(abbr, name, region, zone, dst):
    """Register a new timezone abbreviation in the global registry.

    If another abbreviation with the same name has already been registered,
    the new abbreviation is only registered in the region-specific dictionary,
    not in the global mapping (first registration wins the global slot).
    """
    newabbr = tzabbr()
    newabbr.abbr = abbr
    newabbr.name = name
    newabbr.region = region
    newabbr.zone = zone
    newabbr.dst = dst

    # First registration of this abbreviation wins the global slot.
    if abbr not in all:
        all[abbr] = newabbr

    if region not in regions:
        regions[region] = {}

    # A region must not define the same abbreviation twice.
    # NOTE(review): `assert` is stripped under `python -O`; consider raising
    # ValueError if this is meant as real input validation.
    assert abbr not in regions[region]
    regions[region][abbr] = newabbr
python
{ "resource": "" }
q37451
create_license
train
def create_license(**kwargs):
    """ Create a new License from the given keyword arguments. """
    license_body = create_license_object(**kwargs)
    response = utils.checked_api_call(pnc_api.licenses, 'create_new', body=license_body)
    return utils.format_json(response.content) if response else None
python
{ "resource": "" }
q37452
get_license
train
def get_license(id):
    """
    Get a specific License by ID.

    (Docstring fix: lookup by fullname is not supported here -- only the id
    is passed through to the 'get_specific' REST call.)
    """
    response = utils.checked_api_call(pnc_api.licenses, 'get_specific', id=id)
    if response:
        return utils.format_json(response.content)
python
{ "resource": "" }
q37453
delete_license
train
def delete_license(license_id):
    """ Delete a License by ID. """
    response = utils.checked_api_call(pnc_api.licenses, 'delete', id=license_id)
    return utils.format_json(response.content) if response else None
python
{ "resource": "" }
q37454
update_license
train
def update_license(license_id, **kwargs):
    """ Replace the License with the given ID with a new License built from kwargs. """
    to_update = pnc_api.licenses.get_specific(id=license_id).content
    # Only overwrite fields for which a truthy value was supplied.
    for field, value in iteritems(kwargs):
        if value:
            setattr(to_update, field, value)
    response = utils.checked_api_call(
        pnc_api.licenses, 'update', id=int(license_id), body=to_update)
    return utils.format_json(response.content) if response else None
python
{ "resource": "" }
q37455
list_licenses
train
def list_licenses(page_size=200, page_index=0, sort="", q=""):
    """ List all Licenses. """
    response = utils.checked_api_call(pnc_api.licenses, 'get_all',
                                      page_size=page_size,
                                      page_index=page_index,
                                      sort=sort, q=q)
    return utils.format_json_list(response.content) if response else None
python
{ "resource": "" }
q37456
Resizer.transform
train
def transform(self, img, transformation, params):
    '''Apply a named transformation to the image.

    New transformations can be defined as methods::

        def do__transformationname(self, img, transformation, params):
            'returns new image with transformation applied'

        def new_size__transformationname(self, size, target_size, params):
            'dry run, returns a size of image if transformation is applied'
    '''
    # Transformations MUST be idempotent: iktomi.cms image upload may apply
    # the transformation twice (on upload after crop for the TransientFile,
    # and again on object save for the PersistentFile).
    handler = getattr(self, 'do__' + transformation)
    return handler(img, transformation, params)
python
{ "resource": "" }
q37457
ResizeMixed.get_resizer
train
def get_resizer(self, size, target_size):
    '''Choose a resizer depending on the image's aspect ratio.

    Wide images (width >= height * rate) get the horizontal resizer,
    everything else the vertical one.
    '''
    width, height = size
    is_wide = width >= height * self.rate
    return self.hor_resize if is_wide else self.vert_resize
python
{ "resource": "" }
q37458
Playlist.next_song
train
def next_song(self): """next song for player, calculated based on playback_mode""" # 如果没有正在播放的歌曲,找列表里面第一首能播放的 if self.current_song is None: return self._get_good_song() if self.playback_mode == PlaybackMode.random: next_song = self._get_good_song(random_=True) else: current_index = self._songs.index(self.current_song) if current_index == len(self._songs) - 1: if self.playback_mode in (PlaybackMode.loop, PlaybackMode.one_loop): next_song = self._get_good_song() elif self.playback_mode == PlaybackMode.sequential: next_song = None else: next_song = self._get_good_song(base=current_index+1) return next_song
python
{ "resource": "" }
q37459
Playlist.previous_song
train
def previous_song(self):
    """previous song for player to play

    NOTE: not the last played song -- the list predecessor of the current
    song (or a random one in random mode).
    """
    # No current song: search backwards from the end of the list.
    if self.current_song is None:
        return self._get_good_song(base=-1, direction=-1)

    if self.playback_mode == PlaybackMode.random:
        previous_song = self._get_good_song(direction=-1)
    else:
        current_index = self._songs.index(self.current_song)
        # base=current_index - 1 wraps to the end via negative indexing
        # when the current song is the first in the list.
        previous_song = self._get_good_song(base=current_index - 1, direction=-1)
    return previous_song
python
{ "resource": "" }
q37460
AbstractPlayer.state
train
def state(self, value):
    """Set the player state and emit the state_changed signal.

    Outer objects should not assign state directly; use the ``pause`` /
    ``resume`` / ``stop`` / ``play`` methods instead.  The new value is
    stored before listeners are notified.
    """
    self._state = value
    self.state_changed.emit(value)
python
{ "resource": "" }
q37461
alter_poms
train
def alter_poms(pom_dir, additional_params, repo_url=None, mvn_repo_local=None):
    """
    Run `mvn clean` with the provided additional parameters to perform pom
    updates by pom-manipulation-ext.

    :param pom_dir: directory containing the pom.xml to process
    :param additional_params: space-separated extra CLI arguments for Maven
    :param repo_url: if given, a mirror settings.xml pointing at this URL is
        generated and passed to Maven via `-s`
    :param mvn_repo_local: if given, passed as -Dmaven.repo.local
    """
    work_dir = os.getcwd()
    os.chdir(pom_dir)
    try:
        if repo_url:
            settings_filename = create_mirror_settings(repo_url)
        else:
            settings_filename = None

        args = ["mvn", "clean"]
        # BUG FIX: the settings file was previously attached only when
        # mvn_repo_local was set, which (a) ignored the generated mirror
        # settings when only repo_url was given and (b) passed `-s None`
        # when mvn_repo_local was set without repo_url.
        if settings_filename:
            args.extend(["-s", settings_filename])
        if mvn_repo_local:
            args.append("-Dmaven.repo.local=%s" % mvn_repo_local)
        args.extend(additional_params.split(" "))

        logging.debug("Running command: %s", " ".join(args))
        command = Popen(args, stdout=PIPE, stderr=STDOUT)
        stdout = command.communicate()[0]
        if command.returncode:
            logging.error("POM manipulation failed. Output:\n%s" % stdout)
        else:
            logging.debug("POM manipulation succeeded. Output:\n%s" % stdout)
    finally:
        # Always restore the original working directory.
        os.chdir(work_dir)
python
{ "resource": "" }
q37462
pom_contains_modules
train
def pom_contains_modules():
    """
    Read pom.xml in the current working directory and report whether it has a
    non-empty <modules> tag.

    :returns: True if the pom declares modules, False otherwise
    """
    # `with` guarantees the file handle is closed even on read errors,
    # replacing the manual try/finally/close bookkeeping.
    with open("pom.xml") as pom_file:
        pom = pom_file.read()
    artifact = MavenArtifact(pom=pom)
    return bool(artifact.modules)
python
{ "resource": "" }
q37463
create_mirror_settings
train
def create_mirror_settings(repo_url):
    """
    Create settings.xml in the current working directory, which when used
    makes Maven use the given repo URL as a mirror of all repositories.

    :param repo_url: the repository URL to use
    :returns: filepath to the created file
    """
    settings_path = os.path.join(os.getcwd(), "settings.xml")
    lines = [
        '<?xml version="1.0" encoding="UTF-8"?>\n',
        '<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n',
        '          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n',
        '          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n',
        '<mirrors>\n',
        '  <mirror>\n',
        '    <id>repo-mirror</id>\n',
        '    <url>%s</url>\n' % repo_url,
        '    <mirrorOf>*</mirrorOf>\n',
        '  </mirror>\n',
        '</mirrors>\n',
        '</settings>\n',
    ]
    # `with` guarantees the file is closed even if a write fails, replacing
    # the old manual try/finally/close bookkeeping and many tiny write calls.
    with open(settings_path, "w") as settings_file:
        settings_file.writelines(lines)
    return settings_path
python
{ "resource": "" }
q37464
API.search
train
def search(self, s, stype=1, offset=0, total='true', limit=60):
    """Get a songs list for the given search keywords.

    :param s: search keywords
    :param stype: search type (1 = songs)
    :param offset: paging offset
    :param total: whether to return the total count ('true'/'false')
    :param limit: maximum number of results to return
    :returns: list of song dicts, or [] when the request fails
    """
    action = uri + '/search/get'
    data = {
        's': s,
        'type': stype,
        'offset': offset,
        'total': total,
        # BUG FIX: the `limit` parameter was ignored -- 60 was hard-coded here.
        'limit': limit
    }
    resp = self.request('POST', action, data)
    if resp['code'] == 200:
        return resp['result']['songs']
    return []
python
{ "resource": "" }
q37465
_init_index
train
def _init_index(root_dir, schema, index_name):
    """
    Creates a new index or opens an existing one.

    Args:
        root_dir (str): root dir where to find or create index.
        schema (whoosh.fields.Schema): schema of the index to create or open.
        index_name (str): name of the index.

    Returns:
        tuple ((whoosh.index.FileIndex, str)): the index and its directory.
    """
    index_dir = os.path.join(root_dir, index_name)
    try:
        if os.path.exists(index_dir):
            return open_dir(index_dir), index_dir
        os.makedirs(index_dir)
        return create_in(index_dir, schema), index_dir
    except Exception as e:
        logger.error("Init error: failed to open search index at: '{}': {} ".format(index_dir, e))
        raise
python
{ "resource": "" }
q37466
DatasetWhooshIndex.reset
train
def reset(self):
    """ Resets the index by removing its directory and dropping the handle. """
    path = self.index_dir
    if os.path.exists(path):
        rmtree(path)
    self.index = None
python
{ "resource": "" }
q37467
DatasetWhooshIndex._get_generic_schema
train
def _get_generic_schema(self):
    """ Builds whoosh's generic schema of the dataset. """
    return Schema(
        vid=ID(stored=True, unique=True),  # object id
        title=NGRAMWORDS(),
        keywords=KEYWORD,  # coverage identifiers, ISO times, GVIDs, source names/abbrevs
        doc=TEXT)          # generated document for the core of the topic search
python
{ "resource": "" }
q37468
IdentifierWhooshIndex.search
train
def search(self, search_phrase, limit=None):
    """ Finds identifiers by search phrase.

    Args:
        search_phrase (str): phrase parsed against the 'name' field.
        limit (int, optional): maximum number of hits (None = no limit).

    Yields:
        IdentifierSearchResult: one per hit, ranked by the custom score below.
    """
    self._parsed_query = search_phrase
    schema = self._get_generic_schema()
    parser = QueryParser('name', schema=schema)
    query = parser.parse(search_phrase)

    # Custom weighting: favor matches near the start of the name, favor
    # shorter names, and add standard BM25F relevance on top.
    class PosSizeWeighting(scoring.WeightingModel):

        def scorer(self, searcher, fieldname, text, qf=1):
            return self.PosSizeScorer(searcher, fieldname, text, qf=qf)

        class PosSizeScorer(scoring.BaseScorer):
            def __init__(self, searcher, fieldname, text, qf=1):
                self.searcher = searcher
                self.fieldname = fieldname
                self.text = text
                self.qf = qf
                self.bmf25 = scoring.BM25F()

            def max_quality(self):
                # Upper bound used by whoosh's block-quality optimizations.
                return 40

            def score(self, matcher):
                poses = matcher.value_as('positions')
                # NOTE(review): `searcher` below is the *outer* with-target
                # defined further down, not self.searcher.  It works because
                # score() only runs inside that with-block, but confirm this
                # closure is intentional.
                return (2.0 / (poses[0] + 1) +  # first occurrence position
                        1.0 / (len(self.text) / 4 + 1) +  # length of the text
                        self.bmf25.scorer(searcher, self.fieldname, self.text).score(matcher))

    with self.index.searcher(weighting=PosSizeWeighting()) as searcher:
        results = searcher.search(query, limit=limit)
        for hit in results:
            vid = hit['identifier']
            yield IdentifierSearchResult(
                score=hit.score, vid=vid,
                type=hit.get('type', False),
                name=hit.get('name', ''))
python
{ "resource": "" }
q37469
IdentifierWhooshIndex._get_generic_schema
train
def _get_generic_schema(self):
    """ Builds whoosh's generic schema for identifier documents. """
    return Schema(
        identifier=ID(stored=True),  # partition versioned id
        type=ID(stored=True),
        name=NGRAM(phrase=True, stored=True, minsize=2, maxsize=8))
python
{ "resource": "" }
q37470
PartitionWhooshIndex.all
train
def all(self):
    """ Returns a list with all indexed partitions. """
    return [
        PartitionSearchResult(dataset_vid=doc['dataset_vid'], vid=doc['vid'], score=1)
        for doc in self.index.searcher().documents()
    ]
python
{ "resource": "" }
q37471
PartitionWhooshIndex._make_query_from_terms
train
def _make_query_from_terms(self, terms):
    """ Returns an FTS query for partitions built from decomposed search terms.

    Args:
        terms (dict or str): raw search terms.

    Returns:
        str: the FTS query.
    """
    expanded_terms = self._expand_terms(terms)

    cterms = ''
    if expanded_terms['doc']:
        # OR together all free-text document terms.
        cterms = self.backend._or_join(expanded_terms['doc'])

    keywords = expanded_terms['keywords']
    frm_to = self._from_to_as_term(expanded_terms['from'], expanded_terms['to'])

    if frm_to:
        keywords.append(frm_to)

    if keywords:
        # AND the keywords field restriction onto the doc terms, if any.
        if cterms:
            cterms = self.backend._and_join(
                [cterms, self.backend._field_term('keywords', expanded_terms['keywords'])])
        else:
            cterms = self.backend._field_term('keywords', expanded_terms['keywords'])

    logger.debug('partition terms conversion: `{}` terms converted to `{}` query.'.format(terms, cterms))
    return cterms
python
{ "resource": "" }
q37472
PartitionWhooshIndex._from_to_as_term
train
def _from_to_as_term(self, frm, to):
    """ Turns from and to years into an FTS range term.

    Args:
        frm (str): from year
        to (str): to year

    Returns:
        FTS query str with the years range, or None if both ends are empty.

    The int()/str() round trip and the careful spacing exist because there
    must be no space between 'TO' and a bracket on an open-ended side of the
    range.
    """
    def coerce(prefix, year, suffix):
        # Non-numeric or missing values collapse to the empty string.
        try:
            return '{}{}{}'.format(prefix, int(year), suffix)
        except (ValueError, TypeError):
            return ''

    lo = coerce('', frm, ' ') if frm else ''
    hi = coerce(' ', to, '') if to else ''

    if lo or hi:
        return '[{}TO{}]'.format(lo, hi)
    return None
python
{ "resource": "" }
q37473
SimSymbolicDbgMemory.copy
train
def copy(self, _): """ Return a copy of the SimMemory. """ #l.debug("Copying %d bytes of memory with id %s." % (len(self.mem), self.id)) c = SimSymbolicDbgMemory( mem=self.mem.branch(), memory_id=self.id, endness=self.endness, abstract_backer=self._abstract_backer, read_strategies=[ s.copy() for s in self.read_strategies ], write_strategies=[ s.copy() for s in self.write_strategies ], stack_region_map=self._stack_region_map, generic_region_map=self._generic_region_map ) return c
python
{ "resource": "" }
q37474
AmbrySeries.column
train
def column(self):
    """Return the ambry column for this series, or None when it cannot be
    resolved (no partition, no name, or the column is not found)."""
    from ambry.orm.exc import NotFoundError

    if not hasattr(self, 'partition') or not self.name:
        return None

    try:
        try:
            return self.partition.column(self.name)
        except AttributeError:
            # Partition object without a column() accessor: go through
            # its table instead.
            return self.partition.table.column(self.name)
    except NotFoundError:
        return None
python
{ "resource": "" }
q37475
get_handler
train
def get_handler(progname, address=None, proto=None, facility=None, fmt=None, datefmt=None, **_):
    """Helper function to create a Syslog handler.

    See `ulogger.syslog.SyslogHandlerBuilder` for arguments and supported
    keyword arguments.

    Returns:
        (obj): Instance of `logging.SysLogHandler`
    """
    return SyslogHandlerBuilder(
        progname,
        address=address,
        proto=proto,
        facility=facility,
        fmt=fmt,
        datefmt=datefmt,
    ).get_handler()
python
{ "resource": "" }
q37476
TargetRepositoryRest.repository_type
train
def repository_type(self, repository_type):
    """
    Sets the repository_type of this TargetRepositoryRest.

    :param repository_type: The repository_type of this TargetRepositoryRest.
    :type: str
    :raises ValueError: if the value is not one of the allowed types
    """
    allowed_values = ["MAVEN", "NPM", "COCOA_POD", "GENERIC_PROXY"]
    if repository_type in allowed_values:
        self._repository_type = repository_type
        return
    raise ValueError(
        "Invalid value for `repository_type` ({0}), must be one of {1}"
        .format(repository_type, allowed_values)
    )
python
{ "resource": "" }
q37477
worker
train
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """
    Custom multiprocessing pool worker for bundle operations.

    Pulls exactly one task from *inqueue*, unpacks the bundle vid from the
    task's args, builds a fresh Library/Bundle in this child process, runs
    the target function, and puts ``(job, i, (ok, payload))`` on *outqueue*.

    :param inqueue: task queue (multiprocessing SimpleQueue-like)
    :param outqueue: result queue
    :param initializer: optional callable run once before processing
    :param initargs: args for *initializer*
    :param maxtasks: unused beyond validation (kept for pool compatibility)
    """
    from ambry.library import new_library
    from ambry.run import get_runconfig
    import traceback

    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get

    # Close the ends of the queues this child does not use.
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    try:
        task = get()
    except (EOFError, IOError):
        debug('worker got EOFError or IOError -- exiting')
        return

    if task is None:
        # None is the pool's shutdown sentinel.
        debug('worker got sentinel -- exiting')
        return

    job, i, func, args, kwds = task

    # func = mapstar = map(*args)
    # Since there is only one source build per process, we know the structure
    # of the args beforehand.
    mp_func = args[0][0]
    mp_args = list(args[0][1][0])

    library = new_library(get_runconfig())
    library.database.close()  # Maybe it is still open after the fork.
    library.init_debug()

    # First positional arg is the bundle vid; it is replaced below with the
    # resolved Bundle object before calling mp_func.
    bundle_vid = mp_args[0]

    try:
        b = library.bundle(bundle_vid)
        library.logger = b.logger  # So library logs to the same file as the bundle.
        b = b.cast_to_subclass()
        b.multi = True  # In parent it is a number; in child it just needs to be true for the right logger template
        b.is_subprocess = True
        b.limited_run = bool(int(os.getenv('AMBRY_LIMITED_RUN', 0)))
        assert b._progress == None  # Don't want to share connections across processes
        mp_args[0] = b
        result = (True, [mp_func(*mp_args)])
    except Exception as e:
        import traceback
        tb = traceback.format_exc()
        # NOTE(review): if library.bundle() itself raised, `b` is unbound
        # here and these error calls raise NameError; also `e.message` does
        # not exist on Python 3 exceptions -- confirm py2-only usage.
        b.error('Subprocess {} raised an exception: {}'.format(os.getpid(), e.message), False)
        b.error(tb, False)
        result = (False, e)

    assert result
    b.progress.close()
    library.close()

    try:
        put((job, i, result))
    except Exception as e:
        # Result could not be pickled/sent; report a wrapped error instead.
        wrapped = MaybeEncodingError(e, result[1])
        debug("Possible encoding error while sending result: %s" % (wrapped))
        put((job, i, (False, wrapped)))
python
{ "resource": "" }
q37478
init_library
train
def init_library(database_dsn, accounts_password, limited_run=False):
    """Child-process initializer, set up in Library.process_pool.

    Ignores SIGINT in the child -- the parent catches interrupts and cleans
    up the children -- and passes configuration to the child through
    environment variables.
    """
    import os
    import signal

    signal.signal(signal.SIGINT, signal.SIG_IGN)
    #signal.signal(signal.SIGTERM, sigterm_handler)

    env_values = {
        'AMBRY_DB': database_dsn,
        'AMBRY_LIMITED_RUN': '1' if limited_run else '0',
    }
    if accounts_password:
        env_values['AMBRY_PASSWORD'] = accounts_password
    os.environ.update(env_values)
python
{ "resource": "" }
q37479
unify_mp
train
def unify_mp(b, partition_name):
    """Unify all of the segment partitions for a parent partition, then run
    stats on the MPR file."""
    message = "MP coalesce {}".format(partition_name)
    with b.progress.start('coalesce_mp', 0, message=message) as ps:
        return b.unify_partition(partition_name, None, ps)
python
{ "resource": "" }
q37480
LibraryFilesystem._compose
train
def _compose(self, name, args, mkdir=True):
    """Resolve a named filesystem entry into a path extended with *args*.

    Args:
        name (str): key into the configured filesystem map.
        args (sequence of str): extra path components appended to the base.
        mkdir (bool): when True, create the resulting directory if missing.

    Returns:
        str: the normalized path.

    Raises:
        ConfigurationError: if the resolved path escapes the configured root.
    """
    from os.path import normpath
    from ambry.dbexceptions import ConfigurationError

    # The configured base path may contain a {root} placeholder.
    root = p = self._config.filesystem[name].format(root=self._root)

    if args:
        args = [e.strip() for e in args]
        p = join(p, *args)

    # NOTE(review): the directory is created *before* the escape check
    # below, so a traversal in *args* could create a directory outside the
    # root before the error is raised -- confirm whether that matters here.
    if not isdir(p) and mkdir:
        makedirs(p)

    # normpath collapses '..' components so the startswith check cannot be
    # fooled by relative traversal.
    p = normpath(p)

    if not p.startswith(root):
        raise ConfigurationError("Path for name='{}', args={} resolved outside of define filesystem root"
                                 .format(name, args))

    return p
python
{ "resource": "" }
q37481
LibraryFilesystem.compose
train
def compose(self, name, *args):
    """Compose a path like :meth:`_compose`, but never create the base
    directory."""
    return self._compose(name, args, mkdir=False)
python
{ "resource": "" }
q37482
LibraryFilesystem.database_dsn
train
def database_dsn(self):
    """Library database DSN, with the root dir substituted (for Sqlite).

    Falls back to a root-relative Sqlite file when no DSN is configured.
    """
    configured = self._config.library.database
    if configured:
        return configured.format(root=self._root)
    return 'sqlite:///{root}/library.db'.format(root=self._root)
python
{ "resource": "" }
q37483
make_table_map
train
def make_table_map(table, headers):
    """Create two row-mapping functions from the header layout to the table layout.

    Returns ``(header_fn, body_fn)``.  Given a row laid out like *headers*:

    * ``body_fn(row)`` returns the row rearranged into table-column order,
      with ``None`` for table columns missing from the headers.
    * ``header_fn(row)`` does the same, but missing columns yield the
      column *name* instead of ``None`` (useful for emitting a header row).

    SECURITY/ROBUSTNESS FIX: the previous implementation built lambda source
    code from column names and ran it through eval(), so a hostile or merely
    unusual column name could inject code or break the generated expression.
    Plain closures have identical behavior without eval.
    """
    # Last occurrence wins for duplicate header names, matching the old
    # dict-building loop.
    position = {h: i for i, h in enumerate(headers)}
    # One (column name, source index or None) pair per table column.
    mapping = [(c.name, position.get(c.name)) for c in table.columns]

    def header_fn(row):
        return [row[i] if i is not None else name for name, i in mapping]

    def body_fn(row):
        return [row[i] if i is not None else None for _, i in mapping]

    return header_fn, body_fn
python
{ "resource": "" }
q37484
augment_pipeline
train
def augment_pipeline(pl, head_pipe=None, tail_pipe=None):
    """
    Augment the pipeline by adding a new pipe section to each stage that has
    one or more pipes.  Can be used for debugging.

    :param pl: the pipeline (mapping of stage name -> list of pipes)
    :param head_pipe: prepended to every non-empty stage except 'source'
    :param tail_pipe: appended to every non-empty stage
    """
    for stage_name, pipes in iteritems(pl):
        if not pipes:
            continue
        if head_pipe and stage_name != 'source':
            # Can't put anything before the source.
            pipes.insert(0, head_pipe)
        if tail_pipe:
            pipes.append(tail_pipe)
python
{ "resource": "" }
q37485
ReplaceWithDestHeader.process_header
train
def process_header(self, headers):
    """Ignore the incoming header and emit the destination table's column
    names instead (the first destination column is dropped)."""
    dest_columns = self.source.dest_table.columns
    return [column.name for column in dest_columns][1:]
python
{ "resource": "" }
q37486
WriteToPartition.rate
train
def rate(self):
    """Insertion rate in records per second.

    Uses the recorded end time when the run has finished, otherwise "now".
    """
    end_time = self._end_time or time.time()
    return self._count / (end_time - self._start_time)
python
{ "resource": "" }
q37487
Pipeline._subset
train
def _subset(self, subset):
    """Return a new pipeline containing only the named segments."""
    pl = Pipeline(bundle=self.bundle)
    for group_name, pl_segment in iteritems(self):
        if group_name in subset:
            pl[group_name] = pl_segment
    return pl
python
{ "resource": "" }
q37488
Pipeline.configure
train
def configure(self, pipe_config):
    """Configure the pipeline from a dict of segment name -> pipe specs.

    String pipe specs are eval'd in a context built from ambry.etl and the
    bundle module; they may carry a one-character location prefix:
    '+' append, '-' prepend, '!' replace pipes of the same class.
    The special keys 'final' (bundle method names to run afterwards) and
    'replace' (mapping of pipe -> replacement) are handled separately.
    """
    # Create a context for evaluating the code for each pipeline. This removes the need
    # to qualify the class names with the module
    import ambry.etl
    import sys

    # ambry.build comes from ambry.bundle.files.PythonSourceFile#import_bundle
    eval_locals = dict(list(locals().items()) + list(ambry.etl.__dict__.items())
                       + list(sys.modules['ambry.build'].__dict__.items()))

    # NOTE(review): `replacements` is never used below -- dead variable?
    replacements = {}

    def eval_pipe(pipe):
        # Strings are evaluated in the combined context; anything else is
        # assumed to already be a pipe object/class.
        if isinstance(pipe, string_types):
            try:
                return eval(pipe, {}, eval_locals)
            except SyntaxError as e:
                raise SyntaxError("SyntaxError while parsing pipe '{}' from metadata: {}"
                                  .format(pipe, e))
        else:
            return pipe

    def pipe_location(pipe):
        """Return a location prefix from a pipe, or None if there isn't one """
        # NOTE(review): '$' is recognized here but never handled in the
        # dispatch below -- such pipes are silently dropped; confirm intent.
        if not isinstance(pipe, string_types):
            return None
        elif pipe[0] in '+-$!':
            return pipe[0]
        else:
            return None

    for segment_name, pipes in list(pipe_config.items()):
        if segment_name == 'final':
            # The 'final' segment is actually a list of names of Bundle
            # methods to call after the pipeline completes.
            super(Pipeline, self).__setattr__('final', pipes)
        elif segment_name == 'replace':
            for frm, to in iteritems(pipes):
                self.replace(eval_pipe(frm), eval_pipe(to))
        else:
            # Check if any of the pipes have a location command. If not, the
            # pipe is cleared and the set of pipes replaces the ones that
            # are there.
            if not any(bool(pipe_location(pipe)) for pipe in pipes):
                # Nope, they are all clean
                self[segment_name] = [eval_pipe(pipe) for pipe in pipes]
            else:
                for i, pipe in enumerate(pipes):
                    if pipe_location(pipe):
                        # The pipe is prefixed with a location command
                        location = pipe_location(pipe)
                        pipe = pipe[1:]
                    else:
                        raise PipelineError(
                            'If any pipes in a section have a location command, they all must'
                            ' Segment: {} pipes: {}'.format(segment_name, pipes))

                    ep = eval_pipe(pipe)

                    if location == '+':
                        # append to the segment
                        self[segment_name].append(ep)
                    elif location == '-':
                        # Prepend to the segment
                        self[segment_name].prepend(ep)
                    elif location == '!':
                        # Replace a pipe of the same class
                        if isinstance(ep, type):
                            repl_class = ep
                        else:
                            repl_class = ep.__class__
                        self.replace(repl_class, ep, segment_name)
python
{ "resource": "" }
q37489
Pipeline.replace
train
def replace(self, repl_class, replacement, target_segment_name=None):
    """Replace every pipe of class *repl_class* with *replacement*.

    :param repl_class: pipe class to look for (isinstance check)
    :param replacement: pipe to substitute in its place
    :param target_segment_name: if given, only that segment is modified;
        other segments are left untouched.
    """
    for segment_name, pipes in iteritems(self):
        # BUG FIX: previously any segment other than the target raised a
        # bare Exception, so replace() with a target_segment_name always
        # blew up on the first non-matching segment.  Skip them instead.
        if target_segment_name and segment_name != target_segment_name:
            continue

        repl_pipes = []
        found = False
        for pipe in pipes:
            if isinstance(pipe, repl_class):
                pipe = replacement
                found = True
            repl_pipes.append(pipe)

        if found:
            self[segment_name] = repl_pipes
python
{ "resource": "" }
q37490
PostgreSQLBackend.install
train
def install(self, connection, partition, table_name=None, columns=None, materialize=False, logger=None):
    """ Creates an FDW foreign table (and optionally a materialized view)
    for the given partition, plus a plain view aliasing it.

    Args:
        connection: connection to postgresql
        partition (orm.Partition): partition to expose
        materialize (boolean): if True, create a read-only materialized
            table. If False, only the virtual (foreign) table is used.
        logger: logger used for debug output
            (NOTE(review): defaults to None but is called unconditionally in
            the materialize branch -- confirm callers always pass one)

    Returns:
        str: name of the created view (the partition vid).
    """
    partition.localize()
    self._add_partition(connection, partition)
    fdw_table = partition.vid
    view_table = '{}_v'.format(fdw_table)

    if materialize:
        with connection.cursor() as cursor:
            view_exists = self._relation_exists(connection, view_table)
            if view_exists:
                logger.debug(
                    'Materialized view of the partition already exists.\n partition: {}, view: {}'
                    .format(partition.name, view_table))
            else:
                query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'\
                    .format(view_table, fdw_table)
                logger.debug(
                    'Creating new materialized view of the partition.'
                    '\n partition: {}, view: {}, query: {}'
                    .format(partition.name, view_table, query))
                cursor.execute(query)
                cursor.execute('COMMIT;')

    final_table = view_table if materialize else fdw_table
    with connection.cursor() as cursor:
        # BUG FIX: PostgreSQL does not support CREATE VIEW IF NOT EXISTS
        # (that syntax is SQLite's); CREATE OR REPLACE VIEW is the
        # idempotent PostgreSQL equivalent for an unchanged definition.
        view_q = 'CREATE OR REPLACE VIEW {} AS SELECT * FROM {} '.format(partition.vid, final_table)
        cursor.execute(view_q)
        cursor.execute('COMMIT;')
    return partition.vid
python
{ "resource": "" }
q37491
PostgreSQLBackend.close
train
def close(self):
    """ Closes the database connection and disposes of the engine, if any. """
    connection = getattr(self, '_connection', None)
    if connection:
        logger.debug('Closing postgresql connection.')
        connection.close()
        self._connection = None

    engine = getattr(self, '_engine', None)
    if engine:
        engine.dispose()
python
{ "resource": "" }
q37492
PostgreSQLBackend._get_mpr_table
train
def _get_mpr_table(self, connection, partition): """ Returns name of the postgres table who stores mpr data. Args: connection: connection to postgres db who stores mpr data. partition (orm.Partition): Returns: str: Raises: MissingTableError: if partition table not found in the db. """ # TODO: This is the first candidate for optimization. Add field to partition # with table name and update it while table creation. # Optimized version. # # return partition.mpr_table or raise exception # Not optimized version. # # first check either partition has materialized view. logger.debug( 'Looking for materialized view of the partition.\n partition: {}'.format(partition.name)) foreign_table = partition.vid view_table = '{}_v'.format(foreign_table) view_exists = self._relation_exists(connection, view_table) if view_exists: logger.debug( 'Materialized view of the partition found.\n partition: {}, view: {}' .format(partition.name, view_table)) return view_table # now check for fdw/virtual table logger.debug( 'Looking for foreign table of the partition.\n partition: {}'.format(partition.name)) foreign_exists = self._relation_exists(connection, foreign_table) if foreign_exists: logger.debug( 'Foreign table of the partition found.\n partition: {}, foreign table: {}' .format(partition.name, foreign_table)) return foreign_table raise MissingTableError('postgres database does not have table for {} partition.' .format(partition.vid))
python
{ "resource": "" }
q37493
PostgreSQLBackend._add_partition
train
def _add_partition(self, connection, partition):
    """ Creates the FDW foreign table for the partition.

    Args:
        connection: psycopg2 connection to the postgres database.
        partition (orm.Partition): partition whose datafile backs the table;
            the foreign table is named after the partition vid.
    """
    logger.debug('Creating foreign table for partition.\n partition: {}'.format(partition.name))
    with connection.cursor() as cursor:
        postgres_med.add_partition(cursor, partition.datafile, partition.vid)
python
{ "resource": "" }
q37494
PostgreSQLBackend._get_connection
train
def _get_connection(self):
    """ Returns a (cached) psycopg2 connection to the postgres database.

    The connection is created lazily from the DSN on first use and reused
    afterwards.

    Returns:
        connection to the postgres database that stores mpr data.
    """
    if not getattr(self, '_connection', None):
        logger.debug(
            'Creating new connection.\n dsn: {}'
            .format(self._dsn))
        d = parse_url_to_dict(self._dsn)
        self._connection = psycopg2.connect(
            database=d['path'].strip('/'), user=d['username'], password=d['password'],
            port=d['port'], host=d['hostname'])
        # It takes some time to find the way how to get raw connection from sqlalchemy. So,
        # I leave the commented code.
        #
        # self._engine = create_engine(self._dsn)
        # self._connection = self._engine.raw_connection()
        #
    return self._connection
python
{ "resource": "" }
q37495
PostgreSQLBackend._execute
train
def _execute(self, connection, query, fetch=True):
    """ Executes the given query and optionally returns its result.

    Args:
        connection: connection to the postgres database that stores mpr data.
        query (str): sql query
        fetch (boolean, optional): if True, fetch and return the query
            result; if False, commit instead and return None.

    Returns:
        iterable with the query result, or None when fetch is False.
    """
    with connection.cursor() as cursor:
        cursor.execute(query)
        if fetch:
            return cursor.fetchall()
        cursor.execute('COMMIT;')
python
{ "resource": "" }
q37496
CPI.get
train
def get(self, date=None, country=None):
    """ Get the CPI value for a specific time. Defaults to today.

    This uses the closest method internally but sets limit to one day.

    :param date: a datetime.date/datetime, a year int, or a year string;
        defaults to today's date
    :param country: country code; defaults to self.country
    :raises ValueError: for country "all" or missing CPI data

    BUG FIX: the default used to be ``date=datetime.date.today()``, which is
    evaluated once at import time -- a long-running process would silently
    keep using a stale "today".  ``None`` defers the lookup to call time.
    """
    if date is None:
        date = datetime.date.today()
    if not country:
        country = self.country
    if country == "all":
        raise ValueError("You need to specify a country")
    # Date-like objects are reduced to their year; ints/strings pass through.
    if not isinstance(date, str) and not isinstance(date, int):
        date = date.year
    cpi = self.data.get(country.upper(), {}).get(str(date))
    if not cpi:
        raise ValueError("Missing CPI data for {} for {}".format(
            country, date))
    return CPIResult(date=date, value=cpi)
python
{ "resource": "" }
q37497
Reverse.as_url
train
def as_url(self):
    ''' Reverse object converted to `web.URL`.

    If Reverse is bound to env:
     * try to build relative URL,
     * use current domain name, port and scheme as default
    '''
    # Unfinished scope: finalize first, then convert.
    if '' in self._scope:
        return self._finalize().as_url
    if not self._is_endpoint:
        raise UrlBuildingError('Not an endpoint {}'.format(repr(self)))
    if self._ready:
        path, host = self._path, self._host
    else:
        # Build the URL (no-arg call) and retry.
        return self().as_url

    # XXX there is a little mess with `domain` and `host` terms
    if ':' in host:
        domain, port = host.split(':')
    else:
        domain = host
        port = None

    if self._bound_env:
        request = self._bound_env.request
        # Default port implied by the request scheme.
        scheme_port = {'http': '80',
                       'https': '443'}.get(request.scheme, '80')
        # Domain to compare with the result of build.
        # If both values are equal, domain part can be hidden from result.
        # Take it from route_state, not from env.request, because
        # route_state contains domain values with aliased replaced by their
        # primary value
        primary_domain = self._bound_env._route_state.primary_domain
        host_split = request.host.split(':')
        request_domain = host_split[0]
        request_port = host_split[1] if len(host_split) > 1 else scheme_port
        port = port or request_port

        # Hide the default port; show the host only when it differs from
        # the current request's primary domain or port.
        return URL(path, host=domain or request_domain,
                   port=port if port != scheme_port else None,
                   scheme=request.scheme,
                   fragment=self._fragment,
                   show_host=host and (domain != primary_domain \
                                       or port != request_port))
    # Unbound: emit an absolute URL with whatever host info we have.
    return URL(path, host=domain, port=port,
               fragment=self._fragment, show_host=True)
python
{ "resource": "" }
q37498
URL.from_url
train
def from_url(cls, url, show_host=True): '''Parse string and get URL instance''' # url must be idna-encoded and url-quotted if six.PY2: if isinstance(url, six.text_type): url = url.encode('utf-8') parsed = urlparse(url) netloc = parsed.netloc.decode('utf-8') # XXX HACK else:# pragma: no cover if isinstance(url, six.binary_type): url = url.decode('utf-8', errors='replace') # XXX parsed = urlparse(url) netloc = parsed.netloc query = _parse_qs(parsed.query) host = netloc.split(':', 1)[0] if ':' in netloc else netloc port = netloc.split(':')[1] if ':' in netloc else '' path = unquote(parsed.path) fragment = unquote(parsed.fragment) if not fragment and not url.endswith('#'): fragment = None return cls(path, query, host, port, parsed.scheme, fragment, show_host)
python
{ "resource": "" }
q37499
URL.qs_set
train
def qs_set(self, *args, **kwargs):
    '''Return a copy of the URL with values set in the query MultiDict.

    Accepts either a single positional mapping/pairs argument (multi-value
    semantics: existing values for incoming keys are dropped, then all new
    values added) or keyword arguments (single-value assignment) -- never
    both.
    '''
    if args and kwargs:
        raise TypeError('Use positional args or keyword args not both')
    query = self.query.copy()
    if args:
        incoming = MultiDict(args[0])
        for key in incoming.keys():
            if key in query:
                del query[key]
        for key, value in incoming.items():
            query.add(key, value)
    else:
        for key, value in kwargs.items():
            query[key] = value
    return self._copy(query=query)
python
{ "resource": "" }