code stringlengths 1 18.2k |
|---|
def new_from_bundle_config(self, config):
    """Create a new bundle, or link to an existing one, based on the identity in config data.

    :param config: A Dict form of a bundle.yaml file
    :return: a new Bundle in the NEW state
    """
    ident = Identity.from_dict(config['identity'])

    # Reuse an existing dataset with this vid, or create one from the identity.
    dataset = self._db.dataset(ident.vid, exception=False)
    if not dataset:
        dataset = self._db.new_dataset(**ident.dict)

    new_bundle = Bundle(dataset, self)
    new_bundle.commit()
    new_bundle.state = Bundle.STATES.NEW
    new_bundle.set_last_access(Bundle.STATES.NEW)

    # b.set_file_system(source_url=self._fs.source(ds.name),
    #                   build_url=self._fs.build(ds.name))

    return new_bundle
def bundle(self, ref, capture_exceptions=False):
    """Return a bundle built on a dataset, with the given vid or id reference."""
    from ..orm.exc import NotFoundError

    if isinstance(ref, Dataset):
        ds = ref
    else:
        try:
            ds = self._db.dataset(ref)
        except NotFoundError:
            ds = None

    if not ds:
        # The ref may be a partition reference; use that partition's dataset.
        try:
            ds = self.partition(ref)._bundle.dataset
        except NotFoundError:
            ds = None

    if not ds:
        raise NotFoundError('Failed to find dataset for ref: {}'.format(ref))

    result = Bundle(ds, self)
    result.capture_exceptions = capture_exceptions

    return result
def partition(self, ref, localize=False):
    """Find a partition by ref and convert it to a bundle partition.

    :param ref: A partition reference
    :param localize: If True, copy a remote partition to the local filesystem. Defaults to False
    :raises NotFoundError: if no partition with the given ref is found.
    :return: orm.Partition: the found partition, wrapped by its bundle.
    """
    if not ref:
        raise NotFoundError("No partition for empty ref")

    try:
        ds_on = ObjectNumber.parse(ref).as_dataset
        # Could do it in one SQL query, but this is easier.
        ds = self._db.dataset(ds_on)
        # The refresh is required because in some places the dataset is loaded
        # without its partitions, and if that persists we won't see partitions
        # in it until it is refreshed.
        self.database.session.refresh(ds)
        p = ds.partition(ref)
    except NotObjectNumberError:
        # Not an object number: treat the ref as a name or vname, newest first.
        query = (self.database.session.query(Partition)
                 .filter(or_(Partition.name == str(ref), Partition.vname == str(ref)))
                 .order_by(Partition.vid.desc()))
        p = query.first()

    if not p:
        raise NotFoundError("No partition for ref: '{}'".format(ref))

    b = self.bundle(p.d_vid)
    p = b.wrap_partition(p)

    if localize:
        p.localize()

    return p
def table(self, ref):
    """Find a table by ref and return it.

    Args:
        ref (str): id, vid (versioned id) or name of the table

    Raises:
        NotFoundError: if no table with the given ref is found.

    Returns:
        orm.Table
    """
    try:
        ds_obj_number = ObjectNumber.parse(ref).as_dataset
        dataset = self._db.dataset(ds_obj_number)
        # Could do it in one SQL query, but this is easier.
        table = dataset.table(ref)
    except NotObjectNumberError:
        # Fall back to a name lookup, newest version first.
        query = (self.database.session.query(Table)
                 .filter(Table.name == str(ref))
                 .order_by(Table.vid.desc()))
        table = query.first()

    if not table:
        raise NotFoundError("No table for ref: '{}'".format(ref))

    return table
def remove(self, bundle):
    """Remove a bundle from the library and delete the configuration for it
    from the library database."""
    from six import string_types

    # Accept any string bundle reference, not just Bundle objects.
    if isinstance(bundle, string_types):
        bundle = self.bundle(bundle)

    self.database.remove_dataset(bundle.dataset)
def duplicate(self, b):
    """Duplicate a bundle, with a higher version number.

    This only copies the files, under the theory that the bundle can be
    rebuilt from them.
    """
    on = b.identity.on
    on.revision = on.revision + 1

    try:
        extant = self.bundle(str(on))
        if extant:
            raise ConflictError('Already have a bundle with vid: {}'.format(str(on)))
    except NotFoundError:
        pass

    d = b.dataset.dict
    d['revision'] = on.revision
    d['vid'] = str(on)

    # These are all derived from the identity and will be regenerated.
    for derived in ('name', 'vname', 'version', 'fqname', 'cache_key'):
        del d[derived]

    ds = self.database.new_dataset(**d)

    nb = self.bundle(ds.vid)
    nb.set_file_system(source_url=b.source_fs.getsyspath('/'))
    nb.state = Bundle.STATES.NEW
    nb.commit()

    # Copy all of the files; they must all be build-source files.
    for f in b.dataset.files:
        assert f.major_type == f.MAJOR_TYPE.BUILDSOURCE
        nb.dataset.files.append(nb.dataset.bsfile(f.minor_type, f.path).update(f))

    # Load the metadata into records, then back out again. The objects_to_record
    # process will set the new identity object numbers in the metadata file.
    nb.build_source_files.file(File.BSFILE.META).record_to_objects()
    nb.build_source_files.file(File.BSFILE.META).objects_to_record()

    ds.commit()

    return nb
def checkin_bundle(self, db_path, replace=True, cb=None):
    """Add a bundle, as a Sqlite file, to this library."""
    from ambry.orm.exc import NotFoundError

    db = Database('sqlite:///{}'.format(db_path))
    db.open()

    if len(db.datasets) == 0:
        raise NotFoundError("Did not get a dataset in the {} bundle".format(db_path))

    ds = db.dataset(db.datasets[0].vid)  # There should only be one

    assert ds is not None
    assert ds._database

    # Remove an installed bundle of the same number before checking in the new one.
    try:
        old = self.bundle(ds.vid)
        self.logger.info(
            "Removing old bundle before checking in new one of same number: '{}'"
            .format(ds.vid))
        self.remove(old)
    except NotFoundError:
        pass

    try:
        self.dataset(ds.vid)  # Skip loading bundles we already have
    except NotFoundError:
        self.database.copy_dataset(ds, cb=cb)

    b = self.bundle(ds.vid)  # It had better exist now.
    # b.state = Bundle.STATES.INSTALLED
    b.commit()

    # self.search.index_library_datasets(tick)
    self.search.index_bundle(b)

    return b
def send_to_remote(self, b, no_partitions=False):
    """Copy a bundle to a new Sqlite file, then store the file on the remote.

    :param b: The bundle
    :return:

    NOTE(review): the raise below makes everything after it unreachable; the
    body is kept as-is for reference.
    """
    raise DeprecationWarning("Don't use any more?")

    from ambry.bundle.process import call_interval

    remote_name = self.resolve_remote(b)
    remote = self.remote(remote_name)

    db_path = b.package()

    with b.progress.start('checkin', 0, message='Check in bundle') as ps:
        ps.add(message='Checking in bundle {} to {}'.format(b.identity.vname, remote))

        db_ck = b.identity.cache_key + '.db'

        ps.add(message='Upload bundle file', item_type='bytes', item_count=0)

        total = [0]

        @call_interval(5)
        def upload_cb(n):
            total[0] += n
            ps.update(message='Upload bundle file', item_count=total[0])

        with open(db_path) as f:
            remote.makedir(os.path.dirname(db_ck), recursive=True, allow_recreate=True)
            self.logger.info('Send bundle file {} '.format(db_path))
            e = remote.setcontents_async(db_ck, f, progress_callback=upload_cb)
            e.wait()

        ps.update(state='done')

        if not no_partitions:
            for p in b.partitions:
                ps.add(message='Upload partition', item_type='bytes',
                       item_count=0, p_vid=p.vid)

                with p.datafile.open(mode='rb') as fin:
                    total = [0]

                    @call_interval(5)
                    def progress(bytes):
                        total[0] += bytes
                        ps.update(message='Upload partition'.format(p.identity.vname),
                                  item_count=total[0])

                    remote.makedir(os.path.dirname(p.datafile.path),
                                   recursive=True, allow_recreate=True)
                    event = remote.setcontents_async(p.datafile.path, fin,
                                                     progress_callback=progress)
                    event.wait()

                ps.update(state='done')

        ps.add(message='Setting metadata')

        ident = json.dumps(b.identity.dict)

        remote.setcontents(os.path.join('_meta', 'vid', b.identity.vid), ident)
        remote.setcontents(os.path.join('_meta', 'id', b.identity.id_), ident)
        remote.setcontents(os.path.join('_meta', 'vname', text_type(b.identity.vname)), ident)
        remote.setcontents(os.path.join('_meta', 'name', text_type(b.identity.name)), ident)

        ps.update(state='done')

    b.dataset.commit()

    return remote_name, db_ck
def checkin_remote_bundle(self, ref, remote=None):
    """Checkin a remote bundle to this library.

    :param ref: Any bundle reference
    :param remote: If specified, use this remote. If not, search for the
        reference in cached directory listings
    :return: the vid of the loaded bundle
    """
    if not remote:
        remote, vname = self.find_remote_bundle(ref)
        # Prefer the canonical vname once we've resolved the ref.
        if vname:
            ref = vname

    if not remote:
        raise NotFoundError("Failed to find bundle ref '{}' in any remote".format(ref))

    self.logger.info("Load '{}' from '{}'".format(ref, remote))

    vid = self._checkin_remote_bundle(remote, ref)

    self.commit()

    return vid
def _checkin_remote_bundle(self, remote, ref):
    """Checkin a remote bundle from a remote.

    :param remote: a Remote object
    :param ref: Any bundle reference
    :return: The vid of the loaded bundle
    """
    from ambry.bundle.process import call_interval
    from ambry.orm.exc import NotFoundError
    from ambry.orm import Remote
    from ambry.util.flo import copy_file_or_flo

    assert isinstance(remote, Remote)

    @call_interval(5)
    def cb(r, total):
        self.logger.info("{}: Downloaded {} bytes".format(ref, total))

    b = None

    try:
        b = self.bundle(ref)
        self.logger.info("{}: Already installed".format(ref))
        vid = b.identity.vid
    except NotFoundError:
        self.logger.info("{}: Syncing".format(ref))

        db_dir = self.filesystem.downloads('bundles')
        db_f = os.path.join(db_dir, ref)

        # FIXME. Could get multiple versions of same file, ie vid and vname
        if not os.path.exists(os.path.join(db_dir, db_f)):
            self.logger.info("Downloading bundle '{}' to '{}".format(ref, db_f))
            with open(db_f, 'wb') as f_out:
                with remote.checkout(ref) as f:
                    copy_file_or_flo(f, f_out, cb=cb)
                    f_out.flush()

        self.checkin_bundle(db_f)

        b = self.bundle(ref)  # Should exist now.
        b.dataset.data['remote_name'] = remote.short_name
        b.dataset.upstream = remote.url
        b.dstate = b.STATES.CHECKEDOUT
        b.commit()
        vid = b.identity.vid
    finally:
        if b:
            b.progress.close()

    return vid
def remotes(self):
    """Yield the configured remotes, skipping records without a short name."""
    from ambry.orm import Remote

    for record in self.database.session.query(Remote).all():
        if not record.short_name:
            continue
        yield self.remote(record.short_name)
def _remote(self, name):
    """Return a remote for which 'name' matches the short_name or url."""
    from ambry.orm import Remote
    from ambry.orm.exc import NotFoundError
    from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound

    if not name.strip():
        raise NotFoundError("Empty remote name")

    session = self.database.session

    try:
        # First try the short name; a miss here is not fatal.
        try:
            rec = session.query(Remote).filter(Remote.short_name == name).one()
        except NoResultFound:
            rec = None

        if not rec:
            # Fall back to matching on the url; a miss here raises to the
            # outer handler.
            rec = session.query(Remote).filter(Remote.url == name).one()

    except NoResultFound as e:
        raise NotFoundError(str(e) + '; ' + name)
    except MultipleResultsFound as e:
        self.logger.error("Got multiple results for search for remote '{}': {}".format(name, e))
        return None

    return rec
def _find_remote_bundle(self, ref, remote_service_type='s3'):
    """Locate a bundle, by any reference, among the configured remotes.

    The routine will only look in the cache directory lists stored in the
    remotes, which must be updated to be current.

    :param ref:
    :return: (remote, vname) or (None, None) if the ref is not found
    """
    for r in self.remotes:
        if remote_service_type and r.service != remote_service_type:
            continue

        if 'list' not in r.data:
            continue

        # Match the ref against any of the identity values in each listing entry.
        for entry in r.data['list'].values():
            if ref in entry.values():
                return (r, entry['vname'])

    return None, None
def find_remote_bundle(self, ref, try_harder=None):
    """Locate a bundle, by any reference, among the configured remotes.

    The routine will only look in the cache directory lists stored in the
    remotes, which must be updated to be current.

    :param ref: A bundle or partition reference, vid, or name
    :param try_harder: If the reference isn't found, try parsing for an object
        id, or subsets of the name
    :return: (remote, vname) or (None, None) if the ref is not found
    """
    from ambry.identity import ObjectNumber

    remote, vid = self._find_remote_bundle(ref)

    if remote:
        return (remote, vid)

    if try_harder:
        # BUG FIX: the original parsed `vid`, which is always None on this path
        # (the _find_remote_bundle call above just returned (None, None)).
        # Parse the caller's ref instead.
        on = ObjectNumber.parse(ref)

        if on:
            raise NotImplementedError()
            don = on.as_dataset
            return self._find_remote_bundle(ref)

        # Try subsets of a name, assuming it is a name
        parts = ref.split('-')
        for i in range(len(parts) - 1, 2, -1):
            remote, vid = self._find_remote_bundle('-'.join(parts[:i]))
            if remote:
                return (remote, vid)

    return (None, None)
def account(self, url):
    """Return the account reference for the given url.

    :param url: the url to locate an account for
    :return: an Account record, or a Remote whose url prefixes the given url
    :raises NotFoundError: if nothing matches
    """
    from sqlalchemy.orm.exc import NoResultFound
    from ambry.orm.exc import NotFoundError
    from ambry.util import parse_url_to_dict
    from ambry.orm import Account

    pd = parse_url_to_dict(url)

    # Old method of storing account information: keyed by the url's netloc.
    try:
        act = self.database.session.query(Account) \
            .filter(Account.account_id == pd['netloc']).one()
        act.secret_password = self._account_password
        return act
    except NoResultFound:
        pass

    # Try the remotes: any remote whose url is a prefix of the given url.
    for r in self.remotes:
        if url.startswith(r.url):
            return r

    raise NotFoundError("Did not find account for url: '{}' ".format(url))
def accounts(self):
    """Return all account references as a dict keyed by account_id."""
    d = {}

    # NOTE(review): this guard is deliberately disabled with `if False`;
    # preserved as-is.
    if False and not self._account_password:
        from ambry.dbexceptions import ConfigurationError
        raise ConfigurationError(
            "Can't access accounts without setting an account password"
            " either in the accounts.password config, or in the AMBRY_ACCOUNT_PASSWORD"
            " env var.")

    for act in self.database.session.query(Account).all():
        if self._account_password:
            act.secret_password = self._account_password
        entry = act.dict
        d[entry['account_id']] = entry

    return d
def number(self, assignment_class=None, namespace='d'):
    """Return a new number.

    :param assignment_class: Determines the length of the number. Possible
        values are 'authority' (3 characters), 'registered' (5),
        'unregistered' (7) and 'self' (9). Self assigned numbers are random
        and acquired locally, while the other assignment classes use the
        number server defined in the configuration. If None, then look in the
        number server configuration for one of the class keys, starting with
        the longest class and working to the shortest.
    :param namespace: The namespace character, the first character in the
        number. Can be one of 'd', 'x' or 'b'
    :return:
    """
    if assignment_class == 'self':
        # When 'self' is explicit, don't look for number server config
        return str(DatasetNumber())
    elif assignment_class is None:
        try:
            nsconfig = self.services['numbers']
        except ConfigurationError:
            # A missing configuration is equivalent to 'self'
            self.logger.error('No number server configuration; returning self assigned number')
            return str(DatasetNumber())

        for assignment_class in ('self', 'unregistered', 'registered', 'authority'):
            if assignment_class + '-key' in nsconfig:
                break

        # For the case where the number configuration references a self-assigned key
        if assignment_class == 'self':
            return str(DatasetNumber())
    else:
        try:
            nsconfig = self.services['numbers']
        except ConfigurationError:
            raise ConfigurationError('No number server configuration')

        if assignment_class + '-key' not in nsconfig:
            raise ConfigurationError(
                'Assignment class {} not number server config'.format(assignment_class))

    try:
        key = nsconfig[assignment_class + '-key']
        config = {
            'key': key,
            'host': nsconfig['host'],
            'port': nsconfig.get('port', 80)
        }

        ns = NumberServer(**config)
        n = str(next(ns))
        self.logger.info('Got number from number server: {}'.format(n))
    except HTTPError as e:
        # BUG FIX: the original format string had one placeholder for two
        # arguments and used the Python-2-only `e.message` attribute.
        self.logger.error('Failed to get number from number server for key {}: {}'.format(key, e))
        self.logger.error('Using self-generated number. There is no problem with this, '
                          'but they are longer than centrally generated numbers.')
        n = str(DatasetNumber())

    return n
def edit_history(self):
    """Return config records about the most recent bundle accesses and operations."""
    query = (self._db.session
             .query(Config)
             .filter(Config.type == 'buildstate')
             .filter(Config.group == 'access')
             .filter(Config.key == 'last')
             .order_by(Config.modified.desc()))
    return query.all()
def import_bundles(self, dir, detach=False, force=False):
    """Import bundles from a directory.

    :param dir: root directory to search for bundle.yaml files
    :param detach: if True, detach each bundle's source file system after loading
    :param force: if True, sync each bundle back out after loading
    :return: list of imported bundles
    """
    import yaml

    fs = fsopendir(dir)

    bundles = []

    for f in fs.walkfiles(wildcard='bundle.yaml'):

        self.logger.info('Visiting {}'.format(f))
        # SECURITY NOTE(review): yaml.load can run arbitrary constructors;
        # consider yaml.safe_load if these files may be untrusted.
        config = yaml.load(fs.getcontents(f))

        if not config:
            self.logger.error("Failed to get a valid bundle configuration from '{}'".format(f))
            # BUG FIX: the original fell through and crashed on
            # config['identity'] for an invalid file; skip it instead.
            continue

        bid = config['identity']['id']

        try:
            b = self.bundle(bid)
        except NotFoundError:
            b = None

        if not b:
            b = self.new_from_bundle_config(config)
            self.logger.info('{} Loading New'.format(b.identity.fqname))
        else:
            self.logger.info('{} Loading Existing'.format(b.identity.fqname))

        source_url = os.path.dirname(fs.getsyspath(f))
        b.set_file_system(source_url=source_url)
        self.logger.info('{} Loading from {}'.format(b.identity.fqname, source_url))
        b.sync_in()

        if detach:
            self.logger.info('{} Detaching'.format(b.identity.fqname))
            b.set_file_system(source_url=None)

        if force:
            self.logger.info('{} Sync out'.format(b.identity.fqname))
            # FIXME. It won't actually sync out until re-starting the bundle.
            # The source_file_system is probably cached
            b = self.bundle(bid)
            b.sync_out()

        bundles.append(b)
        b.close()

    return bundles
def process_pool(self, limited_run=False):
    """Return a pool for multiprocess operations, sized either to the number
    of CPUs, or a configured value."""
    from multiprocessing import cpu_count
    from ambry.bundle.concurrent import Pool, init_library

    # Prefer the configured process count; fall back to the machine's CPUs.
    cpus = self.processes if self.processes else cpu_count()

    self.logger.info('Starting MP pool with {} processors'.format(cpus))
    return Pool(self, processes=cpus, initializer=init_library,
                maxtasksperchild=1,
                initargs=[self.database.dsn, self._account_password, limited_run])
def file_loc():
    """Return the caller's file and line number as a '<file>:<line>' string.

    Raises and immediately catches an exception so the traceback machinery can
    report the frame of the caller.
    """
    import sys
    import inspect

    try:
        raise Exception
    except Exception:
        # BUG FIX: narrowed from a bare `except:`; only Exception is raised above,
        # and a bare except would also swallow KeyboardInterrupt/SystemExit.
        file_ = '.../' + '/'.join((inspect.currentframe().f_code.co_filename.split('/'))[-3:])
        line_ = sys.exc_info()[2].tb_frame.f_back.f_lineno
        return "{}:{}".format(file_, line_)
def make_row_processors(bundle, source_headers, dest_table, env):
    """Make multiple row processors for all of the columns in a table.

    Emits generated Python source (one function per column per stage, plus one
    row function per stage) and returns it as a single string.

    :param source_headers: headers of the incoming rows
    :return: generated source code as a string
    """
    dest_headers = [c.name for c in dest_table.columns]

    row_processors = []
    out = [file_header]
    transforms = list(dest_table.transforms)

    column_names = []
    column_types = []

    for i, segments in enumerate(transforms):

        seg_funcs = []

        for col_num, (segment, column) in enumerate(zip(segments, dest_table.columns), 1):

            if not segment:
                # No transform for this column: pass the value straight through.
                seg_funcs.append('row[{}]'.format(col_num - 1))
                continue

            assert column
            assert column.name == segment['column'].name

            col_name = column.name

            preamble, try_lines, exception = make_stack(env, i, segment)

            assert col_num == column.sequence_id, (dest_table.name, col_num, column.sequence_id)

            column_names.append(col_name)
            column_types.append(column.datatype)

            f_name = "{table_name}_{column_name}_{stage}".format(
                table_name=dest_table.name,
                column_name=col_name,
                stage=i)

            # Default exception handler for the generated try block.
            exception = (exception if exception else
                         ('raise ValueError("Failed to cast column \'{}\', in '
                          'function {}, value \'{}\': {}".format(header_d,"')
                         + f_name + '", v.encode(\'ascii\', \'replace\'), exc) ) ')

            try:
                i_s = source_headers.index(column.name)
                header_s = column.name
                v = 'row[{}]'.format(i_s)
            except ValueError as e:
                i_s = 'None'
                header_s = None
                v = 'None' if col_num > 1 else 'row_n'  # Give the id column the row number

            i_d = column.sequence_id - 1
            header_d = column.name

            template_args = dict(
                f_name=f_name,
                table_name=dest_table.name,
                column_name=col_name,
                stage=i,
                i_s=i_s,
                i_d=i_d,
                header_s=header_s,
                header_d=header_d,
                v=v,
                exception=indent + exception,
                stack='\n'.join(indent + l for l in try_lines),
                col_args='# col_args not implemented yet')

            seg_funcs.append(
                f_name
                + ('({v}, {i_s}, {i_d}, {header_s}, \'{header_d}\', '
                   'row, row_n, errors, scratch, accumulator, pipe, bundle, source)')
                .format(v=v, i_s=i_s, i_d=i_d,
                        header_s="'" + header_s + "'" if header_s else 'None',
                        header_d=header_d))

            out.append('\n'.join(preamble))
            out.append(column_template.format(**template_args))

        # After the first stage, rows carry the destination headers.
        source_headers = dest_headers

        stack = '\n'.join("{}{}, # {}".format(indent, l, cn)
                          for l, cn, dt in zip(seg_funcs, column_names, column_types))

        out.append(row_template.format(table=dest_table.name, stage=i, stack=stack))

        row_processors.append('row_{table}_{stage}'.format(stage=i, table=dest_table.name))

    # Add the final datatype cast, which is done separately to avoid an
    # unnecessary function call.
    stack = '\n'.join("{}cast_{}(row[{}], '{}', errors),".format(indent, c.datatype, i, c.name)
                      for i, c in enumerate(dest_table.columns))

    out.append(row_template.format(table=dest_table.name, stage=len(transforms), stack=stack))

    row_processors.append('row_{table}_{stage}'.format(stage=len(transforms),
                                                       table=dest_table.name))

    out.append('row_processors = [{}]'.format(','.join(row_processors)))

    return '\n'.join(out)
def calling_code(f, f_name=None, raise_for_missing=True):
    """Return the code string for calling a function.

    :param f: a function or class whose signature to inspect
    :param f_name: optional name to use instead of f.__name__
    :param raise_for_missing: raise ConfigurationError on unknown argument names
    """
    import inspect
    from ambry.dbexceptions import ConfigurationError

    if inspect.isclass(f):
        try:
            args = inspect.getargspec(f.__init__).args
        except TypeError as e:
            raise TypeError("Failed to inspect {}: {}".format(f, e))
    else:
        args = inspect.getargspec(f).args

    # Drop a leading 'self' for bound/unbound methods.
    if len(args) > 1 and args[0] == 'self':
        args = args[1:]

    for a in args:
        if a not in all_args + ('exception',):  # exception arg is only for exception handlers
            if raise_for_missing:
                raise ConfigurationError(
                    'Caster code {} has unknown argument '
                    'name: \'{}\'. Must be one of: {} '.format(f, a, ','.join(all_args)))

    arg_map = {e: e for e in var_args}

    args = [arg_map.get(a, a) for a in args]

    return "{}({})".format(f_name if f_name else f.__name__, ','.join(args))
def make_stack(env, stage, segment):
    """For each transform segment, create the code in the try/except block with
    the assignments for pipes in the segment."""
    import string
    import random
    from ambry.valuetype import ValueType

    column = segment['column']

    def make_line(column, t):
        # Build one "v = <expr> # <loc>" line plus any preamble definitions.
        preamble = []

        line_t = "v = {} # {}"

        if isinstance(t, type) and issubclass(t, ValueType):
            # A valuetype class, from the datatype column.
            try:
                cc, fl = calling_code(t, t.__name__), file_loc()
            except TypeError:
                cc, fl = "{}(v)".format(t.__name__), file_loc()

            preamble.append("{} = resolve_value_type('{}') # {}".format(t.__name__, t.vt_code, fl))

        elif isinstance(t, type):
            # A python type, from the datatype columns.
            cc, fl = "parse_{}(v, header_d)".format(t.__name__), file_loc()

        elif callable(env.get(t)):
            # Transform function
            cc, fl = calling_code(env.get(t), t), file_loc()

        else:
            # A transform generator, or python code.
            rnd = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))
            name = 'tg_{}_{}_{}'.format(column.name, stage, rnd)

            try:
                a, b, fl = rewrite_tg(env, name, t)
            except CodeGenError as e:
                raise CodeGenError("Failed to re-write pipe code '{}' in column '{}.{}': {} "
                                   .format(t, column.table.name, column.name, e))

            cc = str(a)

            if b:
                preamble.append("{} = {} # {}".format(name, b, fl))

        line = line_t.format(cc, fl)

        return line, preamble

    preamble = []
    try_lines = []

    for t in [segment['init'], segment['datatype']] + segment['transforms']:

        if not t:
            continue

        line, col_preamble = make_line(column, t)

        preamble += col_preamble
        try_lines.append(line)

    exception = None
    if segment['exception']:
        # NOTE(review): the preamble produced for the exception line is
        # discarded here — presumably intentional; confirm.
        exception, col_preamble = make_line(column, segment['exception'])

    if len(try_lines) == 0:
        try_lines.append('pass # Empty pipe segment')

    assert len(try_lines) > 0, column.name

    return preamble, try_lines, exception
def rewrite_tg(env, tg_name, code):
    """Re-write a transform generating function pipe specification by
    extracting the transform generating part and replacing it with the
    generated transform.

    so::

        tgen(a,b,c).foo.bar

    becomes::

        tg = tgen(a,b,c)
        tg.foo.bar
    """
    visitor = ReplaceTG(env, tg_name)
    assert visitor.tg_name

    tree = visitor.visit(ast.parse(code))

    if visitor.loc:
        loc = ' #' + visitor.loc
    else:
        # The AST visitor didn't match a call node
        loc = file_loc()

    tg = meta.dump_python_source(visitor.trans_gen).strip() if visitor.trans_gen else None

    return meta.dump_python_source(tree).strip(), tg, loc
def push(self, el):
    """Put a new element in the queue.

    A monotonically increasing counter is paired with the element so heap
    ordering is stable for equal elements.
    """
    tie_breaker = next(self.counter)
    heapq.heappush(self._queue, (el, tie_breaker))
def geo_description(self):
    """Return a description of the geographic extents, using the largest scale
    space and grain coverages."""
    sc = self._p.space_coverage
    gc = self._p.grain_coverage

    if sc and gc:
        if parse_to_gvid(gc[0]).level == 'state' and parse_to_gvid(sc[0]).level == 'state':
            return parse_to_gvid(sc[0]).geo_name
        else:
            return ("{} in {}".format(
                parse_to_gvid(gc[0]).level_plural.title(),
                parse_to_gvid(sc[0]).geo_name))
    elif sc:
        return parse_to_gvid(sc[0]).geo_name.title()
    elif gc:
        # BUG FIX: this branch was `elif sc:` — always false after the branch
        # above, even though the body reads gc[0]; test grain coverage so the
        # branch is reachable.
        return parse_to_gvid(gc[0]).level_plural.title()
    else:
        return ''
def time_description(self):
    """String description of the year or year range."""
    years = [t for t in self._p.time_coverage if t]

    if not years:
        return ''

    lo, hi = min(years), max(years)

    if not lo and not hi:
        # Unreachable after the truthy filter above; kept for parity.
        return ''

    if lo == hi:
        return lo

    return "{} to {}".format(lo, hi)
def sub_description(self):
    """Time and space description, with the row count."""
    gd = self.geo_description
    td = self.time_description
    count = self._p.count

    if gd and td:
        return '{}, {}. {} Rows.'.format(gd, td, count)
    if gd:
        return '{}. {} Rows.'.format(gd, count)
    if td:
        return '{}. {} Rows.'.format(td, count)
    return '{} Rows.'.format(count)
def identity(self):
    """Return this partition's information as a PartitionIdentity."""
    if self.dataset is None:
        # The relationship will be null until the object is committed
        session = object_session(self)
        ds = session.query(Dataset).filter(Dataset.id_ == self.d_id).one()
    else:
        ds = self.dataset

    d = {
        'id': self.id,
        'vid': self.vid,
        'name': self.name,
        'vname': self.vname,
        'ref': self.ref,
        'space': self.space,
        'time': self.time,
        'table': self.table_name,
        'grain': self.grain,
        'variant': self.variant,
        'segment': self.segment,
        'format': self.format if self.format else 'db',
    }

    # Partition values override dataset values on key collisions.
    return PartitionIdentity.from_dict(dict(list(ds.dict.items()) + list(d.items())))
def set_coverage(self, stats):
    """Extract time, space and grain coverage from the stats and store them in
    the partition."""
    from ambry.util.datestimes import expand_to_years

    scov = set()
    tcov = set()
    grains = set()

    def summarize_maybe(gvid):
        try:
            return parse_to_gvid(gvid).summarize()
        except:
            return None

    def simplifiy_maybe(values, column):
        parsed = []

        for gvid in values:
            if gvid is None or gvid == 'None':
                continue
            try:
                parsed.append(parse_to_gvid(gvid))
            except ValueError as e:
                if self._bundle:
                    self._bundle.warn("While analyzing geo coverage in final partition stage, " +
                                      "Failed to parse gvid '{}' in {}.{}: {}"
                                      .format(str(gvid), column.table.name, column.name, e))
        try:
            return isimplify(parsed)
        except:
            return None

    def int_maybe(year):
        try:
            return int(year)
        except:
            return None

    for c in self.table.columns:

        if c.name not in stats:
            continue

        try:
            if stats[c.name].is_gvid or stats[c.name].is_geoid:
                scov |= set(x for x in simplifiy_maybe(stats[c.name].uniques, c))
                grains |= set(summarize_maybe(gvid) for gvid in stats[c.name].uniques)
            elif stats[c.name].is_year:
                tcov |= set(int_maybe(x) for x in stats[c.name].uniques)
            elif stats[c.name].is_date:
                # The fuzzy=True argument allows ignoring the '-' char in dates
                # produced by .isoformat()
                try:
                    tcov |= set(parser.parse(x, fuzzy=True).year
                                if isinstance(x, string_types) else x.year
                                for x in stats[c.name].uniques)
                except ValueError:
                    pass
        except Exception as e:
            self._bundle.error("Failed to set coverage for column '{}', partition '{}': {}"
                               .format(c.name, self.identity.vname, e))
            raise

    # Space Coverage

    if 'source_data' in self.data:
        for source_name, source in list(self.data['source_data'].items()):
            scov.add(self.parse_gvid_or_place(source['space']))

    if self.identity.space:  # And from the partition name
        try:
            scov.add(self.parse_gvid_or_place(self.identity.space))
        except ValueError:
            # Couldn't parse the space as a GVid
            pass

    # For geo_coverage, only include the higher level summary levels: counties,
    # states, places and urban areas.
    self.space_coverage = sorted([str(x) for x in scov
                                  if bool(x) and x.sl in (10, 40, 50, 60, 160, 400)])

    #
    # Time Coverage

    # If there was a time value in the source that this partition was created
    # from, then add it to the years.
    if 'source_data' in self.data:
        for source_name, source in list(self.data['source_data'].items()):
            if 'time' in source:
                for year in expand_to_years(source['time']):
                    if year:
                        tcov.add(year)

    # From the partition name
    if self.identity.name.time:
        for year in expand_to_years(self.identity.name.time):
            if year:
                tcov.add(year)

    self.time_coverage = [t for t in tcov if t]

    #
    # Grains

    if 'source_data' in self.data:
        for source_name, source in list(self.data['source_data'].items()):
            if 'grain' in source:
                grains.add(source['grain'])

    self.grain_coverage = sorted(str(g) for g in grains if g)
def detail_dict(self):
    """A more detailed dict that includes the descriptions, sub descriptions,
    table and columns."""
    d = self.dict

    def aug_col(c):
        # Augment a column's dict with its stats.
        col_dict = c.dict
        col_dict['stats'] = [s.dict for s in c.stats]
        return col_dict

    d['table'] = self.table.dict
    d['table']['columns'] = [aug_col(c) for c in self.table.columns]

    return d
def local_datafile(self):
    """Return the datafile for this partition, from the build directory, the
    remote, or the warehouse."""
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError
    from ambry.orm.exc import NotFoundError

    try:
        return MPRowsFile(self._bundle.build_fs, self.cache_key)
    except ResourceNotFoundError:
        raise NotFoundError(
            'Could not locate data file for partition {} (local)'.format(self.identity.fqname))
def remote(self):
    """Return the remote for this partition.

    :raises NotFoundError: when the dataset records no remote name.
    """
    from ambry.exc import NotFoundError

    ds = self.dataset

    if 'remote_name' not in ds.data:
        raise NotFoundError('Could not determine remote for partition: {}'.format(self.identity.fqname))

    return self._bundle.library.remote(ds.data['remote_name'])
def is_local(self):
    """Return True if the partition file is local."""
    from ambry.orm.exc import NotFoundError

    try:
        if self.local_datafile.exists:
            return True
    except NotFoundError:
        # No local datafile at all.
        pass

    return False
def localize(self, ps=None):
    """Copy a non-local partition file to the local build directory."""
    from filelock import FileLock
    from ambry.util import ensure_dir_exists
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError

    if self.is_local:
        return

    local = self._bundle.build_fs

    b = self._bundle.library.bundle(self.identity.as_dataset().vid)
    remote = self._bundle.library.remote(b)

    lock_path = local.getsyspath(self.cache_key + '.lock')
    ensure_dir_exists(lock_path)

    lock = FileLock(lock_path)

    if ps:
        ps.add_update(message='Localizing {}'.format(self.identity.name),
                      partition=self, item_type='bytes', state='downloading')

    if ps:
        def progress(bts):
            if ps.rec.item_total is None:
                ps.rec.item_count = 0
            if not ps.rec.data:
                ps.rec.data = {}  # Should not need to do this.
                return self
            item_count = ps.rec.item_count + bts
            ps.rec.data['updates'] = ps.rec.data.get('updates', 0) + 1
            # Throttle progress updates to one in 32 callbacks.
            if ps.rec.data['updates'] % 32 == 1:
                ps.update(message='Localizing {}'.format(self.identity.name),
                          item_count=item_count)
    else:
        from ambry.bundle.process import call_interval

        @call_interval(5)
        def progress(bts):
            self._bundle.log("Localizing {}. {} bytes downloaded".format(self.vname, bts))

    def exception_cb(e):
        raise e

    with lock:
        # FIXME! This won't work with remote (http) API, only FS (s3:, file:)

        # Another process may have localized it while we waited for the lock.
        if self.is_local:
            return self

        try:
            with remote.fs.open(self.cache_key + MPRowsFile.EXTENSION, 'rb') as f:
                event = local.setcontents_async(self.cache_key + MPRowsFile.EXTENSION,
                                                f,
                                                progress_callback=progress,
                                                error_callback=exception_cb)
                event.wait()
            if ps:
                ps.update_done()
        except ResourceNotFoundError as e:
            from ambry.orm.exc import NotFoundError
            raise NotFoundError("Failed to get MPRfile '{}' from {}: {} "
                                .format(self.cache_key, remote.fs, e))

    return self
def reader(self):
    """The reader for the datafile.

    :raises NotFoundError: when the underlying partition file does not exist.
    """
    # BUG FIX: the original placed this docstring AFTER the imports, so it was
    # a no-op string expression, not the function's docstring; moved it up.
    from ambry.orm.exc import NotFoundError
    from fs.errors import ResourceNotFoundError

    try:
        return self.datafile.reader
    except ResourceNotFoundError:
        raise NotFoundError("Failed to find partition file, '{}' ".format(self.datafile.path))
def select(self, predicate=None, headers=None):
    """Select rows from the reader, optionally filtering and projecting them.

    :param predicate: If defined, a callable invoked for each row; rows for
        which it returns true are included in the output.
    :param headers: If defined, a list or tuple of header names to return
        from each row.
    :return: iterable of results

    WARNING: This routine works from the reader iterator, which returns
    RowProxy objects. RowProxy objects are reused, so a list constructed
    directly from this output will contain multiple references to a single
    RowProxy whose inner row is the last result row. If you will be
    constructing a list, use a getter that extracts the inner row, or that
    converts the RowProxy to a dict:

        list(s.datafile.select(lambda r: r.stusab == 'CA', lambda r: r.dict))
    """
    # FIXME; in Python 3, use yield from
    with self.reader as open_reader:
        for result_row in open_reader.select(predicate, headers):
            yield result_row
def analysis(self):
    """Return an AnalysisPartition proxy, which wraps this partition to
    provide access to dataframes, shapely shapes and other analysis
    services."""
    # Unwrap an existing proxy so we never stack proxies on proxies.
    target = self._obj if isinstance(self, PartitionProxy) else self
    return AnalysisPartition(target)
def measuredim(self):
    """Return a MeasureDimension proxy, which wraps the partition to provide
    access to columns in terms of measures and dimensions."""
    # Unwrap an existing proxy so we never stack proxies on proxies.
    target = self._obj if isinstance(self, PartitionProxy) else self
    return MeasureDimensionPartition(target)
def update_id(self, sequence_id=None):
    """Alter the sequence id, and all of the names and ids derived from it.

    This often needs to be done after an IntegrityError in a
    multiprocessing run.

    :param sequence_id: Optional new sequence id; when falsy the current
        sequence id is kept and only the derived values are recomputed.
    """
    if sequence_id:
        self.sequence_id = sequence_id

    # Force regeneration of the object-number ids from the sequence id.
    self._set_ids(force=True)

    # Names can only be derived when the partition is attached to a dataset.
    if self.dataset:
        self._update_names()
def _update_names(self):
    """Recompute the names derived from this partition's components
    (table, time, space, grain, variant, segment)."""
    assert self.dataset

    components = dict(
        table=self.table_name,
        time=self.time,
        space=self.space,
        grain=self.grain,
        variant=self.variant,
        segment=self.segment,
    )

    # Promote the partial name with the dataset's identity to get full names.
    full_name = PartialPartitionName(**components).promote(self.dataset.identity.name)

    self.name = str(full_name.name)
    self.vname = str(full_name.vname)
    self.cache_key = full_name.cache_key
    self.fqname = str(self.identity.fqname)
def before_insert(mapper, conn, target):
    """event.listen method for Sqlalchemy to set the sequence for this object
    and create an ObjectNumber value for the id_."""
    target._set_ids()

    # If all derived names are already populated and there is no dataset to
    # derive them from, there is nothing more to update.
    names_present = (target.name and target.vname
                     and target.cache_key and target.fqname)
    if names_present and not target.dataset:
        return

    Partition.before_update(mapper, conn, target)
def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
    """Return the partition as a Pandas dataframe

    :param predicate: If defined, a callable that is called for each row, and
        if it returns true, the row is included in the output.
    :param filtered_columns: If defined, a dict of column names mapped to
        values; only rows where all of the named columns have the given
        values are returned. Setting this argument overwrites any value set
        for ``predicate``.
    :param columns: A list or tuple of column names to return.
    :param df_class: Dataframe class to construct; defaults to AmbryDataFrame.
    :return: Pandas dataframe
    """
    from operator import itemgetter

    # Import lazily so a caller-supplied df_class does not require ambry.pands.
    if df_class is None:
        from ambry.pands import AmbryDataFrame
        df_class = AmbryDataFrame

    if columns:
        ig = itemgetter(*columns)
    else:
        ig = None
        columns = self.table.header

    if filtered_columns:
        # BUG FIX / SECURITY: previously this built Python source text from the
        # filter values and eval()'d it, which broke for values whose repr is
        # not valid source and evaluated arbitrary strings. Build the
        # equivalent AND-of-equalities predicate directly instead.
        filters = dict(filtered_columns)

        def predicate(row):
            return all(getattr(row, col) == val for col, val in filters.items())

    if predicate:
        def yielder():
            for row in self.reader:
                if predicate(row):
                    if ig:
                        yield ig(row)
                    else:
                        yield row.dict

        return df_class(yielder(), columns=columns, partition=self.measuredim)
    else:
        def yielder():
            for row in self.reader:
                yield row.values()

        # Put column names in header order
        columns = [c for c in self.table.header if c in columns]

        return df_class(yielder(), columns=columns, partition=self.measuredim)
def shapes(self, simplify=None, predicate=None):
    """Return geodata as a list of Shapely shapes.

    :param simplify: Integer or None. Simplify the geometry to a tolerance,
        in the units of the geometry.
    :param predicate: A single-argument function to select which records to
        include in the output.
    :return: A list of Shapely objects
    """
    from shapely.wkt import loads

    if not predicate:
        predicate = lambda row: True

    selected = (row for row in self if predicate(row))

    if simplify:
        return [loads(row.geometry).simplify(simplify) for row in selected]
    return [loads(row.geometry) for row in selected]
def patches(self, basemap, simplify=None, predicate=None, args_f=None, **kwargs):
    """ Return geodata as a list of Matplotlib patches

    :param basemap: A mpl_toolkits.basemap.Basemap
    :param simplify: Integer or None. Simplify the geometry to a tolerance,
        in the units of the geometry.
    :param predicate: A single-argument function to select which records to
        include in the output.
    :param args_f: A function that takes a row and returns a dict of
        additional args for the Patch constructor
    :param kwargs: Additional args to be passed to the descartes Path
        constructor
    :return: A list of patch objects
    """
    from descartes import PolygonPatch
    from shapely.wkt import loads
    from shapely.ops import transform

    if not predicate:
        predicate = lambda row: True

    # Project geometry coordinates through the basemap; z is accepted but
    # ignored because Basemap projections are 2D.
    def map_xform(x, y, z=None):
        return basemap(x, y)

    # Build one patch from a (sub)shape, merging per-row args over kwargs.
    def make_patch(shape, row):
        args = dict(kwargs.items())

        if args_f:
            args.update(args_f(row))

        return PolygonPatch(transform(map_xform, shape), **args)

    # Parse the row's WKT geometry and yield one patch per polygon,
    # flattening MultiPolygons into their member polygons.
    def yield_patches(row):
        if simplify:
            shape = loads(row.geometry).simplify(simplify)
        else:
            shape = loads(row.geometry)

        if shape.geom_type == 'MultiPolygon':
            for subshape in shape.geoms:
                yield make_patch(subshape, row)
        else:
            yield make_patch(shape, row)

    return [patch for row in self if predicate(row) for patch in yield_patches(row)]
def measures(self):
    """Return all columns whose role is MEASURE."""
    from ambry.valuetype.core import ROLE
    return [col for col in self.columns if col.role == ROLE.MEASURE]
def measure(self, vid):
    """Return a measure, given its vid or another reference.

    Accepts an existing PartitionColumn (returned unchanged), an orm Column
    (wrapped), or any other reference resolvable by the table.
    """
    from ambry.orm import Column

    if isinstance(vid, PartitionColumn):
        return vid
    if isinstance(vid, Column):
        return PartitionColumn(vid)
    return PartitionColumn(self.table.column(vid), self)
def dataframe(self, measure, p_dim, s_dim=None, filters=None, df_class=None):
    """ Return a dataframe with a subset of the columns of the partition,
    including a measure and one or two dimensions. For dimensions that have
    labels, the labels are included.

    The returned dataframe will have extra properties to describe the
    conversion:

    * plot_axes: List of dimension names for the first and second axis
    * labels: The names of the label columns for the axes
    * filtered: The `filters` dict
    * floating: The names of primary dimensions that are not axes nor filtered

    There is also an iterator, `rows`, which returns the header and then all
    of the rows.

    :param measure: The column names of one or more measures
    :param p_dim: The primary dimension. This will be the index of the
        dataframe.
    :param s_dim: a secondary dimension. The returned frame will be unstacked
        on this dimension
    :param filters: A dict of column names, mapped to a column value,
        indicating rows to select; a row that passes the filter must have the
        values for all given rows; the entries are ANDed. Defaults to no
        filtering.
    :param df_class:
    :return: a Dataframe, with extra properties
    """
    import numpy as np

    # BUG FIX: was a mutable default argument (filters={}); normalize here so
    # df.filtered is always a dict.
    filters = filters or {}

    measure = self.measure(measure)
    p_dim = self.dimension(p_dim)
    assert p_dim

    if s_dim:
        s_dim = self.dimension(s_dim)

    columns = set([measure.name, p_dim.name])

    if p_dim.label:
        # For geographic datasets, also need the gvid
        if p_dim.geoid:
            columns.add(p_dim.geoid.name)
        columns.add(p_dim.label.name)

    if s_dim:
        columns.add(s_dim.name)
        if s_dim.label:
            columns.add(s_dim.label.name)

    # Create the predicate to filter out the filtered dimensions.
    # BUG FIX / SECURITY: previously built Python source and eval()'d it, and
    # used `v.keys()[0]`, which raises TypeError on Python 3. Build the
    # equivalent predicate directly instead.
    if filters:
        normalized = {}
        for k, v in filters.items():
            if isinstance(v, dict):
                # The filter is actually the whole set of possible options,
                # so just select the first one.
                v = next(iter(v))
            normalized[k] = v

        def predicate(row):
            return all(getattr(row, k) == v for k, v in normalized.items())
    else:
        def predicate(row):
            return True

    df = self.analysis.dataframe(predicate, columns=columns, df_class=df_class)

    if df is None or df.empty or len(df) == 0:
        return None

    # So we can track how many records were aggregated into each output row
    df['_count'] = 1

    def aggregate_string(x):
        return ', '.join(set(str(e) for e in x))

    agg = {
        '_count': 'count',
    }

    for col_name in columns:
        c = self.column(col_name)

        # The primary and secondary dimensions are put into the index by groupby
        if c.name == p_dim.name or (s_dim and c.name == s_dim.name):
            continue

        # FIXME! This will only work if the child is only level from the parent.
        # Should have an acessor for the top level.
        if c.parent and (c.parent == p_dim.name or (s_dim and c.parent == s_dim.name)):
            continue

        if c.is_measure:
            agg[c.name] = np.mean

        if c.is_dimension:
            agg[c.name] = aggregate_string

    plot_axes = [p_dim.name]
    if s_dim:
        plot_axes.append(s_dim.name)

    df = df.groupby(list(columns - set([measure.name]))).agg(agg).reset_index()

    df._metadata = ['plot_axes', 'filtered', 'floating', 'labels',
                    'dimension_set', 'measure']
    df.plot_axes = [c for c in plot_axes]
    df.filtered = filters
    # Dimensions that are not specified as axes nor filtered
    df.floating = list(set(c.name for c in self.primary_dimensions)
                       - set(df.filtered.keys())
                       - set(df.plot_axes))
    df.labels = [self.column(c).label.name if self.column(c).label else c
                 for c in df.plot_axes]
    df.dimension_set = self.dimension_set(p_dim, s_dim=s_dim)
    df.measure = measure.name

    def rows(self):
        yield ['id'] + list(df.columns)
        for t in df.itertuples():
            yield list(t)

    # Really should not do this, but I don't want to re-build the dataframe
    # with another class
    df.__class__.rows = property(rows)

    return df
def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=set()):
    """ Return a dict that describes the combination of one or two
    dimensions, for a plot.

    :param p_dim: Primary dimension for the plot.
    :param s_dim: Optional secondary dimension.
    :param dimensions: Dimensions to consider for filters; defaults to the
        partition's primary dimensions.
    :param extant: Set of already-produced keys; combinations whose key is
        already in this set return None.
        NOTE(review): this is a mutable default argument, so the set persists
        across calls that don't pass `extant` — it looks like cross-call
        de-duplication is relied upon here; confirm before changing.
    :return: dict describing the combination, or None when the combination
        is skipped.
    """
    if not dimensions:
        dimensions = self.primary_dimensions

    key = p_dim.name
    if s_dim:
        key += '/' + s_dim.name

    # Ignore if the key already exists or the primary and secondary dims are the same
    if key in extant or p_dim == s_dim:
        return

    # Don't allow geography to be a secondary dimension. It must either be a
    # primary dimension ( to make a map ) or a filter, or a small-multiple
    if s_dim and s_dim.valuetype_class.is_geo():
        return

    extant.add(key)

    # Every other dimension becomes a filter, mapped to its unique values.
    # NOTE(review): .keys() returns a view on Python 3, not a list — confirm
    # downstream consumers accept a view.
    filtered = {}
    for d in dimensions:
        if d != p_dim and d != s_dim:
            filtered[d.name] = d.pstats.uvalues.keys()

    # Pick the chart type from the primary dimension's value type.
    if p_dim.valuetype_class.is_time():
        value_type = 'time'
        chart_type = 'line'
    elif p_dim.valuetype_class.is_geo():
        value_type = 'geo'
        chart_type = 'map'
    else:
        value_type = 'general'
        chart_type = 'bar'

    return dict(
        key=key,
        p_dim=p_dim.name,
        p_dim_type=value_type,
        p_label=p_dim.label_or_self.name,
        s_dim=s_dim.name if s_dim else None,
        s_label=s_dim.label_or_self.name if s_dim else None,
        filters=filtered,
        chart_type=chart_type
    )
def label(self):
    """Return the first child column of this column that is marked as a
    label, or None when there is none."""
    for child in self.table.columns:
        if child.parent == self.name and 'label' in child.valuetype:
            return PartitionColumn(child, self._partition)
    return None
def value_labels(self):
    """Return a map of column code values mapped to labels, for columns that
    have a label column.

    If the column is not associated with a label column, an identity map is
    returned; columns with too many unique values return an empty dict.

    WARNING! This reads the whole partition, so it is really slow.
    """
    from operator import itemgetter

    cardinality = self.pstats.nuniques

    if self.label:
        getter = itemgetter(self.name, self.label.name)
    elif self.pstats.nuniques < MAX_LABELS:
        # No label column: map each value to itself.
        getter = itemgetter(self.name, self.name)
    else:
        return {}

    pairs = set()
    for row in self._partition:
        pairs.add(getter(row))
        # Stop early once every unique value has been seen.
        if len(pairs) >= cardinality:
            break

    mapping = dict(pairs)
    assert len(mapping) == len(pairs)  # Else the label set has multiple values per key
    return mapping
def list_build_configuration_sets(page_size=200, page_index=0, sort="", q=""):
    """ List all build configuration sets """
    data = list_build_configuration_sets_raw(page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def create_build_configuration_set_raw(**kwargs):
    """ Create a new BuildConfigurationSet. """
    body = _create_build_config_set_object(**kwargs)
    response = utils.checked_api_call(pnc_api.build_group_configs, 'create_new', body=body)
    return response.content if response else None
def get_build_configuration_set_raw(id=None, name=None):
    """ Get a specific BuildConfigurationSet by name or ID """
    found_id = common.set_id(pnc_api.build_group_configs, id, name)
    response = utils.checked_api_call(pnc_api.build_group_configs, 'get_specific', id=found_id)
    return response.content if response else None
def get_build_configuration_set(id=None, name=None):
    """ Get a specific BuildConfigurationSet by name or ID """
    content = get_build_configuration_set_raw(id, name)
    return utils.format_json(content) if content else None
def update_build_configuration_set(id, **kwargs):
    """ Update a BuildConfigurationSet """
    data = update_build_configuration_set_raw(id, **kwargs)
    return utils.format_json(data) if data else None
def build_set_raw(id=None, name=None, tempbuild=False, timestamp_alignment=False,
                  force=False, rebuild_mode=common.REBUILD_MODES_DEFAULT, **kwargs):
    """ Start a build of the given BuildConfigurationSet.

    :param id: ID of the BuildConfigurationSet; alternative to ``name``.
    :param name: Name of the BuildConfigurationSet; alternative to ``id``.
    :param tempbuild: Run as a temporary build.
    :param timestamp_alignment: Use timestamp alignment; only valid with
        ``tempbuild=True``.
    :param force: Force a rebuild.
    :param rebuild_mode: One of the REBUILD_MODES; defaults to the common
        default mode.
    :param kwargs: May contain ``id_revisions``, a list of audited BC
        revisions to build instead of the current configurations.
    :return: The response content, or None on failure.
    """
    logging.debug("temp_build: " + str(tempbuild))
    logging.debug("timestamp_alignment: " + str(timestamp_alignment))
    logging.debug("force: " + str(force))

    # NOTE(review): sys.exit() in a library function terminates the whole
    # process; callers cannot recover from this invalid-argument case.
    if tempbuild is False and timestamp_alignment is True:
        logging.error("You can only activate timestamp alignment with the temporary build flag!")
        sys.exit(1)

    found_id = common.set_id(pnc_api.build_group_configs, id, name)

    revisions = kwargs.get("id_revisions")
    if revisions:
        # Build a specific set of audited build-configuration revisions.
        id_revs = map(__parse_revision, revisions)
        bcsRest = common.get_entity(pnc_api.build_group_configs, found_id)
        body = swagger_client.BuildConfigurationSetWithAuditedBCsRest()
        body = __fill_BCSWithAuditedBCs_body(body, bcsRest, id_revs)
        response = utils.checked_api_call(pnc_api.build_group_configs, 'build_versioned',
                                          id=found_id,
                                          temporary_build=tempbuild,
                                          timestamp_alignment=timestamp_alignment,
                                          force_rebuild=force,
                                          rebuild_mode=rebuild_mode,
                                          body=body)
    else:
        # Build the set's current configurations.
        response = utils.checked_api_call(pnc_api.build_group_configs, 'build',
                                          id=found_id,
                                          temporary_build=tempbuild,
                                          timestamp_alignment=timestamp_alignment,
                                          force_rebuild=force,
                                          rebuild_mode=rebuild_mode)
    if response:
        return response.content
def build_set(id=None, name=None, temporary_build=False, timestamp_alignment=False,
              force=False, rebuild_mode=common.REBUILD_MODES_DEFAULT, **kwargs):
    """ Start a build of the given BuildConfigurationSet """
    content = build_set_raw(id, name, temporary_build, timestamp_alignment,
                            force, rebuild_mode, **kwargs)
    return utils.format_json(content) if content else None
def list_build_configurations_for_set(id=None, name=None, page_size=200,
                                      page_index=0, sort="", q=""):
    """ List all build configurations in a given BuildConfigurationSet. """
    content = list_build_configurations_for_set_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(content) if content else None
def add_build_configuration_to_set(set_id=None, set_name=None,
                                   config_id=None, config_name=None):
    """ Add a build configuration to an existing BuildConfigurationSet """
    content = add_build_configuration_to_set_raw(set_id, set_name, config_id, config_name)
    return utils.format_json(content) if content else None
def list_build_records_for_set(id=None, name=None, page_size=200,
                               page_index=0, sort="", q=""):
    """ List all build records for a BuildConfigurationSet """
    content = list_build_records_for_set_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(content) if content else None
def list_build_set_records(id=None, name=None, page_size=200,
                           page_index=0, sort="", q=""):
    """ List all build set records for a BuildConfigurationSet """
    content = list_build_set_records_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(content) if content else None
def react(reactor, main, argv):
    """
    Call C{main} and run the reactor until the L{Deferred} it returns fires.

    @param reactor: An unstarted L{IReactorCore} provider which will be run
        and later stopped.

    @param main: A callable which returns a L{Deferred}.  It should take as
        many arguments as there are elements in the list C{argv}.

    @param argv: A list of arguments to pass to C{main}.

    @return: C{None}
    """
    # Record whether shutdown has already begun, so we don't call stop() twice.
    shutdown_started = []
    reactor.addSystemEventTrigger('before', 'shutdown', shutdown_started.append, True)

    finished = main(reactor, *argv)
    finished.addErrback(err, "main function encountered error")

    def stop_when_done(ignored):
        if not shutdown_started:
            reactor.callWhenRunning(reactor.stop)

    finished.addCallback(stop_when_done)
    reactor.run()
def register_model(cls, ModelClass, form_field=None, widget=None, title=None, prefix=None):
    """
    Register a model to use in the URL field.

    This function needs to be called once for every model that should be
    selectable in the URL field.

    :param ModelClass: The model to register.
    :param form_field: The form field class used to render the field.
                       This can be a lambda for lazy evaluation.
    :param widget: The widget class, can be used instead of the form field.
    :param title: The title of the model, by default it uses the models
                  ``verbose_name``.
    :param prefix: A custom prefix for the model in the serialized database
                   format. By default it uses "appname.modelname".
    """
    registry = cls._static_registry
    registry.register(ModelClass, form_field, widget, title, prefix)
def resolve_objects(cls, objects, skip_cached_urls=False):
    """
    Make sure all AnyUrlValue objects from a set of objects is resolved in
    bulk. This avoids making a query per item.

    :param objects: A list or queryset of models.
    :param skip_cached_urls: Whether to avoid prefetching data that has it's
        URL cached.
    """
    # Materialize first: the queryset or list may mix multiple models
    # (supports querysets from django-polymorphic too).
    instances = list(objects)

    pending = []
    for instance in instances:
        for field_name in _any_url_fields_by_model[instance.__class__]:
            value = getattr(instance, field_name)
            if value and value.url_type.has_id_value:
                pending.append(value)

    AnyUrlValue.resolve_values(pending, skip_cached_urls=skip_cached_urls)
def config(path=None, root=None, db=None):
    """Return the default run_config object for this installation."""
    # Imported lazily to keep module import cheap.
    import ambry.run

    return ambry.run.load(path=path, root=root, db=db)
def get_library(path=None, root=None, db=None):
    """Return the default library for this installation."""
    # BUG FIX: the docstring previously followed the import statement, so it
    # was a bare string expression rather than the function's __doc__.
    import ambry.library as _l

    rc = config(path=path, root=root, db=db)
    return _l.new_library(rc)
def doc_parser():
    """Utility function to allow getting the arguments for a single command,
    for Sphinx documentation"""
    description = ('Ambry {}. Management interface for ambry, libraries '
                   'and repositories. '.format(ambry._meta.__version__))
    return argparse.ArgumentParser(prog='ambry', description=description)
def get_extra_commands():
    """Use the configuration to discover additional CLI packages to load.

    :return: The list of CLI module names from cli.yaml, or an empty list
        when no configuration file is found.
    """
    from ambry.run import find_config_file
    from ambry.dbexceptions import ConfigurationError
    from ambry.util import yaml

    try:
        # Renamed from `plugins_dir`: find_config_file returns a file path,
        # not a directory.
        config_path = find_config_file('cli.yaml')
    except ConfigurationError:
        return []

    with open(config_path) as f:
        # SECURITY NOTE: yaml.load() without an explicit safe loader can run
        # arbitrary constructors on untrusted input. cli.yaml is installation
        # config, but prefer yaml.safe_load if ambry.util's yaml supports it.
        cli_modules = yaml.load(f)

    return cli_modules
def _refresh_url(self):
    """Refresh the song url from the API; on failure set it to the empty
    string rather than None."""
    songs = self._api.weapi_songs_url([int(self.identifier)])
    if songs and songs[0]['url']:
        self.url = songs[0]['url']
    else:
        self.url = ''
def url(self):
    """
    We will always check if this song file exists in local library,
    if true, we return the url of the local file.

    .. note::

        As netease song url will be expired after a period of time,
        we can not use static url here. Currently, we assume that the
        expiration time is 20 minutes, after the url expires, it will
        be automaticly refreshed.
    """
    # Prefer a file already present in the local library.
    local_path = self._find_in_local()
    if local_path:
        return local_path

    if not self._url:
        # Never fetched (or fetch failed previously) — get a fresh url.
        self._refresh_url()
    elif time.time() > self._expired_at:
        logger.info('song({}) url is expired, refresh...'.format(self))
        self._refresh_url()

    return self._url
def get_handler(progname, fmt=None, datefmt=None, project_id=None,
                credentials=None, debug_thread_worker=False, **_):
    """Helper function to create a Stackdriver handler.

    See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments and
    supported keyword arguments.

    Returns:
        (obj): Instance of `google.cloud.logging.handlers.
            CloudLoggingHandler`
    """
    return CloudLoggingHandlerBuilder(
        progname,
        fmt=fmt,
        datefmt=datefmt,
        project_id=project_id,
        credentials=credentials,
        debug_thread_worker=debug_thread_worker,
    ).get_handler()
def _get_metadata(self, data_type, key, timeout=5):
    """Get host instance metadata (only works on GCP hosts).

    More details about instance metadata:
    https://cloud.google.com/compute/docs/storing-retrieving-metadata

    Args:
        data_type (str): Type of metadata to fetch. Eg. project, instance
        key (str): Key of metadata to fetch
        timeout (int, optional): HTTP request timeout in seconds.
            Default is 5 seconds.
    Returns:
        (str): Plain text value of metadata entry
    Raises:
        GoogleCloudError: when request to metadata endpoint fails
    """
    url = self.METADATA_ENDPOINT.format(data_type=data_type, key=key)

    try:
        # The Metadata-Flavor header is required by the GCE metadata server.
        response = requests.get(
            url, headers={'Metadata-Flavor': 'Google'}, timeout=timeout)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise exceptions.GoogleCloudError(
            'Could not fetch "{key}" from "{type}" metadata using "{url}".'
            'Error: {e}'.format(key=key, type=data_type, url=url, e=e))

    value = response.text
    if value.strip() == '':
        raise exceptions.GoogleCloudError(
            'Error when fetching metadata from "{url}": server returned '
            'an empty value.'.format(url=url))

    return value
def _create_gcl_resource(self):
    """Create a configured Resource object.

    The logging.resource.Resource object enables GCL to filter and bucket
    incoming logs according to which resource (host) they're coming from.

    Returns:
        (obj): Instance of `google.cloud.logging.resource.Resource`
    """
    labels = {
        'project_id': self.project_id,
        'instance_id': self.instance_id,
        'zone': self.zone,
    }
    return gcl_resource.Resource('gce_instance', labels)
def get_formatter(self): """Create a fully configured `logging.Formatter` Example of formatted log message: 2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello Returns: (obj): Instance of `logging.Formatter` """ if not self.fmt: self.fmt = ('%(asctime)s.%(msecs)03d {host} {progname} ' '(%(process)d): %(message)s').format( host=self.hostname, progname=self.progname) if not self.datefmt: self.datefmt = '%Y-%m-%dT%H:%M:%S' return logging.Formatter(fmt=self.fmt, datefmt=self.datefmt) |
def _set_worker_thread_level(self):
    """Sets logging level of the background logging thread to DEBUG or INFO
    """
    worker_logger = logging.getLogger(
        'google.cloud.logging.handlers.transports.background_thread')
    level = logging.DEBUG if self.debug_thread_worker else logging.INFO
    worker_logger.setLevel(level)
def get_handler(self):
    """Create a fully configured CloudLoggingHandler.

    Returns:
        (obj): Instance of `google.cloud.logging.handlers.
            CloudLoggingHandler`
    """
    client = gcl_logging.Client(
        project=self.project_id, credentials=self.credentials)

    labels = {
        'resource_id': self.instance_id,
        'resource_project': self.project_id,
        'resource_zone': self.zone,
        'resource_host': self.hostname,
    }
    handler = gcl_handlers.CloudLoggingHandler(
        client, resource=self.resource, labels=labels)

    handler.setFormatter(self.get_formatter())
    self._set_worker_thread_level()
    return handler
def index_dataset(self, dataset, force=False):
    """Add the given dataset to the search index.

    :param dataset: The dataset to index.
    :param force: Re-index even if the dataset was already indexed.
    """
    self.backend.dataset_index.index_one(dataset, force=force)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.