_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def build_source(self, stage, source, ps, force=False):
    """Build a single source: run its pipeline, then finalize the segment partitions it wrote."""
    from ambry.bundle.process import call_interval

    assert source.is_processable, source.name

    if source.state == self.STATES.BUILT and not force:
        ps.update(message='Source {} already built'.format(source.name), state='skipped')
        return

    pipe = self.pipeline(source, ps=ps)

    source.state = self.STATES.BUILDING

    # Log before running so at least partial pipeline information survives an
    # error during the run; a successful run overwrites this with the full record.
    self.log_pipeline(pipe)

    try:
        source_name = source.name  # Captured because the source can drop out of the session.
        s_vid = source.vid

        ps.update(message='Running pipeline {}'.format(pipe.name),
                  s_vid=s_vid, item_type='rows', item_count=0)

        @call_interval(5)
        def run_progress_f(sink_pipe, rows):
            n_records, rate = sink_pipe.report_progress()
            if n_records > 0:
                ps.update(
                    message='Running pipeline {}: rate: {}'.format(pipe.name, rate),
                    s_vid=s_vid, item_type='rows', item_count=n_records)

        pipe.run(callback=run_progress_f)

        # Run the routines registered to execute at the end of the pipeline.
        for final_f in pipe.final:
            ps.update(message='Run final routine: {}'.format(final_f.__name__))
            final_f(pipe)

        ps.update(message='Finished building source')

    except:  # deliberate: re-log the pipeline state on ANY failure, then re-raise
        self.log_pipeline(pipe)
        raise

    self.commit()

    try:
        partitions = list(pipe[ambry.etl.PartitionWriter].partitions)

        ps.update(message='Finalizing segment partition',
                  item_type='partitions', item_total=len(partitions), item_count=0)

        for idx, part in enumerate(partitions):
            ps.update(message='Finalizing segment partition {}'.format(part.name),
                      item_count=idx, p_vid=part.vid)
            try:
                part.finalize()
            except AttributeError:
                print(self.table(part.table_name))
                raise

        # FIXME Shouldn't need to do this commit, but without it, some stats get added multiple
        # times, causing an error later. Probably could be avoided by adding the stats to the
        # collection in the dataset
        self.commit()

    except IndexError:
        self.error("Pipeline didn't have a PartitionWriters, won't try to finalize")

    self.log_pipeline(pipe)
    source.state = self.STATES.BUILT
    self.commit()

    return source.name
q37101 | Bundle.collect_segment_partitions | train | def collect_segment_partitions(self):
"""Return a dict of segments partitions, keyed on the name of the parent partition
"""
from collections import defaultdict
# Group the segments by their parent partition name, which is the
# same name, but without the segment.
partitions = defaultdict(set)
for p in self.dataset.partitions:
if p.type == p.TYPE.SEGMENT:
name = p.identity.name
name.segment = None
partitions[name].add(p)
return partitions | python | {
"resource": ""
} |
def unify_partitions(self):
    """For all of the segments for a partition, create the parent partition,
    combine the children into the parent, and delete the children."""

    partitions = self.collect_segment_partitions()

    # For each group, copy the segment partitions to the parent partitions, then
    # delete the segment partitions.
    with self.progress.start('coalesce', 0, message='Coalescing partition segments') as ps:
        for name, segments in iteritems(partitions):
            ps.add(item_type='partitions', item_count=len(segments),
                   # Fixed progress-message typo: was 'Colescing'.
                   message='Coalescing partition {}'.format(name))
            self.unify_partition(name, segments, ps)
def exec_context(self, **kwargs):
    """Base environment for evals, the stuff that is the same for all evals.

    Primarily used in the Caster pipe. Builds a dict of names -- parsing
    helpers, date/time and random functions, valuetypes, and bundle methods --
    that eval'd code can reference.
    """
    import inspect
    import dateutil.parser
    import datetime
    import random
    from functools import partial
    from ambry.valuetype.types import parse_date, parse_time, parse_datetime
    import ambry.valuetype.types
    import ambry.valuetype.exceptions
    import ambry.valuetype.test
    import ambry.valuetype

    def set_from(f, frm):
        # Tag the callable with its provenance; silently skip objects
        # (builtins, non-Python code) that refuse attribute assignment.
        try:
            try:
                f.ambry_from = frm
            except AttributeError:  # for instance methods
                f.im_func.ambry_from = frm
        except (TypeError, AttributeError):  # Builtins, non python code
            pass
        return f

    eval_env = dict(
        parse_date=parse_date,
        parse_time=parse_time,
        parse_datetime=parse_datetime,
        partial=partial,
        bundle=self
    )
    eval_env.update(kwargs)
    # Later updates deliberately overwrite earlier names.
    eval_env.update(dateutil.parser.__dict__)
    eval_env.update(datetime.__dict__)
    eval_env.update(random.__dict__)
    eval_env.update(ambry.valuetype.core.__dict__)
    eval_env.update(ambry.valuetype.types.__dict__)
    eval_env.update(ambry.valuetype.exceptions.__dict__)
    eval_env.update(ambry.valuetype.test.__dict__)
    eval_env.update(ambry.valuetype.__dict__)

    context = {}
    for f_name, func in eval_env.items():
        if not isinstance(func, (str, tuple)):
            context[f_name] = set_from(func, 'env')

    # The 'b' parameter of randint is assumed to be a bundle, but
    # replacing it with a lambda prevents the param assignment
    context['randint'] = lambda a, b: random.randint(a, b)

    if self != Bundle:
        # Functions defined on this bundle subclass but not on the base Bundle.
        base = set(inspect.getmembers(Bundle, predicate=inspect.isfunction))
        mine = set(inspect.getmembers(self.__class__, predicate=inspect.isfunction))
        context.update({f_name: set_from(func, 'bundle') for f_name, func in mine - base})

        # Bound methods. In python 2, these must be called referenced from the bundle, since
        # there is a difference between bound and unbound methods. In Python 3, there is no
        # difference, so the lambda functions may not be necessary.
        base = set(inspect.getmembers(Bundle, predicate=inspect.ismethod))
        mine = set(inspect.getmembers(self.__class__, predicate=inspect.ismethod))
        # Functions are descriptors; __get__ binds the function to its instance.
        context.update({f_name: set_from(func.__get__(self), 'bundle')
                        for f_name, func in (mine - base)})

    # Functions defined at module level in the bundle's build module.
    # NOTE(review): flattened source made nesting ambiguous; assumed to run for
    # every bundle, not only subclasses -- confirm against upstream ambry.
    module_entries = inspect.getmembers(sys.modules['ambry.build'], predicate=inspect.isfunction)
    context.update({f_name: set_from(func, 'module') for f_name, func in module_entries})

    return context
def post_build_time_coverage(self):
    """Collect all of the time coverage for the bundle.

    Gathers years from the bundle's 'about' metadata, from the bundle name,
    and from every partition's time coverage.

    Returns:
        sorted list of years covered.

    NOTE(review): the original body only accumulated ``years`` without storing
    or returning it; returning the collected set makes the work observable and
    is backward compatible (callers previously received None).
    """
    from ambry.util.datestimes import expand_to_years

    years = set()

    # From the bundle 'about' metadata.
    if self.metadata.about.time:
        years.update(expand_to_years(self.metadata.about.time))

    # From the bundle name.
    if self.identity.btime:
        years.update(expand_to_years(self.identity.btime))

    # From all of the partitions.
    for p in self.partitions:
        years |= set(p.time_coverage)

    return sorted(years)
def post_build_geo_coverage(self):
    """Collect all of the geocoverage for the bundle and write it to the metadata."""

    spaces = set()
    grains = set()

    def resolve(term):
        # Map a place term to its vid via the library's full-text identifier search.
        places = list(self.library.search.search_identifiers(term))
        if not places:
            raise BuildError(
                "Failed to find space identifier '{}' in full text identifier search".format(term))
        return places[0].vid

    if self.metadata.about.space:  # From the bundle metadata
        spaces.add(resolve(self.metadata.about.space))

    if self.metadata.about.grain:  # From the bundle metadata
        grains.add(self.metadata.about.grain)

    if self.identity.bspace:  # And from the bundle name
        spaces.add(resolve(self.identity.bspace))

    # From all of the partitions.
    for p in self.partitions.all:
        data = p.record.data
        if 'geo_coverage' in data:
            spaces.update(data['geo_coverage'])
        if 'geo_grain' in data:
            grains.update(data['geo_grain'])

    def conv_grain(g):
        """Some grains are expressed as summary-level names, not gvids; convert those."""
        try:
            c = GVid.get_class(g)
            # NOTE(review): `b` is assumed to be a module-level byte/str helper
            # (e.g. six.b) -- confirm against the file's imports.
            return b(c().summarize())
        except NotASummaryName:
            return g

    self.metadata.coverage.geo = sorted(spaces)
    self.metadata.coverage.grain = sorted(conv_grain(g) for g in grains)

    self.metadata.write_to_dir()
q37106 | Bundle._run_events | train | def _run_events(self, tag, stage=None):
"""Run tests marked with a particular tag and stage"""
self._run_event_methods(tag, stage)
self._run_tests(tag, stage) | python | {
"resource": ""
} |
def _run_event_methods(self, tag, stage=None):
    """Run bundle methods decorated to fire on the given event tag and stage."""
    import inspect
    from ambry.bundle.events import _runable_for_event

    # Collect first, then call, so running one handler can't perturb the scan.
    handlers = [member
                for _, member in inspect.getmembers(self, predicate=inspect.ismethod)
                if _runable_for_event(member, tag, stage)]

    for handler in handlers:
        handler()
def include(prop):
    """Mark a property for replication even though it would normally be skipped.

    Currently only meaningful for one-to-many relations.
    """
    if isinstance(prop, QueryableAttribute):
        prop = prop.property
    assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
    _included.add(prop)
def reflect(source, model, cache=None):
    """Find the object of class `model` with the same identifier as `source`.

    Returns None when `source` is None; consults `cache` first when provided.
    """
    if source is None:
        return None

    if cache and source in cache:
        return cache[source]

    ident = identity_key(instance=source)[1]
    assert ident is not None

    session = object_session(source)
    return session.query(model).get(ident)
def replicate_attributes(source, target, cache=None):
    """Replicate common SQLAlchemy attributes from `source` to `target`.

    Copies column attributes shared by both classes -- except columns that only
    back a many-to-one relationship, which are handled through the relationship
    itself -- then replicates the shared relationships.
    """
    target_manager = manager_of_class(type(target))
    column_attrs = set()
    relationship_attrs = set()
    relationship_columns = set()

    for attr in manager_of_class(type(source)).attributes:
        if attr.key not in target_manager:
            # Not a common attribute.
            continue
        target_attr = target_manager[attr.key]
        if isinstance(attr.property, ColumnProperty):
            assert isinstance(target_attr.property, ColumnProperty)
            column_attrs.add(attr)
        elif isinstance(attr.property, RelationshipProperty):
            assert isinstance(target_attr.property, RelationshipProperty)
            relationship_attrs.add(attr)
            if attr.property.direction is MANYTOONE:
                relationship_columns.update(attr.property.local_columns)

    for attr in column_attrs:
        if _column_property_in_registry(attr.property, _excluded):
            continue
        elif (not _column_property_in_registry(attr.property, _included) and
                all(column in relationship_columns
                    for column in attr.property.columns)):
            # Skip FK columns covered by a many-to-one relationship,
            # unless explicitly included.
            continue
        setattr(target, attr.key, getattr(source, attr.key))

    for attr in relationship_attrs:
        # Dead local `target_attr_model` removed: it was assigned and never used.
        if not is_relation_replicatable(attr):
            continue
        replicate_relation(source, target, attr, target_manager[attr.key],
                           cache=cache)
def replicate_filter(sources, model, cache=None):
    """Replicate each object in `sources` to class `model` and return the results.

    Objects without a counterpart in the target DB (e.g. not published) come
    back as None from replication and are silently dropped.
    """
    replicated = (replicate_no_merge(source, model, cache=cache)
                  for source in sources)
    return [target for target in replicated if target is not None]
def reflect_filter(sources, model, cache=None):
    """Reflect each object in `sources` onto class `model`.

    Objects that are not found in the target table (e.g. not published) are
    silently discarded.
    """
    reflected = (reflect(source, model, cache=cache) for source in sources)
    return [target for target in reflected if target is not None]
def valuetype_class(self):
    """Return the valuetype class if one is defined, else the class for the built-in datatype."""
    from ambry.valuetype import resolve_value_type
    return resolve_value_type(self.valuetype if self.valuetype else self.datatype)
def python_type(self):
    """Return the Python type for the column, possibly via a valuetype reference."""
    from ambry.valuetype import resolve_value_type

    if self.valuetype:
        vt = resolve_value_type(self.valuetype)
        if vt:
            return vt._pythontype

    if self.datatype:
        try:
            return self.types[self.datatype][1]
        except KeyError:
            # Not a built-in datatype; fall back to resolving it as a valuetype.
            return resolve_value_type(self.datatype)._pythontype

    from ambry.exc import ConfigurationError
    raise ConfigurationError("Can't get python_type: neither datatype of valuetype is defined")
def role(self):
    """Return the code for the role: measure, dimension or error."""
    from ambry.valuetype.core import ROLE

    vtc = self.valuetype_class
    if not vtc:
        return ''

    if vtc.role != ROLE.UNKNOWN:
        return vtc.role

    # Unknown role: fall back to the first character of the vt code, but only
    # when the code is a single character or of the form 'x/...'.
    vt_code = vtc.vt_code
    if len(vt_code) == 1 or vt_code[1] == '/':
        return vt_code[0]
    return ''
def children(self):
    """Yield the table's other columns that have this column as parent, excluding labels."""
    for col in self.table.columns:
        if col.parent != self.name:
            continue
        if col.valuetype_class.is_label():
            continue
        yield col
def label(self):
    """Return the first child column marked as a label, or self when this column is a label.

    Returns None when no label exists.
    """
    if self.valuetype_class.is_label():
        return self
    return next((c for c in self.table.columns
                 if c.parent == self.name and c.valuetype_class.is_label()),
                None)
def geoid(self):
    """Return self, or the first child column, that is marked as a geographic identifier.

    Returns None (implicitly, as in the original) when neither applies.
    """
    if self.valuetype_class.is_geoid():
        return self
    return next((c for c in self.table.columns
                 if c.parent == self.name and c.valuetype_class.is_geoid()),
                None)
def python_cast(self, v):
    """Cast `v` to the column's Python type.

    Primarily used to check that a value is valid; raises an exception otherwise.
    """
    if not self.type_is_time():
        # Instantiate the column's python type with the value, e.g. int(v).
        return self.python_type(v)

    parsed = dateutil.parser.parse(v)
    if self.datatype == Column.DATATYPE_TIME:
        parsed = parsed.time()
    if not isinstance(parsed, self.python_type):
        raise TypeError('{} was parsed to {}, expected {}'.format(v, type(parsed), self.python_type))
    return parsed
def convert_numpy_type(cls, dtype):
    """Convert a numpy dtype into a Column datatype. Only handles common types.

    Implemented without importing numpy, to stay decoupled from it.
    """
    mapping = {
        'int64': cls.DATATYPE_INTEGER64,
        'float64': cls.DATATYPE_FLOAT,
        'object': cls.DATATYPE_TEXT,  # Hack. Pandas makes strings into object.
    }

    converted = mapping.get(dtype.name)
    if not converted:
        raise TypeError(
            "Failed to convert numpy type: '{}' ".format(
                dtype.name))
    return converted
def nonull_dict(self):
    """Like `dict`, but excludes entries with falsy values and the '_codes' key.

    :return: dict of the column's non-null entries.
    """
    # six.iteritems replaced with .items(): works on both Python 2 and 3 and
    # removes the six dependency from this method.
    return {k: v for k, v in self.dict.items() if v and k != '_codes'}
def mangle_name(name):
    """Mangle a column name to a standard form, removing illegal characters.

    Lowercases, replaces runs of non-word characters with single underscores,
    and strips trailing underscores.

    :param name: the raw column name (must be a string).
    :raises TypeError: when `name` is not a string-like value.
    """
    import re
    try:
        # Fixed: raw strings for the regexes ('\w' is an invalid escape
        # sequence in a plain string literal on modern Python).
        return re.sub(r'_+', '_', re.sub(r'[^\w_]', '_', name).lower()).rstrip('_')
    except TypeError:
        raise TypeError(
            'Trying to mangle name with invalid type of: ' + str(type(name)))
def expanded_transform(self):
    """Expand the transform string into a list of transform segments."""
    segments = self._expand_transform(self.transform)

    if not segments:
        # No transform: a single segment holding only the column's datatype.
        return [self.make_xform_seg(datatype=self.valuetype_class, column=self)]

    # The first segment carries the column's datatype; every segment gets the column.
    segments[0]['datatype'] = self.valuetype_class
    for segment in segments:
        segment['column'] = self

    # If we want to add a final datatype cast to a transform:
    # segments.append(self.make_xform_seg(transforms=["cast_" + self.datatype], column=self))

    return segments
def before_insert(mapper, conn, target):
    """SQLAlchemy event listener: validate sequence_id before a Column row is inserted."""
    if target.sequence_id is None:
        from ambry.orm.exc import DatabaseError
        raise DatabaseError('Must have sequence_id before insertion')

    # The 'id' column must always be sequence id 1, and vice versa.
    assert (target.name == 'id') == (target.sequence_id == 1), (target.name, target.sequence_id)

    Column.before_update(mapper, conn, target)
def before_update(mapper, conn, target):
    """SQLAlchemy event listener: normalize the column name and renumber before an update."""
    assert target.datatype or target.valuetype
    target.name = Column.mangle_name(target.name)
    Column.update_number(target)
def spawnProcess(processProtocol, executable, args=(), env=None,
                 path=None, uid=None, gid=None, usePTY=0,
                 packages=()):
    """Launch a process with a particular Python environment.

    All arguments as to reactor.spawnProcess(), except for the addition of an
    optional `packages` iterable of package names the subprocess must be able
    to import; their locations are prepended to the child's PYTHONPATH.
    """
    # Fixed mutable default: env now defaults to None instead of a shared {}.
    env = {} if env is None else env.copy()
    pythonpath = []
    for pkg in packages:
        # NOTE(review): `imp` is deprecated (importlib.util.find_spec is the
        # modern equivalent); left as-is to avoid changing lookup semantics.
        p = os.path.split(imp.find_module(pkg)[1])[0]
        if p.startswith(os.path.join(sys.prefix, 'lib')):
            # Packages under the stdlib prefix are already importable.
            continue
        pythonpath.append(p)
    # De-duplicate (note: set() does not preserve order, as in the original).
    pythonpath = list(set(pythonpath))
    pythonpath.extend(env.get('PYTHONPATH', '').split(os.pathsep))
    env['PYTHONPATH'] = os.pathsep.join(pythonpath)
    return reactor.spawnProcess(processProtocol, executable, args,
                                env, path, uid, gid, usePTY)
def spawnPythonProcess(processProtocol, args=(), env=None,
                       path=None, uid=None, gid=None, usePTY=0,
                       packages=()):
    """Launch a Python process.

    All arguments as to spawnProcess(), except the executable argument is
    omitted (sys.executable is used).
    """
    # Fixed mutable default: env defaults to None; normalize to a dict here so
    # spawnProcess always receives a mapping regardless of its own defaults.
    if env is None:
        env = {}
    return spawnProcess(processProtocol, sys.executable,
                        args, env, path, uid, gid, usePTY,
                        packages)
q37128 | _runable_for_event | train | def _runable_for_event(f, tag, stage):
"""Loot at the event property for a function to see if it should be run at this stage. """
if not hasattr(f, '__ambry_event__'):
return False
f_tag, f_stage = f.__ambry_event__
if stage is None:
stage = 0
if tag != f_tag or stage != f_stage:
return False
return True | python | {
"resource": ""
} |
def load_obj_from_path(import_path, prefix=None, ld=None):
    """
    import a python object from an import path

    `import_path` - a python import path. For instance:
        mypackage.module.func
    or
        mypackage.module.class

    `prefix` (str) - a value to prepend to the import path if it isn't already
        there. For instance:
            load_obj_from_path('module.func', prefix='mypackage')
        is the same as
            load_obj_from_path('mypackage.module.func')

    `ld` (dict) key:value data to pass to the logger if an error occurs
    """
    # Fixed mutable default: ld was `dict()`.
    if ld is None:
        ld = {}
    if prefix and not import_path.startswith(prefix):
        import_path = '.'.join([prefix, import_path])
    log.debug(
        'attempting to load a python object from an import path',
        extra=dict(import_path=import_path, **ld))
    try:
        mod = importlib.import_module(import_path)
        return mod  # yay, we found a module. return it
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; any other failure falls through to attribute lookup.
        pass  # try to extract an object from a module instead

    try:
        path, obj_name = import_path.rsplit('.', 1)
    except ValueError:
        log_raise(
            ("import path needs at least 1 period in your import path."
             " An example import path is something like: module.obj"),
            dict(import_path=import_path, **ld), InvalidImportPath)

    try:
        mod = importlib.import_module(path)
    except ImportError:
        # NOTE(review): this retry assumes `prefix` is a string; with
        # prefix=None the replace() below raises TypeError -- confirm callers.
        newpath = path.replace(prefix, '', 1).lstrip('.')
        log.debug(
            "Could not load import path. Trying a different one",
            extra=dict(oldpath=path, newpath=newpath))
        path = newpath
        mod = importlib.import_module(path)

    try:
        obj = getattr(mod, obj_name)
    except AttributeError:
        log_raise(
            ("object does not exist in given module."
             " Your import path is not"
             " properly defined because the given `obj_name` does not exist"),
            dict(import_path=path, obj_name=obj_name, **ld),
            InvalidImportPath)
    return obj
def artifact_quality(self, artifact_quality):
    """
    Sets the artifact_quality of this ArtifactRest.

    :param artifact_quality: The artifact_quality of this ArtifactRest.
    :type: str
    """
    allowed_values = ["NEW", "VERIFIED", "TESTED", "DEPRECATED", "BLACKLISTED", "DELETED", "TEMPORARY"]
    if artifact_quality in allowed_values:
        self._artifact_quality = artifact_quality
    else:
        raise ValueError(
            "Invalid value for `artifact_quality` ({0}), must be one of {1}"
            .format(artifact_quality, allowed_values)
        )
q37131 | _get_sqlite_columns | train | def _get_sqlite_columns(connection, table):
""" Returns list of tuple containg columns of the table.
Args:
connection: sqlalchemy connection to sqlite database.
table (str): name of the table
Returns:
list of (name, datatype, position): where name is column name, datatype is
python type of the column, position is ordinal position of the column.
"""
# TODO: Move to the sqlite wrapper.
# TODO: Consider sqlalchemy mapping.
SQL_TO_PYTHON_TYPES = {
'INT': int,
'INTEGER': int,
'TINYINT': int,
'SMALLINT': int,
'MEDIUMINT': int,
'BIGINT': int,
'UNSIGNED BIG INT': int,
'INT': int,
'INT8': int,
'NUMERIC': float,
'REAL': float,
'FLOAT': float,
'DOUBLE': float,
'BOOLEAN': bool,
'CHARACTER': str,
'VARCHAR': str,
'TEXT': str
}
query = 'PRAGMA table_info(\'{}\');'
result = connection.execute(query.format(table))
ret = []
for row in result:
position = row[0] + 1
name = row[1]
datatype = row[2]
try:
datatype = SQL_TO_PYTHON_TYPES[datatype]
except KeyError:
raise Exception(
'Do not know how to convert {} sql datatype to python data type.'
.format(datatype))
ret.append((name, datatype, position))
return ret | python | {
"resource": ""
} |
def partition(self):
    """For 'partition' urltypes, return the partition referenced by the url; otherwise None."""
    if self.urltype == 'partition':
        return self._bundle.library.partition(self.url)
    return None
def spec(self):
    """Return a SourceSpec describing this source."""
    from ambry_sources.sources import SourceSpec

    d = self.dict
    # The URL goes in twice -- once as the ref and once as the url -- but
    # SourceSpec ignores the ref.
    d['url'] = self.url
    return SourceSpec(**d)
def account(self):
    """Return the account record for the host named in this source's url."""
    from ambry.util import parse_url_to_dict

    host = parse_url_to_dict(self.url)['netloc']
    return self._bundle.library.account(host)
def update_table(self, unknown_type='str'):
    """Update the source table's columns from the partition or the ingested datafile.

    :param unknown_type: datatype recorded for columns the intuiter could not resolve.
    """
    from ambry_sources.intuit import TypeIntuiter

    st = self.source_table

    if self.reftype == 'partition':
        # Mirror the columns of the referenced partition's table.
        for c in self.partition.table.columns:
            st.add_column(c.sequence_id, source_header=c.name, dest_header=c.name,
                          datatype=c.datatype, description=c.description)
    elif self.datafile.exists:
        with self.datafile.reader as r:
            names = set()
            for col in r.columns:
                name = col['name']

                if name in names:  # Disambiguate duplicate headers with the position.
                    name = name + "_" + str(col['pos'])
                names.add(name)

                dt = col['resolved_type'] if col['resolved_type'] != 'unknown' else unknown_type

                c = st.column(name)
                if c:
                    c.datatype = TypeIntuiter.promote_type(c.datatype, col['resolved_type'])
                else:
                    # Fixed: the original computed `dt` but then stored the raw
                    # resolved_type, so the unknown_type parameter was ignored.
                    st.add_column(col['pos'],
                                  source_header=name,
                                  dest_header=name,
                                  datatype=dt,
                                  description=col['description'],
                                  has_codes=col['has_codes'])
def update_spec(self):
    """Fill the row-layout fields from the row intuiter results stored in the datafile.

    Note: values are set unconditionally whenever the datafile exists (the
    original docstring claimed only-if-unset, but the code never checked).
    """
    if not self.datafile.exists:
        return

    with self.datafile.reader as r:
        info = r.info
        self.header_lines = info['header_rows']
        self.comment_lines = info['comment_rows']
        self.start_line = info['data_start_row']
        self.end_line = info['data_end_row']
def get_runconfig(path=None, root=None, db=None):
    """Load the main configuration files and accounts file.

    Deprecated: use load() instead.
    """
    return load(path, root=root, db=db)
def load(path=None, root=None, db=None, load_user=True):
    """Load all of the config files: main config, remotes, and accounts.

    Entries from the external remotes/accounts files overwrite those in the
    main config. `root` and `db` override the library filesystem root and
    database when given.
    """
    config = load_config(path, load_user=load_user)

    remotes = load_remotes(path, load_user=load_user)
    if remotes:  # The external file overwrites the main config
        if 'remotes' not in config:
            config.remotes = AttrDict()
        for k, v in remotes.remotes.items():
            config.remotes[k] = v

    accounts = load_accounts(path, load_user=load_user)
    if accounts:  # The external file overwrites the main config
        if 'accounts' not in config:
            config.accounts = AttrDict()
        for k, v in accounts.accounts.items():
            config.accounts[k] = v

    update_config(config)

    if root:
        config.library.filesystem_root = root
    if db:
        config.library.database = db

    return config
def load_accounts(extra_path=None, load_user=True):
    """Load the yaml accounts file.

    :param extra_path: extra directory to search for the accounts file.
    :param load_user: also search the user's config locations.
    :return: An `AttrDict`, or None when no accounts file exists.
    """
    from os.path import getmtime

    try:
        accts_file = find_config_file(ACCOUNTS_FILE, extra_path=extra_path, load_user=load_user)
    except ConfigurationError:
        accts_file = None

    if accts_file is not None and os.path.exists(accts_file):
        config = AttrDict()
        config.update_yaml(accts_file)

        if 'accounts' not in config:
            # BUG FIX: the original initialized `config.remotes` here (a
            # copy-paste from load_remotes); the accounts loader must
            # initialize `config.accounts`, which the next line relies on.
            config.accounts = AttrDict()

        config.accounts.loaded = [accts_file, getmtime(accts_file)]

        return config
    else:
        return None
def load_remotes(extra_path=None, load_user=True):
    """Load the YAML remotes file.

    The remotes file sort of combines the accounts file with part of the
    remotes sections from the main config.

    :return: An `AttrDict`, or None when no remotes file exists.
    """
    from os.path import getmtime

    try:
        remotes_file = find_config_file(REMOTES_FILE, extra_path=extra_path, load_user=load_user)
    except ConfigurationError:
        remotes_file = None

    if remotes_file is None or not os.path.exists(remotes_file):
        return None

    config = AttrDict()
    config.update_yaml(remotes_file)

    if 'remotes' not in config:
        config.remotes = AttrDict()

    config.remotes.loaded = [remotes_file, getmtime(remotes_file)]
    return config
def normalize_dsn_or_dict(d):
    """Clean up a database DSN, or dict version of a DSN.

    Accepts either a dict (or AttrDict) of connection parameters or a DSN
    string, and returns the pair (config dict, dsn string).
    """
    if isinstance(d, dict):
        try:
            d = d.to_dict()  # Convert from an AttrDict to a real dict
        except AttributeError:
            pass  # Already a real dict
        config, dsn = d, None
    elif isinstance(d, string_types):
        config, dsn = None, d
    else:
        raise ConfigurationError("Can't deal with database config '{}' type '{}' ".format(d, type(d)))

    if dsn:
        # Parse the DSN string into a config dict.
        if dsn.startswith('sqlite') or dsn.startswith('spatialite'):
            driver, path = dsn.split(':', 1)
            slashes, path = path[:2], path[2:]
            if slashes != '//':
                raise ConfigurationError("Sqlite DSNs must start with at least 2 slashes")
            if len(path) == 1 and path[0] == '/':
                raise ConfigurationError("Sqlite DSNs can't have only 3 slashes in path")
            if len(path) > 1 and path[0] != '/':
                raise ConfigurationError("Sqlite DSNs with a path must have 3 or 4 slashes.")
            path = path[1:]
            config = dict(
                server=None,
                username=None,
                password=None,
                driver=driver,
                dbname=path
            )
        else:
            parts = parse_url_to_dict(dsn)
            config = dict(
                server=parts['hostname'],
                dbname=parts['path'].strip('/'),
                driver=parts['scheme'],
                password=parts.get('password', None),
                username=parts.get('username', None)
            )
    else:
        # Build a DSN string from the config dict.
        userinfo = d.get('username', '') or ''
        if d.get('password'):
            userinfo += ':' + d.get('password', '')
        if userinfo:
            userinfo += '@'
        if userinfo and not d.get('server'):
            raise ConfigurationError("Can't construct a DSN with a username or password without a hostname")
        host_part = userinfo + d.get('server', '') if d.get('server') else ''
        if d.get('dbname', False):
            path_part = '/' + d.get('dbname')
        else:
            # With no dbname, Sqlite uses memory, which requires 2 slashes;
            # a relative dir is 3 slashes, an absolute dir is 4.
            path_part = ''
        dsn = '{}://{}{}'.format(d['driver'], host_part, path_part)

    return config, dsn
def execute_command(cmd, execute, echo=True):
    """Run `cmd` through the shell when `execute` is True; otherwise just print it.

    Returns the shell exit status, or 0 when not executing.
    NOTE(review): `cmd` is passed to os.system; callers must not build it from
    untrusted input.
    """
    if not execute:
        print(cmd)
        return 0
    if echo:
        print("Executing: " + cmd)
    return os.system(cmd)
def set_log_level(level):
    """Configure root logging to the requested level.

    Installs a console handler, quiets chatty HTTP/CI libraries (except at
    'debug-all'), and adds a file handler writing to builder.log. Unknown
    levels fall back to DEBUG with a warning.
    """
    lLevel = level.lower()

    level_map = {
        'debug-all': logging.DEBUG,
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL,
    }
    unrecognized = lLevel not in level_map
    loglevel = level_map.get(lLevel, logging.DEBUG)

    formatter = logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)d/%(funcName)s: %(message)s')

    console = logging.StreamHandler()
    console.setLevel(loglevel)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    logging.getLogger('').setLevel(loglevel)

    if lLevel != 'debug-all':
        # lower the loglevel for enumerated packages to avoid unwanted messages
        for package in ["requests.packages.urllib3", "urllib3", "requests_kerberos", "jenkinsapi"]:
            logging.debug("Setting loglevel for %s to WARNING.", package)
            logging.getLogger(package).setLevel(logging.WARNING)

    if unrecognized:
        logging.warning('Unrecognized log level: %s Log level set to debug', level)

    # TODO ref: use external log config
    fh = logging.FileHandler('builder.log')
    fh.setLevel(loglevel)
    fh.setFormatter(formatter)
    logging.getLogger('').addHandler(fh)
def required(field):
    """Decorator that checks the wrapped function's return value is set.

    Raises Exception naming the config `field` when the wrapped function
    returns None or an empty string; otherwise passes the value through.
    """
    def wrap(f):
        @functools.wraps(f)  # added: preserve the wrapped function's metadata
        def wrappedf(*args, **kwargs):  # added **kwargs: original dropped keyword args
            result = f(*args, **kwargs)
            if result is None or result == "":
                raise Exception(
                    "Config option '%s' is required." % field)
            return result
        return wrappedf
    return wrap
def render(self, template_name, variables=None):
    """Render the named template with the passed variables (defaults to none)."""
    if variables is None:
        variables = {}
    tmpl = self._engine.get_template(template_name)
    return tmpl.render(**variables)
def render_source(self, source, variables=None):
    """Render a template given as a source string with the passed variables."""
    if variables is None:
        variables = {}
    tmpl = self._engine.from_string(source)
    return tmpl.render(**variables)
def construct_re(url_template, match_whole_str=False, converters=None,
                 default_converter='string', anonymous=False):
    '''
    Compile *url_template* (str or unicode) into a regular expression.

    The constructed pattern expects an urlencoded string.

    Returns a tuple of:
      - the compiled re pattern,
      - dict mapping each url param name to its converter object,
      - the builder params (static strings and (variable, converter) pairs),
        used for reverse url building.

    With anonymous=True the regexp is compiled without named groups; this is
    handy e.g. when dumping an url map to JSON.
    '''
    builder_params = []  # needed for reverse url building
    url_params = {}      # found url params and their converters
    pattern = r'^'
    for chunk in _split_pattern.split(url_template):
        if _static_url_pattern.match(chunk):
            # Order matters: urlquote first, then escape re metacharacters.
            pattern += re.escape(urlquote(chunk))
            builder_params.append(chunk)
            continue
        converter_match = _converter_pattern.match(chunk)
        if converter_match is None:
            raise ValueError('Incorrect url template {!r}'.format(url_template))
        groups = converter_match.groupdict()
        conv_name = groups['converter'] or default_converter
        conv_object = init_converter(converters[conv_name], groups['args'])
        variable = groups['variable']
        builder_params.append((variable, conv_object))
        url_params[variable] = conv_object
        if anonymous:
            pattern += conv_object.regex
        else:
            pattern += '(?P<{}>{})'.format(variable, conv_object.regex)
    if match_whole_str:
        pattern += '$'
    return re.compile(pattern), url_params, builder_params
"resource": ""
} |
def export(bundle, force=False, force_restricted=False):
    """ Exports bundle to ckan instance.
    Args:
        bundle (ambry.bundle.Bundle):
        force (bool, optional): if True, ignore existance error and continue to export.
        force_restricted (bool, optional): if True, then export restricted bundles as private (for debugging
            purposes).
    Raises:
        EnvironmentError: if ckan credentials are missing or invalid.
        UnpublishedAccessError: if dataset has unpublished access - one from ('internal', 'test',
            'controlled', 'restricted', 'census').
    """
    if not ckan:
        raise EnvironmentError(MISSING_CREDENTIALS_MSG)
    # publish dataset.
    try:
        ckan.action.package_create(**_convert_bundle(bundle))
    except ckanapi.ValidationError:
        # CKAN raises ValidationError when the package already exists.
        if force:
            logger.warning(
                '{} dataset already exported, but new export forced. Continue to export dataset stuff.'
                .format(bundle.dataset))
        else:
            raise
    # set permissions.
    access = bundle.dataset.config.metadata.about.access
    if access == 'restricted' and force_restricted:
        # Debugging aid: downgrade 'restricted' to 'private' instead of refusing.
        access = 'private'
    assert access, 'CKAN publishing requires access level.'
    if access in ('internal', 'controlled', 'restricted', 'census'):
        # Never publish dataset with such access.
        raise UnpublishedAccessError(
            '{} dataset can not be published because of {} access.'
            .format(bundle.dataset.vid, bundle.dataset.config.metadata.about.access))
    elif access == 'public':
        # The default permission of the CKAN allows to edit and create dataset without logging in. But
        # admin of the certain CKAN instance can change default permissions.
        # http://docs.ckan.org/en/ckan-1.7/authorization.html#anonymous-edit-mode
        user_roles = [
            {'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']},
            {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']},
        ]
    elif access == 'registered':
        # Anonymous has no access, logged in users can read/edit.
        # http://docs.ckan.org/en/ckan-1.7/authorization.html#logged-in-edit-mode
        user_roles = [
            {'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []},
            {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}
        ]
    elif access in ('private', 'licensed', 'test'):
        # Organization users can read/edit
        # http://docs.ckan.org/en/ckan-1.7/authorization.html#publisher-mode
        # disable access for anonymous and logged_in
        user_roles = [
            {'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []},
            {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}
        ]
        organization_users = ckan.action.organization_show(id=CKAN_CONFIG.organization)['users']
        for user in organization_users:
            # NOTE(review): trailing comma makes this statement a discarded
            # 1-tuple - harmless, but should be removed.
            user_roles.append({
                'user': user['id'], 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}),
    # NOTE(review): an access value not handled above leaves user_roles
    # unbound and the loop below raises NameError - confirm `access` is
    # always one of the handled values.
    for role in user_roles:
        # http://docs.ckan.org/en/ckan-2.4.1/api/#ckan.logic.action.update.user_role_update
        ckan.action.user_role_update(**role)
    # TODO: Using bulk update gives http500 error. Try later with new version.
    # http://docs.ckan.org/en/ckan-2.4.1/api/#ckan.logic.action.update.user_role_bulk_update - the same
    # ckan.action.user_role_bulk_update(user_roles=user_roles)
    # publish partitions
    for partition in bundle.partitions:
        ckan.action.resource_create(**_convert_partition(partition))
    # publish schema.csv
    ckan.action.resource_create(**_convert_schema(bundle))
    # publish external documentation
    for name, external in six.iteritems(bundle.dataset.config.metadata.external_documentation):
        ckan.action.resource_create(**_convert_external(bundle, name, external))
"resource": ""
} |
def is_exported(bundle):
    """ Return True if the dataset is already present in the CKAN instance.

    :raises EnvironmentError: if ckan credentials are missing or invalid.
    """
    if not ckan:
        raise EnvironmentError(MISSING_CREDENTIALS_MSG)
    query = 'name:{}'.format(bundle.dataset.vid.lower())
    response = ckan.action.package_search(q=query)
    return bool(response['results'])
"resource": ""
} |
def _convert_bundle(bundle):
    """ Convert an ambry bundle to a dict ready to send to the CKAN API.

    See http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.package_create

    :param bundle: the ambry.bundle.Bundle to convert.
    :returns: dict of dataset fields for CKAN package_create.
    """
    meta = bundle.dataset.config.metadata  # shortcut for metadata
    # Use the first documentation.md file (if any) as the dataset notes.
    notes = ''
    for bundle_file in bundle.dataset.files:
        if not bundle_file.path.endswith('documentation.md'):
            continue
        contents = bundle_file.unpacked_contents
        if isinstance(contents, six.binary_type):
            contents = contents.decode('utf-8')
        notes = json.dumps(contents)
        break
    return {
        'name': bundle.dataset.vid.lower(),
        'title': meta.about.title,
        'author': meta.contacts.wrangler.name,
        'author_email': meta.contacts.wrangler.email,
        'maintainer': meta.contacts.maintainer.name,
        'maintainer_email': meta.contacts.maintainer.email,
        'license_id': '',
        'notes': notes,
        'url': meta.identity.source,
        'version': bundle.dataset.version,
        'state': 'active',
        'owner_org': CKAN_CONFIG['organization'],
    }
"resource": ""
} |
def _convert_partition(partition):
    """ Convert a partition to a resource dict ready to save to CKAN. """
    # http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create
    # Serialize the partition's rows into an in-memory CSV upload.
    buf = six.StringIO()
    writer = unicodecsv.writer(buf)
    headers = partition.datafile.headers
    if headers:
        writer.writerow(headers)
    for row in partition:
        writer.writerow([row[h] for h in headers])
    buf.seek(0)
    return {
        'package_id': partition.dataset.vid.lower(),
        'url': 'http://example.com',
        'revision_id': '',
        'description': partition.description or '',
        'format': 'text/csv',
        'hash': '',
        'name': partition.name,
        'resource_type': '',
        'mimetype': 'text/csv',
        'mimetype_inner': '',
        'webstore_url': '',
        'cache_url': '',
        'upload': buf
    }
"resource": ""
} |
def _convert_schema(bundle):
    """ Convert the dataset schema to a resource dict ready to save to CKAN. """
    # http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create
    # Find the first schema.csv among the bundle's files; upload stays None
    # when there is no such file.
    schema_csv = None
    for bundle_file in bundle.dataset.files:
        if not bundle_file.path.endswith('schema.csv'):
            continue
        contents = bundle_file.unpacked_contents
        if isinstance(contents, six.binary_type):
            contents = contents.decode('utf-8')
        schema_csv = six.StringIO(contents)
        schema_csv.seek(0)
        break
    return {
        'package_id': bundle.dataset.vid.lower(),
        'url': 'http://example.com',
        'revision_id': '',
        'description': 'Schema of the dataset tables.',
        'format': 'text/csv',
        'hash': '',
        'name': 'schema',
        'upload': schema_csv,
    }
"resource": ""
} |
q37153 | _convert_external | train | def _convert_external(bundle, name, external):
""" Converts external documentation to resource dict ready to save to CKAN. """
# http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create
ret = {
'package_id': bundle.dataset.vid.lower(),
'url': external.url,
'description': external.description,
'name': name,
}
return ret | python | {
"resource": ""
} |
def wait_for_repo_creation(task_id, retry=30):
    """
    Poll the BPM task until the Repository Configuration creation finishes.

    :param task_id: id of the BPM task to watch.
    :param retry: maximum number of 10-second polls before giving up.
    :returns: True on success, False on error or timeout.
    """
    success_event_types = ("RC_CREATION_SUCCESS", )
    error_event_types = ("RC_REPO_CREATION_ERROR", "RC_REPO_CLONE_ERROR", "RC_CREATION_ERROR")
    for _ in range(retry):
        bpm_task = get_bpm_task_by_id(task_id)
        if contains_event_type(bpm_task.content.events, success_event_types):
            return True
        if contains_event_type(bpm_task.content.events, error_event_types):
            logging.error("Creation of Repository Configuration failed")
            logging.error(bpm_task.content)
            return False
        logging.info("Waiting until Repository Configuration creation task "+str(task_id)+" finishes.")
        time.sleep(10)
    return False
"resource": ""
} |
def count(self):
    '''
    A count based on `count_field` and `format_args`.

    When `format_args` is a dict the value under `count_field` is returned;
    otherwise `format_args` itself is the count.
    '''
    args = self.format_args
    if isinstance(args, dict):
        if self.count_field not in args:
            raise TypeError("count is required")
        return args[self.count_field]
    if args is None:
        raise TypeError("count is required")
    return args
"resource": ""
} |
q37156 | generate_sources_zip | train | def generate_sources_zip(milestone_id=None, output=None):
"""
Generate a sources archive for given milestone id.
"""
if not is_input_valid(milestone_id, output):
logging.error("invalid input")
return 1
create_work_dir(output)
download_sources_artifacts(milestone_id, output)
create_zip(output) | python | {
"resource": ""
} |
q37157 | get_repository_configuration | train | def get_repository_configuration(id):
"""
Retrieve a specific RepositoryConfiguration
"""
response = utils.checked_api_call(pnc_api.repositories, 'get_specific', id=id)
if response:
return response.content | python | {
"resource": ""
} |
q37158 | update_repository_configuration | train | def update_repository_configuration(id, external_repository=None, prebuild_sync=None):
"""
Update an existing RepositoryConfiguration with new information
"""
to_update_id = id
rc_to_update = pnc_api.repositories.get_specific(id=to_update_id).content
if external_repository is None:
external_repository = rc_to_update.external_url
else:
rc_to_update.external_url = external_repository
if prebuild_sync is not None:
rc_to_update.pre_build_sync_enabled = prebuild_sync
if not external_repository and prebuild_sync:
logging.error("You cannot enable prebuild sync without external repository")
return
response = utils.checked_api_call(pnc_api.repositories, 'update', id=to_update_id, body=rc_to_update)
if response:
return response.content | python | {
"resource": ""
} |
q37159 | list_repository_configurations | train | def list_repository_configurations(page_size=200, page_index=0, sort="", q=""):
"""
List all RepositoryConfigurations
"""
response = utils.checked_api_call(pnc_api.repositories, 'get_all', page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return utils.format_json_list(response.content) | python | {
"resource": ""
} |
q37160 | match_repository_configuration | train | def match_repository_configuration(url, page_size=10, page_index=0, sort=""):
"""
Search for Repository Configurations based on internal or external url with exact match
"""
content = match_repository_configuration_raw(url, page_size, page_index, sort)
if content:
return utils.format_json_list(content) | python | {
"resource": ""
} |
q37161 | Form.render | train | def render(self):
'''Proxy method to form's environment render method'''
return self.env.template.render(self.template, form=self) | python | {
"resource": ""
} |
q37162 | Form.accept | train | def accept(self, data):
'''
Try to accpet MultiDict-like object and return if it is valid.
'''
self.raw_data = MultiDict(data)
self.errors = {}
for field in self.fields:
if field.writable:
self.python_data.update(field.accept())
else:
for name in field.field_names:
# readonly field
subfield = self.get_field(name)
value = self.python_data[subfield.name]
subfield.set_raw_value(self.raw_data, subfield.from_python(value))
return self.is_valid | python | {
"resource": ""
} |
q37163 | pluginPackagePaths | train | def pluginPackagePaths(name):
"""
Return a list of additional directories which should be searched for
modules to be included as part of the named plugin package.
@type name: C{str}
@param name: The fully-qualified Python name of a plugin package, eg
C{'twisted.plugins'}.
@rtype: C{list} of C{str}
@return: The absolute paths to other directories which may contain plugin
modules for the named plugin package.
"""
package = name.split('.')
# Note that this may include directories which do not exist. It may be
# preferable to remove such directories at this point, rather than allow
# them to be searched later on.
#
# Note as well that only '__init__.py' will be considered to make a
# directory a package (and thus exclude it from this list). This means
# that if you create a master plugin package which has some other kind of
# __init__ (eg, __init__.pyc) it will be incorrectly treated as a
# supplementary plugin directory.
return [
os.path.abspath(os.path.join(x, *package))
for x
in sys.path
if
not os.path.exists(os.path.join(x, *package + ['__init__.py']))] | python | {
"resource": ""
} |
q37164 | storage_method | train | def storage_method(func):
'''Calls decorated method with VersionedStorage as self'''
def wrap(self, *args, **kwargs):
return func(self._root_storage, *args, **kwargs)
return wrap | python | {
"resource": ""
} |
q37165 | TemplateEngine.render | train | def render(self, template_name, **kw):
'Interface method called from `Template.render`'
return self.env.get_template(template_name).render(**kw) | python | {
"resource": ""
} |
q37166 | Library.ctor_args | train | def ctor_args(self):
"""Return arguments for constructing a copy"""
return dict(
config=self._config,
search=self._search,
echo=self._echo,
read_only=self.read_only
) | python | {
"resource": ""
} |
q37167 | Library.sync_config | train | def sync_config(self, force=False):
"""Sync the file config into the library proxy data in the root dataset """
from ambry.library.config import LibraryConfigSyncProxy
lcsp = LibraryConfigSyncProxy(self)
lcsp.sync(force=force) | python | {
"resource": ""
} |
q37168 | Library.init_debug | train | def init_debug(self):
"""Initialize debugging features, such as a handler for USR2 to print a trace"""
import signal
def debug_trace(sig, frame):
"""Interrupt running process, and provide a python prompt for interactive
debugging."""
self.log('Trace signal received')
self.log(''.join(traceback.format_stack(frame)))
signal.signal(signal.SIGUSR2, debug_trace) | python | {
"resource": ""
} |
q37169 | Library.resolve_object_number | train | def resolve_object_number(self, ref):
"""Resolve a variety of object numebrs to a dataset number"""
if not isinstance(ref, ObjectNumber):
on = ObjectNumber.parse(ref)
else:
on = ref
ds_on = on.as_dataset
return ds_on | python | {
"resource": ""
} |
q37170 | Library.new_bundle | train | def new_bundle(self, assignment_class=None, **kwargs):
"""
Create a new bundle, with the same arguments as creating a new dataset
:param assignment_class: String. assignment class to use for fetching a number, if one
is not specified in kwargs
:param kwargs:
:return:
"""
if not ('id' in kwargs and bool(kwargs['id'])) or assignment_class is not None:
kwargs['id'] = self.number(assignment_class)
ds = self._db.new_dataset(**kwargs)
self._db.commit()
b = self.bundle(ds.vid)
b.state = Bundle.STATES.NEW
b.set_last_access(Bundle.STATES.NEW)
b.set_file_system(source_url=self._fs.source(b.identity.source_path),
build_url=self._fs.build(b.identity.source_path))
bs_meta = b.build_source_files.file(File.BSFILE.META)
bs_meta.set_defaults()
bs_meta.record_to_objects()
bs_meta.objects_to_record()
b.commit()
self._db.commit()
return b | python | {
"resource": ""
} |
q37171 | Library.new_from_bundle_config | train | def new_from_bundle_config(self, config):
"""
Create a new bundle, or link to an existing one, based on the identity in config data.
:param config: A Dict form of a bundle.yaml file
:return:
"""
identity = Identity.from_dict(config['identity'])
ds = self._db.dataset(identity.vid, exception=False)
if not ds:
ds = self._db.new_dataset(**identity.dict)
b = Bundle(ds, self)
b.commit()
b.state = Bundle.STATES.NEW
b.set_last_access(Bundle.STATES.NEW)
# b.set_file_system(source_url=self._fs.source(ds.name),
# build_url=self._fs.build(ds.name))
return b | python | {
"resource": ""
} |
q37172 | Library.bundle | train | def bundle(self, ref, capture_exceptions=False):
"""Return a bundle build on a dataset, with the given vid or id reference"""
from ..orm.exc import NotFoundError
if isinstance(ref, Dataset):
ds = ref
else:
try:
ds = self._db.dataset(ref)
except NotFoundError:
ds = None
if not ds:
try:
p = self.partition(ref)
ds = p._bundle.dataset
except NotFoundError:
ds = None
if not ds:
raise NotFoundError('Failed to find dataset for ref: {}'.format(ref))
b = Bundle(ds, self)
b.capture_exceptions = capture_exceptions
return b | python | {
"resource": ""
} |
q37173 | Library.partition | train | def partition(self, ref, localize=False):
""" Finds partition by ref and converts to bundle partition.
:param ref: A partition reference
:param localize: If True, copy a remote partition to local filesystem. Defaults to False
:raises: NotFoundError: if partition with given ref not found.
:return: orm.Partition: found partition.
"""
if not ref:
raise NotFoundError("No partition for empty ref")
try:
on = ObjectNumber.parse(ref)
ds_on = on.as_dataset
ds = self._db.dataset(ds_on) # Could do it in on SQL query, but this is easier.
# The refresh is required because in some places the dataset is loaded without the partitions,
# and if that persist, we won't have partitions in it until it is refreshed.
self.database.session.refresh(ds)
p = ds.partition(ref)
except NotObjectNumberError:
q = (self.database.session.query(Partition)
.filter(or_(Partition.name == str(ref), Partition.vname == str(ref)))
.order_by(Partition.vid.desc()))
p = q.first()
if not p:
raise NotFoundError("No partition for ref: '{}'".format(ref))
b = self.bundle(p.d_vid)
p = b.wrap_partition(p)
if localize:
p.localize()
return p | python | {
"resource": ""
} |
q37174 | Library.table | train | def table(self, ref):
""" Finds table by ref and returns it.
Args:
ref (str): id, vid (versioned id) or name of the table
Raises:
NotFoundError: if table with given ref not found.
Returns:
orm.Table
"""
try:
obj_number = ObjectNumber.parse(ref)
ds_obj_number = obj_number.as_dataset
dataset = self._db.dataset(ds_obj_number) # Could do it in on SQL query, but this is easier.
table = dataset.table(ref)
except NotObjectNumberError:
q = self.database.session.query(Table)\
.filter(Table.name == str(ref))\
.order_by(Table.vid.desc())
table = q.first()
if not table:
raise NotFoundError("No table for ref: '{}'".format(ref))
return table | python | {
"resource": ""
} |
q37175 | Library.remove | train | def remove(self, bundle):
""" Removes a bundle from the library and deletes the configuration for
it from the library database."""
from six import string_types
if isinstance(bundle, string_types):
bundle = self.bundle(bundle)
self.database.remove_dataset(bundle.dataset) | python | {
"resource": ""
} |
q37176 | Library.duplicate | train | def duplicate(self, b):
"""Duplicate a bundle, with a higher version number.
This only copies the files, under the theory that the bundle can be rebuilt from them.
"""
on = b.identity.on
on.revision = on.revision + 1
try:
extant = self.bundle(str(on))
if extant:
raise ConflictError('Already have a bundle with vid: {}'.format(str(on)))
except NotFoundError:
pass
d = b.dataset.dict
d['revision'] = on.revision
d['vid'] = str(on)
del d['name']
del d['vname']
del d['version']
del d['fqname']
del d['cache_key']
ds = self.database.new_dataset(**d)
nb = self.bundle(ds.vid)
nb.set_file_system(source_url=b.source_fs.getsyspath('/'))
nb.state = Bundle.STATES.NEW
nb.commit()
# Copy all of the files.
for f in b.dataset.files:
assert f.major_type == f.MAJOR_TYPE.BUILDSOURCE
nb.dataset.files.append(nb.dataset.bsfile(f.minor_type, f.path).update(f))
# Load the metadata in to records, then back out again. The objects_to_record process will set the
# new identity object numbers in the metadata file
nb.build_source_files.file(File.BSFILE.META).record_to_objects()
nb.build_source_files.file(File.BSFILE.META).objects_to_record()
ds.commit()
return nb | python | {
"resource": ""
} |
q37177 | Library.checkin_bundle | train | def checkin_bundle(self, db_path, replace=True, cb=None):
"""Add a bundle, as a Sqlite file, to this library"""
from ambry.orm.exc import NotFoundError
db = Database('sqlite:///{}'.format(db_path))
db.open()
if len(db.datasets) == 0:
raise NotFoundError("Did not get a dataset in the {} bundle".format(db_path))
ds = db.dataset(db.datasets[0].vid) # There should only be one
assert ds is not None
assert ds._database
try:
b = self.bundle(ds.vid)
self.logger.info(
"Removing old bundle before checking in new one of same number: '{}'"
.format(ds.vid))
self.remove(b)
except NotFoundError:
pass
try:
self.dataset(ds.vid) # Skip loading bundles we already have
except NotFoundError:
self.database.copy_dataset(ds, cb=cb)
b = self.bundle(ds.vid) # It had better exist now.
# b.state = Bundle.STATES.INSTALLED
b.commit()
#self.search.index_library_datasets(tick)
self.search.index_bundle(b)
return b | python | {
"resource": ""
} |
q37178 | Library.checkin_remote_bundle | train | def checkin_remote_bundle(self, ref, remote=None):
""" Checkin a remote bundle to this library.
:param ref: Any bundle reference
:param remote: If specified, use this remote. If not, search for the reference
in cached directory listings
:param cb: A one argument progress callback
:return:
"""
if not remote:
remote, vname = self.find_remote_bundle(ref)
if vname:
ref = vname
else:
pass
if not remote:
raise NotFoundError("Failed to find bundle ref '{}' in any remote".format(ref))
self.logger.info("Load '{}' from '{}'".format(ref, remote))
vid = self._checkin_remote_bundle(remote, ref)
self.commit()
return vid | python | {
"resource": ""
} |
q37179 | Library.remotes | train | def remotes(self):
"""Return the names and URLs of the remotes"""
from ambry.orm import Remote
for r in self.database.session.query(Remote).all():
if not r.short_name:
continue
yield self.remote(r.short_name) | python | {
"resource": ""
} |
q37180 | Library._remote | train | def _remote(self, name):
"""Return a remote for which 'name' matches the short_name or url """
from ambry.orm import Remote
from sqlalchemy import or_
from ambry.orm.exc import NotFoundError
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
if not name.strip():
raise NotFoundError("Empty remote name")
try:
try:
r = self.database.session.query(Remote).filter(Remote.short_name == name).one()
except NoResultFound as e:
r = None
if not r:
r = self.database.session.query(Remote).filter(Remote.url == name).one()
except NoResultFound as e:
raise NotFoundError(str(e)+'; '+name)
except MultipleResultsFound as e:
self.logger.error("Got multiple results for search for remote '{}': {}".format(name, e))
return None
return r | python | {
"resource": ""
} |
q37181 | Library.number | train | def number(self, assignment_class=None, namespace='d'):
"""
Return a new number.
:param assignment_class: Determines the length of the number. Possible values are 'authority' (3 characters) ,
'registered' (5) , 'unregistered' (7) and 'self' (9). Self assigned numbers are random and acquired locally,
while the other assignment classes use the number server defined in the configuration. If None,
then look in the number server configuration for one of the class keys, starting
with the longest class and working to the shortest.
:param namespace: The namespace character, the first character in the number. Can be one of 'd', 'x' or 'b'
:return:
"""
if assignment_class == 'self':
# When 'self' is explicit, don't look for number server config
return str(DatasetNumber())
elif assignment_class is None:
try:
nsconfig = self.services['numbers']
except ConfigurationError:
# A missing configuration is equivalent to 'self'
self.logger.error('No number server configuration; returning self assigned number')
return str(DatasetNumber())
for assignment_class in ('self', 'unregistered', 'registered', 'authority'):
if assignment_class+'-key' in nsconfig:
break
# For the case where the number configuratoin references a self-assigned key
if assignment_class == 'self':
return str(DatasetNumber())
else:
try:
nsconfig = self.services['numbers']
except ConfigurationError:
raise ConfigurationError('No number server configuration')
if assignment_class + '-key' not in nsconfig:
raise ConfigurationError(
'Assignment class {} not number server config'.format(assignment_class))
try:
key = nsconfig[assignment_class + '-key']
config = {
'key': key,
'host': nsconfig['host'],
'port': nsconfig.get('port', 80)
}
ns = NumberServer(**config)
n = str(next(ns))
self.logger.info('Got number from number server: {}'.format(n))
except HTTPError as e:
self.logger.error('Failed to get number from number server for key: {}'.format(key, e.message))
self.logger.error('Using self-generated number. There is no problem with this, '
'but they are longer than centrally generated numbers.')
n = str(DatasetNumber())
return n | python | {
"resource": ""
} |
q37182 | Library.edit_history | train | def edit_history(self):
"""Return config record information about the most recent bundle accesses and operations"""
ret = self._db.session\
.query(Config)\
.filter(Config.type == 'buildstate')\
.filter(Config.group == 'access')\
.filter(Config.key == 'last')\
.order_by(Config.modified.desc())\
.all()
return ret | python | {
"resource": ""
} |
q37183 | Library.import_bundles | train | def import_bundles(self, dir, detach=False, force=False):
"""
Import bundles from a directory
:param dir:
:return:
"""
import yaml
fs = fsopendir(dir)
bundles = []
for f in fs.walkfiles(wildcard='bundle.yaml'):
self.logger.info('Visiting {}'.format(f))
config = yaml.load(fs.getcontents(f))
if not config:
self.logger.error("Failed to get a valid bundle configuration from '{}'".format(f))
bid = config['identity']['id']
try:
b = self.bundle(bid)
except NotFoundError:
b = None
if not b:
b = self.new_from_bundle_config(config)
self.logger.info('{} Loading New'.format(b.identity.fqname))
else:
self.logger.info('{} Loading Existing'.format(b.identity.fqname))
source_url = os.path.dirname(fs.getsyspath(f))
b.set_file_system(source_url=source_url)
self.logger.info('{} Loading from {}'.format(b.identity.fqname, source_url))
b.sync_in()
if detach:
self.logger.info('{} Detaching'.format(b.identity.fqname))
b.set_file_system(source_url=None)
if force:
self.logger.info('{} Sync out'.format(b.identity.fqname))
# FIXME. It won't actually sync out until re-starting the bundle.
# The source_file_system is probably cached
b = self.bundle(bid)
b.sync_out()
bundles.append(b)
b.close()
return bundles | python | {
"resource": ""
} |
q37184 | Library.process_pool | train | def process_pool(self, limited_run=False):
"""Return a pool for multiprocess operations, sized either to the number of CPUS, or a configured value"""
from multiprocessing import cpu_count
from ambry.bundle.concurrent import Pool, init_library
if self.processes:
cpus = self.processes
else:
cpus = cpu_count()
self.logger.info('Starting MP pool with {} processors'.format(cpus))
return Pool(self, processes=cpus, initializer=init_library,
maxtasksperchild=1,
initargs=[self.database.dsn, self._account_password, limited_run]) | python | {
"resource": ""
} |
q37185 | file_loc | train | def file_loc():
"""Return file and line number"""
import sys
import inspect
try:
raise Exception
except:
file_ = '.../' + '/'.join((inspect.currentframe().f_code.co_filename.split('/'))[-3:])
line_ = sys.exc_info()[2].tb_frame.f_back.f_lineno
return "{}:{}".format(file_, line_) | python | {
"resource": ""
} |
q37186 | calling_code | train | def calling_code(f, f_name=None, raise_for_missing=True):
"""Return the code string for calling a function. """
import inspect
from ambry.dbexceptions import ConfigurationError
if inspect.isclass(f):
try:
args = inspect.getargspec(f.__init__).args
except TypeError as e:
raise TypeError("Failed to inspect {}: {}".format(f, e))
else:
args = inspect.getargspec(f).args
if len(args) > 1 and args[0] == 'self':
args = args[1:]
for a in args:
if a not in all_args + ('exception',): # exception arg is only for exception handlers
if raise_for_missing:
raise ConfigurationError('Caster code {} has unknown argument '
'name: \'{}\'. Must be one of: {} '.format(f, a, ','.join(all_args)))
arg_map = {e: e for e in var_args}
args = [arg_map.get(a, a) for a in args]
return "{}({})".format(f_name if f_name else f.__name__, ','.join(args)) | python | {
"resource": ""
} |
q37187 | PriorityQueue.push | train | def push(self, el):
""" Put a new element in the queue. """
count = next(self.counter)
heapq.heappush(self._queue, (el, count)) | python | {
"resource": ""
} |
q37188 | PartitionDisplay.geo_description | train | def geo_description(self):
"""Return a description of the geographic extents, using the largest scale
space and grain coverages"""
sc = self._p.space_coverage
gc = self._p.grain_coverage
if sc and gc:
if parse_to_gvid(gc[0]).level == 'state' and parse_to_gvid(sc[0]).level == 'state':
return parse_to_gvid(sc[0]).geo_name
else:
return ("{} in {}".format(
parse_to_gvid(gc[0]).level_plural.title(),
parse_to_gvid(sc[0]).geo_name))
elif sc:
return parse_to_gvid(sc[0]).geo_name.title()
elif sc:
return parse_to_gvid(gc[0]).level_plural.title()
else:
return '' | python | {
"resource": ""
} |
q37189 | PartitionDisplay.time_description | train | def time_description(self):
"""String description of the year or year range"""
tc = [t for t in self._p.time_coverage if t]
if not tc:
return ''
mn = min(tc)
mx = max(tc)
if not mn and not mx:
return ''
elif mn == mx:
return mn
else:
return "{} to {}".format(mn, mx) | python | {
"resource": ""
} |
q37190 | PartitionDisplay.sub_description | train | def sub_description(self):
"""Time and space dscription"""
gd = self.geo_description
td = self.time_description
if gd and td:
return '{}, {}. {} Rows.'.format(gd, td, self._p.count)
elif gd:
return '{}. {} Rows.'.format(gd, self._p.count)
elif td:
return '{}. {} Rows.'.format(td, self._p.count)
else:
return '{} Rows.'.format(self._p.count) | python | {
"resource": ""
} |
q37191 | Partition.identity | train | def identity(self):
"""Return this partition information as a PartitionId."""
if self.dataset is None:
# The relationship will be null until the object is committed
s = object_session(self)
ds = s.query(Dataset).filter(Dataset.id_ == self.d_id).one()
else:
ds = self.dataset
d = {
'id': self.id,
'vid': self.vid,
'name': self.name,
'vname': self.vname,
'ref': self.ref,
'space': self.space,
'time': self.time,
'table': self.table_name,
'grain': self.grain,
'variant': self.variant,
'segment': self.segment,
'format': self.format if self.format else 'db'
}
return PartitionIdentity.from_dict(dict(list(ds.dict.items()) + list(d.items()))) | python | {
"resource": ""
} |
q37192 | Partition.detail_dict | train | def detail_dict(self):
"""A more detailed dict that includes the descriptions, sub descriptions, table
and columns."""
d = self.dict
def aug_col(c):
d = c.dict
d['stats'] = [s.dict for s in c.stats]
return d
d['table'] = self.table.dict
d['table']['columns'] = [aug_col(c) for c in self.table.columns]
return d | python | {
"resource": ""
} |
def local_datafile(self):
    """Return the datafile for this partition, from the build directory, the remote, or the warehouse"""
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError
    from ambry.orm.exc import NotFoundError

    # Translate the filesystem-level miss into the project's NotFoundError.
    missing_msg = 'Could not locate data file for partition {} (local)'
    try:
        return MPRowsFile(self._bundle.build_fs, self.cache_key)
    except ResourceNotFoundError:
        raise NotFoundError(missing_msg.format(self.identity.fqname))
"resource": ""
} |
def remote(self):
    """Return the remote for this partition.

    Raises NotFoundError when the dataset does not record a remote name.
    """
    from ambry.exc import NotFoundError

    data = self.dataset.data
    if 'remote_name' in data:
        return self._bundle.library.remote(data['remote_name'])

    raise NotFoundError('Could not determine remote for partition: {}'.format(self.identity.fqname))
"resource": ""
} |
def is_local(self):
    """True if the partition's data file exists locally, False otherwise."""
    from ambry.orm.exc import NotFoundError

    try:
        file_exists = self.local_datafile.exists
    except NotFoundError:
        # No datafile at all counts as not-local.
        return False

    return True if file_exists else False
"resource": ""
} |
def localize(self, ps=None):
    """Copy a non-local partition file to the local build directory.

    :param ps: optional progress-state object; when given, download progress is
               reported through it, otherwise throttled messages go to the bundle log.
    :return: self, or None when the file was already local before locking.
    """
    from filelock import FileLock
    from ambry.util import ensure_dir_exists
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError

    # Fast path: nothing to do when the datafile is already in the build FS.
    if self.is_local:
        return

    local = self._bundle.build_fs
    b = self._bundle.library.bundle(self.identity.as_dataset().vid)
    remote = self._bundle.library.remote(b)

    # A per-partition file lock serializes concurrent localizations.
    lock_path = local.getsyspath(self.cache_key + '.lock')
    ensure_dir_exists(lock_path)
    lock = FileLock(lock_path)

    if ps:
        ps.add_update(message='Localizing {}'.format(self.identity.name),
                      partition=self,
                      item_type='bytes',
                      state='downloading')

    if ps:
        # Progress callback: update the progress record only every 32nd
        # callback to limit update churn.
        def progress(bts):
            if ps.rec.item_total is None:
                ps.rec.item_count = 0
            if not ps.rec.data:
                ps.rec.data = {}  # Should not need to do this.
                # NOTE(review): this early `return self` skips the byte-count
                # update for the first callback, and returns a value from a
                # progress callback (whose return is presumably ignored) —
                # looks unintended; confirm against setcontents_async's contract.
                return self
            item_count = ps.rec.item_count + bts
            ps.rec.data['updates'] = ps.rec.data.get('updates', 0) + 1
            if ps.rec.data['updates'] % 32 == 1:
                ps.update(message='Localizing {}'.format(self.identity.name),
                          item_count=item_count)
    else:
        from ambry.bundle.process import call_interval

        # Without a progress-state object, throttle log output to one line per 5s.
        @call_interval(5)
        def progress(bts):
            self._bundle.log("Localizing {}. {} bytes downloaded".format(self.vname, bts))

    def exception_cb(e):
        # Propagate async-copy errors to the caller instead of swallowing them.
        raise e

    with lock:
        # FIXME! This won't work with remote ( http) API, only FS ( s3:, file:)
        # Re-check under the lock: another process may have completed the download.
        if self.is_local:
            return self

        try:
            with remote.fs.open(self.cache_key + MPRowsFile.EXTENSION, 'rb') as f:
                event = local.setcontents_async(self.cache_key + MPRowsFile.EXTENSION,
                                                f,
                                                progress_callback=progress,
                                                error_callback=exception_cb)
                # Block until the asynchronous copy completes.
                event.wait()

            if ps:
                ps.update_done()
        except ResourceNotFoundError as e:
            from ambry.orm.exc import NotFoundError
            raise NotFoundError("Failed to get MPRfile '{}' from {}: {} "
                                .format(self.cache_key, remote.fs, e))

    return self
"resource": ""
} |
def reader(self):
    """The reader for the datafile.

    Raises:
        NotFoundError: when the underlying partition file does not exist.
    """
    # Fix: the doc text was previously a stray string expression placed AFTER
    # the imports (a no-op statement, not a docstring); it now sits first so
    # it is a real docstring. Behavior is otherwise unchanged.
    from ambry.orm.exc import NotFoundError
    from fs.errors import ResourceNotFoundError

    try:
        return self.datafile.reader
    except ResourceNotFoundError:
        # Translate the filesystem-level miss into the project's NotFoundError.
        raise NotFoundError("Failed to find partition file, '{}' "
                            .format(self.datafile.path))
"resource": ""
} |
def analysis(self):
    """Return an AnalysisPartition proxy, which wraps this partition to provide
    access to dataframes, shapely shapes and other analysis services."""
    # Unwrap a proxy so the AnalysisPartition always wraps the real partition.
    target = self._obj if isinstance(self, PartitionProxy) else self
    return AnalysisPartition(target)
"resource": ""
} |
def measuredim(self):
    """Return a MeasureDimensionPartition proxy, which wraps the partition to
    provide access to columns in terms of measures and dimensions."""
    # Unwrap a proxy so the MeasureDimensionPartition wraps the real partition.
    target = self._obj if isinstance(self, PartitionProxy) else self
    return MeasureDimensionPartition(target)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.