_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def record_to_objects(self, preference=None):
    """Create objects from files, or merge the files into the objects.

    :param preference: optional ``File.PREFERENCE`` value that overrides
        each record's own preference when set.
    """
    from ambry.orm.file import File

    for bs_file in self.list_records():
        effective = preference if preference else bs_file.record.preference

        if effective == File.PREFERENCE.FILE:
            self._bundle.logger.debug(' Cleaning objects for file {}'.format(bs_file.path))
            bs_file.clean_objects()

        if effective in (File.PREFERENCE.FILE, File.PREFERENCE.MERGE):
            self._bundle.logger.debug(' rto {}'.format(bs_file.path))
            bs_file.record_to_objects()
def objects_to_record(self, preference=None):
    """Create file records from objects.

    Currently disabled: everything after the ``raise`` is dead code kept
    for reference; it still depends on the obsolete ``file_info_map``.
    """
    from ambry.orm.file import File
    raise NotImplementedError("Still uses obsolete file_info_map")
    # --- unreachable legacy implementation below ---
    for file_const, (file_name, clz) in iteritems(file_info_map):
        f = self.file(file_const)
        # Per-record preference unless an override was supplied.
        pref = preference if preference else f.record.preference
        if pref in (File.PREFERENCE.MERGE, File.PREFERENCE.OBJECT):
            self._bundle.logger.debug(' otr {}'.format(file_const))
            f.objects_to_record()
def set_defaults(self):
    """Add default content to any file record that is empty"""
    for const_name, file_class in file_classes.items():
        if file_class.multiplicity != '1':
            continue
        record_file = self.file(const_name)
        if not record_file.record.unpacked_contents:
            record_file.setcontent(record_file.default)
def run(host='127.0.0.1', port=8000):
    """
    Run web server.
    """
    print("Server running on {}:{}".format(host, port))
    # Block forever serving requests through the application router.
    server = make_server(host, port, Router())
    server.serve_forever()
def main(args=None):
    """
    Create a private key and a certificate and write them to a file.

    :param args: argument list; defaults to ``sys.argv[1:]``.
    :raises SystemExit: if the arguments cannot be parsed.
    """
    if args is None:
        args = sys.argv[1:]
    o = Options()
    try:
        o.parseOptions(args)
    except usage.UsageError as e:
        # 'except usage.UsageError, e:' is a SyntaxError on Python 3;
        # 'as e' works on both Python 2.6+ and Python 3.
        raise SystemExit(str(e))
    else:
        return createSSLCertificate(o)
def update(self, f):
    """Copy another files properties into this one."""
    for attr in self.__mapper__.attrs:
        key = attr.key
        if key == 'oid':
            # Never overwrite our own primary identifier.
            continue
        try:
            setattr(self, key, getattr(f, key))
        except AttributeError:
            # The dict() method copies data property values into the main dict,
            # and these don't have associated class properties.
            continue
def resetTimeout(self):
    """Reset the timeout count down"""
    pending, period = self.__timeoutCall, self.timeOut
    # Only restart the countdown when a timeout is both scheduled and enabled.
    if pending is not None and period is not None:
        pending.reset(period)
def setTimeout(self, period):
    """Change the timeout period
    @type period: C{int} or C{NoneType}
    @param period: The period, in seconds, to change the timeout to, or
    C{None} to disable the timeout.
    """
    prev = self.timeOut
    self.timeOut = period
    if self.__timeoutCall is not None:
        if period is None:
            # Disabling: cancel the pending delayed call and drop the reference.
            self.__timeoutCall.cancel()
            self.__timeoutCall = None
        else:
            # Already scheduled: restart the countdown with the new period.
            self.__timeoutCall.reset(period)
    elif period is not None:
        # Nothing pending: schedule the first timeout callback.
        self.__timeoutCall = self.callLater(period, self.__timedOut)
    # Return the previous period so callers can restore it later.
    return prev
def _load_controllers(self):
    """
    Load all controllers from folder 'controllers'.
    Ignore files with leading underscore (for example: controllers/_blogs.py)
    """
    controllers_dir = os.path.join(self._project_dir, 'controllers')
    for file_name in os.listdir(controllers_dir):
        # ignore disabled controllers
        if file_name.startswith('_'):
            continue
        module_name = file_name.split('.', 1)[0]
        module = import_module("controllers.{}".format(module_name))
        # transform 'blog_articles' file name to 'BlogArticles' class
        class_name = module_name.title().replace('_', '')
        controller = getattr(module, class_name)()
        for action_name in dir(controller):
            action = getattr(controller, action_name)
            if not action_name.startswith('_') and callable(action):
                self._controllers["/".join([module_name, action_name])] = action
    return self._controllers
def _init_view(self):
    """
    Initialize View with project settings.
    """
    engine = get_config('rails.views.engine', 'jinja')
    templates = os.path.join(self._project_dir, "views", "templates")
    self._view = View(engine, templates)
def get_action_handler(self, controller_name, action_name):
    """
    Return action of controller as callable.
    If requested controller isn't found - return 'not_found' action
    of requested controller or Index controller.
    """
    candidates = (
        controller_name + '/' + action_name,
        controller_name + '/not_found',
        # call Index controller to catch all unhandled pages
        'index/not_found',
    )
    # First registered candidate wins; None when nothing matches at all.
    return next(
        (self._controllers[path] for path in candidates if path in self._controllers),
        None)
def _preprocess_sqlite_index(asql_query, library, backend, connection):
    """ Creates materialized view for each indexed partition found in the query.
    Args:
        asql_query (str): asql query
        library (ambry.Library):
        backend (SQLiteBackend):
        connection (apsw.Connection):
    Returns:
        str: converted asql if it contains index query. If not, returns asql_query as is.
    """
    new_query = None
    # Only 'INDEX ...' statements are rewritten; other queries pass through untouched.
    if asql_query.strip().lower().startswith('index'):
        logger.debug(
            '_preprocess_index: create index query found.\n asql query: {}'
            .format(asql_query))
        index = parse_index(asql_query)
        partition = library.partition(index.source)
        # Materialize the partition so a real SQLite index can be built on it.
        table = backend.install(connection, partition, materialize=True)
        index_name = '{}_{}_ind'.format(partition.vid, '_'.join(index.columns))
        new_query = 'CREATE INDEX IF NOT EXISTS {index} ON {table} ({columns});'.format(
            index=index_name, table=table, columns=','.join(index.columns))
        logger.debug(
            '_preprocess_index: preprocess finished.\n asql query: {}\n new query: {}'
            .format(asql_query, new_query))
    return new_query or asql_query
def close(self):
    """ Closes connection to sqlite database. """
    connection = getattr(self, '_connection', None)
    if connection:
        logger.debug('Closing sqlite connection.')
        connection.close()
        self._connection = None
def _get_mpr_view(self, connection, table):
    """ Finds and returns view name in the sqlite db represented by given connection.
    Args:
        connection: connection to sqlite db where to look for partition table.
        table (orm.Table):
    Raises:
        MissingViewError: if database does not have partition table.
    Returns:
        str: database table storing partition data.
    """
    logger.debug(
        'Looking for view of the table.\n table: {}'.format(table.vid))
    view = self.get_view_name(table)
    if not self._relation_exists(connection, view):
        raise MissingViewError('sqlite database does not have view for {} table.'
                               .format(table.vid))
    logger.debug(
        'View of the table exists.\n table: {}, view: {}'
        .format(table.vid, view))
    return view
def _get_mpr_table(self, connection, partition):
    """ Returns name of the sqlite table who stores mpr data.
    Args:
        connection (apsw.Connection): connection to sqlite database who stores mpr data.
        partition (orm.Partition):
    Returns:
        str:
    Raises:
        MissingTableError: if partition table not found in the db.
    """
    # TODO: This is the first candidate for optimization. Add field to partition
    # with table name and update it while table creation.
    # Optimized version.
    #
    # return partition.mpr_table or raise exception
    # Not optimized version.
    #
    # first check either partition has readonly (materialized) table.
    virtual_table = partition.vid
    table = '{}_v'.format(virtual_table)
    logger.debug(
        'Looking for materialized table of the partition.\n partition: {}'.format(partition.name))
    table_exists = self._relation_exists(connection, table)
    if table_exists:
        logger.debug(
            'Materialized table of the partition found.\n partition: {}, table: {}'
            .format(partition.name, table))
        return table
    # now check for virtual table
    logger.debug(
        'Looking for a virtual table of the partition.\n partition: {}'.format(partition.name))
    virtual_exists = self._relation_exists(connection, virtual_table)
    if virtual_exists:
        logger.debug(
            'Virtual table of the partition found.\n partition: {}, table: {}'
            # BUG FIX: previously formatted `table` (the materialized '<vid>_v'
            # name) here, although the relation actually found is the virtual one.
            .format(partition.name, virtual_table))
        return virtual_table
    raise MissingTableError('sqlite database does not have table for mpr of {} partition.'
                            .format(partition.vid))
def _get_create_query(partition, tablename, include=None):
    """ Creates and returns `CREATE TABLE ...` sql statement for given mprows.
    Args:
        partition (orm.Partition):
        tablename (str): name of the table in the return create query.
        include (list of str, optional): list of columns to include to query.
    Returns:
        str: create table query.
    """
    TYPE_MAP = {
        'int': 'INTEGER',
        'float': 'REAL',
        six.binary_type.__name__: 'TEXT',
        six.text_type.__name__: 'TEXT',
        'date': 'DATE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE'
    }
    include = include or []
    column_defs = []
    # Columns are emitted in their mpr position order.
    for column in sorted(partition.datafile.reader.columns, key=lambda c: c['pos']):
        if include and column['name'] not in include:
            continue
        sqlite_type = TYPE_MAP.get(column['type'])
        if sqlite_type is None:
            raise Exception('Do not know how to convert {} to sql column.'.format(column['type']))
        column_defs.append(' "{}" {}'.format(column['name'], sqlite_type))
    return 'CREATE TABLE IF NOT EXISTS {}(\n{})'.format(tablename, ',\n'.join(column_defs))
def _get_connection(self):
    """ Returns connection to sqlite db.
    Returns:
        connection to the sqlite db who stores mpr data.
    """
    if getattr(self, '_connection', None):
        logger.debug('Connection to sqlite db already exists. Using existing one.')
        return self._connection
    # Translate the SQLAlchemy-style DSN into an apsw path.
    dsn = ':memory:' if self._dsn == 'sqlite://' else self._dsn.replace('sqlite:///', '')
    logger.debug(
        'Creating new apsw connection.\n dsn: {}, config_dsn: {}'
        .format(dsn, self._dsn))
    self._connection = apsw.Connection(dsn)
    return self._connection
def _add_partition(self, connection, partition):
    """ Creates sqlite virtual table for mpr file of the given partition.
    Args:
        connection: connection to the sqlite db who stores mpr data.
        partition (orm.Partition):
    """
    logger.debug('Creating virtual table for partition.\n partition: {}'.format(partition.name))
    # NOTE(review): the virtual table is created as '<vid>_vt', while
    # _get_mpr_table looks up '<vid>' / '<vid>_v' -- confirm the naming
    # convention is consistent across the backend.
    sqlite_med.add_partition(connection, partition.datafile, partition.vid+'_vt')
q37018 | SQLiteBackend._execute | train | def _execute(self, connection, query, fetch=True):
""" Executes given query using given connection.
Args:
connection (apsw.Connection): connection to the sqlite db who stores mpr data.
query (str): sql query
fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch.
Returns:
iterable with query result.
"""
cursor = connection.cursor()
try:
cursor.execute(query)
except Exception as e:
from ambry.mprlib.exceptions import BadSQLError
raise BadSQLError("Failed to execute query: {}; {}".format(query, e))
if fetch:
return cursor.fetchall()
else:
return cursor | python | {
"resource": ""
} |
def list_milestones(page_size=200, page_index=0, q="", sort=""):
    """
    List all ProductMilestones
    """
    # NOTE the raw call takes sort before q.
    data = list_milestones_raw(page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def update_milestone(id, **kwargs):
    """
    Update a ProductMilestone
    """
    data = update_milestone_raw(id, **kwargs)
    return utils.format_json(data) if data else None
def close_milestone(id, **kwargs):
    """
    Close a milestone. This triggers its release process.
    The user can optionally specify the release-date, otherwise today's date is
    used.
    If the wait parameter is specified and set to True, upon closing the milestone,
    we'll periodically check that the release being processed is done.
    Required:
    - id: int
    Optional:
    - wait key: bool
    """
    data = close_milestone_raw(id, **kwargs)
    return utils.format_json(data) if data else None
def init_app(self, app):
    """
    Find and configure the user database from specified file

    :param app: the Flask application to configure; registers a
        before_request hook on it.
    """
    app.config.setdefault('FLASK_AUTH_ALL', False)
    app.config.setdefault('FLASK_AUTH_REALM', 'Login Required')
    # Default set to bad file to trigger IOError
    app.config.setdefault('FLASK_HTPASSWD_PATH', '/^^^/^^^')
    # Load up user database
    try:
        self.load_users(app)
    except IOError:
        # Missing/unreadable htpasswd file is logged, not fatal; requests
        # will simply fail to authenticate until one is configured.
        log.critical(
            'No htpasswd file loaded, please set `FLASK_HTPASSWD`'
            'or `FLASK_HTPASSWD_PATH` environment variable to a '
            'valid apache htpasswd file.'
        )

    # Allow requiring auth for entire app, with pre request method
    @app.before_request
    def require_auth():  # pylint: disable=unused-variable
        """Pre request processing for enabling full app authentication."""
        if not current_app.config['FLASK_AUTH_ALL']:
            return
        is_valid, user = self.authenticate()
        if not is_valid:
            return self.auth_failed()
        # Expose the authenticated user on the request globals.
        g.user = user
def get_hashhash(self, username):
    """
    Generate a digest of the htpasswd hash
    """
    stored_hash = self.users.get_hash(username)
    return hashlib.sha256(stored_hash).hexdigest()
def generate_token(self, username):
    """
    assumes user exists in htpasswd file.
    Return the token for the given user by signing a token of
    the username and a hash of the htpasswd string.
    """
    payload = {
        'username': username,
        'hashhash': self.get_hashhash(username),
    }
    # Signed bytes are decoded so the token is a plain text string.
    return self.get_signature().dumps(payload).decode('UTF-8')
def check_token_auth(self, token):
    """
    Check to see who this is and if their token gets
    them into the system.
    """
    serializer = self.get_signature()
    try:
        data = serializer.loads(token)
    except BadSignature:
        log.warning('Received bad token signature')
        return False, None
    username = data['username']
    if username not in self.users.users():
        log.warning(
            'Token auth signed message, but invalid user %s',
            username
        )
        return False, None
    if data['hashhash'] != self.get_hashhash(username):
        log.warning(
            'Token and password do not match, %s '
            'needs to regenerate token',
            username
        )
        return False, None
    return True, username
def authenticate(self):
    """Authenticate user by any means and return either true or false.
    Args:
    Returns:
        tuple (is_valid, username): True is valid user, False if not
    """
    # Basic auth takes precedence over token auth when both are present.
    basic_auth = request.authorization
    is_valid = False
    user = None
    if basic_auth:
        is_valid, user = self.check_basic_auth(
            basic_auth.username, basic_auth.password
        )
    else:  # Try token auth
        token = request.headers.get('Authorization', None)
        param_token = request.args.get('access_token')
        if token or param_token:
            if token:
                # slice the 'token ' piece of the header (following
                # github style):
                token = token[6:]
            else:
                # Grab it from query dict instead
                token = param_token
            log.debug('Received token: %s', token)
            is_valid, user = self.check_token_auth(token)
    # (False, None) when neither mechanism produced credentials.
    return (is_valid, user)
def required(self, func):
    """
    Decorator function with basic and token authentication handler
    """
    @wraps(func)
    def decorated(*args, **kwargs):
        """
        Actual wrapper to run the auth checks.
        """
        is_valid, user = self.authenticate()
        if not is_valid:
            return self.auth_failed()
        # The authenticated user is injected as the 'user' keyword argument.
        kwargs['user'] = user
        return func(*args, **kwargs)
    return decorated
def source_path(self):
    """The name in a form suitable for use in a filesystem.
    Excludes the revision
    """
    # Read _name_parts via self so subclasses produce their own bundle path.
    part_names = [key for key, _, _ in self._name_parts]
    joined = self._path_join(names=part_names,
                             excludes=['source', 'version', 'bspace'],
                             sep=self.NAME_PART_SEP)
    parts = [self.source, self.bspace, joined] if self.bspace else [self.source, joined]
    return os.path.join(*parts)
def cache_key(self):
    """The name in a form suitable for use as a cache-key"""
    try:
        return self.path
    except TypeError:
        # Preserve the original message (typo included) for compatibility.
        message = "self.path is invalild: '{}', '{}'".format(str(self.path), type(self.path))
        raise TypeError(message)
def ver(self, revision):
    """Clone and change the version.

    :param revision: the new version value to parse and assign to the clone.
    :return: a clone of this name with its version replaced.
    """
    c = self.clone()
    # BUG FIX: previously parsed self.version, silently ignoring the
    # `revision` argument, so the clone's version never actually changed.
    c.version = self._parse_version(revision)
    return c
def as_partition(self, **kwargs):
    """Return a PartitionName based on this name."""
    # kwargs override any colliding keys from this name's dict.
    merged = dict(self.dict)
    merged.update(kwargs)
    return PartitionName(**merged)
def promote(self, name):
    """Promote to a PartitionName by combining with a bundle Name."""
    # The partial name's own values take precedence over the bundle name's.
    combined = dict(name.dict)
    combined.update(self.dict)
    return PartitionName(**combined)
def path(self):
    """The path of the bundle source.
    Includes the revision.
    """
    # Use Name._name_parts explicitly so this builds the *bundle* path even
    # when called from subclasses.
    part_names = [key for key, _, _ in Name._name_parts]
    bundle_part = self._path_join(names=part_names,
                                  excludes=['source', 'format'],
                                  sep=self.NAME_PART_SEP)
    return os.path.join(self.source, bundle_part, *self._local_parts())
def sub_path(self):
    """The path of the partition source, excluding the bundle path parts.
    Includes the revision.

    :raises TypeError: when the local parts cannot be joined into a path.
    """
    try:
        return os.path.join(*(self._local_parts()))
    except TypeError as e:
        # str(e) instead of e.message: BaseException.message was removed in
        # Python 3, so the old code raised AttributeError while reporting.
        raise TypeError(
            "Path failed for partition {} : {}".format(self.name, str(e)))
def partital_dict(self, with_name=True):
    """Returns the name as a dict, but with only the items that are
    particular to a PartitionName."""
    base = self._dict(with_name=False)
    # Keep only keys that belong to a PartialPartitionName and are truthy.
    result = {key: base.get(key)
              for key, _, _ in PartialPartitionName._name_parts
              if base.get(key, False)}
    if 'format' in result and result['format'] == Name.DEFAULT_FORMAT:
        del result['format']
    result['name'] = self.name
    return result
def base62_encode(cls, num):
    """Encode a number in Base X.
    `num`: The number to encode
    `alphabet`: The alphabet to use for encoding
    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    if num == 0:
        return alphabet[0]
    base = len(alphabet)
    digits = []
    # Collect least-significant digits first, then reverse.
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
def base62_decode(cls, string):
    """Decode a Base X encoded string into the number.
    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding
    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    base = len(alphabet)
    num = 0
    # Horner's method: fold each digit into the running total.
    for char in string:
        try:
            num = num * base + alphabet.index(char)
        except ValueError:
            raise Base62DecodeError(
                "Failed to decode char: '{}'".format(char))
    return num
def increment(cls, v):
    """Increment the version number of an object number of object number string"""
    # Accept either a parsed ObjectNumber or its string form.
    on = v if isinstance(v, ObjectNumber) else ObjectNumber.parse(v)
    return on.rev(on.revision + 1)
def rev(self, i):
    """Return a clone with a different revision."""
    clone = copy(self)
    clone.revision = i
    return clone
def from_hex(cls, h, space, assignment_class='self'):
    """Produce a TopNumber, with a length to match the given assignment
    class, based on an input hex string.
    This can be used to create TopNumbers from a hash of a string.
    """
    from math import log
    # ln(N)/ln(16) gives how many hex digits carry as much entropy as the
    # base-62 number for this assignment class.
    hex_digits = int(
        round(log(62 ** TopNumber.DLEN.DATASET_CLASSES[assignment_class]) / log(16), 0))
    value = int(h[:hex_digits], 16)
    return TopNumber(space, value, assignment_class=assignment_class)
def from_string(cls, s, space):
    """Produce a TopNumber by hashing a string.

    :param s: text or bytes to hash.
    :param space: number space passed through to ``from_hex``.
    """
    import hashlib
    # sha1() requires bytes on Python 3; encode text input first.
    data = s if isinstance(s, bytes) else s.encode('utf-8')
    hs = hashlib.sha1(data).hexdigest()
    return cls.from_hex(hs, space)
def classify(cls, o):
    """Break an Identity name into parts, or describe the type of other
    forms.
    Break a name or object number into parts and classify them. Returns a named tuple
    that indicates which parts of input string are name components, object number and
    version number. Does not completely parse the name components.
    Also can handle Name, Identity and ObjectNumbers
    :param o: Input object to split
    """
    # from collections import namedtuple
    s = str(o)
    if o is None:
        raise ValueError("Input cannot be None")

    # Ad-hoc result holder; note 'name' is (redundantly) declared twice below.
    class IdentityParts(object):
        on = None
        name = None
        isa = None
        name = None
        vname = None
        sname = None
        name_parts = None
        version = None
        cache_key = None

    # namedtuple('IdentityParts', ['isa', 'name', 'name_parts','on','version', 'vspec'])
    ip = IdentityParts()
    if isinstance(o, (DatasetNumber, PartitionNumber)):
        # Already-parsed object number: nothing to split.
        ip.on = o
        ip.name = None
        ip.isa = type(ip.on)
        ip.name_parts = None
    elif isinstance(o, Name):
        ip.on = None
        ip.isa = type(o)
        ip.name = str(o)
        ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
    elif '/' in s:
        # A cache key
        ip.cache_key = s.strip()
        ip.isa = str
    elif cls.OBJECT_NUMBER_SEP in s:
        # Must be a fqname
        ip.name, on_s = s.strip().split(cls.OBJECT_NUMBER_SEP)
        ip.on = ObjectNumber.parse(on_s)
        ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
        ip.isa = type(ip.on)
    elif Name.NAME_PART_SEP in s:
        # Must be an sname or vname
        ip.name = s
        ip.on = None
        ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
        ip.isa = Name
    else:
        # Probably an Object Number in string form
        ip.name = None
        ip.name_parts = None
        ip.on = ObjectNumber.parse(s.strip())
        ip.isa = type(ip.on)
    if ip.name_parts:
        # The last name part may be a concrete version or a version spec.
        last = ip.name_parts[-1]
        try:
            ip.version = sv.Version(last)
            ip.vname = ip.name
        except ValueError:
            try:
                ip.version = sv.Spec(last)
                ip.vname = None  # Specs aren't vnames you can query
            except ValueError:
                pass
        if ip.version:
            # Strip the version component to produce the simple name.
            ip.name_parts.pop()
            ip.sname = Name.NAME_PART_SEP.join(ip.name_parts)
        else:
            ip.sname = ip.name
    return ip
def to_meta(self, md5=None, file=None):
    """Return a dictionary of metadata, for use in the Remote api."""
    size = None
    if not md5:
        if not file:
            raise ValueError('Must specify either file or md5')
        md5 = md5_for_file(file)
        size = os.stat(file).st_size
    meta = {
        'id': self.id_,
        'identity': json.dumps(self.dict),
        'name': self.sname,
        'fqname': self.fqname,
        'md5': md5,
    }
    # This causes errors with calculating the AWS signature
    meta['size'] = size
    return meta
def names_dict(self):
    """A dictionary with only the generated names, name, vname and fqname."""
    INCLUDE_KEYS = ['name', 'vname', 'vid']
    # .items() works on both Python 2 and 3; the six.iteritems shim is not needed.
    d = {k: v for k, v in self.dict.items() if k in INCLUDE_KEYS}
    d['fqname'] = self.fqname
    return d
def ident_dict(self):
    """A dictionary with only the items required to specify the identy,
    excluding the generated names, name, vname and fqname."""
    SKIP_KEYS = ['name', 'vname', 'fqname', 'vid', 'cache_key']
    # .items() works on both Python 2 and 3; the six.iteritems shim is not needed.
    return {k: v for k, v in self.dict.items() if k not in SKIP_KEYS}
def as_partition(self, partition=0, **kwargs):
    """Return a new PartitionIdentity based on this Identity.
    :param partition: Integer partition number for PartitionObjectNumber
    :param kwargs:
    """
    assert isinstance(self._name, Name), "Wrong type: {}".format(type(self._name))
    assert isinstance(self._on, DatasetNumber), "Wrong type: {}".format(type(self._on))
    partition_name = self._name.as_partition(**kwargs)
    partition_on = self._on.as_partition(partition)
    return PartitionIdentity(partition_name, partition_on)
def partition(self):
    """Convenience function for accessing the first partition in the
    partitions list, when there is only one."""
    partitions = self.partitions
    if not partitions:
        return None
    if len(partitions) > 1:
        raise ValueError(
            "Can't use this method when there is more than one partition")
    # Exactly one entry: unpack it directly.
    only, = partitions.values()
    return only
def rev(self, rev):
    """Return a new identity with the given revision"""
    ident = self.dict
    ident['revision'] = rev
    return self.from_dict(ident)
q37049 | Identity._info | train | def _info(self):
"""Returns an OrderedDict of information, for human display."""
d = OrderedDict()
d['vid'] = self.vid
d['sname'] = self.sname
d['vname'] = self.vname
return d | python | {
"resource": ""
} |
def from_dict(cls, d):
    """Like Identity.from_dict, but will cast the class type based on the
    format. i.e. if the format is hdf, return an HdfPartitionIdentity.
    :param d:
    :return:
    """
    name = PartitionIdentity._name_class(**d)
    if 'id' in d and 'revision' in d:
        # The vid should be constructed from the id and the revision
        on = (ObjectNumber.parse(d['id']).rev(d['revision']))
    elif 'vid' in d:
        on = ObjectNumber.parse(d['vid'])
    else:
        raise ValueError("Must have id and revision, or vid")
    try:
        return PartitionIdentity(name, on)
    except TypeError as e:
        # str(e) instead of e.message: BaseException.message was removed in
        # Python 3, so the old code raised AttributeError while reporting.
        raise TypeError(
            "Failed to make identity from \n{}\n: {}".format(d, str(e)))
def as_dataset(self):
    """Convert this identity to the identity of the corresponding
    dataset."""
    dataset_on = self.on.dataset
    # Carry the partition's revision over to the dataset number.
    dataset_on.revision = self.on.revision
    return Identity(Name(**self.name.dict), dataset_on)
def sleep(self):
    """Wait for the sleep time of the last response, to avoid being rate
    limited."""
    if not self.next_time:
        return
    remaining = self.next_time - time.time()
    if remaining > 0:
        time.sleep(remaining)
def root_sync(args, l, config):
    """Sync with the remote. For more options, use library sync

    :param args: parsed CLI arguments; uses ``args.all`` and ``args.refs``.
    :param l: the library to sync.
    :param config: unused here; kept for the CLI handler signature.
    """
    from requests.exceptions import ConnectionError
    all_remote_names = [ r.short_name for r in l.remotes ]
    # With --all, sync every configured remote; otherwise only the given refs.
    if args.all:
        remotes = all_remote_names
    else:
        remotes = args.refs
    prt("Sync with {} remotes or bundles ".format(len(remotes)))
    if not remotes:
        return
    for ref in remotes:
        l.commit()
        try:
            if ref in all_remote_names:  # It's a remote name
                l.sync_remote(l.remote(ref))
            else:  # It's a bundle reference
                l.checkin_remote_bundle(ref)
        except NotFoundError as e:
            # Best-effort: warn and keep syncing the remaining refs.
            warn(e)
            continue
        except ConnectionError as e:
            warn(e)
            continue
def _CaptureException(f, *args, **kwargs):
    """Decorator implementation for capturing exceptions.

    Runs *f*; on failure records the error state on the bundle and either
    wraps the exception in a LoggedException (when capture_exceptions is
    set) or re-raises it.
    """
    from ambry.dbexceptions import LoggedException

    b = args[0]  # The 'self' argument

    try:
        return f(*args, **kwargs)
    except Exception as e:
        # BUG FIX: a stray bare `raise` used to sit here, making all of the
        # error handling below unreachable dead code.
        try:
            b.set_error_state()
            b.commit()
        except Exception as e2:
            # NOTE(review): logs the original error `e`, not `e2` -- confirm
            # whether the secondary failure should be reported instead.
            b.log('Failed to set bundle error state: {}'.format(e))
            raise e
        if b.capture_exceptions:
            b.logged_exception(e)
            raise LoggedException(e, b)
        else:
            b.exception(e)
            raise
def clear_file_systems(self):
    """Remove references to build and source file systems, reverting to the defaults"""
    library_config = self.dataset.config.library
    self._source_url = None
    library_config.source.url = None
    self._source_fs = None
    self._build_url = None
    library_config.build.url = None
    self._build_fs = None
    # Persist the cleared configuration.
    self.dataset.commit()
def cast_to_subclass(self):
    """
    Load the bundle file from the database to get the derived bundle class,
    then return a new bundle built on that class
    :return:
    """
    self.import_lib()
    self.load_requirements()
    try:
        self.commit()  # To ensure the rollback() doesn't clear out anything important
        bsf = self.build_source_files.file(File.BSFILE.BUILD)
    except Exception as e:
        self.log('Error trying to create a bundle source file ... {} '.format(e))
        raise
        # NOTE(review): the two statements below are unreachable after the
        # bare `raise` above -- confirm whether rollback/return were meant
        # to run instead of re-raising.
        self.rollback()
        return self
    try:
        clz = bsf.import_bundle()
    except Exception as e:
        raise BundleError('Failed to load bundle code file, skipping : {}'.format(e))
    # Rebuild the bundle on the derived class, carrying over run flags.
    b = clz(self._dataset, self._library, self._source_url, self._build_url)
    b.limited_run = self.limited_run
    b.capture_exceptions = self.capture_exceptions
    b.multi = self.multi
    return b
def load_requirements(self):
    """If there are python library requirements set, append the python dir
    to the path."""
    for module_name, pip_name in iteritems(self.metadata.requirements):
        extant_url = self.dataset.config.requirements[module_name].url
        # Reinstall only when the recorded URL changed.
        self._library.install_packages(
            module_name, pip_name, force=(extant_url and extant_url != pip_name))
        self.dataset.config.requirements[module_name].url = pip_name
    sys.path.append(self._library.filesystem.python())
def dep(self, source_name):
    """Return a bundle dependency from the sources list
    :param source_name: Source name. The URL field must be a bundle or partition reference
    :return:
    """
    from ambry.orm.exc import NotFoundError
    from ambry.dbexceptions import ConfigurationError
    source = self.source(source_name)
    ref = source.url
    if not ref:
        raise ValueError("Got an empty ref for source '{}' ".format(source.name))
    try:
        try:
            # Prefer resolving the reference as a partition.
            p = self.library.partition(ref)
        except NotFoundError:
            # Not installed locally: try to fetch the bundle from a remote.
            self.warn("Partition reference {} not found, try to download it".format(ref))
            remote, vname = self.library.find_remote_bundle(ref, try_harder=True)
            if remote:
                self.warn("Installing {} from {}".format(remote, vname))
                self.library.checkin_remote_bundle(vname, remote)
                p = self.library.partition(ref)
            else:
                raise
        if not p.is_local:
            # Pull the partition data down before handing it to the caller.
            with self.progress.start('test', 0, message='localizing') as ps:
                p.localize(ps)
        return p
    except NotFoundError:
        # Fall back to treating the reference as a whole bundle.
        return self.library.bundle(ref)
def documentation(self):
    """Return the documentation, from the documentation.md file, with template substitutions"""
    # Return the documentation as a scalar term, which has .text() and .html methods to do
    # metadata substitution using Jinja
    parts = []
    rc = self.build_source_files.documentation.record_content
    if rc:
        parts.append(rc)
    for k, v in self.metadata.documentation.items():
        if v:
            parts.append('\n### {}\n{}'.format(k.title(), v))
    return self.metadata.scalar_term(''.join(parts))
def progress(self):
    """Returned a cached ProcessLogger to record build progress """
    if not self._progress:
        # If won't be building, only use one connection
        use_new_connection = not self._library.read_only
        self._progress = ProcessLogger(self.dataset, self.logger,
                                       new_connection=use_new_connection)
    return self._progress
def partition(self, ref=None, **kwargs):
    """Return a partition in this bundle for a vid reference or name parts

    :param ref: a name, vname, vid or id to match exactly against each partition.
    :param kwargs: name-part keywords used to build a PartitionNameQuery.
    :raises NotFoundError: when no partition matches.
    """
    from ambry.orm.exc import NotFoundError
    from sqlalchemy.orm.exc import NoResultFound
    if not ref and not kwargs:
        return None
    if ref:
        # Linear scan over this bundle's partitions, matching any identifier form.
        for p in self.partitions:
            if ref == p.name or ref == p.vname or ref == p.vid or ref == p.id:
                p._bundle = self
                return p
        raise NotFoundError("No partition found for '{}' (a)".format(ref))
    elif kwargs:
        from ..identity import PartitionNameQuery
        pnq = PartitionNameQuery(**kwargs)
        try:
            p = self.partitions._find_orm(pnq).one()
            if p:
                p._bundle = self
                return p
        except NoResultFound:
            raise NotFoundError("No partition found for '{}' (b)".format(kwargs))
q37062 | Bundle.partition_by_vid | train | def partition_by_vid(self, ref):
"""A much faster way to get partitions, by vid only"""
from ambry.orm import Partition
p = self.session.query(Partition).filter(Partition.vid == str(ref)).first()
if p:
return self.wrap_partition(p)
else:
return None | python | {
"resource": ""
} |
q37063 | Bundle.sources | train | def sources(self):
"""Iterate over downloadable sources"""
def set_bundle(s):
s._bundle = self
return s
return list(set_bundle(s) for s in self.dataset.sources) | python | {
"resource": ""
} |
q37064 | Bundle._resolve_sources | train | def _resolve_sources(self, sources, tables, stage=None, predicate=None):
"""
Determine what sources to run from an input of sources and tables
:param sources: A collection of source objects, source names, or source vids
:param tables: A collection of table names
:param stage: If not None, select only sources from this stage
:param predicate: If not none, a callable that selects a source to return when True
:return:
"""
# sources and tables are mutually exclusive selectors.
assert sources is None or tables is None
if not sources:
if tables:
sources = list(s for s in self.sources if s.dest_table_name in tables)
else:
sources = self.sources
elif not isinstance(sources, (list, tuple)):
# A single source object or name was passed; normalize to a list.
sources = [sources]
# Convert any source names to source objects.
def objectify(source):
if isinstance(source, basestring):
source_name = source
return self.source(source_name)
else:
return source
sources = [objectify(s) for s in sources]
if predicate:
sources = [s for s in sources if predicate(s)]
if stage:
# Stages may be stored as ints or strings; compare their string forms.
sources = [s for s in sources if str(s.stage) == str(stage)]
return sources | python | {
"resource": ""
} |
q37065 | Bundle.build_source_files | train | def build_source_files(self):
"""Return acessors to the build files"""
from .files import BuildSourceFileAccessor
return BuildSourceFileAccessor(self, self.dataset, self.source_fs) | python | {
"resource": ""
} |
q37066 | Bundle.build_partition_fs | train | def build_partition_fs(self):
"""Return a pyfilesystem subdirectory for the build directory for the bundle. This is the sub-directory
of the build FS that holds the compiled SQLite file and the partition data files"""
base_path = os.path.dirname(self.identity.cache_key)
# Create the directory tree lazily on first access.
if not self.build_fs.exists(base_path):
self.build_fs.makedir(base_path, recursive=True, allow_recreate=True)
return self.build_fs.opendir(base_path) | python | {
"resource": ""
} |
q37067 | Bundle.build_ingest_fs | train | def build_ingest_fs(self):
"""Return a pyfilesystem subdirectory for the ingested source files"""
base_path = 'ingest'
if not self.build_fs.exists(base_path):
self.build_fs.makedir(base_path, recursive=True, allow_recreate=True)
return self.build_fs.opendir(base_path) | python | {
"resource": ""
} |
q37068 | Bundle.logger | train | def logger(self):
"""The bundle logger, created lazily and cached on first access."""
if not self._logger:
ident = self.identity
# Multi-process runs include the pid in each log line so output can be
# attributed to a worker.
if self.multi:
template = '%(levelname)s %(process)d {} %(message)s'.format(ident.vid)
else:
template = '%(levelname)s {} %(message)s'.format(ident.vid)
try:
file_name = self.build_fs.getsyspath(self.log_file)
self._logger = get_logger(__name__, template=template, stream=sys.stdout, file_name=file_name)
except NoSysPathError:
# file does not exists in the os - memory fs for example.
self._logger = get_logger(__name__, template=template, stream=sys.stdout)
self._logger.setLevel(self._log_level)
return self._logger | python | {
"resource": ""
} |
q37069 | Bundle.log_to_file | train | def log_to_file(self, message):
"""Write a log message only to the file"""
with self.build_fs.open(self.log_file, 'a+') as f:
f.write(unicode(message + '\n')) | python | {
"resource": ""
} |
q37070 | Bundle.logged_exception | train | def logged_exception(self, e):
"""Record the exception, but don't log it; it's already been logged
:param e: Exception to log.
"""
if str(e) not in self._errors:
self._errors.append(str(e))
self.set_error_state()
self.buildstate.state.exception_type = str(e.__class__.__name__)
self.buildstate.state.exception = str(e) | python | {
"resource": ""
} |
q37071 | Bundle.fatal | train | def fatal(self, message):
"""Log a fatal messsage and exit.
:param message: Log message.
"""
self.logger.fatal(message)
sys.stderr.flush()
if self.exit_on_fatal:
sys.exit(1)
else:
raise FatalError(message) | python | {
"resource": ""
} |
q37072 | Bundle.log_pipeline | train | def log_pipeline(self, pl):
"""Write a report of the pipeline out to a file """
from datetime import datetime
from ambry.etl.pipeline import CastColumns
self.build_fs.makedir('pipeline', allow_recreate=True)
try:
ccp = pl[CastColumns]
caster_code = ccp.pretty_code
except Exception as e:
caster_code = str(e)
templ = u("""
Pipeline : {}
run time : {}
phase : {}
source name : {}
source table : {}
dest table : {}
========================================================
{}
Pipeline Headers
================
{}
Caster Code
===========
{}
""")
try:
v = templ.format(pl.name, str(datetime.now()), pl.phase, pl.source_name, pl.source_table,
pl.dest_table, unicode(pl), pl.headers_report(), caster_code)
except UnicodeError as e:
v = ''
self.error('Faled to write pipeline log for pipeline {} '.format(pl.name))
path = os.path.join('pipeline', pl.phase + '-' + pl.file_name + '.txt')
self.build_fs.makedir(os.path.dirname(path), allow_recreate=True, recursive=True)
# LazyFS should handled differently because of:
# TypeError: lazy_fs.setcontents(..., encoding='utf-8') got an unexpected keyword argument 'encoding'
if isinstance(self.build_fs, LazyFS):
self.build_fs.wrapped_fs.setcontents(path, v, encoding='utf8')
else:
self.build_fs.setcontents(path, v, encoding='utf8') | python | {
"resource": ""
} |
q37073 | Bundle.pipeline | train | def pipeline(self, source=None, phase='build', ps=None):
"""
Construct the ETL pipeline for all phases. Segments that are not used for the current phase
are filtered out later.
:param source: A source object, or a source string name
:return: an etl Pipeline
"""
from ambry.etl.pipeline import Pipeline, PartitionWriter
from ambry.dbexceptions import ConfigurationError
if source:
source = self.source(source) if isinstance(source, string_types) else source
else:
source = None
sf, sp = self.source_pipe(source, ps) if source else (None, None)
pl = Pipeline(self, source=sp)
# Get the default pipeline, from the config at the head of this file.
try:
phase_config = self.default_pipelines[phase]
except KeyError:
phase_config = None # Ok for non-conventional pipe names
if phase_config:
pl.configure(phase_config)
# Find the pipe configuration, from the metadata
pipe_config = None
pipe_name = None
if source and source.pipeline:
pipe_name = source.pipeline
try:
pipe_config = self.metadata.pipelines[pipe_name]
except KeyError:
raise ConfigurationError("Pipeline '{}' declared in source '{}', but not found in metadata"
.format(source.pipeline, source.name))
else:
pipe_name, pipe_config = self._find_pipeline(source, phase)
if pipe_name:
pl.name = pipe_name
else:
pl.name = phase
pl.phase = phase
# The pipe_config can either be a list, in which case it is a list of pipe pipes for the
# augment segment or it could be a dict, in which case each is a list of pipes
# for the named segments.
def apply_config(pl, pipe_config):
if isinstance(pipe_config, (list, tuple)):
# Just convert it to dict form for the next section
# PartitionWriters are always moved to the 'store' section
store, body = [], []
for pipe in pipe_config:
store.append(pipe) if isinstance(pipe, PartitionWriter) else body.append(pipe)
pipe_config = dict(body=body, store=store)
if pipe_config:
pl.configure(pipe_config)
apply_config(pl, pipe_config)
# One more time, for the configuration for 'all' phases
if 'all' in self.metadata.pipelines:
apply_config(pl, self.metadata.pipelines['all'])
# Allows developer to over ride pipe configuration in code
self.edit_pipeline(pl)
try:
pl.dest_table = source.dest_table_name
pl.source_table = source.source_table.name
pl.source_name = source.name
except AttributeError:
pl.dest_table = None
return pl | python | {
"resource": ""
} |
q37074 | Bundle.field_row | train | def field_row(self, fields):
"""
Return a list of values to match the fields values. This is used when listing bundles to
produce a table of information about the bundle.
:param fields: A list of names of data items.
:return: A list of values, in the same order as the fields input
The names in the fields llist can be:
- state: The current build state
- source_fs: The URL of the build source filesystem
- about.*: Any of the metadata fields in the about section
"""
row = self.dataset.row(fields)
# Modify for special fields
for i, f in enumerate(fields):
if f == 'bstate':
row[i] = self.state
elif f == 'dstate':
row[i] = self.dstate
elif f == 'source_fs':
row[i] = self.source_fs
elif f.startswith('about'): # all metadata in the about section, ie: about.title
_, key = f.split('.')
row[i] = self.metadata.about[key]
elif f.startswith('state'):
_, key = f.split('.')
row[i] = self.buildstate.state[key]
elif f.startswith('count'):
_, key = f.split('.')
if key == 'sources':
row[i] = len(self.dataset.sources)
elif key == 'tables':
row[i] = len(self.dataset.tables)
return row | python | {
"resource": ""
} |
q37075 | Bundle.source_pipe | train | def source_pipe(self, source, ps=None):
"""Create a source pipe for a source, giving it access to download files to the local cache"""
if isinstance(source, string_types):
source = self.source(source)
source.dataset = self.dataset
source._bundle = self
iter_source, source_pipe = self._iterable_source(source, ps)
if self.limited_run:
source_pipe.limit = 500
return iter_source, source_pipe | python | {
"resource": ""
} |
q37076 | Bundle.error_state | train | def error_state(self):
"""Update the last-activity timestamp and return the current error flag."""
self.buildstate.state.lasttime = time()
self.buildstate.commit()
return self.buildstate.state.error | python | {
"resource": ""
} |
q37077 | Bundle.state | train | def state(self, state):
"""Set the current build state and record the time to maintain history.
Note! This is different from the dataset state. Setting the build state is committed to the
progress table/database immediately. The dstate is also set, but is not committed until the
bundle is committed. So, the dstate changes more slowly.
"""
assert state != 'build_bundle'
self.buildstate.state.current = state
# Record both a per-state timestamp and the overall last-activity time.
self.buildstate.state[state] = time()
self.buildstate.state.lasttime = time()
# Entering any state clears a prior error condition.
self.buildstate.state.error = False
self.buildstate.state.exception = None
self.buildstate.state.exception_type = None
self.buildstate.commit()
# Only a subset of build states is mirrored into the dataset state;
# CLEANED is reported as NEW.
if state in (self.STATES.NEW, self.STATES.CLEANED, self.STATES.BUILT, self.STATES.FINALIZED,
self.STATES.SOURCE):
state = state if state != self.STATES.CLEANED else self.STATES.NEW
self.dstate = state | python | {
"resource": ""
} |
q37078 | Bundle.record_stage_state | train | def record_stage_state(self, phase, stage):
"""Record the completion times of phases and stages"""
key = '{}-{}'.format(phase, stage if stage else 1)
self.buildstate.state[key] = time() | python | {
"resource": ""
} |
q37079 | Bundle.set_last_access | train | def set_last_access(self, tag):
"""Mark the time that this bundle was last accessed"""
import time
# time defeats check that value didn't change
self.buildstate.access.last = '{}-{}'.format(tag, time.time())
self.buildstate.commit() | python | {
"resource": ""
} |
q37080 | Bundle.sync_in | train | def sync_in(self, force=False):
"""Synchronize from files to records, and records to objects"""
self.log('---- Sync In ----')
self.dstate = self.STATES.BUILDING
for path_name in self.source_fs.listdir():
f = self.build_source_files.instance_from_name(path_name)
if not f:
self.warn('Ignoring unknown file: {}'.format(path_name))
continue
if f and f.exists and (f.fs_is_newer or force):
self.log('Sync: {}'.format(f.record.path))
f.fs_to_record()
f.record_to_objects()
self.commit()
self.library.search.index_bundle(self, force=True) | python | {
"resource": ""
} |
q37081 | Bundle.sync_out | train | def sync_out(self, file_name=None, force=False):
"""Synchronize from objects to records"""
self.log('---- Sync Out ----')
from ambry.bundle.files import BuildSourceFile
self.dstate = self.STATES.BUILDING
for f in self.build_source_files.list_records():
if (f.sync_dir() == BuildSourceFile.SYNC_DIR.RECORD_TO_FILE or f.record.path == file_name) or force:
self.log('Sync: {}'.format(f.record.path))
f.record_to_fs()
self.commit() | python | {
"resource": ""
} |
q37082 | Bundle.sync_objects_in | train | def sync_objects_in(self):
"""Synchronize from records to objects"""
self.dstate = self.STATES.BUILDING
self.build_source_files.record_to_objects() | python | {
"resource": ""
} |
q37083 | Bundle.sync_objects_out | train | def sync_objects_out(self, force=False):
"""Synchronize from objects to records, and records to files"""
self.log('---- Sync Objects Out ----')
from ambry.bundle.files import BuildSourceFile
self.dstate = self.STATES.BUILDING
for f in self.build_source_files.list_records():
self.log('Sync: {}'.format(f.record.path))
f.objects_to_record()
self.commit() | python | {
"resource": ""
} |
q37084 | Bundle.sync_sources | train | def sync_sources(self, force=False):
"""Sync in only the sources.csv file"""
from ambry.orm.file import File
self.dstate = self.STATES.BUILDING
synced = 0
for fc in [File.BSFILE.SOURCES]:
bsf = self.build_source_files.file(fc)
if bsf.fs_is_newer or force:
self.log('Syncing {}'.format(bsf.file_name))
bsf.fs_to_objects()
synced += 1
return synced | python | {
"resource": ""
} |
q37085 | Bundle.update_schema | train | def update_schema(self):
"""Propagate schema object changes to file records"""
self.commit()
self.build_source_files.schema.objects_to_record()
self.commit() | python | {
"resource": ""
} |
q37086 | Bundle.clean | train | def clean(self, force=False):
"""Clean generated objects from the dataset, but only if there are File contents
to regenerate them"""
if self.is_finalized and not force:
self.warn("Can't clean; bundle is finalized")
return False
self.log('---- Cleaning ----')
self.state = self.STATES.CLEANING
self.dstate = self.STATES.BUILDING
self.commit()
self.clean_sources()
self.clean_tables()
self.clean_partitions()
self.clean_build()
self.clean_files()
self.clean_ingested()
self.clean_build_state()
self.clean_progress()
self.state = self.STATES.CLEANED
self.commit()
return True | python | {
"resource": ""
} |
q37087 | Bundle.clean_except_files | train | def clean_except_files(self):
"""Clean everything except the build source files"""
if self.is_finalized:
self.warn("Can't clean; bundle is finalized")
return False
self.log('---- Cleaning ----')
self.state = self.STATES.CLEANING
self.commit()
self.clean_sources()
self.clean_tables()
self.clean_partitions()
self.clean_build()
self.clean_ingested()
self.clean_build_state()
self.state = self.STATES.CLEANED
self.commit()
self.log('---- Done Cleaning ----')
return True | python | {
"resource": ""
} |
q37088 | Bundle.clean_sources | train | def clean_sources(self):
"""Like clean, but also clears out files. """
for src in self.dataset.sources:
src.st_id = None
src.t_id = None
self.dataset.sources[:] = []
self.dataset.source_tables[:] = []
self.dataset.st_sequence_id = 1 | python | {
"resource": ""
} |
q37089 | Bundle.clean_partitions | train | def clean_partitions(self):
"""Delete partition records and any built partition files. """
import shutil
from ambry.orm import ColumnStat
# FIXME. There is a problem with the cascades for ColumnStats that prevents them from
# being deleted with the partitions. Probably, they are seen to be owed by the columns instead.
self.session.query(ColumnStat).filter(ColumnStat.d_vid == self.dataset.vid).delete()
self.dataset.delete_partitions()
for s in self.sources:
s.state = None
if self.build_partition_fs.exists:
try:
shutil.rmtree(self.build_partition_fs.getsyspath('/'))
except NoSysPathError:
pass | python | {
"resource": ""
} |
q37090 | Bundle.clean_build | train | def clean_build(self):
"""Delete the build directory and all ingested files """
import shutil
if self.build_fs.exists:
try:
shutil.rmtree(self.build_fs.getsyspath('/'))
except NoSysPathError:
pass | python | {
"resource": ""
} |
q37091 | Bundle.clean_ingested | train | def clean_ingested(self):
""""Clean ingested files"""
for s in self.sources:
df = s.datafile
if df.exists and not s.is_partition:
df.remove()
s.state = s.STATES.NEW
self.commit() | python | {
"resource": ""
} |
q37092 | Bundle.clean_process_meta | train | def clean_process_meta(self):
"""Remove all process and build metadata"""
ds = self.dataset
ds.config.build.clean()
ds.config.process.clean()
ds.commit()
self.state = self.STATES.CLEANED | python | {
"resource": ""
} |
q37093 | Bundle.clean_source_files | train | def clean_source_files(self):
"""Remove the schema.csv and source_schema.csv files"""
self.build_source_files.file(File.BSFILE.SOURCESCHEMA).remove()
self.build_source_files.file(File.BSFILE.SCHEMA).remove()
self.commit() | python | {
"resource": ""
} |
q37094 | Bundle.ingest | train | def ingest(self, sources=None, tables=None, stage=None, force=False, load_meta=False):
"""Ingest a set of sources, specified as source objects, source names, or destination tables.
If no stage is specified, execute the sources in groups by stage.
Note, however, that when this is called from run_stage, all of the sources have the same stage, so they
get grouped together. The result it that the stage in the inner loop is the same as the stage being
run buy run_stage.
"""
from itertools import groupby
from ambry.bundle.events import TAG
from fs.errors import ResourceNotFoundError
import zlib
self.log('---- Ingesting ----')
self.dstate = self.STATES.BUILDING
self.commit() # WTF? Without this, postgres blocks between table query, and update seq id in source tables.
key = lambda s: s.stage if s.stage else 1
def not_final_or_delete(s):
import zlib
if force:
return True
try:
return s.is_processable and not s.is_ingested and not s.is_built
except (IOError, zlib.error):
s.local_datafile.remove()
return True
sources = sorted(self._resolve_sources(sources, tables, stage, predicate=not_final_or_delete),
key=key)
if not sources:
self.log('No sources left to ingest')
return
self.state = self.STATES.INGESTING
count = 0
errors = 0
self._run_events(TAG.BEFORE_INGEST, 0)
# Clear out all ingested files that are malformed
for s in self.sources:
if s.is_downloadable:
df = s.datafile
try:
info = df.info
df.close()
except (ResourceNotFoundError, zlib.error, IOError):
df.remove()
for stage, g in groupby(sources, key):
sources = [s for s in g if not_final_or_delete(s)]
if not len(sources):
continue
self._run_events(TAG.BEFORE_INGEST, stage)
stage_errors = self._ingest_sources(sources, stage, force=force)
errors += stage_errors
count += len(sources) - stage_errors
self._run_events(TAG.AFTER_INGEST, stage)
self.record_stage_state(self.STATES.INGESTING, stage)
self.state = self.STATES.INGESTED
try:
pass
finally:
self._run_events(TAG.AFTER_INGEST, 0)
self.log('Ingested {} sources'.format(count))
if load_meta:
if len(sources) == 1:
iterable_source, source_pipe = self.source_pipe(sources[0])
try:
meta = iterable_source.meta
if meta:
self.metadata.about.title = meta['title']
self.metadata.about.summary = meta['summary']
self.build_source_files.bundle_meta.objects_to_record()
except AttributeError as e:
self.warn("Failed to set metadata: {}".format(e))
pass
else:
self.warn("Didn't not load meta from source. Must have exactly one soruce, got {}".format(len(sources)))
self.commit()
if errors == 0:
return True
else:
return False | python | {
"resource": ""
} |
q37095 | Bundle._ingest_sources | train | def _ingest_sources(self, sources, stage, force=False):
"""Ingest a set of sources, usually for one stage"""
from concurrent import ingest_mp
self.state = self.STATES.INGESTING
downloadable_sources = [s for s in sources if force or
(s.is_processable and not s.is_ingested and not s.is_built)]
errors = 0
with self.progress.start('ingest', stage,
message='Ingesting ' + ('MP' if self.multi else 'SP'),
item_total=len(sources), item_type='source',
item_count=len(downloadable_sources)
) as ps:
# Create all of the source tables first, so we can't get contention for creating them
# in MP.
for source in sources:
_ = source.source_table
if self.multi:
args = [(self.identity.vid, stage, source.vid, force) for source in downloadable_sources]
pool = self.library.process_pool(limited_run=self.limited_run)
try:
# The '1' for chunksize ensures that the subprocess only gets one
# source to build. Combined with maxchildspertask = 1 in the pool,
# each process will only handle one source before exiting.
result = pool.map_async(ingest_mp, args, 1)
pool.close()
pool.join()
except KeyboardInterrupt:
self.log('Got keyboard interrrupt; terminating workers')
pool.terminate()
raise
else:
for i, source in enumerate(downloadable_sources, 1):
ps.add(
message='Ingesting source #{}, {}'.format(i, source.name),
source=source, state='running')
r = self._ingest_source(source, ps, force)
if not r:
errors += 1
if errors > 0:
from ambry.dbexceptions import IngestionError
raise IngestionError('Failed to ingest {} sources'.format(errors))
return errors | python | {
"resource": ""
} |
q37096 | Bundle.source_schema | train | def source_schema(self, sources=None, tables=None, clean=False):
"""Process a collection of ingested sources to make source tables. """
sources = self._resolve_sources(sources, tables, None,
predicate=lambda s: s.is_processable)
for source in sources:
source.update_table()
self.log("Creating source schema for '{}': Table {}, {} columns"
.format(source.name, source.source_table.name, len(source.source_table.columns)))
self.commit() | python | {
"resource": ""
} |
q37097 | Bundle.schema | train | def schema(self, sources=None, tables=None, clean=False, force=False, use_pipeline=False):
"""
Generate destination schemas.
:param sources: If specified, build only destination tables for these sources
:param tables: If specified, build only these tables
:param clean: Delete tables and partitions first
:param force: Population tables even if the table isn't empty
:param use_pipeline: If True, use the build pipeline to determine columns. If False,
:return: True on success.
"""
from itertools import groupby
from operator import attrgetter
from ambry.etl import Collect, Head
from ambry.orm.exc import NotFoundError
self.dstate = self.STATES.BUILDING
self.commit() # Workaround for https://github.com/CivicKnowledge/ambry/issues/171
self.log('---- Schema ----')
resolved_sources = self._resolve_sources(sources, tables, predicate=lambda s: s.is_processable)
if clean:
self.dataset.delete_tables_partitions()
self.commit()
# Group the sources by the destination table name
keyfunc = attrgetter('dest_table')
for t, table_sources in groupby(sorted(resolved_sources, key=keyfunc), keyfunc):
if use_pipeline:
for source in table_sources:
pl = self.pipeline(source)
pl.cast = [ambry.etl.CastSourceColumns]
pl.select_partition = []
pl.write = [Head, Collect]
pl.final = []
self.log_pipeline(pl)
pl.run()
pl.phase = 'build_schema'
self.log_pipeline(pl)
for h, c in zip(pl.write[Collect].headers, pl.write[Collect].rows[1]):
c = t.add_column(name=h, datatype=type(c).__name__ if c is not None else 'str',
update_existing=True)
self.log("Populated destination table '{}' from pipeline '{}'"
.format(t.name, pl.name))
else:
# Get all of the header names, for each source, associating the header position in the table
# with the header, then sort on the postition. This will produce a stream of header names
# that may have duplicates, but which is generally in the order the headers appear in the
# sources. The duplicates are properly handled when we add the columns in add_column()
self.commit()
def source_cols(source):
if source.is_partition and not source.source_table_exists:
return enumerate(source.partition.table.columns)
else:
return enumerate(source.source_table.columns)
columns = sorted(set([(i, col.dest_header, col.datatype, col.description, col.has_codes)
for source in table_sources for i, col in source_cols(source)]))
initial_count = len(t.columns)
for pos, name, datatype, desc, has_codes in columns:
kwds = dict(
name=name,
datatype=datatype,
description=desc,
update_existing=True
)
try:
extant = t.column(name)
except NotFoundError:
extant = None
if extant is None or not extant.description:
kwds['description'] = desc
c = t.add_column(**kwds)
final_count = len(t.columns)
if final_count > initial_count:
diff = final_count - initial_count
self.log("Populated destination table '{}' from source table '{}' with {} columns"
.format(t.name, source.source_table.name, diff))
self.commit()
return True | python | {
"resource": ""
} |
q37098 | Bundle._reset_build | train | def _reset_build(self, sources):
"""Remove partition datafiles and reset the datafiles to the INGESTED state"""
from ambry.orm.exc import NotFoundError
for p in self.dataset.partitions:
if p.type == p.TYPE.SEGMENT:
self.log("Removing old segment partition: {}".format(p.identity.name))
try:
self.wrap_partition(p).local_datafile.remove()
self.session.delete(p)
except NotFoundError:
pass
for s in sources:
# Don't delete partitions fro mother bundles!
if s.reftype == 'partition':
continue
p = s.partition
if p:
try:
self.wrap_partition(p).local_datafile.remove()
self.session.delete(p)
except NotFoundError:
pass
if s.state in (self.STATES.BUILDING, self.STATES.BUILT):
s.state = self.STATES.INGESTED
self.commit() | python | {
"resource": ""
} |
q37099 | Bundle.build_table | train | def build_table(self, table, force=False):
"""Build all of the sources for a table """
sources = self._resolve_sources(None, [table])
for source in sources:
self.build_source(None, source, force=force)
self.unify_partitions() | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.