_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def update_id(self, sequence_id=None):
    """Alter the sequence id, and all of the names and ids derived from it. This
    often needs to be done after an IntegrityError in a multiprocessing run

    :param sequence_id: new sequence id; when falsy, the current one is kept.
    """
    if sequence_id:
        self.sequence_id = sequence_id

    # Recompute the derived ids even when the sequence id is unchanged.
    self._set_ids(force=True)

    # Derived names depend on the dataset identity, so they can only be
    # rebuilt once a dataset is attached.
    if self.dataset:
        self._update_names()
def _update_names(self):
    """Update the derived names"""
    # Name components that distinguish this partition within the dataset.
    d = dict(
        table=self.table_name,
        time=self.time,
        space=self.space,
        grain=self.grain,
        variant=self.variant,
        segment=self.segment
    )

    assert self.dataset

    # Promote the partial name with the owning dataset's identity to get the
    # fully-qualified partition name.
    name = PartialPartitionName(**d).promote(self.dataset.identity.name)

    self.name = str(name.name)
    self.vname = str(name.vname)
    self.cache_key = name.cache_key
    self.fqname = str(self.identity.fqname)
def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
    """Return the partition as a Pandas dataframe

    :param predicate: If defined, a callable that is called for each row, and if it returns true, the
        row is included in the output.
    :param filtered_columns: If defined, the value is a dict of column names and
        associated values. Only rows where all of the named columns have the given values will be returned.
        Setting the argument will overwrite any value set for the predicate
    :param columns: A list or tuple of column names to return
    :param df_class: Dataframe class to construct; defaults to AmbryDataFrame.
    :return: Pandas dataframe
    """
    from operator import itemgetter
    from ambry.pands import AmbryDataFrame

    df_class = df_class or AmbryDataFrame

    if columns:
        ig = itemgetter(*columns)
    else:
        ig = None
        columns = self.table.header

    if filtered_columns:
        # Fix: build the predicate as a closure rather than eval()-ing
        # generated source. Equivalent to testing row.<k> == v for every
        # (k, v) pair, without the code-injection risk of interpolating
        # arbitrary values into Python source.
        def predicate(row, _fc=filtered_columns):
            return all(getattr(row, k) == v for k, v in _fc.items())

    if predicate:
        def yielder():
            for row in self.reader:
                if predicate(row):
                    if ig:
                        yield ig(row)
                    else:
                        yield row.dict

        df = df_class(yielder(), columns=columns, partition=self.measuredim)
        return df
    else:
        def yielder():
            for row in self.reader:
                yield row.values()

        # Put column names in header order
        columns = [c for c in self.table.header if c in columns]
        return df_class(yielder(), columns=columns, partition=self.measuredim)
def patches(self, basemap, simplify=None, predicate=None, args_f=None, **kwargs):
    """
    Return geodata as a list of Matplotlib patches
    :param basemap: A mpl_toolkits.basemap.Basemap
    :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
    :param predicate: A single-argument function to select which records to include in the output.
    :param args_f: A function that takes a row and returns a dict of additional args for the Patch constructor
    :param kwargs: Additional args to be passed to the descartes Path constructor
    :return: A list of patch objects
    """
    from descartes import PolygonPatch
    from shapely.wkt import loads
    from shapely.ops import transform

    if not predicate:
        # Default: include every row.
        predicate = lambda row: True

    def map_xform(x, y, z=None):
        # Project coordinates through the basemap; a z value is accepted
        # (shapely's transform may pass one) but ignored.
        return basemap(x, y)

    def make_patch(shape, row):
        # Static kwargs overlaid with row-specific args from args_f, if given.
        args = dict(kwargs.items())
        if args_f:
            args.update(args_f(row))
        return PolygonPatch(transform(map_xform, shape), **args)

    def yield_patches(row):
        # Parse (and optionally simplify) the WKT geometry, expanding
        # MultiPolygons into one patch per sub-polygon.
        if simplify:
            shape = loads(row.geometry).simplify(simplify)
        else:
            shape = loads(row.geometry)

        if shape.geom_type == 'MultiPolygon':
            for subshape in shape.geoms:
                yield make_patch(subshape, row)
        else:
            yield make_patch(shape, row)

    return [patch for row in self if predicate(row)
            for patch in yield_patches(row)]
def measures(self):
    """Return the partition columns that play the measure role."""
    from ambry.valuetype.core import ROLE
    return [column for column in self.columns if column.role == ROLE.MEASURE]
def measure(self, vid):
    """Return a measure, given its vid or another reference

    :param vid: a PartitionColumn, an orm Column, or any reference (vid, name)
        understood by ``self.table.column``.
    :return: PartitionColumn
    """
    from ambry.orm import Column

    if isinstance(vid, PartitionColumn):
        # Already wrapped; return as-is.
        return vid
    elif isinstance(vid, Column):
        # Wrap a bare orm column.
        return PartitionColumn(vid)
    else:
        # Resolve the reference through the table, binding this partition.
        return PartitionColumn(self.table.column(vid), self)
def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=set()):
    """
    Return a dict that describes the combination of one or two dimensions, for a plot
    :param p_dim: primary dimension column.
    :param s_dim: optional secondary dimension column.
    :param dimensions: candidate dimensions used for filters; defaults to
        the partition's primary dimensions.
    :param extant: set of dimension-combination keys already produced.
        NOTE(review): mutable default argument -- the set persists across
        calls, which appears intentional for de-duplication; confirm.
    :return: plot-description dict, or None when the combination is skipped
        (duplicate key, p_dim == s_dim, or geographic secondary dimension).
    """
    if not dimensions:
        dimensions = self.primary_dimensions

    key = p_dim.name
    if s_dim:
        key += '/' + s_dim.name

    # Ignore if the key already exists or the primary and secondary dims are the same
    if key in extant or p_dim == s_dim:
        return

    # Don't allow geography to be a secondary dimension. It must either be a primary dimension
    # ( to make a map ) or a filter, or a small-multiple
    if s_dim and s_dim.valuetype_class.is_geo():
        return

    extant.add(key)

    # Every remaining dimension becomes a filter, mapped to its distinct values.
    filtered = {}
    for d in dimensions:
        if d != p_dim and d != s_dim:
            filtered[d.name] = d.pstats.uvalues.keys()

    # Chart type follows the primary dimension's value type.
    if p_dim.valuetype_class.is_time():
        value_type = 'time'
        chart_type = 'line'
    elif p_dim.valuetype_class.is_geo():
        value_type = 'geo'
        chart_type = 'map'
    else:
        value_type = 'general'
        chart_type = 'bar'

    return dict(
        key=key,
        p_dim=p_dim.name,
        p_dim_type=value_type,
        p_label=p_dim.label_or_self.name,
        s_dim=s_dim.name if s_dim else None,
        s_label=s_dim.label_or_self.name if s_dim else None,
        filters=filtered,
        chart_type=chart_type
    )
def label(self):
    """Return the first child of this column that is marked as a label.

    Returns None when no label column exists.
    """
    for c in self.table.columns:
        if c.parent == self.name and 'label' in c.valuetype:
            return PartitionColumn(c, self._partition)
def value_labels(self):
    """Return a map of column code values mapped to labels, for columns that have a label column

    If the column is not associated with a label column, it returns an identity map
    when cardinality is below MAX_LABELS, and an empty dict otherwise.
    WARNING! This reads the whole partition, so it is really slow
    """
    from operator import itemgetter

    # Number of distinct values; used to stop reading as soon as all are seen.
    card = self.pstats.nuniques

    if self.label:
        # Pair each code value with the corresponding label column's value.
        ig = itemgetter(self.name, self.label.name)
    elif self.pstats.nuniques < MAX_LABELS:
        # No label column: build an identity map (value -> value).
        ig = itemgetter(self.name, self.name)
    else:
        # Too many distinct values to enumerate.
        return {}

    label_set = set()

    for row in self._partition:
        label_set.add(ig(row))
        # Early exit once every distinct value has been collected.
        if len(label_set) >= card:
            break

    d = dict(label_set)

    assert len(d) == len(label_set)  # Else the label set has multiple values per key

    return d
def list_build_configuration_sets(page_size=200, page_index=0, sort="", q=""):
    """
    List all build configuration sets
    """
    data = list_build_configuration_sets_raw(page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def create_build_configuration_set_raw(**kwargs):
    """
    Create a new BuildConfigurationSet.
    """
    config_set = _create_build_config_set_object(**kwargs)
    response = utils.checked_api_call(
        pnc_api.build_group_configs, 'create_new', body=config_set)
    return response.content if response else None
def update_build_configuration_set(id, **kwargs):
    """
    Update a BuildConfigurationSet
    """
    data = update_build_configuration_set_raw(id, **kwargs)
    return utils.format_json(data) if data else None
def list_build_configurations_for_set(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """
    List all build configurations in a given BuildConfigurationSet.
    """
    content = list_build_configurations_for_set_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(content) if content else None
def add_build_configuration_to_set(
        set_id=None, set_name=None, config_id=None, config_name=None):
    """
    Add a build configuration to an existing BuildConfigurationSet
    """
    content = add_build_configuration_to_set_raw(set_id, set_name, config_id, config_name)
    return utils.format_json(content) if content else None
def list_build_records_for_set(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """
    List all build records for a BuildConfigurationSet
    """
    content = list_build_records_for_set_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(content) if content else None
def list_build_set_records(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """
    List all build set records for a BuildConfigurationSet
    """
    content = list_build_set_records_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(content) if content else None
def register_model(cls, ModelClass, form_field=None, widget=None, title=None, prefix=None):
    """Register *ModelClass* so it becomes selectable in the URL field.

    Must be called once per model that the URL field should offer.

    :param ModelClass: The model to register.
    :param form_field: Form field class used to render the field; may be a lambda for lazy evaluation.
    :param widget: Widget class, usable instead of the form field.
    :param title: Display title; defaults to the model's ``verbose_name``.
    :param prefix: Custom prefix used in the serialized database format;
        defaults to "appname.modelname".
    """
    cls._static_registry.register(ModelClass, form_field, widget, title, prefix)
def resolve_objects(cls, objects, skip_cached_urls=False):
    """
    Make sure all AnyUrlValue objects from a set of objects is resolved in bulk.
    This avoids making a query per item.
    :param objects: A list or queryset of models.
    :param skip_cached_urls: Whether to avoid prefetching data that has it's URL cached.
    """
    # Allow the queryset or list to consist of multiple models.
    # This supports querysets from django-polymorphic too.
    queryset = list(objects)

    # Collect every AnyUrlValue that actually needs a database lookup.
    any_url_values = []
    for obj in queryset:
        model = obj.__class__
        for field in _any_url_fields_by_model[model]:
            any_url_value = getattr(obj, field)
            # Only values that reference an object by id need resolving.
            if any_url_value and any_url_value.url_type.has_id_value:
                any_url_values.append(any_url_value)

    # Resolve everything in one bulk operation.
    AnyUrlValue.resolve_values(any_url_values, skip_cached_urls=skip_cached_urls)
def config(path=None, root=None, db=None):
    """Return the default run_config object for this installation."""
    from ambry import run
    return run.load(path=path, root=root, db=db)
def get_library(path=None, root=None, db=None):
    """Return the default library for this installation."""
    # Fix: the docstring was previously placed after the import statement,
    # where it is a no-op string expression rather than a docstring.
    import ambry.library as _l
    rc = config(path=path, root=root, db=db)
    return _l.new_library(rc)
def doc_parser():
    """Utility function to allow getting the arguments for a single command, for Sphinx documentation"""
    description = ('Ambry {}. Management interface for ambry, libraries '
                   'and repositories. '.format(ambry._meta.__version__))
    return argparse.ArgumentParser(prog='ambry', description=description)
def get_extra_commands():
    """Use the configuration to discover additional CLI packages to load

    :return: list of CLI module names, or [] when no cli.yaml is configured.
    """
    from ambry.run import find_config_file
    from ambry.dbexceptions import ConfigurationError
    from ambry.util import yaml

    try:
        plugins_dir = find_config_file('cli.yaml')
    except ConfigurationError:
        # No cli.yaml configured; nothing extra to load.
        return []

    with open(plugins_dir) as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input -- confirm ambry.util.yaml provides a safe wrapper.
        cli_modules = yaml.load(f)

    return cli_modules
def url(self):
    """
    We will always check if this song file exists in local library,
    if true, we return the url of the local file.
    .. note::
        As netease song url will be expired after a period of time,
        we can not use static url here. Currently, we assume that the
        expiration time is 20 minutes, after the url expires, it
        will be automaticly refreshed.
    """
    # Prefer a local copy when one exists.
    local_path = self._find_in_local()
    if local_path:
        return local_path

    if not self._url:
        # No remote URL fetched yet.
        self._refresh_url()
    elif time.time() > self._expired_at:
        # Cached URL has expired; fetch a fresh one.
        logger.info('song({}) url is expired, refresh...'.format(self))
        self._refresh_url()

    return self._url
def get_handler(progname, fmt=None, datefmt=None, project_id=None,
                credentials=None, debug_thread_worker=False, **_):
    """Helper function to create a Stackdriver handler.

    See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments
    and supported keyword arguments.

    Returns:
        (obj): Instance of `google.cloud.logging.handlers.
        CloudLoggingHandler`
    """
    return CloudLoggingHandlerBuilder(
        progname,
        fmt=fmt,
        datefmt=datefmt,
        project_id=project_id,
        credentials=credentials,
        debug_thread_worker=debug_thread_worker,
    ).get_handler()
def _create_gcl_resource(self):
    """Build the configured logging Resource for this GCE instance.

    The `google.cloud.logging.resource.Resource` lets Cloud Logging filter
    and bucket incoming logs by the host (project / instance / zone) they
    originate from.

    Returns:
        (obj): Instance of `google.cloud.logging.resource.Resource`
    """
    labels = {
        'project_id': self.project_id,
        'instance_id': self.instance_id,
        'zone': self.zone,
    }
    return gcl_resource.Resource('gce_instance', labels)
def get_formatter(self):
    """Build the fully configured `logging.Formatter` for emitted records.

    Lazily fills in default format strings when none were provided, e.g.:
        2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello

    Returns:
        (obj): Instance of `logging.Formatter`
    """
    if not self.fmt:
        default = ('%(asctime)s.%(msecs)03d {host} {progname} '
                   '(%(process)d): %(message)s')
        self.fmt = default.format(host=self.hostname, progname=self.progname)
    if not self.datefmt:
        self.datefmt = '%Y-%m-%dT%H:%M:%S'
    return logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)
q37226 | CloudLoggingHandlerBuilder._set_worker_thread_level | train | def _set_worker_thread_level(self):
"""Sets logging level of the background logging thread to DEBUG or INFO
"""
bthread_logger = logging.getLogger(
'google.cloud.logging.handlers.transports.background_thread')
if self.debug_thread_worker:
bthread_logger.setLevel(logging.DEBUG)
else:
bthread_logger.setLevel(logging.INFO) | python | {
"resource": ""
} |
def get_handler(self):
    """Create a fully configured CloudLoggingHandler.
    Returns:
        (obj): Instance of `google.cloud.logging.handlers.
        CloudLoggingHandler`
    """
    gcl_client = gcl_logging.Client(
        project=self.project_id, credentials=self.credentials)

    # Labels duplicate the resource fields so they are searchable in the
    # logging UI alongside the structured resource.
    handler = gcl_handlers.CloudLoggingHandler(
        gcl_client,
        resource=self.resource,
        labels={
            'resource_id': self.instance_id,
            'resource_project': self.project_id,
            'resource_zone': self.zone,
            'resource_host': self.hostname
        })
    handler.setFormatter(self.get_formatter())
    # Configure the background transport thread's own log level.
    self._set_worker_thread_level()
    return handler
def index_dataset(self, dataset, force=False):
    """Add *dataset* to the dataset search index."""
    self.backend.dataset_index.index_one(dataset, force=force)
def index_partition(self, partition, force=False):
    """Add *partition* to the partition search index."""
    self.backend.partition_index.index_one(partition, force=force)
def index_library_datasets(self, tick_f=None):
    """ Indexes all datasets of the library.
    Args:
        tick_f (callable, optional): callable of one argument. Gets string with index state.
    """
    dataset_n = 0
    partition_n = 0

    def tick(d, p):
        # Report progress only when a callback was provided.
        if tick_f:
            tick_f('datasets: {} partitions: {}'.format(d, p))

    for dataset in self.library.datasets:
        if self.backend.dataset_index.index_one(dataset):
            # dataset added to index
            dataset_n += 1
            tick(dataset_n, partition_n)
            # Partitions are indexed only when the dataset itself was newly
            # added to the index.
            for partition in dataset.partitions:
                self.backend.partition_index.index_one(partition)
                partition_n += 1
                tick(dataset_n, partition_n)
        else:
            # dataset already indexed
            pass
def search_datasets(self, search_phrase, limit=None):
    """Run *search_phrase* against the dataset index and return the hits."""
    return self.backend.dataset_index.search(search_phrase, limit=limit)
def search(self, search_phrase, limit=None):
    """Search for datasets, and expand to database records

    :param search_phrase: query string, possibly containing terms such as ``source:``.
    :param limit: maximum number of dataset hits to consider.
    :return: dataset results with ``bundle`` attached, sorted by descending score.
    """
    from ambry.identity import ObjectNumber
    from ambry.orm.exc import NotFoundError
    from ambry.library.search_backends.base import SearchTermParser

    results = []
    stp = SearchTermParser()

    # Because of the split between searching for partitions and bundles, some terms don't behave right.
    # The source term should be a limit on everything, but it isn't part of the partition doc,
    # so we check for it here.
    parsed_terms = stp.parse(search_phrase)

    for r in self.search_datasets(search_phrase, limit):
        # A hit may carry only partitions; derive the dataset vid from the
        # first partition in that case.
        vid = r.vid or ObjectNumber.parse(next(iter(r.partitions))).as_dataset
        r.vid = vid
        try:
            r.bundle = self.library.bundle(r.vid)
            # Apply the source term as a filter over the whole result set.
            if 'source' not in parsed_terms or parsed_terms['source'] in r.bundle.dataset.source:
                results.append(r)
        except NotFoundError:
            # Bundle missing from the library; drop the hit.
            pass

    return sorted(results, key=lambda r: r.score, reverse=True)
def get_parsed_query(self):
    """ Returns string with last query parsed. Assuming called after search_datasets."""
    dataset_query = self.backend.dataset_index.get_parsed_query()[0]
    partition_query = self.backend.partition_index.get_parsed_query()[0]
    return '{} OR {}'.format(dataset_query, partition_query)
def list_build_configuration_set_records(page_size=200, page_index=0, sort="", q=""):
    """
    List all build configuration set records.
    """
    data = list_build_configuration_set_records_raw(page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def list_records_for_build_config_set(id, page_size=200, page_index=0, sort="", q=""):
    """
    Get a list of BuildRecords for the given BuildConfigSetRecord
    """
    data = list_records_for_build_config_set_raw(id, page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def auth_as(self, user):
    """auth as a user temporarily"""
    previous = self._user
    self.auth(user)
    try:
        yield
    finally:
        # Always restore the prior user, even if the body raised.
        self.auth(previous)
def sync_accounts(self, accounts_data, clear=False, password=None, cb=None):
    """
    Load all of the accounts from the account section of the config
    into the database.

    :param accounts_data: mapping of account_id -> dict of account values.
    :param clear: accepted for interface compatibility; currently unused.
    :param password: secret password; defaults to ``self.password``.
    :param cb: optional callback, called with a progress string per account.
    """
    # Map common values into the accounts records.
    # (Fix: removed the unused local `all_accounts = self.accounts`.)
    kmap = Account.prop_map()

    for account_id, values in accounts_data.items():
        if not isinstance(values, dict):
            # Only dict entries describe accounts; skip scalars.
            continue

        d = {}
        a = self.library.find_or_new_account(account_id)
        a.secret_password = password or self.password

        for k, v in values.items():
            if k in ('id',):
                continue
            try:
                if kmap[k] == 'secret' and v:
                    a.encrypt_secret(v)
                else:
                    setattr(a, kmap[k], v)
            except KeyError:
                # Unknown keys are preserved in the account's data blob.
                d[k] = v

        a.data = d

        if values.get('service') == 's3':
            a.url = 's3://{}'.format(a.account_id)

        if cb:
            cb('Loaded account: {}'.format(a.account_id))

    self.database.session.commit()
def robust_int(v):
    """Parse an int robustly, ignoring commas and other cruft.

    Accepts ints (returned as-is), floats (truncated), and strings with
    thousands separators or surrounding whitespace. Empty values yield None.

    :param v: value to parse.
    :return: int, or None for empty input.
    :raises ValueError: if the cleaned string is not a valid integer.
    """
    if isinstance(v, int):
        return v

    if isinstance(v, float):
        return int(v)

    # Strip thousands separators and surrounding whitespace before parsing;
    # previously a whitespace-only string raised instead of returning None.
    v = str(v).replace(',', '').strip()

    if not v:
        return None

    return int(v)
def render(self, template_name, **kw):
    '''
    Given a template name and template vars.
    Searches a template file based on engine set, and renders it
    with corresponding engine.
    Returns a string.
    '''
    logger.debug('Rendering template "%s"', template_name)
    # Merge per-call variables over the template globals.
    # (Fix: renamed the local from `vars` to avoid shadowing the builtin.)
    template_vars = self.globs.copy()
    template_vars.update(kw)
    resolved_name, engine = self.resolve(template_name)
    return engine.render(resolved_name, **template_vars)
def render(self, template_name, __data=None, **kw):
    '''Render *template_name* with the bound data and return a string.'''
    data = self._vars(__data, **kw)
    return self.template.render(template_name, **data)
def render_to_response(self, template_name, __data,
                       content_type="text/html"):
    '''Render the template and wrap the result in a `webob.Response`.'''
    body = self.render(template_name, __data)
    return Response(body, content_type=content_type)
def page(self):
    '''Current page number parsed from the request query string.

    Missing parameter means page 1; a malformed or out-of-range value
    triggers ``invalid_page`` and falls back to page 1.
    '''
    raw = self.request.GET.get(self.page_param)
    if not raw:
        return 1
    try:
        number = int(raw)
    except ValueError:
        number = 0  # force the invalid branch below
    if number < 1:
        self.invalid_page()
        return 1
    return number
def url(self):
    '''The current (or base) URL as an `iktomi.web.URL` object.

    Whether the host is included follows the paginator's ``show_host``
    setting, which can be redefined via keyword argument on initialization.
    '''
    return URL.from_url(self.request.url, show_host=self.show_host)
def page_url(self, page):
    '''
    Returns URL for the given page, included as a query parameter.

    Page 1 is canonical, so its parameter is removed rather than set.
    Returns None when ``page`` is None.
    '''
    if page is not None and page != 1:
        return self.url.qs_set(**{self.page_param: page})
    elif page is not None:
        # Fix: delete the configured page parameter, not the literal 'page',
        # so paginators with a custom page_param produce correct URLs.
        return self.url.qs_delete(self.page_param)
def pages_count(self):
    '''Total number of pages, honouring the ``orphans`` threshold.'''
    if not self.limit or self.count < self.limit:
        return 1
    full_pages, remainder = divmod(self.count, self.limit)
    if remainder <= self.orphans:
        # A small trailing page is folded into the previous one.
        return full_pages
    return full_pages + bool(remainder)
def slice(self, items):
    '''Return the sub-sequence of *items* belonging to the current page.'''
    if not self.limit:
        # Pagination disabled: everything is on one page.
        return items[:]
    if self.page > self.pages_count:
        return []
    start = self.limit * (self.page - 1)
    if self.page == self.pages_count:
        # Last page takes everything that remains (including orphans).
        return items[start:]
    return items[start:start + self.limit]
def list_build_records(page_size=200, page_index=0, sort="", q=""):
    """
    List all BuildRecords
    """
    data = list_build_records_raw(page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """
    List all BuildRecords for a given BuildConfiguration
    """
    data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """
    List all BuildRecords for a given Project
    """
    data = list_records_for_project_raw(id, name, page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def list_built_artifacts(id, page_size=200, page_index=0, sort="", q=""):
    """
    List Artifacts associated with a BuildRecord
    """
    data = list_built_artifacts_raw(id, page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def list_dependency_artifacts(id, page_size=200, page_index=0, sort="", q=""):
    """
    List dependency artifacts associated with a BuildRecord
    """
    data = list_dependency_artifacts_raw(id, page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
def on_start(self):
    """Called when the actor starts; kick off the periodic status updates."""
    logger.info('StatusReporter started.')
    # An interval of zero disables status reporting entirely.
    if self.config['status_update_interval'] == 0:
        logger.info('StatusReporter disabled by configuration.')
        return
    self.in_future.report_status()
def report_again(self, current_status):
    """Sleep for a state-dependent interval, then schedule the next report."""
    # Playing reports at the base rate; paused half as often; stopped 5x slower.
    multiplier = {'playing': 1, 'paused': 2, 'stopped': 5}[current_status['state']]
    interval = (self.config['status_update_interval'] * multiplier) / 1000.0
    time.sleep(interval)
    self.in_future.report_status()
def report_status(self):
    """Collect the player's current state from mopidy core and post it."""
    status = {
        'current_track': self.core.playback.current_track.get(),
        'state': self.core.playback.state.get(),
        'time_position': self.core.playback.time_position.get(),
    }
    send_webhook(self.config, {'status_report': status})
    # Schedule the next report based on the state we just observed.
    self.report_again(status)
def get_urlfield_cache_key(model, pk, language_code=None):
    """
    The low-level function to get the cache key for a model.
    Falls back to the active language when no code is given.
    """
    lang = language_code or get_language()
    return 'anyurlfield.{0}.{1}.{2}.{3}'.format(
        model._meta.app_label, model.__name__, pk, lang)
def flush(self):
    """ Fold the accumulator into the moving-average window and reset it.

    Called periodically (e.g. once per second by the StatsCollector) so each
    window entry holds one interval's total.
    """
    sample = self.accumulator
    self.accumulator = 0

    window = self.stream
    window.append(sample)
    self.sum += sample

    length = len(window)
    if length > self.period:
        # Window overflow: evict the oldest sample from the running sum.
        self.sum -= window.popleft()
        length -= 1

    self.last_average = (self.sum / length) if length else 0
def send_webhook(config, payload):
    """Sends a HTTP request to the configured server.
    All exceptions are suppressed but emit a warning message in the log.
    """
    try:
        response = requests.post(
            config['webhook_url'],
            data=json.dumps(payload, cls=ModelJSONEncoder),
            headers={config['api_key_header_name']: config['api_key']},
        )
    except Exception as e:
        # Fix: the original used format indices {1}/{2} with only two
        # arguments, raising IndexError inside this handler; it also read
        # e.message, which does not exist on Python 3 exceptions.
        logger.warning('Unable to send webhook: ({0}) {1}'.format(
            e.__class__.__name__,
            e,
        ))
    else:
        logger.debug('Webhook response: ({0}) {1}'.format(
            response.status_code,
            response.text,
        ))
def clean(self):
    """Remove all of the tables and data from the warehouse"""
    conn = self._backend._get_connection()
    self._backend.clean(conn)
def list(self):
    """List the tables in the database"""
    conn = self._backend._get_connection()
    # Materialize the backend's iterator into a list.
    return [table for table in self._backend.list(conn)]
def install(self, ref, table_name=None, index_columns=None, logger=None):
    """ Finds partition by reference and installs it to warehouse db.
    Args:
        ref (str): id, vid (versioned id), name or vname (versioned name) of the partition,
            or a table vid.
        table_name (str, optional): name for the installed table.
        index_columns (list, optional): columns to index after installation.
        logger (Logger, optional): progress logger.
    """
    try:
        obj_number = ObjectNumber.parse(ref)
        if isinstance(obj_number, TableNumber):
            # The reference is a table vid: install the whole table.
            table = self._library.table(ref)
            connection = self._backend._get_connection()
            return self._backend.install_table(connection, table, logger=logger)
        else:
            # assume partition; re-route into the partition branch below.
            raise NotObjectNumberError
    except NotObjectNumberError:
        # assume partition.
        partition = self._library.partition(ref)
        connection = self._backend._get_connection()
        return self._backend.install(
            connection, partition, table_name=table_name, index_columns=index_columns,
            logger=logger)
def materialize(self, ref, table_name=None, index_columns=None, logger=None):
    """ Creates materialized table for given partition reference.
    Args:
        ref (str): id, vid, name or vname of the partition.
        table_name (str, optional): name for the materialized table.
        index_columns (list, optional): columns to index after materializing.
        logger (Logger, optional): defaults to the library's logger.
    Returns:
        str: name of the partition table in the database.
    """
    from ambry.library import Library
    assert isinstance(self._library, Library)

    # Fix: logger defaults to None but was dereferenced unconditionally,
    # crashing when no logger was passed. Fall back to the library logger,
    # matching Warehouse.query().
    if not logger:
        logger = self._library.logger

    logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref))
    partition = self._library.partition(ref)
    connection = self._backend._get_connection()
    return self._backend.install(connection, partition, table_name=table_name,
                                 index_columns=index_columns, materialize=True, logger=logger)
def parse_sql(self, asql):
    """ Executes backend preprocessors over all sql statements from asql.
    Args:
        asql (str): ambry sql query - see https://github.com/CivicKnowledge/ambry/issues/140 for details.
    Returns:
        list of str: the statements after backend-specific preprocessing.
    """
    import sqlparse

    # Strip comments, then split the source into individual statements.
    statements = sqlparse.parse(sqlparse.format(asql, strip_comments=True))

    parsed_statements = []
    for statement in statements:
        statement_str = statement.to_unicode().strip()
        # Run each backend preprocessor over the statement, in order.
        for preprocessor in self._backend.sql_processors():
            statement_str = preprocessor(statement_str, self._library, self._backend, self.connection)
        parsed_statements.append(statement_str)

    return parsed_statements
def query(self, asql, logger=None):
    """
    Execute an ASQL file and return the result of the first SELECT statement.

    Materializes and installs any partitions the script references, runs any
    CREATE statements, and returns the backend cursor for the first SELECT.
    When there is no SELECT, an empty closable iterable is returned so
    callers can uniformly close() and iterate the result.

    :param asql: Ambry SQL source text.
    :param logger: optional logger; defaults to the library's logger.
    :return: a cursor, or an empty closable iterable.
    """
    # Fix: removed unused imports (sqlparse, BadSQLError).
    from ambry.bundle.asql_parser import process_sql
    from ambry.orm.exc import NotFoundError

    if not logger:
        logger = self._library.logger

    rec = process_sql(asql, self._library)

    for drop in reversed(rec.drop):
        if drop:
            connection = self._backend._get_connection()
            cursor = self._backend.query(connection, drop, fetch=False)
            cursor.close()

    for vid in rec.materialize:
        logger.debug('Materialize {}'.format(vid))
        self.materialize(vid, logger=logger)

    for vid in rec.install:
        logger.debug('Install {}'.format(vid))
        self.install(vid, logger=logger)

    for statement in rec.statements:
        statement = statement.strip()
        logger.debug("Process statement: {}".format(statement[:60]))

        if statement.lower().startswith('create'):
            logger.debug(' Create {}'.format(statement))
            connection = self._backend._get_connection()
            cursor = self._backend.query(connection, statement, fetch=False)
            cursor.close()
        elif statement.lower().startswith('select'):
            logger.debug('Run query {}'.format(statement))
            connection = self._backend._get_connection()
            return self._backend.query(connection, statement, fetch=False)

    for table_or_vid, columns in rec.indexes:
        logger.debug('Index {}'.format(table_or_vid))
        try:
            self.index(table_or_vid, columns)
        except NotFoundError as e:
            # Common when the index table isn't a vid, so no partition can be found.
            # Fix: log the reference being indexed, not a stale loop variable.
            logger.debug('Failed to index {}; {}'.format(table_or_vid, e))
        except Exception as e:
            logger.error('Failed to index {}; {}'.format(table_or_vid, e))

    # A fake cursor that can be closed and iterated.
    class closable_iterable(object):
        def close(self):
            pass

        def __iter__(self):
            # Fix: the original returned None here, making the object
            # non-iterable; return an empty iterator instead.
            return iter(())

    return closable_iterable()
def geoframe(self, sql, simplify=None, crs=None, epsg=4326):
    """
    Return a geopandas GeoDataFrame for the given SQL query.

    :param sql: SQL query whose result must include a 'geometry' column of WKT.
    :param simplify: Number or None. Simplify each geometry to this tolerance,
        in the units of the geometry.
    :param crs: Coordinate reference system information.
    :param epsg: Specify the CRS as an EPSG number; used only when crs is None.
    :return: A Geopandas GeoDataFrame
    """
    import geopandas
    from shapely.wkt import loads
    from fiona.crs import from_epsg

    if crs is None:
        try:
            crs = from_epsg(epsg)
        except TypeError:
            raise TypeError('Must set either crs or epsg for output.')

    frame = self.dataframe(sql)

    wkt_column = frame['geometry']
    if simplify:
        shapes = wkt_column.apply(lambda wkt: loads(wkt).simplify(simplify))
    else:
        shapes = wkt_column.apply(loads)

    frame['geometry'] = geopandas.GeoSeries(shapes)

    return geopandas.GeoDataFrame(frame, crs=crs, geometry='geometry')
"resource": ""
} |
def get_url_param(self, index, default=None):
    """
    Return the url parameter at the given position.

    Args:
        - index: zero-based position among the url parameters, which come
          after the controller and action names in the url.
        - default: value returned when no parameter exists at that position.
    """
    params = self.get_url_params()
    if index < len(params):
        return params[index]
    return default
"resource": ""
} |
def get_widget(self):
    """
    Create and return the widget instance for this URL type.
    """
    form_field = self.get_form_field()
    widget = form_field.widget

    # Widget instantiation needs to happen manually when a class was given.
    if isinstance(widget, type):
        widget = widget()

    # Copy the field's choices onto the widget, but only when the field
    # defines them and the widget can accept them.
    choices = getattr(form_field, 'choices', None)
    if choices is not None and hasattr(widget, 'choices'):
        widget.choices = choices

    return widget
"resource": ""
} |
def register(self, ModelClass, form_field=None, widget=None, title=None, prefix=None, has_id_value=True):
    """
    Register a custom model with the ``AnyUrlField``.

    :param ModelClass: The model to register.
    :param form_field: Optional form field used to select an instance.
    :param widget: Optional widget; mutually exclusive with ``form_field``.
    :param title: Human-readable title; defaults to the model's verbose name.
    :param prefix: Identifier stored in the database; defaults to
        ``"app_label.modelname"``.
    :param has_id_value: Whether URLs of this type reference an object id.
    :return: The created ``UrlType`` instance.
    :raises ValueError: If the model or prefix is already registered, the
        prefix looks like an external URL scheme, or both ``form_field``
        and ``widget`` are given.
    """
    if any(urltype.model == ModelClass for urltype in self._url_types):
        raise ValueError("Model is already registered: '{0}'".format(ModelClass))
    # NOTE(review): this first assignment is immediately overwritten below;
    # only the concrete model's meta is actually used.
    opts = ModelClass._meta
    opts = opts.concrete_model._meta
    if not prefix:
        # Store something descriptive, easier to lookup from raw database content.
        prefix = '{0}.{1}'.format(opts.app_label, opts.object_name.lower())
    if not title:
        title = ModelClass._meta.verbose_name
    if self.is_external_url_prefix(prefix):
        raise ValueError("Invalid prefix value: '{0}'.".format(prefix))
    if self[prefix] is not None:
        raise ValueError("Prefix is already registered: '{0}'".format(prefix))
    if form_field is not None and widget is not None:
        raise ValueError("Provide either a form_field or widget; use the widget parameter of the form field instead.")
    urltype = UrlType(ModelClass, form_field, widget, title, prefix, has_id_value)
    # React to saves of registered instances (e.g. to keep URLs current).
    signals.post_save.connect(_on_model_save, sender=ModelClass)
    self._url_types.append(urltype)
    return urltype
"resource": ""
} |
def get_for_model(self, ModelClass):
    """
    Return the registered URL type for the given model class, or None
    when the model was never registered.
    """
    return next(
        (urltype for urltype in self._url_types if urltype.model is ModelClass),
        None,
    )
"resource": ""
} |
def index(self, prefix):
    """
    Return the position of the URL type registered for ``prefix``,
    or None when the prefix is unknown.
    """
    # Any web domain will be handled by the standard URLField.
    if self.is_external_url_prefix(prefix):
        prefix = 'http'

    positions = (
        pos for pos, urltype in enumerate(self._url_types)
        if urltype.prefix == prefix
    )
    return next(positions, None)
"resource": ""
} |
def message_received(request, backend_name):
    """Handle HTTP requests from Tropo.

    :param request: Django HTTP request whose body is JSON posted by Tropo.
    :param backend_name: Name of the RapidSMS backend this view serves.
    :return: HttpResponse containing a Tropo JSON program, or a 4xx/5xx
        error response when the payload is malformed or handling fails.
    """
    logger.debug("@@ request from Tropo - raw data: %s" % request.body)
    try:
        post = json.loads(request.body)
    except ValueError:
        # Body was not valid JSON -- reject it.
        logger.exception("EXCEPTION decoding post data in incoming request")
        return HttpResponseBadRequest()
    except Exception:
        logger.exception("@@responding to tropo with error")
        return HttpResponseServerError()
    logger.debug("@@ Decoded data: %r" % post)
    if 'session' not in post:
        logger.error("@@HEY, post does not contain session, "
        "what's going on?")
        return HttpResponseBadRequest()
    session = post['session']
    parms = session.get('parameters', {})
    if 'program' in parms:
        # Execute a program that we passed to Tropo to pass back to us.
        # Extract the program, while verifying it came from us and
        # has not been modified.
        try:
            program = signing.loads(parms['program'])
        except signing.BadSignature:
            logger.exception("@@ received program with bad signature")
            return HttpResponseBadRequest()
        return HttpResponse(json.dumps(program))
    if 'from' in session:
        # Must have received a message
        # FIXME: is there any way we can verify it's really Tropo calling us?
        logger.debug("@@Got a text message")
        try:
            from_address = session['from']['id']
            text = session['initialText']
            logger.debug("@@Received message from %s: %s" %
            (from_address, text))
            # pass the message to RapidSMS
            identity = from_address
            connections = lookup_connections(backend_name, [identity])
            receive(text, connections[0])
            # Respond nicely to Tropo
            program = json.dumps({"tropo": [{"hangup": {}}]})
            logger.debug("@@responding to tropo with hangup")
            return HttpResponse(program)
        except Exception:
            logger.exception("@@responding to tropo with error")
            return HttpResponseServerError()
    logger.error("@@No recognized command in request from Tropo")
    return HttpResponseBadRequest()
"resource": ""
} |
def list_running_builds(page_size=200, page_index=0, sort=""):
    """
    List all RunningBuilds.

    :param page_size: Number of entries per page.
    :param page_index: Zero-based page to fetch.
    :param sort: Sort expression passed through to the raw call.
    :return: Formatted JSON list, or None when the raw call returns nothing.
    """
    content = list_running_builds_raw(page_size, page_index, sort)
    if not content:
        return None
    return utils.format_json_list(content)
"resource": ""
} |
def between(min_value, max_value):
    """Build a validator limiting numerical values to [min_value, max_value]."""
    message = N_('value should be between %(min)d and %(max)d') % \
        dict(min=min_value, max=max_value)

    @validator(message)
    def wrapper(conv, value):
        # None means the value is not required, so it always validates.
        if value is None:
            return True
        # Kept as two explicit comparisons (not a chained range check) so
        # unordered values like NaN validate exactly as before.
        if value < min_value or value > max_value:
            return False
        return True
    return wrapper
"resource": ""
} |
def _tzinfome(tzinfo):
    """Gets a tzinfo object from a string.

    Args:
        tzinfo: A string (or string like) object, or a datetime.tzinfo object.

    Returns:
        A datetime.tzinfo object.

    Raises:
        UnknownTimeZoneError: If the timezone given can't be decoded.
    """
    if not isinstance(tzinfo, datetime.tzinfo):
        try:
            tzinfo = pytz.timezone(tzinfo)
            # BUG FIX: this was a bare ``assert``, which is stripped under
            # ``python -O`` and raised AssertionError instead of the
            # documented UnknownTimeZoneError.
            if tzinfo.zone not in pytz.all_timezones:
                raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo)
        except AttributeError:
            # Non-string-like input (no .upper()/.replace()) ends up here.
            raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo)
    return tzinfo
"resource": ""
} |
def localize(dt, force_to_local=True):
    """Localize a datetime to the local timezone.

    If dt is naive, returns the same datetime with the local timezone
    attached; otherwise uses astimezone to convert.

    Args:
        dt: datetime object.
        force_to_local: Force all results to be in local time.

    Returns:
        A datetime_tz object.
    """
    if not isinstance(dt, datetime_tz):
        if dt.tzinfo:
            dt = datetime_tz(dt)
        else:
            # Naive input: attach the local zone and return immediately.
            return datetime_tz(dt, tzinfo=localtz())
    if force_to_local:
        return dt.astimezone(localtz())
    return dt
"resource": ""
} |
def get_naive(dt):
    """Gets a naive datetime from a datetime.

    datetime_tz objects can't just have tzinfo replaced with None; you need
    to call asdatetime.

    Args:
        dt: datetime object.

    Returns:
        datetime object without any timezone information.
    """
    if not dt.tzinfo:
        return dt
    converter = getattr(dt, "asdatetime", None)
    if converter is not None:
        return converter()
    return dt.replace(tzinfo=None)
"resource": ""
} |
def detect_timezone():
    """Try and detect the timezone that Python is currently running in.

    We have a bunch of different methods for trying to figure this out
    (listed in order they are attempted):
      * In windows, use win32timezone.TimeZoneInfo.local()
      * Try TZ environment variable.
      * Try and find /etc/timezone file (with timezone name).
      * Try and find /etc/localtime file (with timezone data).
      * Try and match a TZ to the current dst/offset/shortname.

    Returns:
        The detected local timezone as a tzinfo object

    Raises:
        pytz.UnknownTimeZoneError: If it was unable to detect a timezone.
    """
    # Ordered from most to least reliable; first non-None result wins.
    detectors = []
    if sys.platform == "win32":
        detectors.append(_detect_timezone_windows)
    detectors.extend([
        _detect_timezone_environ,
        _detect_timezone_etc_timezone,
        _detect_timezone_etc_localtime,
    ])
    for detector in detectors:
        tz = detector()
        if tz is not None:
            return tz

    # Last resort: a similar method to what PHP does -- match time.tzname,
    # time.timezone and time.daylight against the pytz zones.
    warnings.warn("Had to fall back to worst detection method (the 'PHP' "
                  "method).")
    tz = _detect_timezone_php()
    if tz is not None:
        return tz

    raise pytz.UnknownTimeZoneError("Unable to detect your timezone!")
"resource": ""
} |
def _load_local_tzinfo():
    """Load zoneinfo from local disk.

    Walks $TZDIR (default /usr/share/zoneinfo/posix) and builds a pytz
    tzinfo for every file found.

    Returns:
        Dict mapping zone name (path relative to TZDIR) to tzinfo object.
    """
    tzdir = os.environ.get("TZDIR", "/usr/share/zoneinfo/posix")

    localtzdata = {}
    for dirpath, _, filenames in os.walk(tzdir):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            name = os.path.relpath(filepath, tzdir)

            # BUG FIX: use a context manager so the file is closed even when
            # build_tzinfo raises; the old open/close pair leaked on error.
            with open(filepath, "rb") as f:
                tzinfo = pytz.tzfile.build_tzinfo(name, f)
            localtzdata[name] = tzinfo

    return localtzdata
"resource": ""
} |
def _wrap_method(name):
    """Wrap a method.

    Patch a method which might return a datetime.datetime to return a
    datetime_tz.datetime_tz instead.

    Args:
        name: The name of the method to patch
    """
    method = getattr(datetime.datetime, name)

    # Have to give the second argument as method has no __module__ option.
    @functools.wraps(method, ("__name__", "__doc__"), ())
    def wrapper(self, *args, **kw):
        # Call the stock datetime implementation, then up-convert any plain
        # datetime result into the caller's own (sub)class.
        r = method(self, *args, **kw)
        if isinstance(r, datetime.datetime) and not isinstance(r, type(self)):
            r = type(self)(r)
        return r

    # Install the wrapper on datetime_tz under the same method name.
    setattr(datetime_tz, name, wrapper)
"resource": ""
} |
def asdatetime(self, naive=True):
    """Return this datetime_tz as a plain datetime object.

    Args:
        naive: Return *without* any tz info.

    Returns:
        This datetime_tz as a datetime object.
    """
    fields = [self.year, self.month, self.day,
              self.hour, self.minute, self.second, self.microsecond]
    if not naive:
        fields.append(self.tzinfo)
    return datetime.datetime(*fields)
"resource": ""
} |
def asdate(self):
    """Return this datetime_tz as a date object.

    Returns:
        This datetime_tz as a date object.
    """
    year, month, day = self.year, self.month, self.day
    return datetime.date(year, month, day)
"resource": ""
} |
def astimezone(self, tzinfo):
    """Returns a version of this timestamp converted to the given timezone.

    Args:
        tzinfo: Either a datetime.tzinfo object or a string (which will be
            looked up in pytz).

    Returns:
        A datetime_tz object in the given timezone.
    """
    # Assert we are not a naive datetime object
    assert self.tzinfo is not None

    tzinfo = _tzinfome(tzinfo)

    # Convert via a plain aware datetime so the stock astimezone logic runs,
    # then re-wrap the result in this (sub)class.
    d = self.asdatetime(naive=False).astimezone(tzinfo)
    return type(self)(d)
"resource": ""
} |
def replace(self, **kw):
    """Return datetime with new specified fields given as arguments.

    For example, dt.replace(days=4) would return a new datetime_tz object
    with exactly the same as dt but with the days attribute equal to 4.

    Any attribute can be replaced, but tzinfo can not be set to None.

    Args:
        Any datetime_tz attribute, plus ``is_dst``.

    Returns:
        A datetime_tz object with the attributes replaced.

    Raises:
        TypeError: If the given replacement is invalid.
    """
    # Pull tzinfo out of kw (it must not reach datetime.replace below).
    if "tzinfo" in kw:
        if kw["tzinfo"] is None:
            raise TypeError("Can not remove the timezone use asdatetime()")
        else:
            tzinfo = kw["tzinfo"]
            del kw["tzinfo"]
    else:
        tzinfo = None

    # is_dst is likewise our own extension, not a datetime field.
    is_dst = None
    if "is_dst" in kw:
        is_dst = kw["is_dst"]
        del kw["is_dst"]
    else:
        # Use our own DST setting..
        is_dst = self.is_dst

    # Delegate plain-field replacement to datetime, then re-wrap with the
    # (possibly new) timezone and DST flag.
    replaced = self.asdatetime().replace(**kw)

    return type(self)(
        replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)
"resource": ""
} |
def utcnow(cls):
    """Return a new datetime_tz representing the current UTC day and time."""
    naive = datetime.datetime.utcnow()
    return cls(naive, tzinfo=pytz.utc)
"resource": ""
} |
def between(start, delta, end=None):
    """Return an iterator from ``start`` stepping by ``delta``.

    Example usage:
      >>> d = datetime_tz.smartparse("5 days ago")
      2008/05/12 11:45
      >>> for i in d.between(timedelta(days=1), datetime_tz.now()):
      >>>    print i
      2008/05/12 11:45
      2008/05/13 11:45
      ...

    Args:
        start: The date to start at.
        delta: The interval to iterate with.
        end: (Optional) Date to end at. If not given the iterator will never
            terminate.

    Yields:
        datetime_tz objects.
    """
    current = start
    while True:
        # Stop once the cursor is no longer strictly before end (if given).
        if end is not None and not current < end:
            break
        yield current
        current = current + delta
"resource": ""
} |
def days(start, end=None):
    """Iterate over the days between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at; if not given the iterator never
            terminates.

    Returns:
        An iterator which generates datetime_tz objects a day apart.
    """
    step = datetime.timedelta(days=1)
    return iterate.between(start, step, end)
"resource": ""
} |
def hours(start, end=None):
    """Iterate over the hours between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at; if not given the iterator never
            terminates.

    Returns:
        An iterator which generates datetime_tz objects an hour apart.
    """
    step = datetime.timedelta(hours=1)
    return iterate.between(start, step, end)
"resource": ""
} |
def minutes(start, end=None):
    """Iterate over the minutes between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at; if not given the iterator never
            terminates.

    Returns:
        An iterator which generates datetime_tz objects a minute apart.
    """
    step = datetime.timedelta(minutes=1)
    return iterate.between(start, step, end)
"resource": ""
} |
def publish_page(page, languages):
    """
    Publish a CMS page in all given languages and return the reloaded page.
    """
    for language_code, lang_name in iter_languages(languages):
        url = page.get_absolute_url()
        if not page.publisher_is_draft:
            # Already published -- nothing to do for this language.
            log.info('published page "%s" already exists in %s: %s', page,
                     lang_name, url)
        else:
            page.publish(language_code)
            log.info('page "%s" published in %s: %s', page, lang_name, url)
    return page.reload()
"resource": ""
} |
def create_cms_plugin_page(apphook, apphook_namespace, placeholder_slot=None):
    """
    Create a cms plugin page in all existing languages and add a link to the
    index page.

    :param apphook: e.g...........: 'FooBarApp'
    :param apphook_namespace: e.g.: 'foobar'
    :return: the created plugin page
    """
    creator = CmsPluginPageCreator(
        apphook=apphook,
        apphook_namespace=apphook_namespace,
    )
    creator.placeholder_slot = placeholder_slot
    return creator.create()
"resource": ""
} |
def publish(self, page):
    """
    Publish the given draft page in all languages.

    :param page: A draft cms Page instance.
    """
    # Idiom fix: truthiness check instead of the non-idiomatic '== True'.
    assert page.publisher_is_draft, "Page '%s' must be a draft!" % page
    publish_page(page, languages=self.languages)
"resource": ""
} |
def add_plugins(self, page, placeholder):
    """
    Add a "TextPlugin" in all languages.

    :param page: The cms page the placeholder belongs to.
    :param placeholder: The placeholder the plugins are added to.
    """
    for language_code, lang_name in iter_languages(self.languages):
        # self.dummy_text_count plugins per language, numbered from 1.
        for no in range(1, self.dummy_text_count + 1):
            add_plugin_kwargs = self.get_add_plugin_kwargs(
                page, no, placeholder, language_code, lang_name)
            log.info(
                'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
                placeholder, placeholder.pk, lang_name, no)
            plugin = add_plugin(
                placeholder=placeholder,
                language=language_code,
                **add_plugin_kwargs)
            log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
    # NOTE(review): saved once after all languages are processed -- confirm
    # this matches the intended save granularity.
    placeholder.save()
"resource": ""
} |
def get_or_create_placeholder(self, page, placeholder_slot):
    """
    Fetch the named placeholder on *page*, creating it if missing.

    :return: tuple of (placeholder, created-flag)
    """
    # Delegates to the module-level helper; ``delete_first`` controls
    # whether an existing placeholder is wiped before reuse.
    placeholder, created = get_or_create_placeholder(
        page, placeholder_slot, delete_existing=self.delete_first)
    return placeholder, created
"resource": ""
} |
def fill_content(self, page, placeholder_slot):
    """
    Add a placeholder to the page.

    Here we add a "TextPlugin" in all languages.
    """
    # NOTE(review): this guards against a single *character* being passed
    # where a slot *name* was expected (e.g. iterating a string instead of
    # a list of slot names) -- presumably a debugging aid; confirm intent.
    if len(placeholder_slot) == 1:
        raise RuntimeError(placeholder_slot)
    placeholder, created = self.get_or_create_placeholder(
        page, placeholder_slot)
    self.add_plugins(page, placeholder)
"resource": ""
} |
def create(self):
    """
    Create the plugin page in all languages and fill it with dummy content.

    :return: tuple of (page, created-flag)
    """
    plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)
    if plugin.exists():
        # BUG FIX: the old code did ``raise plugin`` here, which raised a
        # QuerySet -- always a TypeError at runtime. Matching the log
        # message ("already exist, ok."), we now fall through: the parent
        # create() returns the existing page with created=False, so no
        # duplicate content is added below.
        log.debug('Plugin page for "%s" plugin already exist, ok.',
                  self.apphook)

    page, created = super(CmsPluginPageCreator, self).create()
    if created:
        # Add a plugin with content in all languages to the created page.
        # But only on new created page
        for placeholder_slot in self.placeholder_slots:
            self.fill_content(page, placeholder_slot)
    return page, created
"resource": ""
} |
def column(self, source_header_or_pos):
    """
    Return a column by name or position.

    :param source_header_or_pos: If a string, a source header name. If an
        integer, a column position.
    :return: The matching column, or None when no column matches.
    """
    # BUG FIX: the original attached ``else: return None`` to the if/elif
    # INSIDE the loop, so any non-matching first column made the whole
    # lookup return None without ever checking the remaining columns.
    for c in self.columns:
        if (c.source_header == source_header_or_pos
                or c.position == source_header_or_pos):
            assert c.st_vid == self.vid
            return c
    return None
"resource": ""
} |
def table_convert_geometry(metadata, table_name):
    """Get table metadata from the database, patching geometry columns."""
    from sqlalchemy import Table
    from ..orm import Geometry

    table = Table(table_name, metadata, autoload=True)
    for column in table.columns:
        # HACK! Sqlalchemy sees spatialite GEOMETRY types as NUMERIC, so
        # patch the type on the well-known 'geometry' column by hand.
        if column.name == 'geometry':
            column.type = Geometry  # What about variants?
    return table
"resource": ""
} |
def incver(o, prop_names):
    """Increment the version numbers of a set of properties and return a
    new object of the same class.

    :param o: A mapped (SQLAlchemy) object to copy.
    :param prop_names: Names of properties whose values are version-bumped.
    """
    from ambry.identity import ObjectNumber

    updated = {}
    for prop in o.__mapper__.attrs:
        value = getattr(o, prop.key)
        if value is None:
            updated[prop.key] = None
        elif prop.key in prop_names:
            updated[prop.key] = str(ObjectNumber.increment(value))
        elif not hasattr(value, '__mapper__'):
            # Only copy plain values, never mapped (related) objects.
            updated[prop.key] = value
    return o.__class__(**updated)
"resource": ""
} |
def coerce(cls, key, value):
    """Convert a plain list (or its string encoding) to a MutationList.

    Accepts a list/iterable, a JSON-encoded list string, or a
    comma-separated string. Falsy values become an empty list.

    :raises ValueError: If a '['-prefixed string is not valid JSON.
    """
    if isinstance(value, string_types):
        value = value.strip()
        # BUG FIX: handle the empty string first -- ``value[0]`` used to
        # raise IndexError before the generic falsy check below could run.
        if not value:
            value = []
        elif value[0] == '[':  # It's json encoded, probably
            try:
                value = json.loads(value)
            except ValueError:
                raise ValueError("Failed to parse JSON: '{}' ".format(value))
        else:
            value = value.split(',')

    if not value:
        value = []

    instance = MutationList(MutationObj.coerce(key, v) for v in value)
    instance._key = key
    return instance
"resource": ""
} |
def parse_view(query):
    """ Parses an asql query into a view object.

    Args:
        query (str): asql query

    Returns:
        View instance: parsed view.
    """
    # Drop any WHERE clause; only the view definition part is parsed.
    # NOTE(review): this finds the first 'where' anywhere in the text,
    # including inside identifiers or string literals -- confirm inputs.
    lowered = query.lower()
    if 'where' in lowered:
        query = query[:lowered.index('where')]

    if not query.endswith(';'):
        query = query.strip() + ';'

    result = _view_stmt.parseString(query)
    return View(result)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.