code
stringlengths
1
18.2k
def index_partition(self, partition, force=False):
    """ Adds given partition to the index. """
    # Delegate straight to the backend's partition index.
    partition_index = self.backend.partition_index
    partition_index.index_one(partition, force=force)
def index_bundle(self, bundle, force=False):
    """ Indexes a bundle/dataset and all of its partitions

    :param bundle: A bundle or dataset object
    :param force: If true, index the document even if it already exists
    :return:
    """
    from ambry.orm.dataset import Dataset

    # Accept either a Dataset directly or a bundle that carries one.
    if isinstance(bundle, Dataset):
        dataset = bundle
    else:
        dataset = bundle.dataset
    self.index_dataset(dataset, force)
    for partition in dataset.partitions:
        self.index_partition(partition, force)
def index_library_datasets(self, tick_f=None):
    """ Indexes all datasets of the library.

    Args:
        tick_f (callable, optional): callable of one argument. Gets string
            with index state.
    """
    counts = {'datasets': 0, 'partitions': 0}

    def tick():
        # Report progress only when a callback was supplied.
        if tick_f:
            tick_f('datasets: {} partitions: {}'.format(
                counts['datasets'], counts['partitions']))

    for dataset in self.library.datasets:
        if not self.backend.dataset_index.index_one(dataset):
            # Dataset already indexed; nothing to do.
            continue
        # Dataset was added to the index.
        counts['datasets'] += 1
        tick()
        for partition in dataset.partitions:
            self.backend.partition_index.index_one(partition)
            counts['partitions'] += 1
            tick()
def search_datasets(self, search_phrase, limit=None):
    """ Search for datasets. """
    dataset_index = self.backend.dataset_index
    return dataset_index.search(search_phrase, limit=limit)
def search(self, search_phrase, limit=None):
    """Search for datasets, and expand to database records"""
    from ambry.identity import ObjectNumber
    from ambry.orm.exc import NotFoundError
    from ambry.library.search_backends.base import SearchTermParser
    results = []
    stp = SearchTermParser()
    # Because of the split between searching for partitions and bundles, some terms don't behave right.
    # The source term should be a limit on everything, but it isn't part of the partition doc,
    # so we check for it here.
    parsed_terms = stp.parse(search_phrase)
    for r in self.search_datasets(search_phrase, limit):
        # When the hit has no dataset vid, derive it from the first partition's
        # object number.
        vid = r.vid or ObjectNumber.parse(next(iter(r.partitions))).as_dataset
        r.vid = vid
        try:
            r.bundle = self.library.bundle(r.vid)
            # Post-filter on the 'source' term, since it is not in the
            # partition index documents.
            if 'source' not in parsed_terms or parsed_terms['source'] in r.bundle.dataset.source:
                results.append(r)
        except NotFoundError:
            # Hit refers to a bundle no longer present in the library; skip it.
            pass
    # Best-scoring results first.
    return sorted(results, key=lambda r : r.score, reverse=True)
def list_documents(self, limit=None):
    """ Return a list of the documents

    :param limit:
    :return:
    """
    from itertools import chain

    indexes = (self.backend.dataset_index,
               self.backend.partition_index,
               self.backend.identifier_index)
    # Each index's listing is requested up front, then chained lazily.
    return chain(*(ix.list_documents(limit=limit) for ix in indexes))
def get_parsed_query(self):
    """ Returns string with last query parsed. Assuming called after search_datasets."""
    dataset_q = self.backend.dataset_index.get_parsed_query()[0]
    partition_q = self.backend.partition_index.get_parsed_query()[0]
    return '{} OR {}'.format(dataset_q, partition_q)
def list_build_configuration_set_records(page_size=200, page_index=0, sort="", q=""):
    """ List all build configuration set records. """
    records = list_build_configuration_set_records_raw(page_size, page_index, sort, q)
    if not records:
        # No data: return None implicitly-compatible with the original.
        return None
    return utils.format_json_list(records)
def list_records_for_build_config_set(id, page_size=200, page_index=0, sort="", q=""):
    """ Get a list of BuildRecords for the given BuildConfigSetRecord """
    records = list_records_for_build_config_set_raw(id, page_size, page_index, sort, q)
    if not records:
        return None
    return utils.format_json_list(records)
def auth_as(self, user):
    """auth as a user temporarily"""
    previous = self._user
    self.auth(user)
    try:
        yield
    finally:
        # Always restore the prior user, even if the body raised.
        self.auth(previous)
def sync_accounts(self, accounts_data, clear = False, password=None, cb = None): """ Load all of the accounts from the account section of the config into the database. :param accounts_data: :param password: :return: """ # Map common values into the accounts records all_accounts = self.accounts kmap = Account.prop_map() for account_id, values in accounts_data.items(): if not isinstance(values, dict): continue d = {} a = self.library.find_or_new_account(account_id) a.secret_password = password or self.password for k, v in values.items(): if k in ('id',): continue try: if kmap[k] == 'secret' and v: a.encrypt_secret(v) else: setattr(a, kmap[k], v) except KeyError: d[k] = v a.data = d if values.get('service') == 's3': a.url = 's3://{}'.format(a.account_id) if cb: cb('Loaded account: {}'.format(a.account_id)) self.database.session.commit()
def robust_int(v):
    """Parse an int robustly, ignoring commas and other cruft.

    :param v: an int, float, or string-like value.
    :return: the parsed int, or None for an empty/blank string.
    :raises ValueError: if the cleaned string is not numeric at all.
    """
    if isinstance(v, int):
        return v
    if isinstance(v, float):
        return int(v)
    # Strip thousands separators and surrounding whitespace.
    v = str(v).replace(',', '').strip()
    if not v:
        return None
    try:
        return int(v)
    except ValueError:
        # Generalization: accept decimal strings such as '3.5' by truncating,
        # matching the float-input behavior above.
        return int(float(v))
def subclass(cls, vt_code, vt_args):
    """ Return a dynamic subclass that has the extra parameters built in

    :param vt_code: The full VT code, provided to resolve_type
    :param vt_args: The portion of the VT code to the right of the part that matched a ValueType
    :return:
    """
    # Class names must be valid identifiers, so '/' becomes '_'.
    name = vt_code.replace('/', '_')
    namespace = {'vt_code': vt_code, 'vt_args': vt_args}
    return type(name, (cls,), namespace)
def render(self, template_name, **kw):
    '''
    Given a template name and template vars.
    Searches a template file based on engine set,
    and renders it with corresponding engine.
    Returns a string.
    '''
    logger.debug('Rendering template "%s"', template_name)
    # Merge per-call vars over the globals without mutating them.
    template_vars = self.globs.copy()
    template_vars.update(kw)
    resolved_name, engine = self.resolve(template_name)
    return engine.render(resolved_name, **template_vars)
def render(self, template_name, __data=None, **kw):
    '''Given a template name and template data.
       Renders a template and returns as string'''
    merged = self._vars(__data, **kw)
    return self.template.render(template_name, **merged)
def render_to_response(self, template_name, __data, content_type="text/html"):
    '''Given a template name and template data.
       Renders a template and returns `webob.Response` object'''
    body = self.render(template_name, __data)
    return Response(body, content_type=content_type)
def per_section(it, is_delimiter=lambda x: x.isspace()):
    """ From http://stackoverflow.com/a/25226944/610569
    Group consecutive non-delimiter lines into sections.
    """
    section = []
    for line in it:
        if not is_delimiter(line):
            section.append(line.rstrip())  # OR section.append(line)
            continue
        if section:
            yield section  # OR ''.join(section)
        section = []
    # Emit the trailing section when input does not end with a delimiter.
    if section:
        yield section
def per_chunk(iterable, n=1, fillvalue=None):
    """ From http://stackoverflow.com/a/8991553/610569

            >>> list(per_chunk('abcdefghi', n=2))
            [('a', 'b'), ('c', 'd'), ('e', 'f'), ('g', 'h'), ('i', None)]
            >>> list(per_chunk('abcdefghi', n=3))
            [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i')]
    """
    # n references to the *same* iterator advance in lockstep, so
    # zip_longest groups the stream into n-sized chunks.
    it = iter(iterable)
    return zip_longest(*([it] * n), fillvalue=fillvalue)
def per_window(sequence, n=1):
    """ From http://stackoverflow.com/q/42220614/610569

            >>> list(per_window([1,2,3,4], n=2))
            [(1, 2), (2, 3), (3, 4)]
            >>> list(per_window([1,2,3,4], n=3))
            [(1, 2, 3), (2, 3, 4)]
    """
    seq = list(sequence)
    # Slide a fixed-width window one step at a time.
    for lo in range(len(seq) - n + 1):
        yield tuple(seq[lo:lo + n])
def skipping_window(sequence, target, n=3):
    """ Return a sliding window with a constraint to check that target is inside the window.
    From http://stackoverflow.com/q/43626525/610569

            >>> list(skipping_window([1,2,3,4,5], 2, 3))
            [(1, 2, 3), (2, 3, 4)]
    """
    seq = list(sequence)
    lo, hi = 0, n
    while hi <= len(seq):
        window = seq[lo:hi]
        if target in window:
            yield tuple(window)
        lo += 1
        hi = lo + n
        # Fast forwarding the start.
        # Find the next window which contains the target.
        try:
            # `seq.index(target, lo) - (n-1)` would be the next
            # window where the constraint is met.
            lo = max(seq.index(target, lo) - (n - 1), lo)
            hi = lo + n
        except ValueError:
            # Target never appears again; stop scanning.
            break
def camel_shuffle(sequence):
    """ Inspired by https://stackoverflow.com/q/42549212/610569

            >>> list(range(12)) # Linear.
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
            >>> camel_shuffle(list(range(12))) # M-shape.
            [0, 4, 8, 9, 5, 1, 2, 6, 10, 11, 7, 3]
            >>> camel_shuffle(list(reversed(range(12)))) #W-shape.
            [11, 7, 3, 2, 6, 10, 9, 5, 1, 0, 4, 8]
    """
    # Split into alternating halves twice, giving four interleaved strands,
    # then recombine with the 2nd and 4th strands reversed.
    odd_half, even_half = zigzag(sequence)
    first, third = zigzag(odd_half)
    second, fourth = zigzag(even_half)
    return first + list(reversed(second)) + third + list(reversed(fourth))
def page(self):
    '''Current page.'''
    raw = self.request.GET.get(self.page_param)
    if not raw:
        # Missing or empty parameter defaults to the first page.
        return 1
    try:
        number = int(raw)
    except ValueError:
        # Non-numeric page parameter.
        self.invalid_page()
        return 1
    if number < 1:
        self.invalid_page()
        return 1
    return number
def url(self):
    '''Current or base URL.
    Can be redefined via keyword argument on initialization.
    Returns `iktomi.web.URL object. `'''
    current = self.request.url
    return URL.from_url(current, show_host=self.show_host)
def page_url(self, page):
    '''
    Returns URL for page, page is included as query parameter.
    Can be redefined by keyword argument.

    Returns None when `page` is None.
    '''
    if page is None:
        return None
    if page != 1:
        return self.url.qs_set(**{self.page_param: page})
    # Page 1 is the default, so drop the parameter entirely.
    # BUG FIX: delete the *configured* parameter name, not the literal
    # 'page', so a customized `page_param` round-trips correctly.
    return self.url.qs_delete(self.page_param)
def pages_count(self):
    '''Number of pages.'''
    if not self.limit or self.count < self.limit:
        return 1
    # A small remainder ("orphans") is folded into the last full page.
    if self.count % self.limit <= self.orphans:
        return self.count // self.limit
    return int(math.ceil(float(self.count) / self.limit))
def slice(self, items):
    '''Slice the sequence of all items to obtain them for current page.'''
    if not self.limit:
        # No pagination configured: everything, as a copy.
        return items[:]
    if self.page > self.pages_count:
        # Out-of-range page: nothing to show.
        return []
    lo = self.limit * (self.page - 1)
    if self.page == self.pages_count:
        # Last page takes everything that remains (including orphans).
        return items[lo:]
    return items[lo:lo + self.limit]
def get_build_configurations(self, id, **kwargs):
    """ Gets all BuildConfigurations associated with the specified Project Id

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

            >>> def callback_function(response):
            >>>     pprint(response)
            >>>
            >>> thread = api.get_build_configurations(id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param int id: Project Id (required)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL Query
    :return: BuildConfigurationPage
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    delegate = self.get_build_configurations_with_http_info
    if kwargs.get('callback'):
        # Asynchronous: the delegate returns the request thread immediately.
        return delegate(id, **kwargs)
    # Synchronous: the delegate returns the response data.
    data = delegate(id, **kwargs)
    return data
def dict(self):
    """A dict that holds key/values for all of the properties in the object.

    :return: dict of mapped properties, plus 'secret' (decrypted when
        possible) and any extra keys stored in the free-form data blob.
    """
    # BUG FIX: the original exclusion used `('data')`, which is the *string*
    # 'data', so `not in` was a substring test and any key that happened to
    # be a substring of 'data' (e.g. 'a', 'at') was silently dropped.
    # A one-element tuple restores the intended membership test.
    d = {p.key: getattr(self, p.key)
         for p in self.__mapper__.attrs if p.key not in ('data',)}
    d['secret'] = 'not available'
    if self.secret_password:
        try:
            d['secret'] = self.decrypt_secret()
        except AccountDecryptionError:
            # Leave the 'not available' placeholder if decryption fails.
            pass
    if self.data:
        # Merge the free-form data blob over the mapped columns.
        d.update(self.data)
    return d
def list_build_records(page_size=200, page_index=0, sort="", q=""):
    """ List all BuildRecords """
    records = list_build_records_raw(page_size, page_index, sort, q)
    if not records:
        return None
    return utils.format_json_list(records)
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """ List all BuildRecords for a given BuildConfiguration """
    records = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q)
    if not records:
        return None
    return utils.format_json_list(records)
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
    """ List all BuildRecords for a given Project """
    records = list_records_for_project_raw(id, name, page_size, page_index, sort, q)
    if not records:
        return None
    return utils.format_json_list(records)
def list_built_artifacts(id, page_size=200, page_index=0, sort="", q=""):
    """ List Artifacts associated with a BuildRecord """
    artifacts = list_built_artifacts_raw(id, page_size, page_index, sort, q)
    if not artifacts:
        return None
    return utils.format_json_list(artifacts)
def list_dependency_artifacts(id, page_size=200, page_index=0, sort="", q=""):
    """ List dependency artifacts associated with a BuildRecord """
    artifacts = list_dependency_artifacts_raw(id, page_size, page_index, sort, q)
    if not artifacts:
        return None
    return utils.format_json_list(artifacts)
def on_start(self):
    """Runs when the actor is started and schedules a status update """
    logger.info('StatusReporter started.')
    # An interval of zero disables status reporting entirely.
    if self.config['status_update_interval'] == 0:
        logger.info('StatusReporter disabled by configuration.')
        return
    self.in_future.report_status()
def report_again(self, current_status):
    """Computes a sleep interval, sleeps for the specified amount of time
    then kicks off another status report.
    """
    # Report less often the less active the player is.
    multipliers = {'playing': 1, 'paused': 2, 'stopped': 5}
    factor = multipliers[current_status['state']]
    # Configured interval is in milliseconds; sleep takes seconds.
    interval = (self.config['status_update_interval'] * factor) / 1000.0
    time.sleep(interval)
    self.in_future.report_status()
def report_status(self):
    """Get status of player from mopidy core and send webhook. """
    playback = self.core.playback
    current_status = {
        'current_track': playback.current_track.get(),
        'state': playback.state.get(),
        'time_position': playback.time_position.get(),
    }
    send_webhook(self.config, {'status_report': current_status})
    # Schedule the next report after an appropriate delay.
    self.report_again(current_status)
def get_urlfield_cache_key(model, pk, language_code=None):
    """ The low-level function to get the cache key for a model. """
    # Fall back to the active language when none is given.
    lang = language_code or get_language()
    return 'anyurlfield.{0}.{1}.{2}.{3}'.format(
        model._meta.app_label, model.__name__, pk, lang)
def get_object_cache_keys(instance):
    """ Return the cache keys associated with an object. """
    if not instance.pk or instance._state.adding:
        # Unsaved objects have no cached URLs yet.
        return []
    return [get_urlfield_cache_key(instance.__class__, instance.pk, language)
            for language in _get_available_languages(instance)]
def flush(self):
    """ Add accumulator to the moving average queue and reset it.

    For example, called by the StatsCollector once per second to calculate
    per-second average.
    """
    sample = self.accumulator
    self.accumulator = 0
    window = self.stream
    window.append(sample)
    self.sum += sample
    size = len(window)
    if size > self.period:
        # Evict the oldest sample once the window exceeds the period.
        self.sum -= window.popleft()
        size -= 1
    # Guard against an empty window (possible when period is zero).
    self.last_average = self.sum / size if size else 0
def artist_detail(self, artist_id):
    """Fetch artist details (top tracks) from the h5 API."""
    path = '/v8/fcg-bin/fcg_v8_singer_track_cp.fcg'
    url = api_base_url + path
    params = {
        'singerid': artist_id,
        'songstatus': 1,
        'order': 'listen',
        'begin': 0,
        'num': 50,
        'from': 'h5',
        'platform': 'h5page',
    }
    response = requests.get(url, params=params, timeout=self._timeout)
    payload = response.json()
    return payload['data']
def send_webhook(config, payload):
    """Sends a HTTP request to the configured server.

    All exceptions are suppressed but emit a warning message in the log.

    :param config: mapping with 'webhook_url', 'api_key_header_name' and 'api_key'.
    :param payload: JSON-serializable object posted as the request body.
    """
    try:
        response = requests.post(
            config['webhook_url'],
            data=json.dumps(payload, cls=ModelJSONEncoder),
            headers={config['api_key_header_name']: config['api_key']},
        )
    except Exception as e:
        # BUG FIX: the original format string used indices {1}/{2} with only
        # two arguments, raising IndexError inside this handler; it also used
        # the Python-2-only `e.message`.
        logger.warning('Unable to send webhook: ({0}) {1}'.format(
            e.__class__.__name__,
            e,
        ))
    else:
        logger.debug('Webhook response: ({0}) {1}'.format(
            response.status_code,
            response.text,
        ))
def clean(self):
    """Remove all of the tables and data from the warehouse"""
    conn = self._backend._get_connection()
    self._backend.clean(conn)
def list(self):
    """List the tables in the database"""
    conn = self._backend._get_connection()
    # Materialize the backend's listing into a plain list.
    return [table for table in self._backend.list(conn)]
def install(self, ref, table_name=None, index_columns=None,logger=None):
    """ Finds partition by reference and installs it to warehouse db.

    Args:
        ref (str): id, vid (versioned id), name or vname (versioned name) of the partition.
    """
    try:
        obj_number = ObjectNumber.parse(ref)
        if not isinstance(obj_number, TableNumber):
            # Not a table reference; treat it as a partition below.
            raise NotObjectNumberError
        table = self._library.table(ref)
        connection = self._backend._get_connection()
        return self._backend.install_table(connection, table, logger=logger)
    except NotObjectNumberError:
        # assume partition.
        partition = self._library.partition(ref)
        connection = self._backend._get_connection()
        return self._backend.install(
            connection, partition, table_name=table_name,
            index_columns=index_columns, logger=logger)
def materialize(self, ref, table_name=None, index_columns=None, logger=None):
    """ Creates materialized table for given partition reference.

    Args:
        ref (str): id, vid, name or vname of the partition.
        table_name (str, optional): name for the materialized table.
        index_columns (list, optional): columns to index after install.
        logger (optional): logger for progress; defaults to the library's logger.

    Returns:
        str: name of the partition table in the database.
    """
    from ambry.library import Library
    assert isinstance(self._library, Library)

    # BUG FIX: `logger` defaults to None but was used unconditionally below,
    # raising AttributeError when callers omit it; fall back to the library's
    # logger, matching the convention used by query().
    if not logger:
        logger = self._library.logger

    logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref))
    partition = self._library.partition(ref)
    connection = self._backend._get_connection()
    return self._backend.install(
        connection, partition, table_name=table_name,
        index_columns=index_columns, materialize=True, logger=logger)
def index(self, ref, columns):
    """ Create an index on the columns.

    Args:
        ref (str): id, vid, name or versioned name of the partition.
        columns (list of str): names of the columns needed indexes.
    """
    from ambry.orm.exc import NotFoundError

    logger.debug('Creating index for partition.\n ref: {}, columns: {}'.format(ref, columns))
    connection = self._backend._get_connection()
    try:
        target = self._library.partition(ref)
    except NotFoundError:
        # Not a partition reference; pass the raw ref (e.g. a table name).
        target = ref
    self._backend.index(connection, target, columns)
def parse_sql(self, asql):
    """ Executes all sql statements from asql.

    Args:
        library (library.Library):
        asql (str): ambry sql query - see https://github.com/CivicKnowledge/ambry/issues/140
            for details.

    Returns:
        list of str: one preprocessed SQL string per input statement.
    """
    import sqlparse
    # Strip comments first so the preprocessors see clean statements.
    statements = sqlparse.parse(sqlparse.format(asql, strip_comments=True))
    parsed_statements = []
    for statement in statements:
        statement_str = statement.to_unicode().strip()
        # Each backend-supplied preprocessor may rewrite the statement
        # (e.g. to resolve ambry references into concrete table names).
        for preprocessor in self._backend.sql_processors():
            statement_str = preprocessor(statement_str, self._library, self._backend, self.connection)
        parsed_statements.append(statement_str)
    return parsed_statements
def query(self, asql, logger=None):
    """ Execute an ASQL file and return the result of the first SELECT statement.

    :param asql: ambry SQL text, possibly containing multiple statements.
    :param logger: optional logger; defaults to the library's logger.
    :return: the open cursor of the first SELECT statement, or a no-op
        closable/iterable placeholder when there is no SELECT.
    """
    import sqlparse
    from ambry.mprlib.exceptions import BadSQLError
    from ambry.bundle.asql_parser import process_sql
    from ambry.orm.exc import NotFoundError

    if not logger:
        logger = self._library.logger

    rec = process_sql(asql, self._library)

    # Drops run in reverse order so dependents are dropped before dependencies.
    for drop in reversed(rec.drop):
        if drop:
            connection = self._backend._get_connection()
            cursor = self._backend.query(connection, drop, fetch=False)
            cursor.close()

    for vid in rec.materialize:
        logger.debug('Materialize {}'.format(vid))
        self.materialize(vid, logger=logger)

    for vid in rec.install:
        logger.debug('Install {}'.format(vid))
        self.install(vid, logger=logger)

    for statement in rec.statements:
        statement = statement.strip()
        logger.debug("Process statement: {}".format(statement[:60]))
        if statement.lower().startswith('create'):
            logger.debug(' Create {}'.format(statement))
            connection = self._backend._get_connection()
            cursor = self._backend.query(connection, statement, fetch=False)
            cursor.close()
        elif statement.lower().startswith('select'):
            logger.debug('Run query {}'.format(statement))
            connection = self._backend._get_connection()
            # Return the open cursor of the first SELECT to the caller.
            return self._backend.query(connection, statement, fetch=False)

    for table_or_vid, columns in rec.indexes:
        logger.debug('Index {}'.format(table_or_vid))
        try:
            self.index(table_or_vid, columns)
        except NotFoundError as e:
            # Common when the index target isn't a VID, so no partition can be found.
            # BUG FIX: log the loop variable, not `vid` left over from the
            # materialize/install loops above.
            logger.debug('Failed to index {}; {}'.format(table_or_vid, e))
        except Exception as e:
            logger.error('Failed to index {}; {}'.format(table_or_vid, e))

    # A fake cursor that can be closed and iterated, returned when no SELECT ran.
    class closable_iterable(object):
        def close(self):
            pass

        def __iter__(self):
            # BUG FIX: the original returned None here, which makes iter()
            # raise TypeError; yield nothing via an empty iterator instead.
            return iter(())

    return closable_iterable()
def dataframe(self,asql, logger = None):
    """Like query(), but returns a Pandas dataframe"""
    import pandas as pd
    from ambry.mprlib.exceptions import BadSQLError

    try:
        cursor = self.query(asql, logger)

        def rows_with_header(cur):
            # Emit the column names first (available only after the first
            # fetch), then every data row.
            first = True
            for row in cur:
                if first:
                    yield [col[0] for col in cur.getdescription()]
                    first = False
                yield row

        stream = rows_with_header(cursor)
        header = next(stream)
        return pd.DataFrame(stream, columns=header)
    except BadSQLError as e:
        import traceback
        self._logger.error("SQL Error: {}".format(e))
        self._logger.debug(traceback.format_exc())
def geoframe(self, sql, simplify=None, crs=None, epsg=4326):
    """ Return geopandas dataframe

    :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
    :param crs: Coordinate reference system information
    :param epsg: Specifiy the CRS as an EPGS number.
    :return: A Geopandas GeoDataFrame
    """
    import geopandas
    from shapely.wkt import loads
    from fiona.crs import from_epsg

    if crs is None:
        try:
            crs = from_epsg(epsg)
        except TypeError:
            raise TypeError('Must set either crs or epsg for output.')

    df = self.dataframe(sql)
    wkt_series = df['geometry']
    # Parse WKT strings into shapes, optionally simplifying each geometry.
    if simplify:
        shapes = wkt_series.apply(lambda wkt: loads(wkt).simplify(simplify))
    else:
        shapes = wkt_series.apply(lambda wkt: loads(wkt))
    df['geometry'] = geopandas.GeoSeries(shapes)
    return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')
def shapes(self, simplify=None):
    """ Return geodata as a list of Shapely shapes

    :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
    :param predicate: A single-argument function to select which records to include in the output.
    :return: A list of Shapely objects
    """
    from shapely.wkt import loads

    # Pick the WKT parser once, with or without simplification.
    if simplify:
        parse = lambda wkt: loads(wkt).simplify(simplify)
    else:
        parse = loads
    return [parse(row.geometry) for row in self]
def get_url_param(self, index, default=None):
    """ Return url parameter with given index.

    Args:
        - index: starts from zero, and come after controller and action names in url.
    """
    params = self.get_url_params()
    if index >= len(params):
        # No parameter at that position.
        return default
    return params[index]
def get_widget(self):
    """ Create the widget for the URL type. """
    form_field = self.get_form_field()
    widget = form_field.widget
    # A widget *class* (rather than an instance) must be instantiated by hand.
    if isinstance(widget, type):
        widget = widget()
    # Auto skip if choices is not an existing attribute.
    choices = getattr(form_field, 'choices', None)
    if choices is not None and hasattr(widget, 'choices'):
        widget.choices = choices
    return widget
def register(self, ModelClass, form_field=None, widget=None, title=None, prefix=None, has_id_value=True):
    """ Register a custom model with the ``AnyUrlField``.

    :param ModelClass: model to register; may not be registered twice.
    :param form_field: optional form field used to select an object.
    :param widget: optional widget; mutually exclusive with form_field.
    :param title: display title; defaults to the model's verbose_name.
    :param prefix: stored prefix identifying the type; defaults to
        "app_label.modelname".
    :param has_id_value: whether the type stores an object id.
    :return: the created UrlType.
    :raises ValueError: on duplicate model/prefix, invalid prefix, or when
        both form_field and widget are given.
    """
    if any(urltype.model == ModelClass for urltype in self._url_types):
        raise ValueError("Model is already registered: '{0}'".format(ModelClass))
    opts = ModelClass._meta
    # Proxy models register under their concrete model's options.
    opts = opts.concrete_model._meta
    if not prefix:
        # Store something descriptive, easier to lookup from raw database content.
        prefix = '{0}.{1}'.format(opts.app_label, opts.object_name.lower())
    if not title:
        title = ModelClass._meta.verbose_name
    # Prefixes that look like external URL schemes would be ambiguous.
    if self.is_external_url_prefix(prefix):
        raise ValueError("Invalid prefix value: '{0}'.".format(prefix))
    if self[prefix] is not None:
        raise ValueError("Prefix is already registered: '{0}'".format(prefix))
    if form_field is not None and widget is not None:
        raise ValueError("Provide either a form_field or widget; use the widget parameter of the form field instead.")
    urltype = UrlType(ModelClass, form_field, widget, title, prefix, has_id_value)
    # Invalidate cached URLs whenever an instance of the model is saved.
    signals.post_save.connect(_on_model_save, sender=ModelClass)
    self._url_types.append(urltype)
    return urltype
def get_for_model(self, ModelClass):
    """ Return the URL type for a given model class """
    # First registered type whose model is exactly this class, else None.
    return next((ut for ut in self._url_types if ut.model is ModelClass), None)
def index(self, prefix):
    """ Return the model index for a prefix. """
    # Any web domain will be handled by the standard URLField.
    if self.is_external_url_prefix(prefix):
        prefix = 'http'
    for position, urltype in enumerate(self._url_types):
        if urltype.prefix == prefix:
            return position
    return None
def message_received(request, backend_name):
    """Handle HTTP requests from Tropo.

    Dispatches on the decoded JSON body: a signed 'program' parameter is
    verified and echoed back; an inbound text message ('from' in session)
    is handed to RapidSMS; anything else is a bad request.
    """
    logger.debug("@@ request from Tropo - raw data: %s" % request.body)
    try:
        post = json.loads(request.body)
    except ValueError:
        # Body was not valid JSON.
        logger.exception("EXCEPTION decoding post data in incoming request")
        return HttpResponseBadRequest()
    except Exception:
        logger.exception("@@responding to tropo with error")
        return HttpResponseServerError()
    logger.debug("@@ Decoded data: %r" % post)
    if 'session' not in post:
        logger.error("@@HEY, post does not contain session, " "what's going on?")
        return HttpResponseBadRequest()
    session = post['session']
    parms = session.get('parameters', {})
    if 'program' in parms:
        # Execute a program that we passed to Tropo to pass back to us.
        # Extract the program, while verifying it came from us and
        # has not been modified.
        try:
            program = signing.loads(parms['program'])
        except signing.BadSignature:
            logger.exception("@@ received program with bad signature")
            return HttpResponseBadRequest()
        return HttpResponse(json.dumps(program))
    if 'from' in session:
        # Must have received a message
        # FIXME: is there any way we can verify it's really Tropo calling us?
        logger.debug("@@Got a text message")
        try:
            from_address = session['from']['id']
            text = session['initialText']
            logger.debug("@@Received message from %s: %s" % (from_address, text))
            # pass the message to RapidSMS
            identity = from_address
            connections = lookup_connections(backend_name, [identity])
            receive(text, connections[0])
            # Respond nicely to Tropo
            program = json.dumps({"tropo": [{"hangup": {}}]})
            logger.debug("@@responding to tropo with hangup")
            return HttpResponse(program)
        except Exception:
            logger.exception("@@responding to tropo with error")
            return HttpResponseServerError()
    logger.error("@@No recognized command in request from Tropo")
    return HttpResponseBadRequest()
def list_running_builds(page_size=200, page_index=0, sort=""):
    """ List all RunningBuilds """
    content = list_running_builds_raw(page_size, page_index, sort)
    if not content:
        return None
    return utils.format_json_list(content)
def create_session(self, session_id, register=True, session_factory=None):
    """ Creates new session object and returns it.

    @param session_id: Session id. If not provided, will generate a new session id.
    @param register: Should be the session registered in a storage. Websockets don't need it.
    @param session_factory: Use the given (class, args, kwargs) tuple to
        create the session. Class should derive from `BaseSession`.
        Normally not needed.
    """
    if session_factory is None:
        # Default session class with the standard argument set.
        new_session = session.Session(
            self._connection, self, session_id,
            self.settings.get('disconnect_delay'))
    else:
        # use custom class to create session
        factory_cls, factory_args, factory_kwargs = session_factory
        new_session = factory_cls(*factory_args, **factory_kwargs)
    if register:
        self._sessions.add(new_session)
    return new_session
def broadcast(self, clients, msg):
    """ Optimized C{broadcast} implementation. Depending on type of the session, will
    json-encode message once and will call either C{send_message} or C{send_jsonifed}.

    @param clients: Clients iterable
    @param msg: Message to send
    """
    encoded = None
    delivered = 0
    for client in clients:
        sess = client.session
        if sess.is_closed:
            continue
        if sess.send_expects_json:
            if encoded is None:
                # Encode lazily, at most once for the whole broadcast.
                encoded = proto.json_encode(msg)
            sess.send_jsonified(encoded, stats=False)
        else:
            sess.send_message(msg, stats=False)
        delivered += 1
    self.stats.packSent(delivered)
def between(min_value, max_value):
    'Numerical values limit'
    message = N_('value should be between %(min)d and %(max)d') % \
        dict(min=min_value, max=max_value)

    @validator(message)
    def wrapper(conv, value):
        if value is None:
            # Absent value; the `required` check is handled elsewhere.
            return True
        # Inclusive range check via chained comparison.
        return min_value <= value <= max_value
    return wrapper
def accept(self, value, silent=False):
    '''
    Accepts a value from the form, calls :meth:`to_python` method,
    checks `required` condition, applies filters and validators,
    catches ValidationError.

    :param value: a value to be accepted
    :param silent=False: write errors to `form.errors` or not
    :return: the converted/validated value, or the field's existing value
        when validation failed.
    '''
    try:
        value = self.to_python(value)
        # Validators run in order; each may transform the value.
        for v in self.validators:
            value = v(self, value)
        # Required check runs after conversion/validation, on the final value.
        if self.required and self._is_empty(value):
            raise ValidationError(self.error_required)
    except ValidationError as e:
        if not silent:
            e.fill_errors(self.field)
        #NOTE: by default value for field is in python_data,
        # but this is not true for FieldList where data
        # is dynamic, so we set value to None for absent value.
        value = self._existing_value
    return value
def clean_value(self, value):
    '''
    Additional clean action to preprocess value before :meth:`to_python` method.

    Subclasses may define own clean_value method to allow additional clean
    actions like html cleanup, etc.
    '''
    # We have to clean before checking min/max length. It's done in
    # separate method to allow additional clean action in subclasses.
    cleaned = value
    if self.nontext_replacement is not None:
        cleaned = replace_nontext(cleaned, self.nontext_replacement)
    return cleaned.strip() if self.strip else cleaned
def options(self):
    '''
    Yields `(raw_value, label)` pairs for all acceptable choices.
    '''
    converter = self.conv
    for python_value, label in self.choices:
        # Expose the raw (serialized) form of each python value.
        yield converter.from_python(python_value), label
def _tzinfome(tzinfo):
    """Gets a tzinfo object from a string.

    Args:
        tzinfo: A string (or string like) object, or a datetime.tzinfo object.

    Returns:
        An datetime.tzinfo object.

    Raises:
        UnknownTimeZoneError: If the timezone given can't be decoded.
    """
    # Already a tzinfo object: pass it straight through.
    if isinstance(tzinfo, datetime.tzinfo):
        return tzinfo
    try:
        tzinfo = pytz.timezone(tzinfo)
        assert tzinfo.zone in pytz.all_timezones
    except AttributeError:
        # Non-string-like input (e.g. an int) ends up here.
        raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo)
    return tzinfo
def localize(dt, force_to_local=True):
    """Localize a datetime to the local timezone.

    If dt is naive, returns the same datetime with the local timezone, otherwise
    uses astimezone to convert.

    Args:
        dt: datetime object.
        force_to_local: Force all results to be in local time.

    Returns:
        A datetime_tz object.
    """
    if not isinstance(dt, datetime_tz):
        if not dt.tzinfo:
            # Naive datetimes are assumed to already be in local time.
            return datetime_tz(dt, tzinfo=localtz())
        dt = datetime_tz(dt)
    return dt.astimezone(localtz()) if force_to_local else dt
def get_naive(dt):
    """Gets a naive datetime from a datetime.

    datetime_tz objects can't just have tzinfo replaced with None, you need to
    call asdatetime.

    Args:
        dt: datetime object.

    Returns:
        datetime object without any timezone information.
    """
    if not dt.tzinfo:
        return dt
    # datetime_tz instances expose asdatetime() for stripping the zone.
    asdatetime = getattr(dt, "asdatetime", None)
    if asdatetime is not None:
        return asdatetime()
    return dt.replace(tzinfo=None)
def detect_timezone():
    """Try and detect the timezone that Python is currently running in.

    We have a bunch of different methods for trying to figure this out (listed in
    order they are attempted).
        * In windows, use win32timezone.TimeZoneInfo.local()
        * Try TZ environment variable.
        * Try and find /etc/timezone file (with timezone name).
        * Try and find /etc/localtime file (with timezone data).
        * Try and match a TZ to the current dst/offset/shortname.

    Returns:
        The detected local timezone as a tzinfo object

    Raises:
        pytz.UnknownTimeZoneError: If it was unable to detect a timezone.
    """
    if sys.platform == "win32":
        tz = _detect_timezone_windows()
        if tz is not None:
            return tz

    # First we try the TZ variable
    tz = _detect_timezone_environ()
    if tz is not None:
        return tz

    # Second we try /etc/timezone and use the value in that
    tz = _detect_timezone_etc_timezone()
    if tz is not None:
        return tz

    # Next we try and see if something matches the tzinfo in /etc/localtime
    tz = _detect_timezone_etc_localtime()
    if tz is not None:
        return tz

    # Next we try and use a similiar method to what PHP does.
    # We first try to search on time.tzname, time.timezone, time.daylight to
    # match a pytz zone.
    warnings.warn("Had to fall back to worst detection method (the 'PHP' "
                  "method).")
    tz = _detect_timezone_php()
    if tz is not None:
        return tz

    raise pytz.UnknownTimeZoneError("Unable to detect your timezone!")
def _load_local_tzinfo():
    """Load zoneinfo from local disk.

    Returns:
        Dict mapping zone name (path relative to TZDIR) to a tzinfo object;
        empty when the zoneinfo directory does not exist.
    """
    tzdir = os.environ.get("TZDIR", "/usr/share/zoneinfo/posix")

    localtzdata = {}
    for dirpath, _, filenames in os.walk(tzdir):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            name = os.path.relpath(filepath, tzdir)
            # IMPROVEMENT: use a context manager so the file is closed even
            # if build_tzinfo raises on a malformed zone file (the original
            # leaked the handle in that case).
            with open(filepath, "rb") as f:
                tzinfo = pytz.tzfile.build_tzinfo(name, f)
            localtzdata[name] = tzinfo
    return localtzdata
def _detect_timezone_etc_localtime():
    """Detect timezone based on /etc/localtime file.

    Builds a tzinfo from /etc/localtime, then tries to find a named zone in
    the local zoneinfo database (or pytz's built-in one) whose attributes
    match it.

    Returns:
        A pytz tzinfo for the matched name, the raw /etc/localtime tzinfo if
        no named match was found, or None if /etc/localtime does not exist.
    """
    matches = []
    if os.path.exists("/etc/localtime"):
        # Use a context manager so the handle is closed even if
        # build_tzinfo raises (the original open/close pair leaked it).
        with open("/etc/localtime", "rb") as f:
            localtime = pytz.tzfile.build_tzinfo("/etc/localtime", f)

        # We want to match against the local database because /etc/localtime
        # will be copied from that. Once we have found a name for
        # /etc/localtime, we can use the name to get the "same" timezone from
        # the inbuilt pytz database.
        tzdatabase = _load_local_tzinfo()
        if tzdatabase:
            tznames = tzdatabase.keys()
            tzvalues = tzdatabase.__getitem__
        else:
            tznames = pytz.all_timezones
            tzvalues = _tzinfome

        # See if we can find a "Human Name" for this..
        for tzname in tznames:
            tz = tzvalues(tzname)

            if dir(tz) != dir(localtime):
                continue

            for attrib in dir(tz):
                # Ignore functions and specials
                if callable(getattr(tz, attrib)) or attrib.startswith("__"):
                    continue

                # This will always be different
                if attrib == "zone" or attrib == "_tzinfos":
                    continue

                if getattr(tz, attrib) != getattr(localtime, attrib):
                    break

            # We get here iff break didn't happen, i.e. no meaningful
            # attributes differ between tz and localtime
            else:
                # Try and get a timezone from pytz which has the same name as
                # the zone which matches in the local database.
                if tzname not in pytz.all_timezones:
                    warnings.warn("Skipping %s because not in pytz database." %
                                  tzname)
                    continue

                matches.append(_tzinfome(tzname))

        matches.sort(key=lambda x: x.zone)

        if len(matches) == 1:
            return matches[0]

        if len(matches) > 1:
            warnings.warn("We detected multiple matches for your /etc/localtime. "
                          "(Matches where %s)" % matches)
            return matches[0]
        else:
            warnings.warn("We detected no matches for your /etc/localtime.")

        # Register /etc/localtime as the timezone loaded.
        pytz._tzinfo_cache["/etc/localtime"] = localtime
        return localtime
def _wrap_method(name):
    """Patch a datetime.datetime method onto datetime_tz.

    The wrapper converts any plain datetime.datetime result back into the
    caller's own type, so wrapped methods stay closed over the subclass.

    Args:
        name: The name of the method to patch
    """
    original = getattr(datetime.datetime, name)

    # Have to give the second argument as method has no __module__ option.
    @functools.wraps(original, ("__name__", "__doc__"), ())
    def wrapper(self, *args, **kw):
        result = original(self, *args, **kw)
        if isinstance(result, datetime.datetime):
            if not isinstance(result, type(self)):
                result = type(self)(result)
        return result

    setattr(datetime_tz, name, wrapper)
def asdatetime(self, naive=True):
    """Return this datetime_tz as a plain datetime object.

    Args:
        naive: Return *without* any tz info.

    Returns:
        This datetime_tz as a datetime object.
    """
    year, month, day, hour, minute, second = self.timetuple()[0:6]
    if naive:
        return datetime.datetime(year, month, day, hour, minute, second,
                                 self.microsecond)
    return datetime.datetime(year, month, day, hour, minute, second,
                             self.microsecond, self.tzinfo)
def asdate(self):
    """Return this datetime_tz as a date object.

    Returns:
        This datetime_tz as a date object.
    """
    # Drop the time-of-day and tz parts, keep only the calendar date.
    return datetime.date(*self.timetuple()[:3])
def astimezone(self, tzinfo):
    """Return a version of this timestamp converted to the given timezone.

    Args:
        tzinfo: Either a datetime.tzinfo object or a string (which will be
            looked up in pytz).

    Returns:
        A datetime_tz object in the given timezone.
    """
    # Assert we are not a naive datetime object
    assert self.tzinfo is not None

    target = _tzinfome(tzinfo)
    converted = self.asdatetime(naive=False).astimezone(target)
    return type(self)(converted)
def replace(self, **kw):
    """Return datetime with new specified fields given as arguments.

    For example, dt.replace(days=4) would return a new datetime_tz object
    with exactly the same as dt but with the days attribute equal to 4.

    Any attribute can be replaced, but tzinfo can not be set to None.

    Args:
        Any datetime_tz attribute.

    Returns:
        A datetime_tz object with the attributes replaced.

    Raises:
        TypeError: If the given replacement is invalid.
    """
    # Fix: the original initialized is_dst = None and immediately
    # overwrote it in both branches; the dead assignment is removed and
    # the `in`-check + del pairs are replaced with dict.pop.
    if "tzinfo" in kw:
        tzinfo = kw.pop("tzinfo")
        if tzinfo is None:
            raise TypeError("Can not remove the timezone use asdatetime()")
    else:
        tzinfo = None

    if "is_dst" in kw:
        is_dst = kw.pop("is_dst")
    else:
        # Use our own DST setting..
        is_dst = self.is_dst

    replaced = self.asdatetime().replace(**kw)

    return type(self)(
        replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)
def smartparse(cls, toparse, tzinfo=None):
    """Method which uses dateutil.parse and extras to try and parse the string.

    Valid dates are found at:
     http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2

    Other valid formats include:
      "now" or "today"
      "yesterday"
      "tomorrow"
      "5 minutes ago"
      "10 hours ago"
      "10h5m ago"
      "start of yesterday"
      "end of tomorrow"
      "end of 3rd of March"

    Args:
      toparse: The string to parse.
      tzinfo: Timezone for the resultant datetime_tz object should be in.
              (Defaults to your local timezone.)

    Returns:
      New datetime_tz object.

    Raises:
      ValueError: If unable to make sense of the input.
    """
    # Default for empty fields are:
    #  year/month/day == now
    #  hour/minute/second/microsecond == 0
    toparse = toparse.strip()

    if tzinfo is None:
        dt = cls.now()
    else:
        dt = cls.now(tzinfo)

    default = dt.replace(hour=0, minute=0, second=0, microsecond=0)

    # Remove "start of " and "end of " prefix in the string
    if toparse.lower().startswith("end of "):
        toparse = toparse[7:].strip()

        # "end of X" == last microsecond of day X: roll to next midnight,
        # then step back one microsecond.
        dt += datetime.timedelta(days=1)
        dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
        dt -= datetime.timedelta(microseconds=1)

        default = dt

    elif toparse.lower().startswith("start of "):
        toparse = toparse[9:].strip()

        # "start of X" == midnight at the beginning of day X.
        dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
        default = dt

    # Handle strings with "now", "today", "yesterday", "tomorrow" and "ago".
    # Need to use lowercase
    toparselower = toparse.lower()

    if toparselower in ["now", "today"]:
        pass

    elif toparselower == "yesterday":
        dt -= datetime.timedelta(days=1)

    elif toparselower in ("tomorrow", "tommorrow"):
        # tommorrow is spelled wrong, but code out there might be depending on
        # it working
        dt += datetime.timedelta(days=1)

    elif "ago" in toparselower:
        # Remove the "ago" bit
        toparselower = toparselower[:-3]
        # Replace all "a day and an hour" with "1 day 1 hour"
        toparselower = toparselower.replace("a ", "1 ")
        toparselower = toparselower.replace("an ", "1 ")
        toparselower = toparselower.replace(" and ", " ")

        # Match the following
        # 1 hour ago
        # 1h ago
        # 1 h ago
        # 1 hour ago
        # 2 hours ago
        # Same with minutes, seconds, etc.

        tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months",
                   "years")
        result = {}
        for match in re.finditer("([0-9]+)([^0-9]*)", toparselower):
            amount = int(match.group(1))
            unit = match.group(2).strip()

            # Accept the unit's first letter ("h"), singular ("hour") or
            # plural ("hours") form.
            for bit in tocheck:
                regex = "^([%s]|((%s)s?))$" % (
                    bit[0], bit[:-1])

                bitmatch = re.search(regex, unit)
                if bitmatch:
                    result[bit] = amount
                    break
            else:
                raise ValueError("Was not able to parse date unit %r!" % unit)

        delta = dateutil.relativedelta.relativedelta(**result)
        dt -= delta

    else:
        # Handle strings with normal datetime format, use original case.
        dt = dateutil.parser.parse(toparse, default=default.asdatetime(),
                                   tzinfos=pytz_abbr.tzinfos)
        if dt is None:
            raise ValueError("Was not able to parse date!")

        if dt.tzinfo is pytz_abbr.unknown:
            dt = dt.replace(tzinfo=None)

        if dt.tzinfo is None:
            if tzinfo is None:
                tzinfo = localtz()
            dt = cls(dt, tzinfo)
        else:
            if isinstance(dt.tzinfo, pytz_abbr.tzabbr):
                # Re-localize: an abbreviation (e.g. "EST") only pins down
                # the zone + DST flag, not a full tzinfo.
                abbr = dt.tzinfo
                dt = dt.replace(tzinfo=None)
                dt = cls(dt, abbr.zone, is_dst=abbr.dst)

            dt = cls(dt)

    return dt
def utcfromtimestamp(cls, timestamp):
    """Returns a datetime object of a given timestamp (in UTC)."""
    naive_utc = datetime.datetime.utcfromtimestamp(timestamp)
    return cls(pytz.utc.localize(naive_utc))
def fromtimestamp(cls, timestamp):
    """Returns a datetime object of a given timestamp (in local tz)."""
    return cls.utcfromtimestamp(timestamp).astimezone(localtz())
def utcnow(cls):
    """Return a new datetime representing UTC day and time."""
    return cls(datetime.datetime.utcnow(), tzinfo=pytz.utc)
def now(cls, tzinfo=None):
    """[tz] -> new datetime with tz's local day and time."""
    current = cls.utcnow()
    if tzinfo is None:
        # Default to the machine's local timezone.
        tzinfo = localtz()
    return current.astimezone(tzinfo)
def combine(cls, date, time, tzinfo=None):
    """date, time, [tz] -> datetime with same date and time fields."""
    if tzinfo is None:
        tzinfo = localtz()
    combined = datetime.datetime.combine(date, time)
    return cls(combined, tzinfo)
def between(start, delta, end=None):
    """Yield values from start, stepping by delta, until end (exclusive).

    Example usage:
      >>> d = datetime_tz.smartparse("5 days ago")
      2008/05/12 11:45
      >>> for i in d.between(timedelta(days=1), datetime_tz.now()):
      >>>    print i
      2008/05/12 11:45
      2008/05/13 11:45
      2008/05/14 11:45
      2008/05/15 11:45
      2008/05/16 11:45

    Args:
        start: The date to start at.
        delta: The interval to iterate with.
        end: (Optional) Date to end at. If not given the iterator will never
            terminate.

    Yields:
        datetime_tz objects.
    """
    current = start
    while True:
        if end is not None and not current < end:
            return
        yield current
        current = current + delta
def days(start, end=None):
    """Iterate over the days between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at, if not given the iterator will never
            terminate.

    Returns:
        An iterator which generates datetime_tz objects a day apart.
    """
    one_day = datetime.timedelta(days=1)
    return iterate.between(start, one_day, end)
def hours(start, end=None):
    """Iterate over the hours between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at, if not given the iterator will never
            terminate.

    Returns:
        An iterator which generates datetime_tz objects an hour apart.
    """
    one_hour = datetime.timedelta(hours=1)
    return iterate.between(start, one_hour, end)
def minutes(start, end=None):
    """Iterate over the minutes between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at, if not given the iterator will never
            terminate.

    Returns:
        An iterator which generates datetime_tz objects a minute apart.
    """
    one_minute = datetime.timedelta(minutes=1)
    return iterate.between(start, one_minute, end)
def generate(dt, utc=True, accept_naive=False, microseconds=False):
    '''
    Generate an :RFC:`3339`-formatted timestamp from a
    :class:`datetime.datetime`.

    >>> from datetime import datetime
    >>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc))
    '2009-01-01T12:59:59Z'

    The timestamp will use UTC unless `utc=False` is specified, in which
    case it will use the timezone from the :class:`datetime.datetime`'s
    :attr:`tzinfo` parameter.

    >>> eastern = pytz.timezone('US/Eastern')
    >>> dt = eastern.localize(datetime(2009,1,1,12,59,59))
    >>> generate(dt)
    '2009-01-01T17:59:59Z'
    >>> generate(dt, utc=False)
    '2009-01-01T12:59:59-05:00'

    Unless `accept_naive=True` is specified, the `datetime` must not be
    naive.

    >>> generate(datetime(2009,1,1,12,59,59,0))
    Traceback (most recent call last):
    ...
    ValueError: naive datetime and accept_naive is False

    >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True)
    '2009-01-01T12:59:59Z'

    If `accept_naive=True` is specified, the `datetime` is assumed to be
    UTC. Attempting to generate a local timestamp from a naive datetime
    will result in an error.

    >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False)
    Traceback (most recent call last):
    ...
    ValueError: cannot generate a local timestamp from a naive datetime
    '''
    # NOTE: the flag checks deliberately use identity with True, matching
    # the original contract (truthy non-True values are rejected).
    if dt.tzinfo is None:
        if accept_naive is not True:
            raise ValueError("naive datetime and accept_naive is False")
        if utc is not True:
            raise ValueError("cannot generate a local timestamp from " +
                             "a naive datetime")
        dt = dt.replace(tzinfo=pytz.utc)

    if utc is True:
        dt = dt.astimezone(pytz.utc)

    parts = [dt.strftime('%Y-%m-%dT%H:%M:%S')]
    if microseconds is True:
        parts.append(dt.strftime('.%f'))
    if dt.tzinfo is pytz.utc:
        parts.append('Z')
    else:
        parts.append(timezone(timedelta_seconds(dt.tzinfo.utcoffset(dt))))
    return ''.join(parts)
def get_or_create_placeholder(page, placeholder_slot, delete_existing=False):
    """
    Get or create a placeholder on the given page.
    Optional: Delete existing placeholder.
    """
    placeholder, created = page.placeholders.get_or_create(
        slot=placeholder_slot)

    message = ("Create placeholder %r for page %r" if created
               else "Use existing placeholder %r for page %r")
    log.debug(message, placeholder_slot, page.get_title())

    if delete_existing:
        queryset = CMSPlugin.objects.all().filter(placeholder=placeholder)
        log.info("Delete %i CMSPlugins on placeholder %s...",
                 queryset.count(), placeholder)
        queryset.delete()

    return placeholder, created
def publish_page(page, languages):
    """
    Publish a CMS page in all given languages.
    """
    for language_code, lang_name in iter_languages(languages):
        # NOTE(review): url is resolved per-iteration; iter_languages()
        # presumably switches the active language, so do not hoist — confirm.
        url = page.get_absolute_url()
        if not page.publisher_is_draft:
            log.info('published page "%s" already exists in %s: %s',
                     page, lang_name, url)
            continue
        page.publish(language_code)
        log.info('page "%s" published in %s: %s', page, lang_name, url)

    return page.reload()
def create_cms_index_pages(placeholder_slot="content"):
    """
    create cms home page and fill >content< placeholder with TextPlugin

    :param placeholder_slot: slot name of the placeholder to fill.
    :return: tuple (index_page, created) -- created is True only when the
        page was newly built here.
    """
    try:
        # Only a *published* home page counts as "already exists".
        index_page = Page.objects.get(is_home=True, publisher_is_draft=False)
    except Page.DoesNotExist:
        log.debug('Create index page in "en" and...')
        index_page = create_page(
            title="index in English",
            template=TEMPLATE_INHERITANCE_MAGIC,
            language=settings.LANGUAGE_CODE,
            published=False,
            in_navigation=True)
        placeholder, created = index_page.placeholders.get_or_create(
            slot=placeholder_slot)
        # Add a title + text plugin for every configured language and
        # publish each one.
        for language_code, lang_name in settings.LANGUAGES:
            with translation.override(language_code):
                title = 'index in %s' % lang_name
                log.info('create %r', title)
                if language_code != settings.LANGUAGE_CODE:
                    # The default-language title was already made by
                    # create_page() above.
                    create_title(language_code, title, index_page)
                add_plugin(
                    placeholder=placeholder,
                    plugin_type='TextPlugin',  # djangocms_text_ckeditor
                    language=language_code,
                    body='index page in %s' % lang_name)
                index_page.publish(language_code)
        created = True
    else:
        created = False
        log.debug('Index page already exists.')
    return index_page, created
def create_cms_plugin_page(apphook, apphook_namespace, placeholder_slot=None):
    """
    Create cms plugin page in all existing languages.
    Add a link to the index page.

    :param apphook: e.g...........: 'FooBarApp'
    :param apphook_namespace: e.g.: 'foobar'
    :return:
    """
    creator = CmsPluginPageCreator(apphook=apphook,
                                   apphook_namespace=apphook_namespace)
    creator.placeholder_slot = placeholder_slot
    return creator.create()
def get_slug(self, language_code, lang_name):
    """
    Build the slug for cms.api.create_page().

    Notes:
        - slug must be unique!
        - slug is used to check if page already exists!

    :return: 'slug' string for cms.api.create_page()
    """
    page_title = self.get_title(language_code, lang_name)
    assert page_title != ""
    page_title = str(page_title)  # e.g.: evaluate a lazy translation

    page_slug = slugify(page_title)
    assert page_slug != "", "Title %r results in empty slug!" % page_title
    return page_slug
def get_home_page(self):
    """
    Return the *draft* home page (the query filters publisher_is_draft=True,
    not the published one as a reader might expect).
    Used for 'parent' in cms.api.create_page()

    :raises RuntimeError: if no home page exists.
    """
    try:
        home_page_draft = Page.objects.get(
            is_home=True, publisher_is_draft=True)
    except Page.DoesNotExist:
        log.error('ERROR: "home page" doesn\'t exists!')
        raise RuntimeError('no home page')
    return home_page_draft