code
stringlengths
1
18.2k
def publish(self, page):
    """
    Publish the given draft page in all configured languages.

    :param page: CMS page instance; must be a draft.
    :raises AssertionError: if *page* is not a draft.
    """
    # Idiomatic truthiness check instead of comparing to True.
    assert page.publisher_is_draft, "Page '%s' must be a draft!" % page
    publish_page(page, languages=self.languages)
def create_page(self, **extra_kwargs):
    """
    Create page (and page title) in default language

    extra_kwargs will be passed to cms.api.create_page() e.g.:
        extra_kwargs={
            "soft_root": True,
            "reverse_id": my_reverse_id,
        }

    :return: (page, created) tuple; *page* is always a draft.
    """
    with translation.override(self.default_language_code):
        # for evaluate the language name lazy translation
        # e.g.: settings.LANGUAGE_CODE is not "en"
        self.default_lang_name = dict(self.languages)[self.default_language_code]
        self.slug = self.get_slug(self.default_language_code, self.default_lang_name)

    assert self.slug != ""

    page = None
    parent = self.get_parent_page()
    if parent is not None:
        assert parent.publisher_is_draft == True, "Parent page '%s' must be a draft!" % parent

    if self.delete_first:
        # Remove any previously-created pages before recreating them.
        if self.apphook_namespace is not None:
            pages = Page.objects.filter(
                application_namespace=self.apphook_namespace,
                parent=parent,
            )
        else:
            pages = Page.objects.filter(
                title_set__slug=self.slug,
                parent=parent,
            )
        log.debug("Delete %i pages...", pages.count())
        pages.delete()
    else:
        if self.apphook_namespace is not None:
            # Reuse an existing plugin page with the same namespace, if any.
            queryset = Page.objects.drafts()
            queryset = queryset.filter(parent=parent)
            try:
                page = queryset.get(application_namespace=self.apphook_namespace)
            except Page.DoesNotExist:
                pass
                # Create page below.
            else:
                log.debug("Use existing page: %s", page)
                created = False
                return page, created
        else:
            # Not a plugin page: look the page up via its title slug.
            queryset = Title.objects.filter(language=self.default_language_code)
            queryset = queryset.filter(page__parent=parent)
            try:
                title = queryset.filter(slug=self.slug).first()
            except Title.DoesNotExist:
                pass
                # Create page below.
            else:
                if title is not None:
                    log.debug("Use page from title with slug %r", self.slug)
                    page = title.page
                    created = False

    if page is None:
        with translation.override(self.default_language_code):
            # set right translation language
            # for evaluate language name lazy translation
            # e.g.: settings.LANGUAGE_CODE is not "en"
            page = create_page(
                title=self.get_title(self.default_language_code, self.default_lang_name),
                menu_title=self.get_menu_title(self.default_language_code, self.default_lang_name),
                template=self.get_template(self.default_language_code, self.default_lang_name),
                language=self.default_language_code,
                slug=self.slug,
                published=False,
                parent=parent,
                in_navigation=self.in_navigation,
                apphook=self.apphook,
                apphook_namespace=self.apphook_namespace,
                **extra_kwargs)
            created = True
            log.debug("Page created in %s: %s", self.default_lang_name, page)

    assert page.publisher_is_draft == True
    return page, created
def create_title(self, page):
    """
    Ensure *page* has a title in every configured language.

    Missing titles are created via cms.api.create_title(); existing
    titles are left untouched.
    """
    for language_code, lang_name in iter_languages(self.languages):
        try:
            existing = Title.objects.get(page=page, language=language_code)
        except Title.DoesNotExist:
            slug = self.get_slug(language_code, lang_name)
            assert slug != "", "No slug for %r" % language_code
            new_title = create_title(
                language=language_code,
                title=self.get_title(language_code, lang_name),
                page=page,
                slug=slug,
            )
            log.debug("Title created: %s", new_title)
        else:
            log.debug("Page title exist: %s", existing)
def get_add_plugin_kwargs(self, page, no, placeholder, language_code, lang_name):
    """
    Build the keyword arguments used to create one dummy plugin.

    Called from self.add_plugins(); returns the plugin type and its
    generated body text.
    """
    body = self.get_dummy_text(page, no, placeholder, language_code, lang_name)
    return {
        "plugin_type": 'TextPlugin',  # djangocms_text_ckeditor
        "body": body,
    }
def add_plugins(self, page, placeholder):
    """
    Add a "TextPlugin" in all languages.

    Creates self.dummy_text_count plugins per language in the given
    placeholder, then saves the placeholder once.
    """
    for language_code, lang_name in iter_languages(self.languages):
        for no in range(1, self.dummy_text_count + 1):
            add_plugin_kwargs = self.get_add_plugin_kwargs(
                page, no, placeholder, language_code, lang_name)
            log.info(
                'add plugin to placeholder "%s" (pk:%i) in: %s - no: %i',
                placeholder, placeholder.pk, lang_name, no)
            plugin = add_plugin(
                placeholder=placeholder,
                language=language_code,
                **add_plugin_kwargs)
            log.info('Plugin "%s" (pk:%r) added.', str(plugin), plugin.pk)
    # NOTE(review): reconstructed placement — assumed saved once after all
    # plugins were added; confirm against upstream source.
    placeholder.save()
def get_or_create_placeholder(self, page, placeholder_slot):
    """
    Fetch or create the placeholder *placeholder_slot* on *page*.

    Delegates to the module-level get_or_create_placeholder() helper;
    existing content is dropped first when self.delete_first is set.

    :return: (placeholder, created) tuple.
    """
    return get_or_create_placeholder(
        page, placeholder_slot, delete_existing=self.delete_first)
def fill_content(self, page, placeholder_slot):
    """
    Add a placeholder to the page and fill it with a "TextPlugin"
    in all languages.
    """
    # Guard against accidentally passing a single character (e.g. when a
    # string was iterated instead of a list of slot names).
    if len(placeholder_slot) == 1:
        raise RuntimeError(placeholder_slot)

    placeholder, _created = self.get_or_create_placeholder(page, placeholder_slot)
    self.add_plugins(page, placeholder)
def create(self):
    """
    Create the plugin page in all languages and fill dummy content.

    :return: (page, created) tuple from the parent class create().
    """
    plugin = CMSPlugin.objects.filter(plugin_type=self.apphook)
    if plugin.exists():
        log.debug('Plugin page for "%s" plugin already exist, ok.', self.apphook)
        # NOTE(review): `raise plugin` raises a QuerySet, which is not an
        # exception class — this fails with TypeError at runtime. The log
        # message suggests an early return was intended; confirm intent.
        raise plugin

    page, created = super(CmsPluginPageCreator, self).create()

    if created:
        # Add a plugin with content in all languages to the created page.
        # But only on new created page
        for placeholder_slot in self.placeholder_slots:
            self.fill_content(page, placeholder_slot)

    return page, created
def get_title(self, language_code, lang_name):
    """
    Build the 'title' string passed to cms.api.create_page().

    Combines the configured prefix with the current count/level and
    the language code, and logs the result.
    """
    result = "%s %i-%i in %s" % (
        self.title_prefix, self.current_count, self.current_level, language_code)
    log.info(result)
    return result
def get_parent_page(self):
    """
    Resolve the 'parent' argument for cms.api.create_page().

    :return: None for a root page (level 1), otherwise the page stored
        for the previous level at the current count.
    """
    if self.current_level == 1:
        # 'root' page has no parent
        return None
    return self.page_data[(self.current_level - 1, self.current_count)]
def translate_path(self, pth):
    """Translate a /-separated PATH to the local filename syntax.

    :param pth: a '/'-separated request path.
    :return: the local path below self.location, or None when the path
        tries to escape (drive letters, '.' or '..' components).
    """
    # initially copied from SimpleHTTPServer
    words = [w for w in pth.split('/') if w]
    pth = self.location
    for word in words:
        # Do not allow path separators other than /,
        # drive names and . ..
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if drive or head or word in (os.curdir, os.pardir):
            return None
        pth = os.path.join(pth, word)
    assert pth.startswith(self.location + '/')
    # Use os.path consistently: the original called a bare `path.normpath`,
    # which depended on an extra `from os import path` import.
    assert pth == os.path.normpath(pth)
    return pth
def dict(self):
    """A dict that holds key/values for all of the properties in the object.

    :return: OrderedDict of mapper attribute name -> value, excluding
        any keys listed in SKIP_KEYS.
    """
    from collections import OrderedDict
    SKIP_KEYS = ()
    pairs = ((p.key, getattr(self, p.key)) for p in self.__mapper__.attrs)
    return OrderedDict((k, v) for k, v in pairs if k not in SKIP_KEYS)
def column(self, source_header_or_pos):
    """
    Return a column by name or position

    :param source_header_or_pos: If a string, a source header name. If an integer, column position
    :return: the matching column, or None when nothing matches.
    """
    for c in self.columns:
        # Match on either the header name or the position.
        if c.source_header == source_header_or_pos or c.position == source_header_or_pos:
            assert c.st_vid == self.vid
            return c
    # BUG FIX: the original had `else: return None` inside the loop, so it
    # returned None as soon as the *first* column failed to match and
    # never inspected the rest.
    return None
def add_column(self, position, source_header, datatype, **kwargs):
    """ Add a column to the source table.

    :param position: Integer position of the column started from 1.
    :param source_header: Name of the column, as it exists in the source file
    :param datatype: Python datatype ( str, int, float, None ) for the column
    :param kwargs: Other source record args.
    :return: the updated or newly created SourceColumn.
    """
    from ..identity import GeneralNumber2

    # Look the column up both ways; the two lookups must not disagree.
    c = self.column(source_header)
    c_by_pos = self.column(position)

    datatype = 'str' if datatype == 'unicode' else datatype

    assert not c or not c_by_pos or c.vid == c_by_pos.vid

    # Convert almost anything to True / False
    if 'has_codes' in kwargs:
        FALSE_VALUES = ['False', 'false', 'F', 'f', '', None, 0, '0']
        kwargs['has_codes'] = False if kwargs['has_codes'] in FALSE_VALUES else True

    if c:
        # Changing the position can result in conflicts
        assert not c_by_pos or c_by_pos.vid == c.vid
        c.update(
            position=position,
            datatype=datatype.__name__ if isinstance(datatype, type) else datatype,
            **kwargs)
    elif c_by_pos:
        # FIXME This feels wrong; there probably should not be any changes to
        # both of the table, since then it won't represent the previous source.
        # Maybe all of the sources should get their own tables initially, then
        # afterward the duplicates can be removed.
        assert not c or c_by_pos.vid == c.vid
        c_by_pos.update(
            source_header=source_header,
            datatype=datatype.__name__ if isinstance(datatype, type) else datatype,
            **kwargs)
    else:
        assert not c and not c_by_pos
        # Hacking an id number, since I don't want to create a new Identity
        # ObjectNumber type
        c = SourceColumn(
            vid=str(GeneralNumber2('C', self.d_vid, self.sequence_id, int(position))),
            position=position,
            st_vid=self.vid,
            d_vid=self.d_vid,
            datatype=datatype.__name__ if isinstance(datatype, type) else datatype,
            source_header=source_header,
            **kwargs)
        self.columns.append(c)

    return c
def update_id(self, sequence_id=None):
    """Alter the sequence id, and all of the names and ids derived from it.

    This often needs to be done after an IntegrityError in a
    multiprocessing run.

    :param sequence_id: new sequence number; when falsy, the current
        self.sequence_id is kept and only the vid is recomputed.
    """
    from ..identity import GeneralNumber1

    if sequence_id:
        self.sequence_id = sequence_id

    self.vid = str(GeneralNumber1('T', self.d_vid, self.sequence_id))
def table_convert_geometry(metadata, table_name):
    """Get table metadata from the database.

    Reflects *table_name* with SQLAlchemy and patches any column named
    'geometry' to use the Geometry type.
    """
    from sqlalchemy import Table
    from ..orm import Geometry

    table = Table(table_name, metadata, autoload=True)

    for c in table.columns:
        # HACK! Sqlalchemy sees spatialite GEOMETRY types
        # as NUMERIC, so fix the type by column name.
        if c.name == 'geometry':
            c.type = Geometry

    # What about variants?
    return table
def next_sequence_id(session, sequence_ids, parent_vid, table_class, force_query = False):
    """ Return the next sequence id for a object, identified by the vid of the parent object,
    and the database prefix for the child object.

    On the first call, will load the max sequence number from the database, but subsequent
    calls will run in process, so this isn't suitable for multi-process operation -- all of
    the tables in a dataset should be created by one process.

    The child table must have a sequence_id value.

    :param session: Database session or connection ( must have an execute() method )
    :param sequence_ids: A dict for caching sequence ids
    :param parent_vid: The VID of the parent object, which sets the namespace for the sequence
    :param table_class: Table class of the child object, the one getting a number
    :param force_query: when True, always re-query the database instead of
        using the cached number.
    :return: the next free sequence id for this (parent, table) pair.
    """
    from sqlalchemy import text

    seq_col = table_class.sequence_id.property.columns[0].name

    try:
        parent_col = table_class._parent_col
    except AttributeError:
        parent_col = table_class.d_vid.property.columns[0].name

    assert bool(parent_vid)

    key = (parent_vid, table_class.__name__)

    number = sequence_ids.get(key, None)

    if (not number and session) or force_query:
        # NOTE(review): the SQL is built with str.format, not bound
        # parameters; safe only while parent_vid is an internally generated
        # vid and never user input.
        sql = text("SELECT max({seq_col})+1 FROM {table} WHERE {parent_col} = '{vid}'"
                   .format(table=table_class.__tablename__, parent_col=parent_col,
                           seq_col=seq_col, vid=parent_vid))

        max_id, = session.execute(sql).fetchone()
        if not max_id:
            max_id = 1

        sequence_ids[key] = int(max_id)

    elif not session:
        # There was no session set. This should only happen when the parent
        # object is new, and therefore, there are no child numbers, so the
        # appropriate starting number is 1. If the object is not new, there
        # will be conflicts.
        sequence_ids[key] = 1

    else:
        # A cached number exists; just increment it in process.
        sequence_ids[key] += 1

    return sequence_ids[key]
def incver(o, prop_names):
    """Increment the version numbers of a set of properties and return a new object

    :param o: a mapped ORM object to copy.
    :param prop_names: names of properties whose values are passed through
        ObjectNumber.increment().
    :return: a new instance of type(o) with incremented versions.
    """
    from ambry.identity import ObjectNumber

    d = {}
    for p in o.__mapper__.attrs:
        v = getattr(o, p.key)
        if v is None:
            d[p.key] = None
        elif p.key in prop_names:
            d[p.key] = str(ObjectNumber.increment(v))
        else:
            if not hasattr(v, '__mapper__'):
                # Only copy values, never objects
                d[p.key] = v

    return o.__class__(**d)
def coerce(cls, key, value):
    """Convert plain list to MutationList.

    Accepts an existing iterable, a JSON-encoded list string, or a
    comma-separated string; None / empty input yields an empty list.

    :raises ValueError: when a '['-prefixed string is not valid JSON.
    """
    if isinstance(value, string_types):
        value = value.strip()
        # BUG FIX: guard the empty string; the original indexed value[0]
        # and raised IndexError on ''.
        if value.startswith('['):
            # It's json encoded, probably
            try:
                value = json.loads(value)
            except ValueError:
                raise ValueError("Failed to parse JSON: '{}' ".format(value))
        elif value:
            value = value.split(',')

    if not value:
        value = []

    self = MutationList((MutationObj.coerce(key, v) for v in value))
    self._key = key
    return self
def parse_view(query):
    """ Parses asql query to view object.

    Args:
        query (str): asql query

    Returns:
        View instance: parsed view.
    """
    # Drop any WHERE clause; the view grammar only covers the head of
    # the statement.
    try:
        idx = query.lower().index('where')
        query = query[:idx]
    except ValueError:
        pass

    # The grammar requires a terminating semicolon.
    if not query.endswith(';'):
        query = query.strip()
        query += ';'

    result = _view_stmt.parseString(query)
    return View(result)
def _flat_alias(t): """ Populates token (column or table) fields from parse result. """ t.name = t.parsed_name t.alias = t.parsed_alias[0] if t.parsed_alias else '' return t
def _build_join(t): """ Populates join token fields. """ t.source.name = t.source.parsed_name t.source.alias = t.source.parsed_alias[0] if t.source.parsed_alias else '' return t
def substitute_vids(library, statement):
    """ Replace all of the references to tables and partitions with their vids.

    This is a bit of a hack -- it ought to work with the parser, but instead it
    just looks for common SQL tokens that indicate an identifier.

    :param statement: an sqlstatement. String.
    :return: tuple: new_statement, set of table vids, set of partition vids.
    """
    from ambry.identity import ObjectNumber, TableNumber, NotObjectNumberError
    from ambry.orm.exc import NotFoundError

    try:
        stmt_str = statement.to_unicode()
    except AttributeError:
        stmt_str = statement

    parts = stmt_str.strip(';').split()

    new_parts = []
    tables = set()
    partitions = set()

    while parts:
        token = parts.pop(0).strip()
        # Keywords that are always followed by an identifier.
        if token.lower() in ('from', 'join', 'materialize', 'install'):
            ident = parts.pop(0).strip(';')
            new_parts.append(token)
            try:
                # NOTE(review): this parses `token` (the keyword), not
                # `ident`; it looks like it should be
                # ObjectNumber.parse(ident) — confirm against upstream.
                obj_number = ObjectNumber.parse(token)
                if isinstance(obj_number, TableNumber):
                    table = library.table(ident)
                    tables.add(table.vid)
                    new_parts.append(table.vid)
                else:
                    # Do not care about other object numbers. Assume partition.
                    raise NotObjectNumberError
            except NotObjectNumberError:
                # assume partition
                try:
                    partition = library.partition(ident)
                    partitions.add(partition.vid)
                    new_parts.append(partition.vid)
                except NotFoundError:
                    # Ok, maybe it is just a normal identifier...
                    new_parts.append(ident)
        else:
            new_parts.append(token)

    return ' '.join(new_parts).strip(), tables, partitions
def find_indexable_materializable(sql, library):
    """ Parse a statement, then build a FIMRecord describing what to install,
    materialize, drop or index for partitions referenced in the statement.

    :param sql: the SQL statement (string, possibly with to_unicode()).
    :param library: library used to resolve table / partition references.
    :return: a FIMRecord.
    """
    derefed, tables, partitions = substitute_vids(library, sql)

    # Simple statement kinds return a record immediately; SELECT and
    # CREATE TABLE/VIEW fall through to the join analysis below.
    if derefed.lower().startswith('create index') or derefed.lower().startswith('index'):
        parsed = parse_index(derefed)
        return FIMRecord(statement=derefed, indexes=[(parsed.source, tuple(parsed.columns))])

    elif derefed.lower().startswith('materialize'):
        _, vid = derefed.split()
        return FIMRecord(statement=derefed, materialize=set([vid]))

    elif derefed.lower().startswith('install'):
        _, vid = derefed.split()
        return FIMRecord(statement=derefed, install=set([vid]))

    elif derefed.lower().startswith('select'):
        rec = FIMRecord(statement=derefed)
        parsed = parse_select(derefed)

    elif derefed.lower().startswith('drop'):
        return FIMRecord(statement=derefed, drop=derefed)

    elif derefed.lower().startswith('create table'):
        parsed = parse_view(derefed)
        rec = FIMRecord(statement=derefed,
                        drop='DROP TABLE IF EXISTS {};'.format(parsed.name), views=1)

    elif derefed.lower().startswith('create view'):
        parsed = parse_view(derefed)
        rec = FIMRecord(statement=derefed,
                        drop='DROP VIEW IF EXISTS {};'.format(parsed.name), views=1)

    else:
        return FIMRecord(statement=derefed, tables=set(tables), install=set(partitions))

    def partition_aliases(parsed):
        # Map alias -> source name for every aliased source and join source.
        d = {}

        for source in parsed.sources:
            if source.alias:
                d[source.alias] = source.name

        for j in parsed.joins:
            if j.source.alias:
                d[j.source.alias] = j.source.name

        return d

    def indexable_columns(aliases, parsed):
        # Collect (partition_name, (column,)) pairs for join columns that
        # are qualified with a known alias.
        indexes = []
        for j in parsed.joins:
            if j and j.join_cols:
                for col in j.join_cols:
                    if '.' in col:
                        try:
                            alias, col = col.split('.')
                            if alias:
                                indexes.append((aliases[alias], (col,)))
                        except KeyError:
                            pass
        return indexes

    aliases = partition_aliases(parsed)
    indexes = indexable_columns(aliases, parsed)

    rec.joins = len(parsed.joins)

    install = set(partitions)

    rec.update(tables=tables, install=install, indexes=indexes)

    return rec
def update(self, rec=None, drop=None, tables=None, install=None, materialize=None,
           indexes=None, joins=0, views=0):
    """ Updates current record.

    Args:
        rec (FIMRecord): when given, its fields are merged in first via a
            recursive self.update() call; the remaining keyword arguments
            are then merged on top.
    """
    # Normalize falsy inputs to empty containers.
    if not drop:
        drop = []
    if not tables:
        tables = set()
    if not install:
        install = set()
    if not materialize:
        materialize = set()
    if not indexes:
        indexes = set()

    if rec:
        self.update(
            drop=rec.drop, tables=rec.tables, install=rec.install,
            materialize=rec.materialize, indexes=rec.indexes, joins=rec.joins
        )

    self.drop += drop
    self.tables |= set(tables)
    self.install |= set(install)
    self.materialize |= set(materialize)
    self.indexes |= set(indexes)
    self.joins += joins
    self.views += views

    # Joins or views promote installed partitions to materialized partitions
    if self.joins > 0 or self.views > 0:
        self.materialize |= self.install
        self.install = set()
def support_level(self, support_level):
    """
    Sets the support_level of this ProductReleaseRest.

    :param support_level: The support_level of this ProductReleaseRest.
    :type: str
    :raises ValueError: when the value is not one of the allowed strings.
    """
    allowed_values = ["UNRELEASED", "EARLYACCESS", "SUPPORTED", "EXTENDED_SUPPORT", "EOL"]
    if support_level in allowed_values:
        self._support_level = support_level
        return
    raise ValueError(
        "Invalid value for `support_level` ({0}), must be one of {1}"
        .format(support_level, allowed_values)
    )
def callLater(self, when, what, *a, **kw):
    """
    Copied from twisted.internet.task.Clock, r20480.

    Fixes the bug where the wrong DelayedCall would sometimes be
    returned.

    :param when: delay in seconds relative to self.seconds().
    :param what: callable to schedule, with *a / **kw as its arguments.
    :return: the created DelayedCall.
    """
    dc = base.DelayedCall(self.seconds() + when,
                          what, a, kw,
                          self.calls.remove,
                          lambda c: None,
                          self.seconds)
    self.calls.append(dc)
    # Use a sort key: list.sort(cmp=...) and the cmp() builtin are
    # Python 2 only and fail on Python 3.
    self.calls.sort(key=lambda call: call.getTime())
    return dc
def clockIsBroken():
    """
    Returns whether twisted.internet.task.Clock has the bug that
    returns the wrong DelayedCall or not.
    """
    clock = Clock()
    first = clock.callLater(10, lambda: None)
    second = clock.callLater(1, lambda: None)
    # A broken Clock hands back the same DelayedCall twice.
    return first is second
def get_scm_status(config, read_modules=False, repo_url=None, mvn_repo_local=None, additional_params=None):
    """
    Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the
    top-level artifact is read by default, although it can be requested to read the whole
    available module structure.

    :param config: artifact config (ArtifactConfig instance)
    :param read_modules: if True all modules are read, otherwise only top-level artifact
    :param repo_url: the URL of the repository to use
    :param mvn_repo_local: local repository path
    :param additional_params: additional params to add on command-line when running maven
    """
    global scm_status_cache

    # Cache layout: full (all-modules) results live under the plain artifact
    # key; top-level-only results live under "<artifact>|False".
    if config.artifact in scm_status_cache.keys():
        result = scm_status_cache[config.artifact]
    elif not read_modules and (("%s|False" % config.artifact) in scm_status_cache.keys()):
        result = scm_status_cache["%s|False" % config.artifact]
    else:
        result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params)
        if read_modules:
            scm_status_cache[config.artifact] = result
            # A full result supersedes any cached top-level-only result.
            if ("%s|False" % config.artifact) in scm_status_cache.keys():
                del(scm_status_cache["%s|False" % config.artifact])
        else:
            scm_status_cache["%s|False" % config.artifact] = result

    return result
def shrink_patch(patch_path, target_file):
    """
    Shrinks a patch on patch_path to contain only changes for target_file.

    :param patch_path: path to the shrinked patch file
    :param target_file: filename of a file of which changes should be kept
    :return: True if there is a section containing changes for target_file,
        False otherwise (the file is left unchanged in that case)
    """
    logging.debug("Shrinking patch file %s to keep only %s changes.", patch_path, target_file)

    shrinked_lines = []
    search_line = "diff --git a/%s b/%s" % (target_file, target_file)

    # Context managers replace the original manual open/close in try/finally.
    with open(patch_path) as patch_file:
        adding = False
        for line in patch_file.read().split("\n"):
            # Stop collecting when the next file's diff header starts.
            if adding and line.startswith("diff --git a/") and line != search_line:
                adding = False
            elif line == search_line:
                adding = True
            if adding:
                shrinked_lines.append(line)

    if not shrinked_lines:
        return False

    content = "\n".join(shrinked_lines)
    if not content.endswith("\n"):
        content = content + "\n"
    with open(patch_path, "w") as patch_file:
        patch_file.write(content)
    return True
def get_scm_info(directory, branch_id=False, read_only=False, filePath=None):
    """
    Reads SCM info from the given directory. It can fill real commit ID into commit_id
    field or branch name.

    @param directory: directory name
    @param branch_id: reads commit ID if False (default) or branch name if True
    @param read_only: if True it replaces the actual scheme to the read-only for known
           hosts, e.g. git+ssh to git for git.app.eng.bos.redhat.com, otherwise it just
           reads it (default)
    @param filePath: when given, the commit info of the last commit touching this file
           is read instead of the current branch head
    @return: an ScmInfo instance
    """
    #TODO use a commit id instead of branch if in detached state
    # Results are memoized per argument tuple; return a copy so callers
    # cannot mutate the cached instance.
    if (directory, branch_id, read_only, filePath) in scm_info_path_cache:
        return copy.copy(scm_info_path_cache[(directory, branch_id, read_only, filePath)])
    if os.path.exists(os.path.join(directory, ".git")):
        logging.debug("Getting git info for %s", directory)
        if filePath != None:
            # Fake a "* <branch> <sha> <subject>" line so the same parsing
            # below works for both the log and branch outputs.
            args = ["git", "--git-dir", directory + "/.git", "log", "-z", "-n", "2",
                    "--pretty=format:* dummy-branch %H %s%n", "--", filePath]
        else:
            args = ["git", "--git-dir", directory + "/.git", "branch", "-v", "--no-abbrev"]
        command = Popen(args, stdout=PIPE, stderr=STDOUT)
        stdout = command.communicate()[0]
        if command.returncode:
            raise ScmException("Reading Git branch name and commit ID from %s failed. Output: %s" % (directory, stdout))
        branch_name = None
        commit_id = None
        for line in stdout.split("\n"):
            if line.startswith("* "):
                # "* <branch> <40-hex-sha> <subject>"
                pattern = "\* +(.*) +([a-f0-9]{40}) .*"
                m = re.match(pattern, line)
                if m:
                    branch_name = m.group(1).strip()
                    commit_id = m.group(2).strip()
                    break
                else:
                    raise ScmException("Cannot parse commit ID and branch name from result line:\n%s" % line)
        logging.info("Retrieved branch_name %s and commit_id %s", branch_name, commit_id)
        args = ["git", "--git-dir", directory + "/.git", "remote", "-v"]
        command = Popen(args, stdout=PIPE, stderr=STDOUT)
        stdout = command.communicate()[0]
        if command.returncode:
            raise ScmException("Reading Git remote from %s failed. Output: %s" % (directory, stdout))
        origin_url = None
        for line in stdout.split("\n"):
            # chr(9) is a TAB: "origin\t<url> (fetch)"
            if line.startswith("origin" + chr(9)) and line.endswith(" (fetch)"):
                parts = re.split("[\s]+", line, 3)
                origin_url = parts[1]
                break
        if branch_id:
            scminfo = ScmInfo("%s#%s" % (origin_url, branch_name))
        else:
            scminfo = ScmInfo("%s#%s" % (origin_url, commit_id))
        if read_only:
            # Rewrite known read-write URLs to their anonymous read-only form.
            if scminfo.get_scm_url().startswith("git+ssh://git.app.eng.bos.redhat.com/srv/git/"):
                scminfo.scheme = "git"
                scminfo.path = scminfo.path.replace("/srv/git/", "/")
            elif scminfo.get_scm_url().startswith("git+ssh://code.engineering.redhat.com/"):
                scminfo.scheme = "git+https"
                scminfo.path = ("%s%s" % ("/gerrit/", scminfo.path)).replace("gerrit//", "gerrit/")
        scm_info_path_cache[(directory, branch_id, read_only, filePath)] = scminfo
        return scminfo
    elif os.path.exists(directory):
        #Special case for the integration-platform-tests which test tooling
        #inplace and use the file:// in the test.cfg
        scminfo = ScmInfo("file://%s#%s" % (directory, "xx"))
        scm_info_path_cache[(directory, branch_id, read_only, filePath)] = scminfo
        return scminfo
    else:
        raise ScmException("Unknown SCM type while reading SCM info from %s" % directory)
def new_status(self, new_status):
    """
    Sets the new_status of this BuildSetStatusChangedEvent.

    :param new_status: The new_status of this BuildSetStatusChangedEvent.
    :type: str
    :raises ValueError: when the value is not one of the allowed strings.
    """
    allowed_values = ["NEW", "DONE", "REJECTED"]
    if new_status in allowed_values:
        self._new_status = new_status
        return
    raise ValueError(
        "Invalid value for `new_status` ({0}), must be one of {1}"
        .format(new_status, allowed_values)
    )
def old_status(self, old_status):
    """
    Sets the old_status of this BuildSetStatusChangedEvent.

    :param old_status: The old_status of this BuildSetStatusChangedEvent.
    :type: str
    :raises ValueError: when the value is not one of the allowed strings.
    """
    allowed_values = ["NEW", "DONE", "REJECTED"]
    if old_status in allowed_values:
        self._old_status = old_status
        return
    raise ValueError(
        "Invalid value for `old_status` ({0}), must be one of {1}"
        .format(old_status, allowed_values)
    )
def command_serve(self, host='', port='8000', level='debug'):
    '''
    Run development server with automated reload on code change::

        ./manage.py app:serve [host] [port] [level]

    :param host: interface to bind to (default: all)
    :param port: TCP port as a string
    :param level: logging level name, case-insensitive
    '''
    logging.basicConfig(level=getattr(logging, level.upper()), format=self.format)
    if self.bootstrap:
        logger.info('Bootstraping...')
        self.bootstrap()
    try:
        server_thread = DevServerThread(host, port, self.app)
        server_thread.start()
        # Blocks until a watched file changes, then shut the server down
        # and re-exec the whole process.
        wait_for_code_change(extra_files=self.extra_files)
        server_thread.running = False
        server_thread.join()
        logger.info('Reloading...')
        flush_fds()
        pid = os.fork()
        # We need to fork before `execvp` to perform code reload
        # correctly, because we need to complete python destructors and
        # `atexit`.
        # This will save us from problems of incorrect exit, such as:
        # - unsaved data in data storage, which does not write data
        #   on hard drive immediately
        # - code that can't be measured with the coverage tool, because it
        #   uses an `atexit` handler to save coverage data
        # NOTE: we use an untypical fork-exec scheme replacing the parent
        # process (not the child) to preserve the PID of the process.
        # We use `pragma: no cover` here, because the parent process cannot
        # be measured with coverage since it ends with `execvp`.
        if pid:  # pragma: no cover
            os.closerange(3, MAXFD)
            os.waitpid(pid, 0)
            # reloading the code in parent process
            os.execvp(sys.executable, [sys.executable] + sys.argv)
        else:
            # The child closes our resources, including file descriptors,
            # and performs `atexit` handlers.
            sys.exit()
    except KeyboardInterrupt:
        logger.info('Stoping dev-server...')
        server_thread.running = False
        server_thread.join()
        sys.exit()
def command_shell(self):
    '''
    Shell command::

        ./manage.py app:shell

    Executed with `self.shell_namespace` as local variables namespace.
    '''
    from code import interact
    namespace = self.shell_namespace
    interact('Namespace {!r}'.format(namespace), local=namespace)
def declared_mixin(*bases):
    '''Create mix-in class with all assignments turned into methods decorated
    with declared_attr.

    Usage:

        @declared_mixin
        def FactoryFunction(): ...

    or with base mix-in class[es]:

        @declared_mixin(BaseMixIn1, BaseMixIn2)
        def FactoryFunction(): ...

    For example:

        @declared_mixin
        def WithParent():
            parent_id = Column(ForeignKey(Parent.id))
            parent = relationship(Parent)

    is equivalent to:

        class WithParent(object):
            @declared_attr
            def parent_id(cls):
                return Column(ForeignKey(Parent.id))
            @declared_attr
            def parent(cls):
                return relationship(Parent)
    '''
    def wrapper(func):
        # Per-class cache of the factory function's locals; a
        # WeakKeyDictionary so mapped classes can still be collected.
        attrs = weakref.WeakKeyDictionary()

        def create_descriptor(name):
            def get_attr(cls):
                if cls not in attrs:
                    # Call func only once per class
                    attrs[cls] = return_locals(func)()
                return attrs[cls][name]
            get_attr.__name__ = name
            return declared_attr(get_attr)

        # One declared_attr descriptor per local variable of func.
        dict_ = {name: create_descriptor(name)
                 for name in six.get_function_code(func).co_varnames}
        dict_['__doc__'] = func.__doc__
        return type(func.__name__, bases, dict_)

    if len(bases)==1 and not isinstance(bases[0], type):
        # Short form (without bases) is used
        func = bases[0]
        bases = ()
        return wrapper(func)
    else:
        return wrapper
def elfhash(s):
    """Classic ELF hash over a bytes value.

    :param s: bytes

    >>> import base64
    >>> s = base64.b64encode(b'hello world')
    >>> elfhash(s)
    224648685
    """
    acc = 0
    for byte in s:
        acc = (acc << 4) + byte
        top = acc & 0xF0000000
        if top:
            acc ^= top >> 24
        acc &= ~top
    return acc & 0x7FFFFFFF
def find_previous(element, l):
    """ find previous element in a sorted list

    >>> find_previous(0, [0])
    0
    >>> find_previous(2, [1, 1, 3])
    1
    >>> find_previous(0, [1, 2])
    >>> find_previous(1.5, [1, 2])
    1
    >>> find_previous(3, [1, 2])
    2
    """
    last_index = len(l) - 1
    for index, current in enumerate(l):
        # The last element is always the fallback answer.
        if index == last_index:
            return current
        # element precedes the whole list -> nothing to return.
        if index == 0 and element < current:
            return None
        if current <= element < l[index + 1]:
            return current
def patch_match_hostname():
    """Fixes https://github.com/boto/boto/issues/2836"""
    _old_match_hostname = ssl.match_hostname

    def _new_match_hostname(cert, hostname):
        # Collapse dots in the bucket part so wildcard certs match
        # bucket names that contain dots.
        if hostname.endswith('.s3.amazonaws.com'):
            pos = hostname.find('.s3.amazonaws.com')
            hostname = hostname[:pos].replace('.', '') + hostname[pos:]
        return _old_match_hostname(cert, hostname)

    ssl.match_hostname = _new_match_hostname
def dict(self):
    """A dict that holds key/values for all of the properties in the object.

    Adds a 'bundle_count' entry derived from self.data['list'], then merges
    the remaining self.data keys in.

    :return:
    """
    from collections import OrderedDict

    d = OrderedDict(
        (p.key, getattr(self, p.key))
        for p in self.__mapper__.attrs if p.key not in ('data',))

    if 'list' in self.data:
        d['bundle_count'] = len(self.data['list'])
    else:
        d['bundle_count'] = None

    if self.data:
        d.update(self.data)

    return d
def update(self):
    """Cache the list into the data section of the record

    Builds a vid -> {vid, vname, id, name} mapping from self.list() and
    stores it under self.data['list'].

    :raises RemoteAccessError: when the remote listing fails for any
        network / not-found reason.
    """
    from ambry.orm.exc import NotFoundError
    from requests.exceptions import ConnectionError, HTTPError
    from boto.exception import S3ResponseError

    d = {}

    try:
        for k, v in self.list(full=True):
            if not v:
                continue
            d[v['vid']] = {
                'vid': v['vid'],
                'vname': v.get('vname'),
                'id': v.get('id'),
                'name': v.get('name')
            }
        self.data['list'] = d
    except (NotFoundError, ConnectionError, S3ResponseError, HTTPError) as e:
        raise RemoteAccessError("Failed to update {}: {}".format(self.short_name, e))
def list(self, full=False):
    """List all of the bundles in the remote"""
    # Dispatch to the API or filesystem implementation.
    handler = self._list_api if self.is_api else self._list_fs
    return handler(full=full)
def _update_fs_list(self):
    """Cache the full list for http access.

    This creates a meta file that can be read all at once, rather than
    requiring a list operation like S3 access does"""
    from json import dumps

    # Keep only the info dicts (second element of each list entry).
    full_list = [e[1] for e in self._list_fs(full=True)]

    remote = self._fs_remote(self.url)

    remote.setcontents(os.path.join('_meta', 'list.json'), dumps(full_list, indent = 4))
def checkin(self, package, no_partitions=False, force=False, cb=None):
    """ Check in a bundle package to the remote.

    :param package: A Database, referencing a sqlite database holding the bundle
    :param cb: a two argument progress callback: cb(message, num_records)
    :return:
    """
    from ambry.orm.exc import NotFoundError

    if not os.path.exists(package.path):
        raise NotFoundError("Package path does not exist: '{}' ".format(package.path))

    # Dispatch to the API or filesystem implementation.
    handler = self._checkin_api if self.is_api else self._checkin_fs
    return handler(package, no_partitions=no_partitions, force=force, cb=cb)
def _put_metadata(self, fs_remote, ds):
    """Store metadata on a pyfs remote

    Writes one metadata file per (path, ident) pair from
    self._meta_infos(ds); creates the '_meta/<part>' directories on
    demand when the first write fails.
    """
    from six import text_type
    from fs.errors import ResourceNotFoundError

    identity = ds.identity
    d = identity.dict
    d['summary'] = ds.config.metadata.about.summary
    d['title'] = ds.config.metadata.about.title

    meta_stack = self._meta_infos(ds)

    def do_metadata():
        for path, ident in meta_stack:
            fs_remote.setcontents(path, ident)

    try:
        # Assume the directories already exist
        do_metadata()
    except ResourceNotFoundError:
        # Nope, make them and try again.
        parts = ['vid', 'id', 'vname', 'name']

        for p in parts:
            dirname = os.path.join('_meta', p)
            fs_remote.makedir(dirname, allow_recreate=True, recursive=True)

        do_metadata()
def checkout(self, ref, cb=None):
    """Checkout a bundle from the remote. Returns a file-like object"""
    if self.is_api:
        return self._checkout_api(ref, cb=cb)
    return self._checkout_fs(ref, cb=cb)
def remove(self, ref, cb=None):
    """Check in a bundle to the remote"""
    handler = self._remove_api if self.is_api else self._remove_fs
    return handler(ref, cb)
def s3(self, url, account_acessor=None, access=None, secret=None):
    """Setup an S3 pyfs, with account credentials, fixing an ssl matching problem

    :param url: s3://bucket/prefix URL
    :param account_acessor: callable(hostname) -> account dict with
        'account_id', 'access_key' and 'secret' keys
    :param access: AWS access key, used when no accessor is given
    :param secret: AWS secret key, used when no accessor is given
    :return: an AmbryS3FS instance
    """
    from ambry.util.ambrys3 import AmbryS3FS
    from ambry.util import parse_url_to_dict
    import ssl

    pd = parse_url_to_dict(url)

    if account_acessor:
        account = account_acessor(pd['hostname'])
        assert account['account_id'] == pd['hostname']
        # BUG FIX: the original line ended with a stray comma, which made
        # aws_access_key a 1-tuple instead of a string.
        aws_access_key = account['access_key']
        aws_secret_key = account['secret']
    else:
        aws_access_key = access
        aws_secret_key = secret
        assert access, url
        assert secret, url

    s3 = AmbryS3FS(
        bucket=pd['netloc'],
        prefix=pd['path'].strip('/') + '/',
        aws_access_key=aws_access_key,
        aws_secret_key=aws_secret_key,
    )

    return s3
def withdict(parser, token):
    """Take a complete context dict as extra layer."""
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("{% withdict %} expects one argument")

    # Consume the tag body up to {% endwithdict %}.
    body = parser.parse(('endwithdict',))
    parser.delete_first_token()

    return WithDictNode(
        nodelist=body,
        context_expr=parser.compile_filter(bits[1]),
    )
def render(self, context):
    """Render the tag, with extra context layer."""
    layer = self.context_expr.resolve(context)
    if not isinstance(layer, dict):
        raise TemplateSyntaxError("{% withdict %} expects the argument to be a dictionary.")

    # Push the dict as a new context layer for the duration of the body.
    with context.push(**layer):
        return self.nodelist.render(context)
def filesessionmaker(sessionmaker, file_manager, file_managers=None):
    u'''Wrapper of session maker adding link to a FileManager instance to session.::

        file_manager = FileManager(cfg.TRANSIENT_ROOT, cfg.PERSISTENT_ROOT)
        filesessionmaker(sessionmaker(...), file_manager)

    :param sessionmaker: SQLAlchemy sessionmaker factory to wrap.
    :param file_manager: default FileManager used when no specific one
        is registered for a class/metadata.
    :param file_managers: optional mapping (mapped class or metadata ->
        FileManager) consulted before falling back to `file_manager`.
    '''
    # Weak keys so registry entries don't keep classes/metadata alive.
    registry = WeakKeyDictionary()
    if file_managers:
        for k, v in six.iteritems(file_managers):
            # Registering by FileAttribute key is not supported.
            if isinstance(k, FileAttribute):
                raise NotImplementedError()
            registry[k] = v

    def find_file_manager(self, target):
        # Resolve a FileAttribute to its owning class, or an instance
        # to its type; the result must be a mapped SQLAlchemy class.
        if isinstance(target, FileAttribute):
            assert hasattr(target, 'class_')
            target = target.class_
        else:
            if not inspect.isclass(target):
                target = type(target)
            assert hasattr(target, 'metadata')
            assert class_mapper(target) is not None

        # Most specific wins: per-class, then per-metadata, then default.
        if target in registry:
            return registry[target]
        if target.metadata in registry:
            return registry[target.metadata]
        return file_manager

    def session_maker(*args, **kwargs):
        session = sessionmaker(*args, **kwargs)
        # XXX in case we want to use session manager somehow bound
        # to request environment. For example, to generate user-specific
        # URLs.
        #session.file_manager = \
        #        kwargs.get('file_manager', file_manager)
        session.file_manager = file_manager
        # Bind find_file_manager as a method of this session instance.
        session.find_file_manager = six.create_bound_method(
            find_file_manager, session)
        return session
    return session_maker
def oscillating_setpoint(_square_wave=False, shift=0):
    """A basic example of a target that you may want to approximate.

    If you have a thermostat, this is a temperature setting.
    This target can't change too often.

    Yields an endless stream of setpoint values: a 20/50 square wave with
    a 300-tick period when `_square_wave` is true, otherwise a slowly
    drifting sum of two sines centred on 20.
    """
    import math
    tick = 0
    if _square_wave:
        while True:
            # High (50) for the first half of each 300-tick period, else 20.
            yield ((tick % 300) < 150) * 30 + 20
            tick += 1
    else:
        two_pi = 2 * 3.1415926
        while True:
            yield (10 * math.sin(two_pi * tick + shift)
                   + 20 + 5 * math.sin(two_pi * tick * 3 + shift))
            # Tiny increment so the sine target drifts slowly.
            tick += .001
def bash_echo_metric():
    """A very basic example that monitors a number of currently running processes"""
    import subprocess
    # import random  # more predictable version of the metric
    cmd = (
        'set -o pipefail '
        ' ; pgrep -f "^bash.*sleep .*from bash: started relay launcher"'
        ' | wc -l '
    )
    # less predictable version of the metric
    # cmd = 'ps aux|wc -l'
    while True:
        # Count matching processes via pgrep | wc -l.
        count = subprocess.check_output(cmd, shell=True, executable='bash')
        # + random.choice([-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
        yield int(count)
def bash_echo_warmer(n):
    """A very basic example of how to create n additional tasks.

    This is a warmer function with randomly delayed effects on the
    bash_echo_metric and random task lengths to make the metric less
    predictable.
    """
    import subprocess
    import random
    cmd = (
        'set -o pipefail '
        " ; sleep %s "
        " ; sh -c 'echo from bash: started relay launcher task && sleep %s'"
    )
    for _ in range(n):
        # Random start delay in [1, 2) s and task lifetime in [4, 8) s.
        start_delay = (1 + random.random()) * 1
        lifetime = (1 + random.random()) * 4
        subprocess.Popen(
            cmd % (start_delay, lifetime),
            shell=True, stdout=subprocess.PIPE, executable='bash')
def bash_echo_cooler(n):
    """A very basic example of how to destroy n running tasks.

    This is a cooler function: kills up to n of the warmer-launched tasks.
    """
    import subprocess
    # BUG FIX: 'pipefile' was a typo for 'pipefail' (the sibling
    # bash_echo_metric uses the correct spelling); bash rejects the bogus
    # option, defeating the intended pipeline-failure propagation.
    cmd = (
        'set -o pipefail '
        ' ; kill `pgrep -f "from bash: started relay launcher task"'
        ' | tail -n %s` 2>/dev/null' % n)
    subprocess.Popen(cmd, shell=True, executable='bash').wait()
def stop_if_mostly_diverging(errdata):
    """This is an example stop condition that asks Relay to quit if the
    error difference between consecutive samples is increasing more than
    half of the time.

    It's quite sensitive and designed for the demo, so you probably
    shouldn't use it in a production setting.
    """
    # Count consecutive pairs whose absolute error grew.
    n_increases = sum(
        1 for prev, cur in zip(errdata, errdata[1:])
        if abs(cur) - abs(prev) > 0)
    if len(errdata) * 0.5 < n_increases:
        # most of the time, the next sample is worse than the previous sample
        # relay is not healthy
        return 0
    # most of the time, the next sample is better than the previous sample
    # relay is in a healthy state
    return -1
def has_resolved_dependencies(self):
    """Return True if all dependencies are in State.DONE"""
    # Vacuously True for a task with no dependencies.
    return all(dep.state == Task.State.DONE for dep in self.dependencies)
def dependencies_as_list(self):
    """Returns a list of dependency names."""
    return [dep.name for dep in self.dependencies]
def get_task(self, name):
    """Get task by name or create it if it does not exist.

    :param name: task name, used as the key in self.tasks.
    :return: the existing or newly created Task.
    """
    # EAFP single lookup; the previous `name in self.tasks.keys()`
    # built a key list (O(n) membership on Python 2) on every call.
    try:
        return self.tasks[name]
    except KeyError:
        task = Task(name)
        self.tasks[name] = task
        return task
def get_next(self):
    """Return next task from the stack that has all dependencies resolved.

    Return None if there are no tasks with resolved dependencies or if
    there are no more tasks on the stack. Use `count` to check whether
    there are still some tasks left on the stack.

    raise ValueError if total ordering is not possible."""
    self.update_tasks_status()
    if self.dirty:
        # Re-verify that a total ordering exists after the task set changed.
        self.tsort()
        self.dirty = False
    for _, candidate in self.tasks.iteritems():
        if candidate.is_new() and candidate.has_resolved_dependencies():
            return candidate
    return None
def count_buildable_tasks(self):
    """Count tasks that are new and have dependencies in non FAILED state."""
    self.update_tasks_status()
    total = 0
    for _, task in self.tasks.iteritems():
        # Only NEW tasks can still be built.
        if task.state is not Task.State.NEW:
            continue
        if self.are_dependencies_buildable(task):
            total += 1
            logging.debug("Buildable task: %s" % task.name)
        else:
            logging.debug("Task %s has broken dependencies." % task.name)
    return total
def filter_tasks(self, task_names, keep_dependencies=False):
    """If a filter is applied only tasks with the given names and their
    dependencies (if keep_dependencies=True) are kept in the list of tasks.

    :param task_names: iterable of task names to keep.
    :param keep_dependencies: if True, also keep each task's ordered
        dependencies; otherwise the kept tasks' dependency sets are cleared.
    """
    new_tasks = {}
    for task_name in task_names:
        task = self.get_task(task_name)
        # BUG FIX: the membership tests previously compared Task objects
        # against the dict's *name* keys (`task not in new_tasks`), so
        # they never matched; test by name, which is what is stored.
        if task.name not in new_tasks:
            new_tasks[task.name] = task
        if keep_dependencies:
            for dependency in task.ordered_dependencies():
                if dependency.name not in new_tasks:
                    new_tasks[dependency.name] = dependency
        else:
            # strip dependencies
            task.dependencies = set()
    self.tasks = new_tasks
def tsort(self):
    """Given a partial ordering, return a totally ordered list.

    Each task maps to the set of tasks it depends on. The return value is
    a list of sets, each of which has only dependencies on items in
    previous entries in the list.

    raise ValueError if ordering is not possible (check for circular or
    missing dependencies)"""
    # Map each task to the set of tasks it depends on.
    pending = {}
    for _, task in self.tasks.iteritems():
        pending[task] = task.dependencies

    ordered = []
    while True:
        # Tasks with no unmet dependencies form the next level.
        ready = set(t for t, deps in pending.iteritems() if not deps)
        if not ready:
            break
        ordered.append(ready)
        # Drop the satisfied tasks and subtract them from remaining deps.
        pending = dict((t, deps - ready)
                       for t, deps in pending.iteritems() if t not in ready)

    if pending:
        # Anything left depends on something unsatisfiable.
        raise ValueError('total ordering not possible (check for circular or missing dependencies)')
    return ordered
def build(self, build_execution_configuration, **kwargs):
    """
    Triggers the build execution for a given configuration.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    :param callback function: The callback function for asynchronous request. (optional)
    :param str build_execution_configuration: Build Execution Configuration. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
    :param str username_triggered: Username who triggered the build. If empty current user is used.
    :param str callback_url: Optional Callback URL
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the callback and no-callback branches delegated identically;
    # the delegate returns the request thread when a callback is present,
    # the response data otherwise.
    return self.build_with_http_info(build_execution_configuration, **kwargs)
def build_task_completed(self, task_id, build_result, **kwargs):
    """
    Notifies the completion of externally managed build task process.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    :param callback function: The callback function for asynchronous request. (optional)
    :param int task_id: Build task id (required)
    :param str build_result: Build result (required)
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both branches delegated identically; the delegate returns the
    # request thread for async (callback) calls, the data otherwise.
    return self.build_task_completed_with_http_info(task_id, build_result, **kwargs)
def cancel_bbuild(self, build_execution_configuration_id, **kwargs):
    """
    Cancel the build execution defined with given executionConfigurationId.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    :param callback function: The callback function for asynchronous request. (optional)
    :param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required)
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both branches delegated identically; the delegate returns the
    # request thread for async (callback) calls, the data otherwise.
    return self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs)
def _load_config(self):
    """
    Load project's config and return dict.

    TODO: Convert the original dotted representation to hierarchical.
    """
    config = import_module('config')
    # Public names only; underscore-prefixed attributes are skipped.
    public_names = (n for n in dir(config) if not n.startswith('_'))
    return {n: getattr(config, n) for n in public_names}
def get(name, default=None):
    """
    Return variable by name from the project's config.
    Name can be a dotted path, like: 'rails.db.type'.

    :param name: dotted config path; must contain at least one dot.
    :param default: value returned when the section or variable is missing.
    :raises Exception: if `name` contains no dot.
    """
    if '.' not in name:
        raise Exception("Config path should be divided by at least one dot")
    section_name, var_path = name.split('.', 1)
    section = Config._data.get(section_name)
    # BUG FIX: `default` was accepted but never used, and a missing
    # section crashed with AttributeError on None instead of returning it.
    if section is None:
        return default
    return section.get(var_path, default)
def find_le(self, k):
    """Return last item with a key <= k. Raise ValueError if not found."""
    idx = bisect_right(self._keys, k)
    if not idx:
        # Everything in _keys is greater than k.
        raise ValueError('No item found with key at or below: %r' % (k,))
    return self._items[idx - 1]
def find_le_index(self, k):
    """Return index of last item with a key <= k. Raise ValueError if not found."""
    idx = bisect_right(self._keys, k)
    if not idx:
        # Everything in _keys is greater than k.
        raise ValueError('No item found with key at or below: %r' % (k,))
    return idx - 1
def find_lt(self, k):
    """Return last item with a key < k. Raise ValueError if not found."""
    idx = bisect_left(self._keys, k)
    if not idx:
        # Nothing in _keys is strictly below k.
        raise ValueError('No item found with key below: %r' % (k,))
    return self._items[idx - 1]
def find_ge_index(self, k):
    """Return index of first item with a key >= k. Raise ValueError if not found."""
    idx = bisect_left(self._keys, k)
    # Note: uses len(self) (the container's __len__), as in the original.
    if idx == len(self):
        raise ValueError('No item found with key at or above: %r' % (k,))
    return idx
def get_dbg_brk_linux64(): ''' Return the current brk value in the debugged process (only x86_64 Linux) ''' # TODO this method is so weird, find a unused address to inject code not # the base address debugger = get_debugger() code = b'\x0f\x05' # syscall rax = debugger.get_reg("rax") rdi = debugger.get_reg("rdi") rip = debugger.get_reg("rip") efl = debugger.get_reg("efl") debugger.set_reg("rax", 12) # sys_brk debugger.set_reg("rdi", 0) base = debugger.image_base() inj = base save = debugger.get_bytes(inj, len(code)) debugger.put_bytes(inj, code) debugger.set_reg("rip", inj) debugger.step_into() debugger.wait_ready() brk_res = debugger.get_reg("rax") debugger.set_reg("rax", rax) debugger.set_reg("rdi", rdi) debugger.set_reg("rip", rip) debugger.set_reg("efl", efl) debugger.put_bytes(inj, save) return brk_res
def get_dbg_brk_linux32(): ''' Return the current brk value in the debugged process (only x86 Linux) ''' # TODO this method is so weird, find a unused address to inject code not # the base address debugger = get_debugger() code = b'\xcd\x80' # int 0x80 eax = debugger.get_reg("eax") ebx = debugger.get_reg("ebx") eip = debugger.get_reg("eip") efl = debugger.get_reg("efl") debugger.set_reg("eax", 45) # sys_brk debugger.set_reg("ebx", 0) base = debugger.image_base() inj = base save = debugger.get_bytes(inj, len(code)) debugger.put_bytes(inj, code) debugger.set_reg("eip", inj) debugger.step_into() debugger.wait_ready() brk_res = debugger.get_reg("eax") debugger.set_reg("eax", eax) debugger.set_reg("ebx", ebx) debugger.set_reg("eip", eip) debugger.set_reg("efl", efl) debugger.put_bytes(inj, save) return brk_res
def start(
        loop: abstract_loop = None,
        interval: float = 0.5,
        hook: hook_type = None) -> asyncio.Task:
    """
    Start the reloader. Create the task which is watching loaded modules
    and manually added files via ``watch()`` and reloading the process
    in case of modification. Attach this task to the loop.

    If ``hook`` is provided, it will be called right before
    the application goes to the reload stage.

    :param loop: event loop to attach the watcher task to; defaults to
        ``asyncio.get_event_loop()``.
    :param interval: polling period in seconds between modification checks.
    :param hook: optional callable stored in the module-global
        ``reload_hook``, invoked just before the reload.
    :return: the module-global singleton watcher task.
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    global reload_hook
    if hook is not None:
        reload_hook = hook

    global task
    # Singleton: only one watcher task is ever created; subsequent calls
    # return the existing one.
    if not task:
        modify_times = {}
        # Single worker thread performs the modification checks off-loop.
        executor = ThreadPoolExecutor(1)
        task = call_periodically(
            loop,
            interval,
            check_and_reload,
            modify_times,
            executor,
        )
    return task
def strftime(dt, fmt):
    '''
    `strftime` implementation working before 1900

    time.strftime cannot handle years before 1900, so for such dates the
    year-bearing directives are substituted with literal text and the
    remaining formatting is done with a stand-in year that has the same
    calendar layout (weekday/leap-year pattern repeats every 28 years).

    :param dt: a datetime/date object.
    :param fmt: strftime format string; %s is not supported.
    :raises TypeError: if fmt contains a %s directive.
    '''
    # %s (seconds since epoch) cannot be emulated by this fallback.
    if _illegal_s.search(fmt):
        raise TypeError("This strftime implementation does not handle %s")
    if dt.year > 1900:
        # Native strftime handles these dates directly.
        return dt.strftime(fmt)

    # Replace year-bearing directives with the *real* year text, since the
    # year passed to time.strftime below is only a calendar stand-in.
    fmt = fmt.replace('%c', '%a %b %d %H:%M:%S %Y')\
        .replace('%Y', str(dt.year))\
        .replace('%y', '{:04}'.format(dt.year)[-2:])
    year = dt.year
    # For every non-leap year century, advance by
    # 6 years to get into the 28-year repeat cycle
    delta = 2000 - year
    off = 6*(delta // 100 + delta // 400)
    year = year + off

    # Move to around the year 2000
    year = year + ((2000 - year)//28)*28
    timetuple = dt.timetuple()
    # Format with the stand-in year; the substituted literals above carry
    # the true year into the output.
    return time.strftime(fmt, (year,) + timetuple[1:])
def library_pg(args, l, config):
    """Report on the operation of a Postgres Library database.

    Python 2 code. Depending on the parsed CLI `args`, checks
    connectivity (--connect), lists active backends (--processes), shows
    blocked/blocking query pairs (--blocks), or dumps the lock table
    (--locks), rendered as terminal tables.
    """
    import tabulate
    import terminaltables
    from textwrap import fill
    from ambry.util.text import getTerminalSize
    import sys

    if args.connect:
        # Connectivity check only: exit 0 on success, 1 (with the error
        # printed) on failure.
        try:
            l.database.connection.execute('SELECT * FROM pg_stat_activity;')
            sys.exit(0)
        except Exception as e:
            prt(str(e))
            sys.exit(1)

    db = l.database

    # Terminal width x is used to wrap long query/statement columns.
    (x, y) = getTerminalSize()

    if args.processes:
        # Active backends and their current queries.
        headers = None
        rows = []
        for row in db.connection.execute('SELECT pid, client_addr, application_name ass, query FROM pg_stat_activity '):
            if not headers:
                headers = row.keys()
            row = list(str(e) for e in row)
            row[3] = fill(row[3],x-50)  # wrap the query text to fit
            rows.append(row)

        #print tabulate.tabulate(rows, headers)
        table = terminaltables.UnixTable([headers]+rows)
        print table.table

    if args.blocks:
        # Pairs of blocked/blocking processes and their statements.
        headers = None
        rows = []

        # NOTE(review): q1 is defined but never executed — dead code?
        q1 = """
        SELECT pid, database, mode, locktype, mode, relation, tuple, virtualxid FROM pg_locks order by pid;
        """

        q2 = """
        SELECT blocked_locks.pid     AS blocked_pid,
            -- blocked_activity.usename  AS blocked_user,
            blocking_locks.pid     AS blocking_pid,
            -- blocking_activity.usename AS blocking_user,
            blocked_activity.query    AS blocked_statement,
            blocking_activity.query   AS current_statement_in_blocking_process
        FROM  pg_catalog.pg_locks         blocked_locks
        JOIN pg_catalog.pg_stat_activity blocked_activity  ON blocked_activity.pid = blocked_locks.pid
        JOIN pg_catalog.pg_locks         blocking_locks
            ON blocking_locks.locktype = blocked_locks.locktype
            AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
            AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
            AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
            AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
            AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
            AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
            AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
            AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
            AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
            AND blocking_locks.pid != blocked_locks.pid
        JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
        WHERE NOT blocked_locks.GRANTED;
        """

        for row in db.connection.execute(q2):
            if not headers:
                headers = row.keys()
            row = list(str(e) for e in row)
            # Wrap the two statement columns to share the remaining width.
            row[2] = fill(row[2],(x-50)/2)
            row[3] = fill(row[3], (x-50)/2)
            rows.append(row)

        if rows:
            table = terminaltables.UnixTable([headers] + rows)
            print table.table

    if args.locks:
        # Full lock table, ordered by backend pid.
        headers = None
        rows = []
        q = """
        SELECT pid, database, mode, locktype, mode, relation::regclass, tuple, virtualxid
        FROM pg_locks ORDER BY pid ;
        """
        for row in db.connection.execute(q):
            if not headers:
                headers = row.keys()
            row = list(str(e) for e in row)
            # Wrap the two widest columns to share the remaining width.
            row[2] = fill(row[2], (x - 50) / 2)
            row[3] = fill(row[3], (x - 50) / 2)
            rows.append(row)

        if rows:
            table = terminaltables.UnixTable([headers] + rows)
            print table.table
def _detect_timezone_windows():
    """Detect timezone on the windows platform.

    Returns a pytz timezone for the machine's configured Windows time
    zone, or None if it cannot be determined or mapped to an Olson name.
    """
    # pylint: disable=global-statement
    global win32timezone_to_en

    # Try and fetch the key_name for the timezone using
    # Get(Dynamic)TimeZoneInformation
    tzi = DTZI_c()
    kernel32 = ctypes.windll.kernel32
    getter = kernel32.GetTimeZoneInformation
    # Prefer the dynamic variant when present; it fills in key_name.
    getter = getattr(kernel32, "GetDynamicTimeZoneInformation", getter)
    # code is for daylight savings: 0 means disabled/not defined, 1 means enabled
    # but inactive, 2 means enabled and active
    _ = getter(ctypes.byref(tzi))
    win32tz_key_name = tzi.key_name
    if not win32tz_key_name:
        if win32timezone is None:
            return None

        # We're on Windows before Vista/Server 2008 - need to look up the
        # standard_name in the registry.
        # This will not work in some multilingual setups if running in a language
        # other than the operating system default
        win32tz_name = tzi.standard_name
        if not win32timezone_to_en:
            # Lazily build (and cache in the module global) the localized
            # name -> English key-name map from the registry.
            win32timezone_to_en = dict(
                win32timezone.TimeZoneInfo._get_indexed_time_zone_keys("Std"))
        win32tz_key_name = win32timezone_to_en.get(win32tz_name, win32tz_name)

    # Look up a territory-specific mapping first (the same Windows zone can
    # map to different Olson names per country), then the generic one.
    territory = locale.getdefaultlocale()[0].split("_", 1)[1]
    olson_name = win32tz_map.win32timezones.get((win32tz_key_name, territory),
                                                win32tz_map.win32timezones.get(win32tz_key_name, None))
    if not olson_name:
        return None
    if not isinstance(olson_name, str):
        # pytz expects a native str on Python 2; map entries may be unicode.
        olson_name = olson_name.encode("ascii")
    return pytz.timezone(olson_name)
def encrypt_password(raw_password, algorithm='sha1', salt=None):
    """
    Returns a string of the hexdigest of the given plaintext password and
    salt using the given algorithm ('md5', 'sha1' or other supported by
    hashlib), formatted as 'algorithm$salt$hexdigest'.

    NOTE(review): sha1 as the default and a 5-char salt are weak by modern
    standards; kept for compatibility with already-stored passwords.
    """
    if salt is None:
        # Fresh 5-hex-character salt from the OS RNG (bytes).
        salt = binascii.hexlify(os.urandom(3))[:5]
    else:
        salt = salt.encode('utf-8')
    digest = hashlib.new(algorithm, salt + raw_password.encode('utf-8')).hexdigest()
    return '{}${}${}'.format(algorithm, salt.decode('utf-8'), digest)
def check_password(raw_password, enc_password):
    """
    Returns a boolean of whether the raw_password was correct. Handles
    encryption formats behind the scenes.
    """
    # Stored format is 'algorithm$salt$hexdigest'; re-encrypt with the
    # same algorithm and salt and compare the full strings.
    algo, salt, _ = enc_password.split('$')
    return enc_password == encrypt_password(raw_password,
                                            algorithm=algo, salt=salt)
def login(self, template='login'):
    '''
    This property will return component which will handle login requests.

        auth.login(template='login.html')
    '''
    def _login(env, data):
        form = self._login_form(env)
        next_url = env.request.GET.get('next', '/')
        failed = False
        # Only a POST whose form validates is treated as a login attempt.
        if env.request.method == 'POST' and form.accept(env.request.POST):
            user_identity = self.get_user_identity(env, **form.python_data)
            if user_identity is not None:
                response = HTTPSeeOther(location=next_url)
                return self.login_identity(user_identity, response)
            # Form was valid but credentials did not match an identity.
            failed = True
        data.form = form
        data.login_failed = failed
        data.login_url = env.root.login.as_url.qs_set(next=next_url)
        return env.template.render_to_response(template, data.as_dict())
    return web.match('/login', 'login') | _login
def logout(self, redirect_to='/'):
    '''
    This property will return component which will handle logout requests.

    It only handles POST requests and does not display any rendered
    content. This handler deletes the session id from `storage`. If there
    is no session id provided or the id is incorrect the handler silently
    redirects to the login url and does not throw any exception.
    '''
    def _logout(env, data):
        location = redirect_to
        if location is None:
            # Fall back to the referring page, then to the site root.
            location = env.request.referer or '/'
        response = HTTPSeeOther(location=str(location))
        self.logout_user(env.request, response)
        return response
    return web.match('/logout', 'logout') | web.method('post') | _logout
def content(self, content):
    """
    Sets the content of this SupportLevelPage.

    :param content: The content of this SupportLevelPage.
    :type: list[str]
    """
    allowed_values = ["UNRELEASED", "EARLYACCESS", "SUPPORTED", "EXTENDED_SUPPORT", "EOL"]
    # Values outside the allowed set are rejected wholesale.
    invalid = set(content) - set(allowed_values)
    if invalid:
        raise ValueError(
            "Invalid values for `content` [{0}], must be a subset of [{1}]"
            .format(", ".join(map(str, invalid)),
                    ", ".join(map(str, allowed_values)))
        )
    self._content = content
def get_bpm_task_by_id(self, task_id, **kwargs):
    """
    Get single (recently) active BPM task.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    :param callback function: The callback function for asynchronous request. (optional)
    :param int task_id: BPM task ID (required)
    :return: BpmTaskRestSingleton
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both branches delegated identically; the delegate returns the
    # request thread for async (callback) calls, the data otherwise.
    return self.get_bpm_task_by_id_with_http_info(task_id, **kwargs)
def get_bpm_tasks(self, **kwargs):
    """
    List of (recently) active BPM tasks.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    :param callback function: The callback function for asynchronous request. (optional)
    :param int page_index: Page Index
    :param int page_size: Pagination size
    :return: BpmTaskRestPage
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both branches delegated identically; the delegate returns the
    # request thread for async (callback) calls, the data otherwise.
    return self.get_bpm_tasks_with_http_info(**kwargs)