Columns: sentence1 (string, 52 to 3.87M chars) · sentence2 (string, 1 to 47.2k chars) · label (string, 1 class: entailment)
def permissions(cls, instance, db_session=None): """ returns all non-resource permissions based on the groups the user belongs to, plus permissions set directly for this user :param instance: :param db_session: :return: """ db_session = get_db_session(db_session, instance) query = db_session.query( cls.models_proxy.GroupPermission.group_id.label("owner_id"), cls.models_proxy.GroupPermission.perm_name.label("perm_name"), sa.literal("group").label("type"), ) query = query.filter( cls.models_proxy.GroupPermission.group_id == cls.models_proxy.UserGroup.group_id ) query = query.filter( cls.models_proxy.User.id == cls.models_proxy.UserGroup.user_id ) query = query.filter(cls.models_proxy.User.id == instance.id) query2 = db_session.query( cls.models_proxy.UserPermission.user_id.label("owner_id"), cls.models_proxy.UserPermission.perm_name.label("perm_name"), sa.literal("user").label("type"), ) query2 = query2.filter(cls.models_proxy.UserPermission.user_id == instance.id) query = query.union(query2) groups_dict = dict([(g.id, g) for g in instance.groups]) return [ PermissionTuple( instance, row.perm_name, row.type, groups_dict.get(row.owner_id) if row.type == "group" else None, None, False, True, ) for row in query ]
returns all non-resource permissions based on the groups the user belongs to, plus permissions set directly for this user :param instance: :param db_session: :return:
entailment
def resources_with_perms( cls, instance, perms, resource_ids=None, resource_types=None, db_session=None ): """ returns all resources that user has perms for (note that at least one perm needs to be met) :param instance: :param perms: :param resource_ids: restricts the search to specific resources :param resource_types: :param db_session: :return: """ # owned entities have ALL permissions so we return those resources too # even without explict perms set # TODO: implement admin superrule perm - maybe return all apps db_session = get_db_session(db_session, instance) query = db_session.query(cls.models_proxy.Resource).distinct() group_ids = [gr.id for gr in instance.groups] # if user has some groups lets try to join based on their permissions if group_ids: join_conditions = ( cls.models_proxy.GroupResourcePermission.group_id.in_(group_ids), cls.models_proxy.Resource.resource_id == cls.models_proxy.GroupResourcePermission.resource_id, cls.models_proxy.GroupResourcePermission.perm_name.in_(perms), ) query = query.outerjoin( (cls.models_proxy.GroupResourcePermission, sa.and_(*join_conditions)) ) # ensure outerjoin permissions are correct - # dont add empty rows from join # conditions are - join ON possible group permissions # OR owning group/user query = query.filter( sa.or_( cls.models_proxy.Resource.owner_user_id == instance.id, cls.models_proxy.Resource.owner_group_id.in_(group_ids), cls.models_proxy.GroupResourcePermission.perm_name != None, ) # noqa ) else: # filter just by username query = query.filter(cls.models_proxy.Resource.owner_user_id == instance.id) # lets try by custom user permissions for resource query2 = db_session.query(cls.models_proxy.Resource).distinct() query2 = query2.filter( cls.models_proxy.UserResourcePermission.user_id == instance.id ) query2 = query2.filter( cls.models_proxy.Resource.resource_id == cls.models_proxy.UserResourcePermission.resource_id ) query2 = query2.filter( cls.models_proxy.UserResourcePermission.perm_name.in_(perms) ) if resource_ids: query = query.filter( cls.models_proxy.Resource.resource_id.in_(resource_ids) ) query2 = query2.filter( cls.models_proxy.Resource.resource_id.in_(resource_ids) ) if resource_types: query = query.filter( cls.models_proxy.Resource.resource_type.in_(resource_types) ) query2 = query2.filter( cls.models_proxy.Resource.resource_type.in_(resource_types) ) query = query.union(query2) query = query.order_by(cls.models_proxy.Resource.resource_name) return query
returns all resources that user has perms for (note that at least one perm needs to be met) :param instance: :param perms: :param resource_ids: restricts the search to specific resources :param resource_types: :param db_session: :return:
entailment
def groups_with_resources(cls, instance): """ Returns a list of groups the user belongs to, with eagerly loaded resources owned by those groups :param instance: :return: """ return instance.groups_dynamic.options( sa.orm.eagerload(cls.models_proxy.Group.resources) )
Returns a list of groups the user belongs to, with eagerly loaded resources owned by those groups :param instance: :return:
entailment
def resources_with_possible_perms( cls, instance, resource_ids=None, resource_types=None, db_session=None ): """ returns list of permissions and resources for this user :param instance: :param resource_ids: restricts the search to specific resources :param resource_types: restricts the search to specific resource types :param db_session: :return: """ perms = resource_permissions_for_users( cls.models_proxy, ANY_PERMISSION, resource_ids=resource_ids, resource_types=resource_types, user_ids=[instance.id], db_session=db_session, ) for resource in instance.resources: perms.append( PermissionTuple( instance, ALL_PERMISSIONS, "user", None, resource, True, True ) ) for group in cls.groups_with_resources(instance): for resource in group.resources: perms.append( PermissionTuple( instance, ALL_PERMISSIONS, "group", group, resource, True, True ) ) return perms
returns list of permissions and resources for this user :param instance: :param resource_ids: restricts the search to specific resources :param resource_types: restricts the search to specific resource types :param db_session: :return:
entailment
def gravatar_url(cls, instance, default="mm", **kwargs): """ returns user gravatar url :param instance: :param default: :param kwargs: :return: """ # construct the url hash = hashlib.md5(instance.email.encode("utf8").lower()).hexdigest() if "d" not in kwargs: kwargs["d"] = default params = "&".join( [ six.moves.urllib.parse.urlencode({key: value}) for key, value in kwargs.items() ] ) return "https://secure.gravatar.com/avatar/{}?{}".format(hash, params)
returns user gravatar url :param instance: :param default: :param kwargs: :return:
entailment
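The row above hashes the lowercased email before building the avatar URL; a minimal standalone sketch of the same scheme (the email address here is illustrative, not from the source):

```python
# Gravatar-style URL: md5 of the lowercased utf-8 email, plus a default image.
import hashlib

email = "Someone@Example.com"
digest = hashlib.md5(email.encode("utf8").lower()).hexdigest()
print("https://secure.gravatar.com/avatar/{}?d=mm".format(digest))
```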
def set_password(cls, instance, raw_password): """ sets new password on a user using password manager :param instance: :param raw_password: :return: """ # support API for both passlib 1.x and 2.x hash_callable = getattr( instance.passwordmanager, "hash", instance.passwordmanager.encrypt ) password = hash_callable(raw_password) if six.PY2: instance.user_password = password.decode("utf8") else: instance.user_password = password cls.regenerate_security_code(instance)
sets new password on a user using password manager :param instance: :param raw_password: :return:
entailment
def check_password(cls, instance, raw_password, enable_hash_migration=True): """ checks a password string against the user's password hash using the password manager :param instance: :param raw_password: :param enable_hash_migration: if legacy hashes should be migrated :return: """ verified, replacement_hash = instance.passwordmanager.verify_and_update( raw_password, instance.user_password ) if enable_hash_migration and replacement_hash: if six.PY2: instance.user_password = replacement_hash.decode("utf8") else: instance.user_password = replacement_hash return verified
checks a password string against the user's password hash using the password manager :param instance: :param raw_password: :param enable_hash_migration: if legacy hashes should be migrated :return:
entailment
def by_id(cls, user_id, db_session=None): """ fetch user by user id :param user_id: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter(cls.model.id == user_id) query = query.options(sa.orm.eagerload("groups")) return query.first()
fetch user by user id :param user_id: :param db_session: :return:
entailment
def by_user_name_and_security_code(cls, user_name, security_code, db_session=None): """ fetch user objects by user name and security code :param user_name: :param security_code: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter( sa.func.lower(cls.model.user_name) == (user_name or "").lower() ) query = query.filter(cls.model.security_code == security_code) return query.first()
fetch user objects by user name and security code :param user_name: :param security_code: :param db_session: :return:
entailment
def by_user_names(cls, user_names, db_session=None): """ fetch user objects by user names :param user_names: :param db_session: :return: """ user_names = [(name or "").lower() for name in user_names] db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter(sa.func.lower(cls.model.user_name).in_(user_names)) # q = q.options(sa.orm.eagerload(cls.groups)) return query
fetch user objects by user names :param user_names: :param db_session: :return:
entailment
def user_names_like(cls, user_name, db_session=None): """ fetch users with similar names using LIKE clause :param user_name: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter( sa.func.lower(cls.model.user_name).like((user_name or "").lower()) ) query = query.order_by(cls.model.user_name) # q = q.options(sa.orm.eagerload('groups')) return query
fetch users with similar names using LIKE clause :param user_name: :param db_session: :return:
entailment
def by_email(cls, email, db_session=None): """ fetch user object by email :param email: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model).filter( sa.func.lower(cls.model.email) == (email or "").lower() ) query = query.options(sa.orm.eagerload("groups")) return query.first()
fetch user object by email :param email: :param db_session: :return:
entailment
def users_for_perms(cls, perm_names, db_session=None): """ return users that have one of the given permissions :param perm_names: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter( cls.models_proxy.User.id == cls.models_proxy.UserGroup.user_id ) query = query.filter( cls.models_proxy.UserGroup.group_id == cls.models_proxy.GroupPermission.group_id ) query = query.filter(cls.models_proxy.GroupPermission.perm_name.in_(perm_names)) query2 = db_session.query(cls.model) query2 = query2.filter( cls.models_proxy.User.id == cls.models_proxy.UserPermission.user_id ) query2 = query2.filter( cls.models_proxy.UserPermission.perm_name.in_(perm_names) ) users = query.union(query2).order_by(cls.model.id) return users
return users that have one of the given permissions :param perm_names: :param db_session: :return:
entailment
def handle_joined(self, connection, event): """ Store join times for current nicknames when we first join. """ nicknames = [s.lstrip("@+") for s in event.arguments()[-1].split()] for nickname in nicknames: self.joined[nickname] = datetime.now()
Store join times for current nicknames when we first join.
entailment
def handle_join(self, connection, event): """ Store join time for a nickname when it joins. """ nickname = self.get_nickname(event) self.joined[nickname] = datetime.now()
Store join time for a nickname when it joins.
entailment
def handle_quit(self, connection, event): """ Store quit time for a nickname when it quits. """ nickname = self.get_nickname(event) self.quit[nickname] = datetime.now() del self.joined[nickname]
Store quit time for a nickname when it quits.
entailment
def timesince(self, when): """ Returns human friendly version of the timespan between now and the given datetime. """ units = ( ("year", 60 * 60 * 24 * 365), ("week", 60 * 60 * 24 * 7), ("day", 60 * 60 * 24), ("hour", 60 * 60), ("minute", 60), ("second", 1), ) delta = datetime.now() - when total_seconds = delta.days * 60 * 60 * 24 + delta.seconds parts = [] for name, seconds in units: value = total_seconds // seconds if value > 0: total_seconds %= seconds s = "s" if value != 1 else "" parts.append("%s %s%s" % (value, name, s)) return " and ".join(", ".join(parts).rsplit(", ", 1))
Returns human friendly version of the timespan between now and the given datetime.
entailment
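A standalone sketch of the unit-decomposition technique timesince() uses, with the integer division spelled out (the function and variable names here are illustrative, not from the source):

```python
# Decompose seconds into the largest units first, then join the parts
# with commas and a final "and" - the same approach as the row above.
UNITS = (("year", 60 * 60 * 24 * 365), ("week", 60 * 60 * 24 * 7),
         ("day", 60 * 60 * 24), ("hour", 60 * 60), ("minute", 60), ("second", 1))

def humanize(total_seconds):
    parts = []
    for name, seconds in UNITS:
        value = total_seconds // seconds  # integer division matters here
        if value > 0:
            total_seconds %= seconds
            parts.append("%d %s%s" % (value, name, "s" if value != 1 else ""))
    return " and ".join(", ".join(parts).rsplit(", ", 1))

print(humanize(90061))  # 1 day, 1 hour, 1 minute and 1 second
```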
def version(self, event): """ Shows version information. """ name = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) return "%s [%s]" % (settings.GNOTTY_VERSION_STRING, name)
Shows version information.
entailment
def commands(self, event): """ Lists all available commands. """ commands = sorted(self.commands_dict().keys()) return "Available commands: %s" % " ".join(commands)
Lists all available commands.
entailment
def help(self, event, command_name=None): """ Shows the help message for the bot. Takes an optional command name which when given, will show help for that command. """ if command_name is None: return ("Type !commands for a list of all commands. Type " "!help [command] to see help for a specific command.") try: command = self.commands_dict()[command_name] except KeyError: return "%s is not a command" % command_name argspec = getargspec(command) args = argspec.args[2:] defaults = argspec.defaults or [] for i in range(-1, -len(defaults) - 1, -1): args[i] = "%s [default: %s]" % (args[i], defaults[i]) args = ", ".join(args) help = getdoc(command).replace("\n", " ") return "help for %s: (args: %s) %s" % (command_name, args, help)
Shows the help message for the bot. Takes an optional command name which when given, will show help for that command.
entailment
def uptime(self, event, nickname=None): """ Shows the amount of time since the given nickname has been in the channel. If no nickname is given, I'll use my own. """ if nickname and nickname != self.nickname: try: uptime = self.timesince(self.joined[nickname]) except KeyError: return "%s is not in the channel" % nickname else: if nickname == self.get_nickname(event): prefix = "you have" else: prefix = "%s has" % nickname return "%s been here for %s" % (prefix, uptime) uptime = self.timesince(self.joined[self.nickname]) return "I've been here for %s" % uptime
Shows the amount of time since the given nickname has been in the channel. If no nickname is given, I'll use my own.
entailment
def seen(self, event, nickname): """ Shows the amount of time since the given nickname was last seen in the channel. """ try: self.joined[nickname] except KeyError: pass else: if nickname == self.get_nickname(event): prefix = "you are" else: prefix = "%s is" % nickname return "%s here right now" % prefix try: seen = self.timesince(self.quit[nickname]) except KeyError: return "%s has never been seen" % nickname else: return "%s was last seen %s ago" % (nickname, seen)
Shows the amount of time since the given nickname was last seen in the channel.
entailment
def id(self): """ Unique identifier of user object""" return sa.Column(sa.Integer, primary_key=True, autoincrement=True)
Unique identifier of user object
entailment
def last_login_date(self): """ Date of user's last login """ return sa.Column( sa.TIMESTAMP(timezone=False), default=lambda x: datetime.utcnow(), server_default=sa.func.now(), )
Date of user's last login
entailment
def security_code_date(self): """ Date of user's security code update """ return sa.Column( sa.TIMESTAMP(timezone=False), default=datetime(2000, 1, 1), server_default="2000-01-01 01:01", )
Date of user's security code update
entailment
def groups_dynamic(self): """ returns dynamic relationship for groups - allowing for filtering of data """ return sa.orm.relationship( "Group", secondary="users_groups", lazy="dynamic", passive_deletes=True, passive_updates=True, )
returns dynamic relationship for groups - allowing for filtering of data
entailment
def resources(self): """ Returns all resources directly owned by user, can be used to assign ownership of new resources:: user.resources.append(resource) """ return sa.orm.relationship( "Resource", cascade="all", passive_deletes=True, passive_updates=True, backref="owner", lazy="dynamic", )
Returns all resources directly owned by user, can be used to assign ownership of new resources:: user.resources.append(resource)
entailment
def resources_dynamic(self): """ Returns all resources directly owned by group, can be used to assign ownership of new resources:: group.resources.append(resource) """ return sa.orm.relationship( "Resource", cascade="all", passive_deletes=True, passive_updates=True, lazy="dynamic", )
Returns all resources directly owned by group, can be used to assign ownership of new resources:: group.resources.append(resource)
entailment
def validate_permission(self, key, permission): """ validates if the permission can be assigned to the group""" if permission.perm_name not in self.__possible_permissions__: raise AssertionError( "perm_name is not one of {}".format(self.__possible_permissions__) ) return permission
validates if the permission can be assigned to the group
entailment
def parse_feeds(self, message_channel=True): """ Iterates through each of the feed URLs, parses their items, and sends any items to the channel that have not previously been parsed. """ if parse: for feed_url in self.feeds: feed = parse(feed_url) for item in feed.entries: if item["id"] not in self.feed_items: self.feed_items.add(item["id"]) if message_channel: message = self.format_item_message(feed, item) self.message_channel(message) return
Iterates through each of the feed URLs, parses their items, and sends any items to the channel that have not previously been parsed.
entailment
def ChunkBy(self: dict, f=None): """ [ { 'self': [1, 1, 3, 3, 1, 1], 'f': lambda x: x%2, 'assert': lambda ret: ret == [[1, 1], [3, 3], [1, 1]] } ] """ if f is None: return _chunk(self.items()) if is_to_destruct(f): f = destruct_func(f) return _chunk(self.items(), f)
[ { 'self': [1, 1, 3, 3, 1, 1], 'f': lambda x: x%2, 'assert': lambda ret: ret == [[1, 1], [3, 3], [1, 1]] } ]
entailment
def GroupBy(self: dict, f=None): """ [ { 'self': [1, 2, 3], 'f': lambda x: x%2, 'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3] } ] """ if f and is_to_destruct(f): f = destruct_func(f) return _group_by(self.items(), f)
[ { 'self': [1, 2, 3], 'f': lambda x: x%2, 'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3] } ]
entailment
def Take(self: dict, n): """ [ { 'self': [1, 2, 3], 'n': 2, 'assert': lambda ret: list(ret) == [1, 2] } ] """ for i, e in enumerate(self.items()): if i == n: break yield e
[ { 'self': [1, 2, 3], 'n': 2, 'assert': lambda ret: list(ret) == [1, 2] } ]
entailment
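Take() above is the classic early-exit generator; a quick sketch showing it matches itertools.islice on a plain iterable (the list and helper name are illustrative):

```python
from itertools import islice

def take(iterable, n):
    # yield at most n elements, stopping as soon as the index reaches n
    for i, e in enumerate(iterable):
        if i == n:
            break
        yield e

assert list(take([1, 2, 3], 2)) == [1, 2]
assert list(islice([1, 2, 3], 2)) == [1, 2]  # stdlib equivalent
```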
def TakeIf(self: dict, f): """ [ { 'self': [1, 2, 3], 'f': lambda e: e%2, 'assert': lambda ret: list(ret) == [1, 3] } ] """ if is_to_destruct(f): f = destruct_func(f) return (e for e in self.items() if f(e))
[ { 'self': [1, 2, 3], 'f': lambda e: e%2, 'assert': lambda ret: list(ret) == [1, 3] } ]
entailment
def TakeWhile(self: dict, f): """ [ { 'self': [1, 2, 3, 4, 5], 'f': lambda x: x < 4, 'assert': lambda ret: list(ret) == [1, 2, 3] } ] """ if is_to_destruct(f): f = destruct_func(f) for e in self.items(): if not f(e): break yield e
[ { 'self': [1, 2, 3, 4, 5], 'f': lambda x: x < 4, 'assert': lambda ret: list(ret) == [1, 2, 3] } ]
entailment
def Drop(self: dict, n): """ [ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [1, 2] } ] """ n = len(self) - n if n <= 0: return for i, e in enumerate(self.items()): if i == n: break yield e
[ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [1, 2] } ]
entailment
def Skip(self: dict, n): """ [ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [4, 5] } ] """ con = iter(self.items()) for _ in range(n): next(con, None) return con
[ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [4, 5] } ]
entailment
def print_meter_record(file_path, rows=5): """ Output readings for specified number of rows to console """ m = nr.read_nem_file(file_path) print('Header:', m.header) print('Transactions:', m.transactions) for nmi in m.readings: for channel in m.readings[nmi]: print(nmi, 'Channel', channel) for reading in m.readings[nmi][channel][-rows:]: print('', reading)
Output readings for specified number of rows to console
entailment
def users(self): """ returns all users that have permissions for this resource""" return sa.orm.relationship( "User", secondary="users_resources_permissions", passive_deletes=True, passive_updates=True, )
returns all users that have permissions for this resource
entailment
def from_resource_deeper( cls, resource_id=None, limit_depth=1000000, db_session=None, *args, **kwargs ): """ This returns you subtree of ordered objects relative to the start resource_id (currently only implemented in postgresql) :param resource_id: :param limit_depth: :param db_session: :return: """ tablename = cls.model.__table__.name raw_q = """ WITH RECURSIVE subtree AS ( SELECT res.*, 1 AS depth, LPAD(res.ordering::CHARACTER VARYING, 7, '0') AS sorting, res.resource_id::CHARACTER VARYING AS path FROM {tablename} AS res WHERE res.resource_id = :resource_id UNION ALL SELECT res_u.*, depth+1 AS depth, (st.sorting::CHARACTER VARYING || '/' || LPAD(res_u.ordering::CHARACTER VARYING, 7, '0') ) AS sorting, (st.path::CHARACTER VARYING || '/' || res_u.resource_id::CHARACTER VARYING ) AS path FROM {tablename} res_u, subtree st WHERE res_u.parent_id = st.resource_id ) SELECT * FROM subtree WHERE depth<=:depth ORDER BY sorting; """.format( tablename=tablename ) # noqa db_session = get_db_session(db_session) text_obj = sa.text(raw_q) query = db_session.query(cls.model, "depth", "sorting", "path") query = query.from_statement(text_obj) query = query.params(resource_id=resource_id, depth=limit_depth) return query
This returns you subtree of ordered objects relative to the start resource_id (currently only implemented in postgresql) :param resource_id: :param limit_depth: :param db_session: :return:
entailment
def delete_branch(cls, resource_id=None, db_session=None, *args, **kwargs): """ This deletes whole branch with children starting from resource_id :param resource_id: :param db_session: :return: """ tablename = cls.model.__table__.name # lets lock rows to prevent bad tree states resource = ResourceService.lock_resource_for_update( resource_id=resource_id, db_session=db_session ) parent_id = resource.parent_id ordering = resource.ordering raw_q = """ WITH RECURSIVE subtree AS ( SELECT res.resource_id FROM {tablename} AS res WHERE res.resource_id = :resource_id UNION ALL SELECT res_u.resource_id FROM {tablename} res_u, subtree st WHERE res_u.parent_id = st.resource_id ) DELETE FROM resources where resource_id in (select * from subtree); """.format( tablename=tablename ) # noqa db_session = get_db_session(db_session) text_obj = sa.text(raw_q) db_session.execute(text_obj, params={"resource_id": resource_id}) cls.shift_ordering_down(parent_id, ordering, db_session=db_session) return True
Deletes the whole branch, with children, starting from resource_id :param resource_id: :param db_session: :return:
entailment
def from_parent_deeper( cls, parent_id=None, limit_depth=1000000, db_session=None, *args, **kwargs ): """ This returns you subtree of ordered objects relative to the start parent_id (currently only implemented in postgresql) :param resource_id: :param limit_depth: :param db_session: :return: """ if parent_id: limiting_clause = "res.parent_id = :parent_id" else: limiting_clause = "res.parent_id is null" tablename = cls.model.__table__.name raw_q = """ WITH RECURSIVE subtree AS ( SELECT res.*, 1 AS depth, LPAD(res.ordering::CHARACTER VARYING, 7, '0') AS sorting, res.resource_id::CHARACTER VARYING AS path FROM {tablename} AS res WHERE {limiting_clause} UNION ALL SELECT res_u.*, depth+1 AS depth, (st.sorting::CHARACTER VARYING || '/' || LPAD(res_u.ordering::CHARACTER VARYING, 7, '0') ) AS sorting, (st.path::CHARACTER VARYING || '/' || res_u.resource_id::CHARACTER VARYING ) AS path FROM {tablename} res_u, subtree st WHERE res_u.parent_id = st.resource_id ) SELECT * FROM subtree WHERE depth<=:depth ORDER BY sorting; """.format( tablename=tablename, limiting_clause=limiting_clause ) # noqa db_session = get_db_session(db_session) text_obj = sa.text(raw_q) query = db_session.query(cls.model, "depth", "sorting", "path") query = query.from_statement(text_obj) query = query.params(parent_id=parent_id, depth=limit_depth) return query
This returns you subtree of ordered objects relative to the start parent_id (currently only implemented in postgresql) :param resource_id: :param limit_depth: :param db_session: :return:
entailment
def build_subtree_strut(self, result, *args, **kwargs): """ Returns a dictionary in form of {node:Resource, children:{node_id: Resource}} :param result: :return: """ items = list(result) root_elem = {"node": None, "children": OrderedDict()} if len(items) == 0: return root_elem for _, node in enumerate(items): new_elem = {"node": node.Resource, "children": OrderedDict()} path = list(map(int, node.path.split("/"))) parent_node = root_elem normalized_path = path[:-1] if normalized_path: for path_part in normalized_path: parent_node = parent_node["children"][path_part] parent_node["children"][new_elem["node"].resource_id] = new_elem return root_elem
Returns a dictionary in form of {node:Resource, children:{node_id: Resource}} :param result: :return:
entailment
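build_subtree_strut() relies on the recursive query's ORDER BY sorting to emit parents before children, so every path prefix is already in the tree when a row arrives; a toy walk-through with made-up (path, resource_id) rows:

```python
from collections import OrderedDict

# (path, resource_id) pairs; parents appear before their children
rows = [("1", 1), ("1/2", 2), ("1/2/5", 5), ("1/3", 3)]

root = {"node": None, "children": OrderedDict()}
for path, rid in rows:
    new_elem = {"node": rid, "children": OrderedDict()}
    parent = root
    for part in list(map(int, path.split("/")))[:-1]:  # walk the path prefix
        parent = parent["children"][part]
    parent["children"][rid] = new_elem

assert list(root["children"][1]["children"]) == [2, 3]
assert list(root["children"][1]["children"][2]["children"]) == [5]
```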
def path_upper( cls, object_id, limit_depth=1000000, db_session=None, *args, **kwargs ): """ This returns you path to root node starting from object_id currently only for postgresql :param object_id: :param limit_depth: :param db_session: :return: """ tablename = cls.model.__table__.name raw_q = """ WITH RECURSIVE subtree AS ( SELECT res.*, 1 as depth FROM {tablename} res WHERE res.resource_id = :resource_id UNION ALL SELECT res_u.*, depth+1 as depth FROM {tablename} res_u, subtree st WHERE res_u.resource_id = st.parent_id ) SELECT * FROM subtree WHERE depth<=:depth; """.format( tablename=tablename ) db_session = get_db_session(db_session) q = ( db_session.query(cls.model) .from_statement(sa.text(raw_q)) .params(resource_id=object_id, depth=limit_depth) ) return q
This returns you path to root node starting from object_id currently only for postgresql :param object_id: :param limit_depth: :param db_session: :return:
entailment
def move_to_position( cls, resource_id, to_position, new_parent_id=noop, db_session=None, *args, **kwargs ): """ Moves node to new location in the tree :param resource_id: resource to move :param to_position: new position :param new_parent_id: new parent id :param db_session: :return: """ db_session = get_db_session(db_session) # lets lock rows to prevent bad tree states resource = ResourceService.lock_resource_for_update( resource_id=resource_id, db_session=db_session ) ResourceService.lock_resource_for_update( resource_id=resource.parent_id, db_session=db_session ) same_branch = False # reset if parent is same as old if new_parent_id == resource.parent_id: new_parent_id = noop if new_parent_id is not noop: cls.check_node_parent(resource_id, new_parent_id, db_session=db_session) else: same_branch = True if new_parent_id is noop: # it is not guaranteed that parent exists parent_id = resource.parent_id if resource else None else: parent_id = new_parent_id cls.check_node_position( parent_id, to_position, on_same_branch=same_branch, db_session=db_session ) # move on same branch if new_parent_id is noop: order_range = list(sorted((resource.ordering, to_position))) move_down = resource.ordering > to_position query = db_session.query(cls.model) query = query.filter(cls.model.parent_id == parent_id) query = query.filter(cls.model.ordering.between(*order_range)) if move_down: query.update( {cls.model.ordering: cls.model.ordering + 1}, synchronize_session=False, ) else: query.update( {cls.model.ordering: cls.model.ordering - 1}, synchronize_session=False, ) db_session.flush() db_session.expire(resource) resource.ordering = to_position # move between branches else: cls.shift_ordering_down( resource.parent_id, resource.ordering, db_session=db_session ) cls.shift_ordering_up(new_parent_id, to_position, db_session=db_session) db_session.expire(resource) resource.parent_id = new_parent_id resource.ordering = to_position db_session.flush() return True
Moves node to new location in the tree :param resource_id: resource to move :param to_position: new position :param new_parent_id: new parent id :param db_session: :return:
entailment
def shift_ordering_up(cls, parent_id, position, db_session=None, *args, **kwargs): """ Shifts ordering to "open a gap" for node insertion, begins the shift from given position :param parent_id: :param position: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter(cls.model.parent_id == parent_id) query = query.filter(cls.model.ordering >= position) query.update( {cls.model.ordering: cls.model.ordering + 1}, synchronize_session=False ) db_session.flush()
Shifts ordering to "open a gap" for node insertion, begins the shift from given position :param parent_id: :param position: :param db_session: :return:
entailment
def set_position(cls, resource_id, to_position, db_session=None, *args, **kwargs): """ Sets node position for new node in the tree :param resource_id: resource to move :param to_position: new position :param db_session: :return: """ db_session = get_db_session(db_session) # lets lock rows to prevent bad tree states resource = ResourceService.lock_resource_for_update( resource_id=resource_id, db_session=db_session ) cls.check_node_position( resource.parent_id, to_position, on_same_branch=True, db_session=db_session ) cls.shift_ordering_up(resource.parent_id, to_position, db_session=db_session) db_session.flush() db_session.expire(resource) resource.ordering = to_position return True
Sets node position for new node in the tree :param resource_id: resource to move :param to_position: new position :param db_session: :return:
entailment
def check_node_parent( cls, resource_id, new_parent_id, db_session=None, *args, **kwargs ): """ Checks if parent destination is valid for node :param resource_id: :param new_parent_id: :param db_session: :return: """ db_session = get_db_session(db_session) new_parent = ResourceService.lock_resource_for_update( resource_id=new_parent_id, db_session=db_session ) # we are not moving to "root" so parent should be found if not new_parent and new_parent_id is not None: raise ZigguratResourceTreeMissingException("New parent node not found") else: result = cls.path_upper(new_parent_id, db_session=db_session) path_ids = [r.resource_id for r in result] if resource_id in path_ids: raise ZigguratResourceTreePathException( "Trying to insert node into itself" )
Checks if parent destination is valid for node :param resource_id: :param new_parent_id: :param db_session: :return:
entailment
def count_children(cls, resource_id, db_session=None, *args, **kwargs): """ Counts children of resource node :param resource_id: :param db_session: :return: """ query = db_session.query(cls.model.resource_id) query = query.filter(cls.model.parent_id == resource_id) return query.count()
Counts children of resource node :param resource_id: :param db_session: :return:
entailment
def check_node_position( cls, parent_id, position, on_same_branch, db_session=None, *args, **kwargs ): """ Checks if node position for given parent is valid, raises exception if this is not the case :param parent_id: :param position: :param on_same_branch: indicates that we are checking same branch :param db_session: :return: """ db_session = get_db_session(db_session) if not position or position < 1: raise ZigguratResourceOutOfBoundaryException( "Position is lower than {}", value=1 ) item_count = cls.count_children(parent_id, db_session=db_session) max_value = item_count if on_same_branch else item_count + 1 if position > max_value: raise ZigguratResourceOutOfBoundaryException( "Maximum resource ordering is {}", value=max_value )
Checks if node position for given parent is valid, raises exception if this is not the case :param parent_id: :param position: :param on_same_branch: indicates that we are checking same branch :param db_session: :return:
entailment
def egcd(b, n): ''' Given two integers (b, n), returns (gcd(b, n), a, m) such that a*b + n*m = gcd(b, n). Adapted from several sources: https://brilliant.org/wiki/extended-euclidean-algorithm/ https://rosettacode.org/wiki/Modular_inverse https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm https://en.wikipedia.org/wiki/Euclidean_algorithm >>> egcd(1, 1) (1, 0, 1) >>> egcd(12, 8) (4, 1, -1) >>> egcd(23894798501898, 23948178468116) (2, 2437250447493, -2431817869532) >>> egcd(pow(2, 50), pow(3, 50)) (1, -260414429242905345185687, 408415383037561) ''' (x0, x1, y0, y1) = (1, 0, 0, 1) while n != 0: (q, b, n) = (b // n, n, b % n) (x0, x1) = (x1, x0 - q * x1) (y0, y1) = (y1, y0 - q * y1) return (b, x0, y0)
Given two integers (b, n), returns (gcd(b, n), a, m) such that a*b + n*m = gcd(b, n). Adapted from several sources: https://brilliant.org/wiki/extended-euclidean-algorithm/ https://rosettacode.org/wiki/Modular_inverse https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm https://en.wikipedia.org/wiki/Euclidean_algorithm >>> egcd(1, 1) (1, 0, 1) >>> egcd(12, 8) (4, 1, -1) >>> egcd(23894798501898, 23948178468116) (2, 2437250447493, -2431817869532) >>> egcd(pow(2, 50), pow(3, 50)) (1, -260414429242905345185687, 408415383037561)
entailment
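A common use of the identity a*b + n*m = gcd(b, n) returned above is computing modular inverses; modinv() below is a hypothetical helper built on egcd(), not part of the source:

```python
def modinv(b, n):
    # If gcd(b, n) == 1, the coefficient a satisfies a*b == 1 (mod n).
    g, a, _ = egcd(b, n)
    if g != 1:
        raise ValueError("%d has no inverse modulo %d" % (b, n))
    return a % n

assert modinv(3, 11) == 4   # 3 * 4 == 12 == 1 (mod 11)
assert modinv(7, 26) == 15  # 7 * 15 == 105 == 1 (mod 26)
```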
def register(linter): """Add the needed transformations and suppressions. """ linter.register_checker(MongoEngineChecker(linter)) add_transform('mongoengine') add_transform('mongomotor') suppress_qs_decorator_messages(linter) suppress_fields_attrs_messages(linter)
Add the needed transformations and suppressions.
entailment
def output_as_csv(file_name, nmi=None, output_file=None): """ Transpose all channels and output a csv that is easier to read and do charting on :param file_name: The NEM file to process :param nmi: Which NMI to output if more than one :param output_file: Specify different output location :returns: The file that was created """ m = read_nem_file(file_name) if nmi is None: nmi = list(m.readings.keys())[0] # Use first NMI channels = list(m.transactions[nmi].keys()) num_records = len(m.readings[nmi][channels[0]]) last_date = m.readings[nmi][channels[0]][-1].t_end if output_file is None: output_file = '{}_{}_transposed.csv'.format( nmi, last_date.strftime('%Y%m%d')) with open(output_file, 'w', newline='') as csvfile: cwriter = csv.writer( csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) heading_list = ['period_start', 'period_end'] for channel in channels: heading_list.append(channel) heading_list.append('quality_method') cwriter.writerow(heading_list) for i in range(0, num_records): t_start = m.readings[nmi][channels[0]][i].t_start t_end = m.readings[nmi][channels[0]][i].t_end quality_method = m.readings[nmi][channels[0]][i].quality_method row_list = [t_start, t_end] for ch in channels: val = m.readings[nmi][ch][i].read_value row_list.append(val) row_list.append(quality_method) cwriter.writerow(row_list) return output_file
Transpose all channels and output a csv that is easier to read and do charting on :param file_name: The NEM file to process :param nmi: Which NMI to output if more than one :param output_file: Specify different output location :returns: The file that was created
entailment
def get(cls, group_id, db_session=None): """ Fetch row using primary key - will use existing object in session if already present :param group_id: :param db_session: :return: """ db_session = get_db_session(db_session) return db_session.query(cls.model).get(group_id)
Fetch row using primary key - will use existing object in session if already present :param group_id: :param db_session: :return:
entailment
def by_group_name(cls, group_name, db_session=None): """ fetch group by name :param group_name: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model).filter(cls.model.group_name == group_name) return query.first()
fetch group by name :param group_name: :param db_session: :return:
entailment
def get_user_paginator( cls, instance, page=1, item_count=None, items_per_page=50, user_ids=None, GET_params=None, ): """ returns paginator over users belonging to the group :param instance: :param page: :param item_count: :param items_per_page: :param user_ids: :param GET_params: :return: """ if not GET_params: GET_params = {} GET_params.pop("page", None) query = instance.users_dynamic if user_ids: query = query.filter(cls.models_proxy.UserGroup.user_id.in_(user_ids)) return SqlalchemyOrmPage( query, page=page, item_count=item_count, items_per_page=items_per_page, **GET_params )
returns paginator over users belonging to the group :param instance: :param page: :param item_count: :param items_per_page: :param user_ids: :param GET_params: :return:
entailment
def resources_with_possible_perms( cls, instance, perm_names=None, resource_ids=None, resource_types=None, db_session=None, ): """ returns list of permissions and resources for this group, resource_ids restricts the search to specific resources :param instance: :param perm_names: :param resource_ids: :param resource_types: :param db_session: :return: """ db_session = get_db_session(db_session, instance) query = db_session.query( cls.models_proxy.GroupResourcePermission.perm_name, cls.models_proxy.Group, cls.models_proxy.Resource, ) query = query.filter( cls.models_proxy.Resource.resource_id == cls.models_proxy.GroupResourcePermission.resource_id ) query = query.filter( cls.models_proxy.Group.id == cls.models_proxy.GroupResourcePermission.group_id ) if resource_ids: query = query.filter( cls.models_proxy.GroupResourcePermission.resource_id.in_(resource_ids) ) if resource_types: query = query.filter( cls.models_proxy.Resource.resource_type.in_(resource_types) ) if perm_names not in ([ANY_PERMISSION], ANY_PERMISSION) and perm_names: query = query.filter( cls.models_proxy.GroupResourcePermission.perm_name.in_(perm_names) ) query = query.filter( cls.models_proxy.GroupResourcePermission.group_id == instance.id ) perms = [ PermissionTuple( None, row.perm_name, "group", instance, row.Resource, False, True ) for row in query ] for resource in instance.resources: perms.append( PermissionTuple( None, ALL_PERMISSIONS, "group", instance, resource, True, True ) ) return perms
returns list of permissions and resources for this group, resource_ids restricts the search to specific resources :param instance: :param perm_names: :param resource_ids: :param resource_types: :param db_session: :return:
entailment
def create_pool( database, minsize=1, maxsize=10, echo=False, loop=None, **kwargs ): """ Create a pool that supports the context manager protocol """ coro = _create_pool( database=database, minsize=minsize, maxsize=maxsize, echo=echo, loop=loop, **kwargs ) return _PoolContextManager(coro)
Create a pool that supports the context manager protocol
entailment
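Assuming _PoolContextManager behaves like the aiomysql-style pool managers this module mirrors, usage would look roughly like the sketch below; the database path and the lifecycle comment are assumptions, not confirmed by the source:

```python
import asyncio

async def main():
    # create_pool() wraps the pool coroutine in a context manager; on exit
    # the pool is presumably closed and waited on (see wait_closed below).
    async with create_pool(database='app.db', minsize=1, maxsize=5) as pool:
        pass  # acquire connections from the pool here

asyncio.get_event_loop().run_until_complete(main())
```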
def wait_closed(self): """ Wait for closing all pool's connections. """ if self._closed: return if not self._closing: raise RuntimeError( ".wait_closed() should be called " "after .close()" ) while self._free: conn = self._free.popleft() if not conn.closed: yield from conn.close() else: # pragma: no cover pass with (yield from self._cond): while self.size > self.freesize: yield from self._cond.wait() self._used.clear() self._closed = True
Wait for closing all pool's connections.
entailment
def sync_close(self): """ Close the pool synchronously """ if self._closed: return while self._free: conn = self._free.popleft() if not conn.closed: # pragma: no cover conn.sync_close() for conn in self._used: if not conn.closed: # pragma: no cover conn.sync_close() self._terminated.add(conn) self._used.clear() self._closed = True
Close the pool synchronously
entailment
def _fill_free_pool(self, override_min): """ fill the free pool with new connections up to minsize (or maxsize when override_min is set) """ while self.size < self.minsize: self._acquiring += 1 try: conn = yield from connect( database=self._database, echo=self._echo, loop=self._loop, **self._conn_kwargs ) self._free.append(conn) self._cond.notify() finally: self._acquiring -= 1 if self._free: return if override_min and self.size < self.maxsize: self._acquiring += 1 try: conn = yield from connect( database=self._database, echo=self._echo, loop=self._loop, **self._conn_kwargs ) self._free.append(conn) self._cond.notify() finally: self._acquiring -= 1
fill the free pool with new connections up to minsize (or maxsize when override_min is set)
entailment
def add_function(self, function): """ Adds the function to the list of registered functions. """ function = self.build_function(function) if function.name in self.functions: raise FunctionAlreadyRegistered(function.name) self.functions[function.name] = function
Adds the function to the list of registered functions.
entailment
def get_one(self, context, name): """ Returns a function if it is registered, the context is ignored. """ try: return self.functions[name] except KeyError: raise FunctionNotFound(name)
Returns a function if it is registered, the context is ignored.
entailment
def subfield_get(self, obj, type=None): """ Verbatim copy from: https://github.com/django/django/blob/1.9.13/django/db/models/fields/subclassing.py#L38 """ if obj is None: return self return obj.__dict__[self.field.name]
Verbatim copy from: https://github.com/django/django/blob/1.9.13/django/db/models/fields/subclassing.py#L38
entailment
def _preprocess_kwargs(self, initial_kwargs): """ Replace generic key related attribute with filters by object_id and content_type fields """ kwargs = initial_kwargs.copy() generic_key_related_kwargs = self._get_generic_key_related_kwargs(initial_kwargs) for key, value in generic_key_related_kwargs.items(): # delete old kwarg that was related to generic key del kwargs[key] try: suffix = key.split('__')[1] except IndexError: suffix = None # add new kwargs that related to object_id and content_type fields new_kwargs = self._get_filter_object_id_and_content_type_filter_kwargs(value, suffix) kwargs.update(new_kwargs) return kwargs
Replace generic key related attribute with filters by object_id and content_type fields
entailment
def categorize( data, col_name: str = None, new_col_name: str = None, categories: dict = None, max_categories: float = 0.15 ): """ :param data: :param col_name: :param new_col_name: :param categories: :param max_categories: max proportion threshold of categories :return: new categories :rtype dict: """ _categories = {} if col_name is None: if categories is not None: raise Exception( 'col_name is None when categories was defined.' ) # create a list of cols with all object columns cols = [ k for k in data.keys() if data[k].dtype == 'object' and (data[k].nunique() / data[k].count()) <= max_categories ] else: # create a list with col_name if new_col_name is not None: data[new_col_name] = data[col_name] col_name = new_col_name cols = [col_name] for c in cols: if categories is not None: # assert all keys are numbers assert all(type(k) in (int, float) for k in categories.keys()) # replace values using given categories dict data[c].replace(categories, inplace=True) # change column to categorical type data[c] = data[c].astype('category') # update categories information _categories.update({c: categories}) else: # change column to categorical type data[c] = data[c].astype('category') # update categories information _categories.update({ c: dict(enumerate( data[c].cat.categories, )) }) return _categories
:param data: :param col_name: :param new_col_name: :param categories: :param max_categories: max proportion threshold of categories :return: new categories :rtype dict:
entailment
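A toy run of categorize() in its default branch (no explicit categories dict); the frame and column name are made up for illustration:

```python
import pandas as pd

df = pd.DataFrame({"color": ["red", "blue", "red", "red", "blue", "red", "red"]})
mapping = categorize(df, col_name="color")

print(df["color"].dtype)  # category
print(mapping)            # {'color': {0: 'blue', 1: 'red'}}
```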
def dropna(data: pd.DataFrame, axis: int, **params): """ Remove rows or columns with more NA values than the threshold level :param data: :param axis: Axes are defined for arrays with more than one dimension. A 2-dimensional array has two corresponding axes: the first running vertically downwards across rows (axis 0), and the second running horizontally across columns (axis 1). (https://docs.scipy.org/doc/numpy-1.10.0/glossary.html) :param params: :return: """ if axis == 0: dropna_rows(data=data, **params) else: dropna_columns(data=data, **params)
Remove rows or columns with more NA values than the threshold level :param data: :param axis: Axes are defined for arrays with more than one dimension. A 2-dimensional array has two corresponding axes: the first running vertically downwards across rows (axis 0), and the second running horizontally across columns (axis 1). (https://docs.scipy.org/doc/numpy-1.10.0/glossary.html) :param params: :return:
entailment
def dropna_columns(data: pd.DataFrame, max_na_values: float = 0.15): """ Remove columns with more NA values than threshold level :param data: :param max_na_values: proportion threshold of max na values :return: """ size = data.shape[0] df_na = (data.isnull().sum()/size) >= max_na_values data.drop(df_na[df_na].index, axis=1, inplace=True)
Remove columns with more NA values than threshold level :param data: :param max_na_values: proportion threshold of max na values :return:
entailment
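A small demonstration of the column-pruning rule above: with max_na_values=0.5, a column that is 75% NA is dropped while a 25% NA column survives (the frame is illustrative):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "mostly_na": [np.nan, np.nan, np.nan, 1.0],  # 3/4 NA -> dropped
    "ok": [1.0, 2.0, np.nan, 4.0],               # 1/4 NA -> kept
})
dropna_columns(df, max_na_values=0.5)
assert list(df.columns) == ["ok"]
```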
def dropna_rows(data: pd.DataFrame, columns_name: str = None): """ Remove rows containing NA values, optionally restricted to the given comma-separated column names :param data: :param columns_name: :return: """ params = {} if columns_name is not None: params.update({'subset': columns_name.split(',')}) data.dropna(inplace=True, **params)
Remove rows containing NA values, optionally restricted to the given comma-separated column names :param data: :param columns_name: :return:
entailment
def drop_columns_with_unique_values( data: pd.DataFrame, max_unique_values: float = 0.25 ): """ Remove columns when the proportion of the total of unique values is more than the max_unique_values threshold, only for columns of type object or category :param data: :param max_unique_values: :return: """ size = data.shape[0] df_uv = data.apply( lambda se: ( (se.dropna().unique().shape[0]/size) > max_unique_values and se.dtype in ['object', 'category'] ) ) data.drop(df_uv[df_uv].index, axis=1, inplace=True)
Remove columns when the proportion of the total of unique values is more than the max_unique_values threshold, only for columns of type object or category :param data: :param max_unique_values: :return:
entailment
def list(self, request, *args, **kwargs): """ To get an actual value for object quotas limit and usage issue a **GET** request against */api/<objects>/*. To get all quotas visible to the user issue a **GET** request against */api/quotas/* """ return super(QuotaViewSet, self).list(request, *args, **kwargs)
To get an actual value for object quotas limit and usage issue a **GET** request against */api/<objects>/*. To get all quotas visible to the user issue a **GET** request against */api/quotas/*
entailment
def retrieve(self, request, *args, **kwargs): """ To set quota limit issue a **PUT** request against */api/quotas/<quota uuid>** with limit values. Please note that if a quota is a cache of a backend quota (e.g. 'storage' size of an OpenStack tenant), it will be impossible to modify it through */api/quotas/<quota uuid>** endpoint. Example of changing quota limit: .. code-block:: http POST /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "limit": 2000.0 } Example of changing quota threshold: .. code-block:: http PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "threshold": 100.0 } """ return super(QuotaViewSet, self).retrieve(request, *args, **kwargs)
To set quota limit issue a **PUT** request against */api/quotas/<quota uuid>** with limit values. Please note that if a quota is a cache of a backend quota (e.g. 'storage' size of an OpenStack tenant), it will be impossible to modify it through */api/quotas/<quota uuid>** endpoint. Example of changing quota limit: .. code-block:: http POST /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "limit": 2000.0 } Example of changing quota threshold: .. code-block:: http PUT /api/quotas/6ad5f49d6d6c49648573b2b71f44a42b/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "threshold": 100.0 }
entailment
def history(self, request, uuid=None): """ Historical data endpoints could be available for any objects (currently implemented for quotas and events count). The data is available at *<object_endpoint>/history/*, for example: */api/quotas/<uuid>/history/*. There are two ways to define datetime points for historical data. 1. Send *?point=<timestamp>* parameter that can list. Response will contain historical data for each given point in the same order. 2. Send *?start=<timestamp>*, *?end=<timestamp>*, *?points_count=<integer>* parameters. Result will contain <points_count> points from <start> to <end>. Response format: .. code-block:: javascript [ { "point": <timestamp>, "object": {<object_representation>} }, { "point": <timestamp> "object": {<object_representation>} }, ... ] NB! There will not be any "object" for corresponding point in response if there is no data about object for a given timestamp. """ mapped = { 'start': request.query_params.get('start'), 'end': request.query_params.get('end'), 'points_count': request.query_params.get('points_count'), 'point_list': request.query_params.getlist('point'), } history_serializer = HistorySerializer(data={k: v for k, v in mapped.items() if v}) history_serializer.is_valid(raise_exception=True) quota = self.get_object() serializer = self.get_serializer(quota) serialized_versions = [] for point_date in history_serializer.get_filter_data(): serialized = {'point': datetime_to_timestamp(point_date)} version = Version.objects.get_for_object(quota).filter(revision__date_created__lte=point_date) if version.exists(): # make copy of serialized data and update field that are stored in version version_object = version.first()._object_version.object serialized['object'] = serializer.data.copy() serialized['object'].update({ f: getattr(version_object, f) for f in quota.get_version_fields() }) serialized_versions.append(serialized) return response.Response(serialized_versions, status=status.HTTP_200_OK)
Historical data endpoints could be available for any objects (currently implemented for quotas and events count). The data is available at *<object_endpoint>/history/*, for example: */api/quotas/<uuid>/history/*. There are two ways to define datetime points for historical data. 1. Send *?point=<timestamp>* parameter that can list. Response will contain historical data for each given point in the same order. 2. Send *?start=<timestamp>*, *?end=<timestamp>*, *?points_count=<integer>* parameters. Result will contain <points_count> points from <start> to <end>. Response format: .. code-block:: javascript [ { "point": <timestamp>, "object": {<object_representation>} }, { "point": <timestamp> "object": {<object_representation>} }, ... ] NB! There will not be any "object" for corresponding point in response if there is no data about object for a given timestamp.
entailment
def _get_url(self, obj): """ Gets object url """ format_kwargs = { 'app_label': obj._meta.app_label, } try: format_kwargs['model_name'] = getattr(obj.__class__, 'get_url_name')() except AttributeError: format_kwargs['model_name'] = obj._meta.object_name.lower() return self._default_view_name % format_kwargs
Gets object url
entailment
def to_representation(self, obj): """ Serializes any object to its url representation """ kwargs = None for field in self.lookup_fields: if hasattr(obj, field): kwargs = {field: getattr(obj, field)} break if kwargs is None: raise AttributeError('Related object does not have any of lookup_fields') request = self._get_request() return request.build_absolute_uri(reverse(self._get_url(obj), kwargs=kwargs))
Serializes any object to its url representation
entailment
def to_internal_value(self, data): """ Restores model instance from its url """ if not data: return None request = self._get_request() user = request.user try: obj = core_utils.instance_from_url(data, user=user) model = obj.__class__ except ValueError: raise serializers.ValidationError(_('URL is invalid: %s.') % data) except (Resolver404, AttributeError, MultipleObjectsReturned, ObjectDoesNotExist): raise serializers.ValidationError(_("Can't restore object from url: %s") % data) if model not in self.related_models: raise serializers.ValidationError(_('%s object does not support such relationship.') % six.text_type(obj)) return obj
Restores model instance from its url
entailment
def validate(self, data): """ Check that the start is before the end. """ if 'start' in data and 'end' in data and data['start'] >= data['end']: raise serializers.ValidationError(_('End must occur after start.')) return data
Check that the start is before the end.
entailment
def send_task(app_label, task_name): """ A helper function to deal with waldur_core "high-level" tasks. Define high-level task with explicit name using a pattern: waldur_core.<app_label>.<task_name> .. code-block:: python @shared_task(name='waldur_core.openstack.provision_instance') def provision_instance_fn(instance_uuid, backend_flavor_id) pass Call it by name: .. code-block:: python send_task('openstack', 'provision_instance')(instance_uuid, backend_flavor_id) Which is identical to: .. code-block:: python provision_instance_fn.delay(instance_uuid, backend_flavor_id) """ def delay(*args, **kwargs): full_task_name = 'waldur_core.%s.%s' % (app_label, task_name) send_celery_task(full_task_name, args, kwargs, countdown=2) return delay
A helper function to deal with waldur_core "high-level" tasks. Define high-level task with explicit name using a pattern: waldur_core.<app_label>.<task_name> .. code-block:: python @shared_task(name='waldur_core.openstack.provision_instance') def provision_instance_fn(instance_uuid, backend_flavor_id) pass Call it by name: .. code-block:: python send_task('openstack', 'provision_instance')(instance_uuid, backend_flavor_id) Which is identical to: .. code-block:: python provision_instance_fn.delay(instance_uuid, backend_flavor_id)
entailment
def log_celery_task(request): """ Add description to celery log output """ task = request.task description = None if isinstance(task, Task): try: description = task.get_description(*request.args, **request.kwargs) except NotImplementedError: pass except Exception as e: # Logging should never break workflow. logger.exception('Cannot get description for task %s. Error: %s' % (task.__class__.__name__, e)) return '{0.name}[{0.id}]{1}{2}{3}'.format( request, ' {0}'.format(description) if description else '', ' eta:[{0}]'.format(request.eta) if request.eta else '', ' expires:[{0}]'.format(request.expires) if request.expires else '', )
Add description to celery log output
entailment
def run(self, serialized_instance, *args, **kwargs): """ Deserialize input data and start backend operation execution """ try: instance = utils.deserialize_instance(serialized_instance) except ObjectDoesNotExist: message = ('Cannot restore instance from serialized object %s. Probably it was deleted.' % serialized_instance) six.reraise(ObjectDoesNotExist, message) self.args = args self.kwargs = kwargs self.pre_execute(instance) result = self.execute(instance, *self.args, **self.kwargs) self.post_execute(instance) if result and isinstance(result, django_models.Model): result = utils.serialize_instance(result) return result
Deserialize input data and start backend operation execution
entailment
def is_previous_task_processing(self, *args, **kwargs): """ Return True if there exists an uncompleted task that is equal to the current one """ app = self._get_app() inspect = app.control.inspect() active = inspect.active() or {} scheduled = inspect.scheduled() or {} reserved = inspect.reserved() or {} uncompleted = sum(list(active.values()) + list(scheduled.values()) + list(reserved.values()), []) return any(self.is_equal(task, *args, **kwargs) for task in uncompleted)
Return True if there exists an uncompleted task that is equal to the current one
entailment
def apply_async(self, args=None, kwargs=None, **options): """ Do not run background task if previous task is uncompleted """ if self.is_previous_task_processing(*args, **kwargs): message = 'Background task %s was not scheduled, because its predecessor is not completed yet.' % self.name logger.info(message) # It is expected by Celery that apply_async return AsyncResult, otherwise celerybeat dies return self.AsyncResult(options.get('task_id') or str(uuid4())) return super(BackgroundTask, self).apply_async(args=args, kwargs=kwargs, **options)
Do not run the background task if the previous task is uncompleted
entailment
def _get_cache_key(self, args, kwargs):
    """ Returns the key to be used in the cache """
    hash_input = json.dumps({'name': self.name, 'args': args, 'kwargs': kwargs}, sort_keys=True)
    # md5 is used for internal caching only, no need to care about security
    return hashlib.md5(hash_input.encode('utf-8')).hexdigest()
Returns the key to be used in the cache
entailment
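A runnable demo of the cache-key scheme: identical call signatures hash to the same key regardless of kwargs ordering, thanks to sort_keys=True (note the .encode('utf-8'), which hashlib.md5 requires on Python 3):

import hashlib
import json

def cache_key(name, args, kwargs):
    hash_input = json.dumps({'name': name, 'args': args, 'kwargs': kwargs}, sort_keys=True)
    return hashlib.md5(hash_input.encode('utf-8')).hexdigest()

assert cache_key('sync', [1], {'a': 1, 'b': 2}) == cache_key('sync', [1], {'b': 2, 'a': 1})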
def apply_async(self, args=None, kwargs=None, **options):
    """ Checks whether the task must be skipped and decreases the counter in that case. """
    key = self._get_cache_key(args, kwargs)
    counter, penalty = cache.get(key, (0, 0))
    if not counter:
        return super(PenalizedBackgroundTask, self).apply_async(args=args, kwargs=kwargs, **options)

    cache.set(key, (counter - 1, penalty), self.CACHE_LIFETIME)
    logger.info('The task %s will not be executed due to the penalty.' % self.name)
    return self.AsyncResult(options.get('task_id') or str(uuid4()))
Checks whether the task must be skipped and decreases the counter in that case.
entailment
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """ Increases the penalty for the task and resets the counter. """
    key = self._get_cache_key(args, kwargs)
    _, penalty = cache.get(key, (0, 0))
    if penalty < self.MAX_PENALTY:
        penalty += 1
    logger.debug('The task %s is penalized and will be skipped for the next %d run(s).' % (self.name, penalty))
    cache.set(key, (penalty, penalty), self.CACHE_LIFETIME)
    return super(PenalizedBackgroundTask, self).on_failure(exc, task_id, args, kwargs, einfo)
Increases the penalty for the task and resets the counter.
entailment
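A pure-Python simulation of the (counter, penalty) back-off state machine implemented by the two methods above; a plain dict stands in for Django's cache and lifetimes are ignored:

cache = {}
MAX_PENALTY = 3

def try_run(key):
    counter, penalty = cache.get(key, (0, 0))
    if not counter:
        return True                      # task is allowed to run
    cache[key] = (counter - 1, penalty)  # skip this run, burn one credit
    return False

def on_failure(key):
    _, penalty = cache.get(key, (0, 0))
    penalty = min(penalty + 1, MAX_PENALTY)
    cache[key] = (penalty, penalty)      # skip the next `penalty` runs

on_failure('task')                       # penalty becomes 1
assert try_run('task') is False          # first retry is skipped
assert try_run('task') is True           # second attempt is allowed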
def on_success(self, retval, task_id, args, kwargs):
    """ Clears cache for the task. """
    key = self._get_cache_key(args, kwargs)
    if cache.get(key) is not None:
        cache.delete(key)
        logger.debug('Penalty for the task %s has been removed.' % self.name)
    return super(PenalizedBackgroundTask, self).on_success(retval, task_id, args, kwargs)
Clears cache for the task.
entailment
def log_backend_action(action=None):
    """ Logging for backend method. Expects a Django model instance as the first argument. """

    def decorator(func):
        @functools.wraps(func)
        def wrapped(self, instance, *args, **kwargs):
            # func.__name__ works on both Python 2 and 3 (func.func_name is Python 2 only).
            action_name = func.__name__.replace('_', ' ') if action is None else action

            logger.debug('About to %s `%s` (PK: %s).', action_name, instance, instance.pk)
            result = func(self, instance, *args, **kwargs)
            logger.debug('Action `%s` was executed successfully for `%s` (PK: %s).',
                         action_name, instance, instance.pk)
            return result
        return wrapped
    return decorator
Logging for backend method. Expects a Django model instance as the first argument.
entailment
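A hedged usage sketch, assuming log_backend_action from above is in scope in the same module together with a module-level logger; DummyBackend and DummyInstance are hypothetical stand-ins for a backend class and a Django model:

import functools
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

class DummyInstance(object):
    pk = 1

    def __str__(self):
        return 'dummy'

class DummyBackend(object):
    @log_backend_action()
    def create_volume(self, instance):
        return 'created'

assert DummyBackend().create_volume(DummyInstance()) == 'created'
# Logs: "About to create volume `dummy` (PK: 1)." followed by the success line.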
def get_services(cls, request=None):
    """ Get a mapping of service names to endpoints.

        {
            "Oracle": "/api/oracle/",
            "OpenStack": "/api/openstack/",
            "GitLab": "/api/gitlab/",
            "DigitalOcean": "/api/digitalocean/"
        }
    """
    return {service['name']: reverse(service['list_view'], request=request)
            for service in cls._registry.values()}
Get a mapping of service names to endpoints.

{
    "Oracle": "/api/oracle/",
    "OpenStack": "/api/openstack/",
    "GitLab": "/api/gitlab/",
    "DigitalOcean": "/api/digitalocean/"
}
entailment
def get_resources(cls, request=None):
    """ Get a mapping of resource names to endpoints.

        {
            "DigitalOcean.Droplet": "/api/digitalocean-droplets/",
            "Oracle.Database": "/api/oracle-databases/",
            "GitLab.Group": "/api/gitlab-groups/",
            "GitLab.Project": "/api/gitlab-projects/"
        }
    """
    return {'.'.join([service['name'], resource['name']]): reverse(resource['list_view'], request=request)
            for service in cls._registry.values()
            for resource in service['resources'].values()}
Get a mapping of resource names to endpoints.

{
    "DigitalOcean.Droplet": "/api/digitalocean-droplets/",
    "Oracle.Database": "/api/oracle-databases/",
    "GitLab.Group": "/api/gitlab-groups/",
    "GitLab.Project": "/api/gitlab-projects/"
}
entailment
def get_services_with_resources(cls, request=None):
    """ Get a mapping of service names to service and resource endpoints.

        {
            ...
            "GitLab": {
                "url": "/api/gitlab/",
                "service_project_link_url": "/api/gitlab-service-project-link/",
                "resources": {
                    "Project": "/api/gitlab-projects/",
                    "Group": "/api/gitlab-groups/"
                }
            },
            ...
        }
    """
    from django.apps import apps

    data = {}
    for service in cls._registry.values():
        service_model = apps.get_model(service['model_name'])
        service_project_link = service_model.projects.through
        service_project_link_url = reverse(cls.get_list_view_for_model(service_project_link), request=request)

        data[service['name']] = {
            'url': reverse(service['list_view'], request=request),
            'service_project_link_url': service_project_link_url,
            'resources': {resource['name']: reverse(resource['list_view'], request=request)
                          for resource in service['resources'].values()},
            'properties': {resource['name']: reverse(resource['list_view'], request=request)
                           for resource in service.get('properties', {}).values()},
            'is_public_service': cls.is_public_service(service_model)
        }

    return data
Get a mapping of service names to service and resource endpoints.

{
    ...
    "GitLab": {
        "url": "/api/gitlab/",
        "service_project_link_url": "/api/gitlab-service-project-link/",
        "resources": {
            "Project": "/api/gitlab-projects/",
            "Group": "/api/gitlab-groups/"
        }
    },
    ...
}
entailment
def get_service_models(cls):
    """ Get a mapping of service types to their models.

        {
            ...
            'gitlab': {
                "service": nodeconductor_gitlab.models.GitLabService,
                "service_project_link": nodeconductor_gitlab.models.GitLabServiceProjectLink,
                "resources": [
                    nodeconductor_gitlab.models.Group,
                    nodeconductor_gitlab.models.Project
                ],
            },
            ...
        }
    """
    from django.apps import apps

    data = {}
    for key, service in cls._registry.items():
        service_model = apps.get_model(service['model_name'])
        service_project_link = service_model.projects.through
        data[key] = {
            'service': service_model,
            'service_project_link': service_project_link,
            'resources': [apps.get_model(r) for r in service['resources'].keys()],
            'properties': [apps.get_model(r) for r in service['properties'].keys() if '.' in r],
        }

    return data
Get a mapping of service types to their models.

{
    ...
    'gitlab': {
        "service": nodeconductor_gitlab.models.GitLabService,
        "service_project_link": nodeconductor_gitlab.models.GitLabServiceProjectLink,
        "resources": [
            nodeconductor_gitlab.models.Group,
            nodeconductor_gitlab.models.Project
        ],
    },
    ...
}
entailment
def get_resource_models(cls):
    """ Get a mapping of resource names to their models.

        {
            'DigitalOcean.Droplet': waldur_digitalocean.models.Droplet,
            'JIRA.Project': waldur_jira.models.Project,
            'OpenStack.Tenant': waldur_openstack.models.Tenant
        }
    """
    from django.apps import apps

    return {'.'.join([service['name'], attrs['name']]): apps.get_model(resource)
            for service in cls._registry.values()
            for resource, attrs in service['resources'].items()}
Get a mapping of resource names to their models.

{
    'DigitalOcean.Droplet': waldur_digitalocean.models.Droplet,
    'JIRA.Project': waldur_jira.models.Project,
    'OpenStack.Tenant': waldur_openstack.models.Tenant
}
entailment
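A toy registry demonstrating the '<Service>.<Resource>' key scheme used by get_resource_models; plain strings stand in for the model classes returned by apps.get_model:

registry = {
    'openstack': {'name': 'OpenStack', 'resources': {'openstack.Tenant': {'name': 'Tenant'}}},
    'jira': {'name': 'JIRA', 'resources': {'waldur_jira.Project': {'name': 'Project'}}},
}

names = {'.'.join([service['name'], attrs['name']]): model_str
         for service in registry.values()
         for model_str, attrs in service['resources'].items()}
assert names == {'OpenStack.Tenant': 'openstack.Tenant', 'JIRA.Project': 'waldur_jira.Project'}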
def get_service_resources(cls, model):
    """ Get resource models by service model """
    key = cls.get_model_key(model)
    return cls.get_service_name_resources(key)
Get resource models by service model
entailment
def get_service_name_resources(cls, service_name):
    """ Get resource models by service name """
    from django.apps import apps

    resources = cls._registry[service_name]['resources'].keys()
    return [apps.get_model(resource) for resource in resources]
Get resource models by service name
entailment
def get_name_for_model(cls, model):
    """ Get a name for a given class or model:
        -- it's the service type for a service
        -- it's a <service_type>.<resource_model_name> for a resource
    """
    key = cls.get_model_key(model)
    model_str = cls._get_model_str(model)
    service = cls._registry[key]
    if model_str in service['resources']:
        return '{}.{}'.format(service['name'], service['resources'][model_str]['name'])
    else:
        return service['name']
Get a name for a given class or model:
-- it's the service type for a service
-- it's a <service_type>.<resource_model_name> for a resource
entailment
def get_related_models(cls, model):
    """ Get a dictionary with related structure models for a given class or model:

        >> SupportedServices.get_related_models(gitlab_models.Project)
        {
            'service': nodeconductor_gitlab.models.GitLabService,
            'service_project_link': nodeconductor_gitlab.models.GitLabServiceProjectLink,
            'resources': [
                nodeconductor_gitlab.models.Group,
                nodeconductor_gitlab.models.Project,
            ]
        }
    """
    from waldur_core.structure.models import ServiceSettings

    if isinstance(model, ServiceSettings):
        model_str = cls._registry.get(model.type, {}).get('model_name', '')
    else:
        model_str = cls._get_model_str(model)

    for models in cls.get_service_models().values():
        if model_str == cls._get_model_str(models['service']) or \
           model_str == cls._get_model_str(models['service_project_link']):
            return models

        for resource_model in models['resources']:
            if model_str == cls._get_model_str(resource_model):
                return models
Get a dictionary with related structure models for a given class or model:

>> SupportedServices.get_related_models(gitlab_models.Project)
{
    'service': nodeconductor_gitlab.models.GitLabService,
    'service_project_link': nodeconductor_gitlab.models.GitLabServiceProjectLink,
    'resources': [
        nodeconductor_gitlab.models.Group,
        nodeconductor_gitlab.models.Project,
    ]
}
entailment
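A toy walk-through of the lookup in get_related_models: match a model string against each service bundle and return the whole bundle; the strings stand in for model classes:

bundles = [
    {'service': 'gitlab.GitLabService',
     'service_project_link': 'gitlab.GitLabServiceProjectLink',
     'resources': ['gitlab.Group', 'gitlab.Project']},
]

def related_models(model_str):
    for models in bundles:
        if model_str in (models['service'], models['service_project_link']):
            return models
        if model_str in models['resources']:
            return models

assert related_models('gitlab.Project') is bundles[0]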
def _is_active_model(cls, model):
    """ Check whether the model's app name is in the list of INSTALLED_APPS """
    # We need such a tricky check because of inconsistent app names:
    # some apps are included in the format "<module_name>.<app_name>" like "waldur_core.openstack",
    # others in the format "<app_name>" like "nodeconductor_sugarcrm"
    return ('.'.join(model.__module__.split('.')[:2]) in settings.INSTALLED_APPS or
            '.'.join(model.__module__.split('.')[:1]) in settings.INSTALLED_APPS)
Check whether the model's app name is in the list of INSTALLED_APPS
entailment
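A runnable demo of the module-prefix check against INSTALLED_APPS:

INSTALLED_APPS = ['waldur_core.openstack', 'nodeconductor_sugarcrm']

def is_active(module_name):
    return ('.'.join(module_name.split('.')[:2]) in INSTALLED_APPS or
            '.'.join(module_name.split('.')[:1]) in INSTALLED_APPS)

assert is_active('waldur_core.openstack.models')     # matches the two-part prefix
assert is_active('nodeconductor_sugarcrm.models')    # matches the one-part prefix
assert not is_active('unrelated_app.models')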
def process(self, event):
    """ Send events as push notification via Google Cloud Messaging.
        Expected settings as follows:

            # https://developers.google.com/mobile/add
            WALDUR_CORE['GOOGLE_API'] = {
                'NOTIFICATION_TITLE': "Waldur notification",
                'Android': {
                    'server_key': 'AIzaSyA2_7UaVIxXfKeFvxTjQNZbrzkXG9OTCkg',
                },
                'iOS': {
                    'server_key': 'AIzaSyA34zlG_y5uHOe2FmcJKwfk2vG-3RW05vk',
                }
            }
    """
    conf = settings.WALDUR_CORE.get('GOOGLE_API') or {}
    keys = conf.get(dict(self.Type.CHOICES)[self.type])

    if not keys or not self.token:
        return

    endpoint = 'https://gcm-http.googleapis.com/gcm/send'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'key=%s' % keys['server_key'],
    }
    payload = {
        'to': self.token,
        'notification': {
            'body': event.get('message', 'New event'),
            'title': conf.get('NOTIFICATION_TITLE', 'Waldur notification'),
            'image': 'icon',
        },
        'data': {
            'event': event
        },
    }
    if self.type == self.Type.IOS:
        payload['content-available'] = '1'

    logger.debug('Submitting GCM push notification with headers %s, payload: %s' % (headers, payload))
    requests.post(endpoint, json=payload, headers=headers)
Send events as push notification via Google Cloud Messaging.
Expected settings as follows:

    # https://developers.google.com/mobile/add
    WALDUR_CORE['GOOGLE_API'] = {
        'NOTIFICATION_TITLE': "Waldur notification",
        'Android': {
            'server_key': 'AIzaSyA2_7UaVIxXfKeFvxTjQNZbrzkXG9OTCkg',
        },
        'iOS': {
            'server_key': 'AIzaSyA34zlG_y5uHOe2FmcJKwfk2vG-3RW05vk',
        }
    }
entailment
def get_context_data_from_headers(request, headers_schema):
    """ Extracts context data from request headers according to specified schema.

    >>> from lxml import etree as et
    >>> from datetime import date
    >>> from pyws.functions.args import TypeFactory
    >>> Fake = type('Fake', (object, ), {})
    >>> request = Fake()
    >>> request.parsed_data = Fake()
    >>> request.parsed_data.xml = et.fromstring(
    ...     '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">'
    ...         '<s:Header>'
    ...             '<headers>'
    ...                 '<string>hello</string>'
    ...                 '<number>100</number>'
    ...                 '<date>2011-08-12</date>'
    ...             '</headers>'
    ...         '</s:Header>'
    ...     '</s:Envelope>')
    >>> data = get_context_data_from_headers(request, TypeFactory(
    ...     {0: 'Headers', 'string': str, 'number': int, 'date': date}))
    >>> data == {'string': 'hello', 'number': 100, 'date': date(2011, 8, 12)}
    True
    """
    if not headers_schema:
        return None
    env = request.parsed_data.xml.xpath(
        '/soap:Envelope', namespaces=SoapProtocol.namespaces)[0]
    header = env.xpath(
        './soap:Header/*', namespaces=SoapProtocol.namespaces)
    if len(header) < 1:
        return None
    return headers_schema.validate(xml2obj(header[0], headers_schema))
Extracts context data from request headers according to specified schema.

>>> from lxml import etree as et
>>> from datetime import date
>>> from pyws.functions.args import TypeFactory
>>> Fake = type('Fake', (object, ), {})
>>> request = Fake()
>>> request.parsed_data = Fake()
>>> request.parsed_data.xml = et.fromstring(
...     '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">'
...         '<s:Header>'
...             '<headers>'
...                 '<string>hello</string>'
...                 '<number>100</number>'
...                 '<date>2011-08-12</date>'
...             '</headers>'
...         '</s:Header>'
...     '</s:Envelope>')
>>> data = get_context_data_from_headers(request, TypeFactory(
...     {0: 'Headers', 'string': str, 'number': int, 'date': date}))
>>> data == {'string': 'hello', 'number': 100, 'date': date(2011, 8, 12)}
True
entailment
def lazy_constant(fn):
    """Decorator to make a function that takes no arguments use the LazyConstant class."""

    class NewLazyConstant(LazyConstant):
        @functools.wraps(fn)
        def __call__(self):
            return self.get_value()

    return NewLazyConstant(fn)
Decorator to make a function that takes no arguments use the LazyConstant class.
entailment
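The LazyConstant class is not shown in the source, so this minimal sketch assumes compute-once caching semantics, just enough to exercise the decorator above:

import functools

class LazyConstant(object):
    _UNSET = object()

    def __init__(self, fn):
        self.fn = fn
        self.value = self._UNSET

    def get_value(self):
        if self.value is self._UNSET:
            self.value = self.fn()   # computed once, on first access
        return self.value

calls = []

@lazy_constant
def answer():
    calls.append(1)
    return 42

assert answer() == 42 and answer() == 42
assert len(calls) == 1               # the wrapped function ran only once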