Dataset columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (stringclasses, 1 value: entailment)
def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10):
    """
    Estimate the length of this curve by iterating through it and
    summing the lengths of its flat segments.
    """
    points = []
    length = 0
    step = 1.0 / precision
    factors = range(0, precision + 1)
    for i in factors:
        points.append(_getCubicPoint(i * step, pt0, pt1, pt2, pt3))
    for i in range(len(points) - 1):
        pta = points[i]
        ptb = points[i + 1]
        length += _distance(pta, ptb)
    return length
Estimate the length of this curve by iterating through it and summing the lengths of its flat segments.
entailment
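For context, a minimal self-contained sketch of the flattening approach above; the _getCubicPoint and _distance helpers the snippet calls are assumed to behave roughly like this (Bernstein-form evaluation and Euclidean distance):

    import math

    def _getCubicPoint(t, pt0, pt1, pt2, pt3):
        # Evaluate the cubic Bezier at t in Bernstein form.
        s = 1 - t
        return (
            s ** 3 * pt0[0] + 3 * s ** 2 * t * pt1[0] + 3 * s * t ** 2 * pt2[0] + t ** 3 * pt3[0],
            s ** 3 * pt0[1] + 3 * s ** 2 * t * pt1[1] + 3 * s * t ** 2 * pt2[1] + t ** 3 * pt3[1],
        )

    def _distance(pta, ptb):
        # Euclidean distance between two points.
        return math.hypot(ptb[0] - pta[0], ptb[1] - pta[1])

With precision=10 the curve is sampled at t = 0.0, 0.1, ..., 1.0 and the ten chord lengths are summed, which gives a lower bound on the true arc length.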
def _mid(pt1, pt2):
    """
    (Point, Point) -> Point

    Return the point that lies in between the two input points.
    """
    (x0, y0), (x1, y1) = pt1, pt2
    return 0.5 * (x0 + x1), 0.5 * (y0 + y1)
(Point, Point) -> Point Return the point that lies in between the two input points.
entailment
def split(self, tValues):
    """
    Split the segment according to the t values.
    """
    if self.segmentType == "curve":
        on1 = self.previousOnCurve
        off1 = self.points[0].coordinates
        off2 = self.points[1].coordinates
        on2 = self.points[2].coordinates
        return bezierTools.splitCubicAtT(on1, off1, off2, on2, *tValues)
    elif self.segmentType == "line":
        segments = []
        x1, y1 = self.previousOnCurve
        x2, y2 = self.points[0].coordinates
        dx = x2 - x1
        dy = y2 - y1
        pp = x1, y1
        for t in tValues:
            np = (x1 + dx * t, y1 + dy * t)
            segments.append([pp, np])
            pp = np
        segments.append([pp, (x2, y2)])
        return segments
    elif self.segmentType == "qcurve":
        raise NotImplementedError
    else:
        raise NotImplementedError
Split the segment according to the t values.
entailment
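The line branch above is plain linear interpolation; a standalone sketch of the same arithmetic using bare tuples (the segment object is replaced here purely for illustration):

    def split_line(p1, p2, tValues):
        # Cut the line p1 -> p2 at each t in tValues.
        (x1, y1), (x2, y2) = p1, p2
        dx, dy = x2 - x1, y2 - y1
        segments, pp = [], (x1, y1)
        for t in tValues:
            np_ = (x1 + dx * t, y1 + dy * t)
            segments.append([pp, np_])
            pp = np_
        segments.append([pp, (x2, y2)])
        return segments

    print(split_line((0, 0), (10, 0), [0.25, 0.5]))
    # [[(0, 0), (2.5, 0.0)], [(2.5, 0.0), (5.0, 0.0)], [(5.0, 0.0), (10, 0)]]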
def tValueForPoint(self, point):
    """
    Get the t value for a given point.

    Required: the point must lie on the curve. In an overlap case the
    point will be an intersection point, which is always a point on
    the curve.
    """
    if self.segmentType == "curve":
        on1 = self.previousOnCurve
        off1 = self.points[0].coordinates
        off2 = self.points[1].coordinates
        on2 = self.points[2].coordinates
        return _tValueForPointOnCubicCurve(point, (on1, off1, off2, on2))
    elif self.segmentType == "line":
        return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))
    elif self.segmentType == "qcurve":
        raise NotImplementedError
    else:
        raise NotImplementedError
Get the t value for a given point. Required: the point must lie on the curve. In an overlap case the point will be an intersection point, which is always a point on the curve.
entailment
def getData(self):
    """
    Return a list of normalized InputPoint objects
    for the contour drawn with this pen.
    """
    # organize the points into segments
    # 1. make sure there is an on curve
    haveOnCurve = False
    for point in self._points:
        if point.segmentType is not None:
            haveOnCurve = True
            break
    # 2. move the off curves to front of the list
    if haveOnCurve:
        _prepPointsForSegments(self._points)
    # 3. ignore double points on start and end
    firstPoint = self._points[0]
    lastPoint = self._points[-1]
    if firstPoint.segmentType is not None and lastPoint.segmentType is not None:
        if firstPoint.coordinates == lastPoint.coordinates:
            if firstPoint.segmentType in ["line", "move"]:
                del self._points[0]
            else:
                raise AssertionError("Unhandled point type sequence")
    # done
    return self._points
Return a list of normalized InputPoint objects for the contour drawn with this pen.
entailment
def reCurveFromEntireInputContour(self, inputContour):
    """
    Match if entire input contour matches entire output contour,
    allowing for different start point.
    """
    if self.clockwise:
        inputFlat = inputContour.clockwiseFlat
    else:
        inputFlat = inputContour.counterClockwiseFlat
    outputFlat = []
    for segment in self.segments:
        # XXX this could be expensive
        assert segment.segmentType == "flat"
        outputFlat += segment.points
    # test lengths
    haveMatch = False
    if len(inputFlat) == len(outputFlat):
        if inputFlat == outputFlat:
            haveMatch = True
        else:
            inputStart = inputFlat[0]
            if inputStart in outputFlat:
                # there should be only one occurrence of the point
                # but handle it just in case
                if outputFlat.count(inputStart) > 1:
                    startIndexes = [index for index, point in enumerate(outputFlat) if point == inputStart]
                else:
                    startIndexes = [outputFlat.index(inputStart)]
                # slice and dice to test possible orders
                for startIndex in startIndexes:
                    test = outputFlat[startIndex:] + outputFlat[:startIndex]
                    if inputFlat == test:
                        haveMatch = True
                        break
    if haveMatch:
        # clear out the flat points
        self.segments = []
        # replace with the appropriate points from the input
        if self.clockwise:
            inputSegments = inputContour.clockwiseSegments
        else:
            inputSegments = inputContour.counterClockwiseSegments
        for inputSegment in inputSegments:
            self.segments.append(
                OutputSegment(
                    segmentType=inputSegment.segmentType,
                    points=[
                        OutputPoint(
                            coordinates=point.coordinates,
                            segmentType=point.segmentType,
                            smooth=point.smooth,
                            name=point.name,
                            kwargs=point.kwargs
                        )
                        for point in inputSegment.points
                    ],
                    final=True
                )
            )
            inputSegment.used = True
        # reset the direction of the final contour
        self.clockwise = inputContour.clockwise
        return True
    return False
Match if entire input contour matches entire output contour, allowing for different start point.
entailment
def _is_custom_qs_manager(funcdef):
    """Checks if a function definition is a queryset manager created
    with the @queryset_manager decorator."""
    decors = getattr(funcdef, 'decorators', None)
    if decors:
        for dec in decors.get_children():
            try:
                if dec.name == 'queryset_manager':  # pragma no branch
                    return True
            except AttributeError:
                continue
    return False
Checks if a function definition is a queryset manager created with the @queryset_manager decorator.
entailment
def _is_call2custom_manager(node):
    """Checks if the call is being done to a custom queryset manager."""
    called = safe_infer(node.func)
    funcdef = getattr(called, '_proxied', None)
    return _is_custom_qs_manager(funcdef)
Checks if the call is being done to a custom queryset manager.
entailment
def _is_custom_manager_attribute(node):
    """Checks if the attribute is a valid attribute for a queryset manager."""
    attrname = node.attrname
    if not name_is_from_qs(attrname):
        return False
    for attr in node.get_children():
        inferred = safe_infer(attr)
        funcdef = getattr(inferred, '_proxied', None)
        if _is_custom_qs_manager(funcdef):
            return True
    return False
Checks if the attribute is a valid attribute for a queryset manager.
entailment
def by_group_and_perm(cls, group_id, perm_name, db_session=None):
    """
    Fetch the row matching the given group id and permission name.

    :param group_id:
    :param perm_name:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    query = db_session.query(cls.model).filter(cls.model.group_id == group_id)
    query = query.filter(cls.model.perm_name == perm_name)
    return query.first()
Fetch the row matching the given group id and permission name. :param group_id: :param perm_name: :param db_session: :return:
entailment
def serve_forever(django=False):
    """
    Starts the gevent-socketio server.
    """
    logger = getLogger("irc.dispatch")
    logger.setLevel(settings.LOG_LEVEL)
    logger.addHandler(StreamHandler())
    app = IRCApplication(django)
    server = SocketIOServer((settings.HTTP_HOST, settings.HTTP_PORT), app)
    print "%s [Bot: %s] listening on %s:%s" % (
        settings.GNOTTY_VERSION_STRING,
        app.bot.__class__.__name__,
        settings.HTTP_HOST,
        settings.HTTP_PORT,
    )
    server.serve_forever()
Starts the gevent-socketio server.
entailment
def kill(pid_file):
    """
    Attempts to shut down a previously started daemon.
    """
    try:
        with open(pid_file) as f:
            os.kill(int(f.read()), 9)
        os.remove(pid_file)
    except (IOError, OSError):
        return False
    return True
Attempts to shut down a previously started daemon.
entailment
def run():
    """
    CLI entry point. Parses args and starts the gevent-socketio server.
    """
    settings.parse_args()
    pid_name = "gnotty-%s-%s.pid" % (settings.HTTP_HOST, settings.HTTP_PORT)
    pid_file = settings.PID_FILE or os.path.join(gettempdir(), pid_name)
    if settings.KILL:
        if kill(pid_file):
            print "Daemon killed"
        else:
            print "Could not kill any daemons"
        return
    elif kill(pid_file):
        print "Running daemon killed"
    if settings.DAEMON:
        daemonize(pid_file)
    serve_forever()
CLI entry point. Parses args and starts the gevent-socketio server.
entailment
def on_start(self, host, port, channel, nickname, password):
    """
    A WebSocket session has started - create a greenlet to host the
    IRC client, and start it.
    """
    self.client = WebSocketIRCClient(host, port, channel, nickname, password, self)
    self.spawn(self.client.start)
A WebSocket session has started - create a greenlet to host the IRC client, and start it.
entailment
def disconnect(self, *args, **kwargs):
    """
    WebSocket was disconnected - leave the IRC channel.
    """
    quit_message = "%s %s" % (settings.GNOTTY_VERSION_STRING, settings.GNOTTY_PROJECT_URL)
    self.client.connection.quit(quit_message)
    super(IRCNamespace, self).disconnect(*args, **kwargs)
WebSocket was disconnected - leave the IRC channel.
entailment
def bot_watcher(self):
    """
    Thread (greenlet) that will try and reconnect the bot if it's
    not connected.
    """
    default_interval = 5
    interval = default_interval
    while True:
        if not self.bot.connection.connected:
            if self.bot.reconnect():
                interval = default_interval
            else:
                interval *= 2
        sleep(interval)
Thread (greenlet) that will try and reconnect the bot if it's not connected.
entailment
def respond_webhook(self, environ):
    """
    Passes the request onto a bot with a webhook if the webhook
    path is requested.
    """
    request = FieldStorage(fp=environ["wsgi.input"], environ=environ)
    url = environ["PATH_INFO"]
    params = dict([(k, request[k].value) for k in request])
    try:
        if self.bot is None:
            raise NotImplementedError
        response = self.bot.handle_webhook_event(environ, url, params)
    except NotImplementedError:
        return 404
    except:
        self.logger.debug(format_exc())
        return 500
    return response or 200
Passes the request onto a bot with a webhook if the webhook path is requested.
entailment
def respond_static(self, environ):
    """
    Serves a static file when Django isn't being used.
    """
    path = os.path.normpath(environ["PATH_INFO"])
    if path == "/":
        content = self.index()
        content_type = "text/html"
    else:
        path = os.path.join(os.path.dirname(__file__), path.lstrip("/"))
        try:
            with open(path, "r") as f:
                content = f.read()
        except IOError:
            return 404
        content_type = guess_type(path)[0]
    return (200, [("Content-Type", content_type)], content)
Serves a static file when Django isn't being used.
entailment
def index(self):
    """
    Loads the chat interface template when Django isn't being used,
    manually dealing with the Django template bits.
    """
    root_dir = os.path.dirname(__file__)
    template_dir = os.path.join(root_dir, "templates", "gnotty")
    with open(os.path.join(template_dir, "base.html"), "r") as f:
        base = f.read()
    with open(os.path.join(template_dir, "chat.html"), "r") as f:
        base = base.replace("{% block content %}", f.read())
    replace = {
        "{% block content %}": "",
        "{% block extrahead %}": "",
        "{% endblock %}": "",
        "{% load gnotty_tags %}": "",
        "{% extends \"gnotty/base.html\" %}": "",
        "{% url gnotty_chat %}": "/",
        "{% gnotty_nav %}": "",
        "{% templatetag openvariable %}": "{{",
        "{% templatetag closevariable %}": "}}",
    }
    for k, v in replace.items():
        base = base.replace(k, v)
    for k, v in settings.items():
        base = base.replace("{{ %s }}" % k, unicode(v or ""))
    return base
Loads the chat interface template when Django isn't being used, manually dealing with the Django template bits.
entailment
def authorized(self, environ):
    """
    If we're running Django and ``GNOTTY_LOGIN_REQUIRED`` is set to
    ``True``, pull the session cookie from the environment and
    validate that the user is authenticated.
    """
    if self.django and settings.LOGIN_REQUIRED:
        try:
            from django.conf import settings as django_settings
            from django.contrib.auth import SESSION_KEY
            from django.contrib.auth.models import User
            from django.contrib.sessions.models import Session
            from django.core.exceptions import ObjectDoesNotExist
            cookie = SimpleCookie(environ["HTTP_COOKIE"])
            cookie_name = django_settings.SESSION_COOKIE_NAME
            session_key = cookie[cookie_name].value
            session = Session.objects.get(session_key=session_key)
            user_id = session.get_decoded().get(SESSION_KEY)
            user = User.objects.get(id=user_id)
        except (ImportError, KeyError, ObjectDoesNotExist):
            return False
    return True
If we're running Django and ``GNOTTY_LOGIN_REQUIRED`` is set to ``True``, pull the session cookie from the environment and validate that the user is authenticated.
entailment
def forwards(self, orm):
    "Write your forwards methods here."
    # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
    if not db.dry_run:
        orm['gnotty.IRCMessage'].objects.filter(message="joins").update(join_or_leave=True)
        orm['gnotty.IRCMessage'].objects.filter(message="leaves").update(join_or_leave=True)
Write your forwards methods here.
entailment
def resource_permissions_for_users(
    models_proxy,
    perm_names,
    resource_ids=None,
    user_ids=None,
    group_ids=None,
    resource_types=None,
    limit_group_permissions=False,
    skip_user_perms=False,
    skip_group_perms=False,
    db_session=None,
):
    """
    Returns permission tuples that match one of passed permission names

    perm_names - list of permissions that can be matched
    user_ids - restrict to specific users
    group_ids - restrict to specific groups
    resource_ids - restrict to specific resources
    limit_group_permissions - should be used if we do not want to have
        user objects returned for group permissions, this might cause
        performance issues for big groups
    """
    db_session = get_db_session(db_session)
    # fetch groups and their permissions (possibly with users belonging
    # to group if needed)
    query = db_session.query(
        models_proxy.GroupResourcePermission.perm_name,
        models_proxy.User,
        models_proxy.Group,
        sa.literal("group").label("type"),
        models_proxy.Resource,
    )
    query = query.join(
        models_proxy.Group,
        models_proxy.Group.id == models_proxy.GroupResourcePermission.group_id,
    )
    query = query.join(
        models_proxy.Resource,
        models_proxy.Resource.resource_id
        == models_proxy.GroupResourcePermission.resource_id,
    )
    if limit_group_permissions:
        query = query.outerjoin(models_proxy.User, models_proxy.User.id == None)  # noqa
    else:
        query = query.join(
            models_proxy.UserGroup,
            models_proxy.UserGroup.group_id
            == models_proxy.GroupResourcePermission.group_id,
        )
        query = query.outerjoin(
            models_proxy.User, models_proxy.User.id == models_proxy.UserGroup.user_id
        )
    if resource_ids:
        query = query.filter(
            models_proxy.GroupResourcePermission.resource_id.in_(resource_ids)
        )
    if resource_types:
        query = query.filter(models_proxy.Resource.resource_type.in_(resource_types))
    if perm_names not in ([ANY_PERMISSION], ANY_PERMISSION) and perm_names:
        query = query.filter(
            models_proxy.GroupResourcePermission.perm_name.in_(perm_names)
        )
    if group_ids:
        query = query.filter(
            models_proxy.GroupResourcePermission.group_id.in_(group_ids)
        )
    if user_ids and not limit_group_permissions:
        query = query.filter(models_proxy.UserGroup.user_id.in_(user_ids))
    # 2nd query that will fetch users with direct resource permissions
    query2 = db_session.query(
        models_proxy.UserResourcePermission.perm_name,
        models_proxy.User,
        models_proxy.Group,
        sa.literal("user").label("type"),
        models_proxy.Resource,
    )
    query2 = query2.join(
        models_proxy.User,
        models_proxy.User.id == models_proxy.UserResourcePermission.user_id,
    )
    query2 = query2.join(
        models_proxy.Resource,
        models_proxy.Resource.resource_id
        == models_proxy.UserResourcePermission.resource_id,
    )
    # group needs to be present to work for union, but never actually matched
    query2 = query2.outerjoin(models_proxy.Group, models_proxy.Group.id == None)  # noqa
    if perm_names not in ([ANY_PERMISSION], ANY_PERMISSION) and perm_names:
        query2 = query2.filter(
            models_proxy.UserResourcePermission.perm_name.in_(perm_names)
        )
    if resource_ids:
        query2 = query2.filter(
            models_proxy.UserResourcePermission.resource_id.in_(resource_ids)
        )
    if resource_types:
        query2 = query2.filter(models_proxy.Resource.resource_type.in_(resource_types))
    if user_ids:
        query2 = query2.filter(
            models_proxy.UserResourcePermission.user_id.in_(user_ids)
        )
    if not skip_group_perms and not skip_user_perms:
        query = query.union(query2)
    elif skip_group_perms:
        query = query2
    users = [
        PermissionTuple(
            row.User,
            row.perm_name,
            row.type,
            row.Group or None,
            row.Resource,
            False,
            True,
        )
        for row in query
    ]
    return users
Returns permission tuples that match one of passed permission names perm_names - list of permissions that can be matched user_ids - restrict to specific users group_ids - restrict to specific groups resource_ids - restrict to specific resources limit_group_permissions - should be used if we do not want to have user objects returned for group permissions, this might cause performance issues for big groups
entailment
def permission_to_04_acls(permissions):
    """
    Legacy ACL format kept for backwards compatibility.

    :param permissions:
    :return:
    """
    acls = []
    for perm in permissions:
        if perm.type == "user":
            acls.append((perm.user.id, perm.perm_name))
        elif perm.type == "group":
            acls.append(("group:%s" % perm.group.id, perm.perm_name))
    return acls
Legacy ACL format kept for backwards compatibility. :param permissions: :return:
entailment
def permission_to_pyramid_acls(permissions):
    """
    Returns a list of permissions in a format understood by pyramid.

    :param permissions:
    :return:
    """
    acls = []
    for perm in permissions:
        if perm.type == "user":
            acls.append((Allow, perm.user.id, perm.perm_name))
        elif perm.type == "group":
            acls.append((Allow, "group:%s" % perm.group.id, perm.perm_name))
    return acls
Returns a list of permissions in a format understood by pyramid :param permissions: :return:
entailment
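To illustrate the output shape, a hedged sketch with minimal stand-ins (the real PermissionTuple carries more fields, and Allow comes from pyramid.security):

    from collections import namedtuple

    Allow = "Allow"  # stand-in for pyramid.security.Allow
    User = namedtuple("User", "id")
    Group = namedtuple("Group", "id")
    Perm = namedtuple("Perm", "type user group perm_name")

    perms = [
        Perm("user", User(7), None, "edit"),
        Perm("group", None, Group(3), "view"),
    ]
    acls = []
    for perm in perms:
        if perm.type == "user":
            acls.append((Allow, perm.user.id, perm.perm_name))
        elif perm.type == "group":
            acls.append((Allow, "group:%s" % perm.group.id, perm.perm_name))
    print(acls)  # [('Allow', 7, 'edit'), ('Allow', 'group:3', 'view')]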
def ChunkBy(self, f=None):
    """
    [
        {
            'self': [1, 1, 3, 3, 1, 1],
            'f': lambda x: x % 2,
            'assert': lambda ret: ret == [[1, 1], [3, 3], [1, 1]]
        }
    ]
    """
    if f is None:
        return _chunk(self)
    if is_to_destruct(f):
        f = destruct_func(f)
    return _chunk(self, f)
[ { 'self': [1, 1, 3, 3, 1, 1], 'f': lambda x: x%2, 'assert': lambda ret: ret == [[1, 1], [3, 3], [1, 1]] } ]
entailment
def GroupBy(self: Iterable, f=None):
    """
    [
        {
            'self': [1, 2, 3],
            'f': lambda x: x % 2,
            'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3]
        }
    ]
    """
    if f and is_to_destruct(f):
        f = destruct_func(f)
    return _group_by(self, f)
[ { 'self': [1, 2, 3], 'f': lambda x: x%2, 'assert': lambda ret: ret[0] == [2] and ret[1] == [1, 3] } ]
entailment
def Take(self: Iterable, n):
    """
    [
        {
            'self': [1, 2, 3],
            'n': 2,
            'assert': lambda ret: list(ret) == [1, 2]
        }
    ]
    """
    for i, e in enumerate(self):
        if i == n:
            break
        yield e
[ { 'self': [1, 2, 3], 'n': 2, 'assert': lambda ret: list(ret) == [1, 2] } ]
entailment
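Take is lazy and stops pulling from the source once n items have been yielded, so it also terminates on infinite iterators; a quick standalone check of the generator as written:

    import itertools

    def Take(self, n):
        for i, e in enumerate(self):
            if i == n:
                break
            yield e

    print(list(Take([1, 2, 3], 2)))          # [1, 2]
    print(list(Take(itertools.count(), 3)))  # [0, 1, 2]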
def TakeIf(self: Iterable, f):
    """
    [
        {
            'self': [1, 2, 3],
            'f': lambda e: e % 2,
            'assert': lambda ret: list(ret) == [1, 3]
        }
    ]
    """
    if is_to_destruct(f):
        f = destruct_func(f)
    return (e for e in self if f(e))
[ { 'self': [1, 2, 3], 'f': lambda e: e%2, 'assert': lambda ret: list(ret) == [1, 3] } ]
entailment
def TakeWhile(self: Iterable, f):
    """
    [
        {
            'self': [1, 2, 3, 4, 5],
            'f': lambda x: x < 4,
            'assert': lambda ret: list(ret) == [1, 2, 3]
        }
    ]
    """
    if is_to_destruct(f):
        f = destruct_func(f)
    for e in self:
        if not f(e):
            break
        yield e
[ { 'self': [1, 2, 3, 4, 5], 'f': lambda x: x < 4, 'assert': lambda ret: list(ret) == [1, 2, 3] } ]
entailment
def Drop(self: Iterable, n):
    """
    [
        {
            'self': [1, 2, 3, 4, 5],
            'n': 3,
            'assert': lambda ret: list(ret) == [1, 2]
        }
    ]
    """
    con = tuple(self)
    n = len(con) - n
    if n <= 0:
        yield from con
    else:
        for i, e in enumerate(con):
            if i == n:
                break
            yield e
[ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [1, 2] } ]
entailment
def Skip(self: Iterable, n):
    """
    [
        {
            'self': [1, 2, 3, 4, 5],
            'n': 3,
            'assert': lambda ret: list(ret) == [4, 5]
        }
    ]
    """
    con = iter(self)
    # count from 1 so exactly n items are consumed before returning
    for i, _ in enumerate(con, 1):
        if i == n:
            break
    return con
[ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [4, 5] } ]
entailment
def Shift(self, n):
    """
    [
        {
            'self': [1, 2, 3, 4, 5],
            'n': 3,
            'assert': lambda ret: list(ret) == [4, 5, 1, 2, 3]
        }
    ]
    """
    it = iter(self)
    headn = tuple(Take(it, n))  # buffer the first n items
    yield from it               # emit the remainder first...
    yield from headn            # ...then the buffered head
[ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [4, 5, 1, 2, 3] } ]
entailment
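The iter() call in Shift matters: Take must consume the head of the same iterator that yield from later exhausts, otherwise a list argument would be replayed in full and the head duplicated. A standalone check of the behaviour the docstring asserts:

    def Take(self, n):
        for i, e in enumerate(self):
            if i == n:
                break
            yield e

    def Shift(self, n):
        it = iter(self)
        headn = tuple(Take(it, n))  # buffer the first n items
        yield from it               # emit the remainder first...
        yield from headn            # ...then the buffered head

    print(list(Shift([1, 2, 3, 4, 5], 3)))  # [4, 5, 1, 2, 3]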
def Concat(self: Iterable, *others):
    """
    [
        {
            'self': [1, 2, 3],
            ':args': [[4, 5, 6], [7, 8, 9]],
            'assert': lambda ret: list(ret) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
        }
    ]
    """
    return concat_generator(self, *[unbox_if_flow(other) for other in others])
[ { 'self': [1, 2, 3], ':args': [[4, 5, 6], [7, 8, 9]], 'assert': lambda ret: list(ret) == [1, 2, 3, 4, 5, 6, 7, 8, 9] } ]
entailment
def _called_thru_default_qs(self, node):
    """Checks if an attribute is being accessed through the default
    queryset manager, ie: MyClass.objects.filter(some='value')"""
    last_child = node.last_child()
    if not last_child:
        return False
    # the default qs manager is called 'objects', we check for it here
    attrname = getattr(last_child, 'attrname', None)
    if attrname != 'objects':
        return False
    base_cls = last_child.last_child()
    base_classes = DOCUMENT_BASES
    for cls in base_cls.inferred():
        if node_is_subclass(cls, *base_classes):
            return True
    return False
Checks if an attribute is being accessed through the default queryset manager, ie: MyClass.objects.filter(some='value')
entailment
def all(cls, klass, db_session=None):
    """
    Returns all objects of a specific type - will work correctly with
    sqlalchemy inheritance models. You should normally use the model's
    base_query() instead of this function; it's kept for
    backwards-compatibility purposes.

    :param klass:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    return db_session.query(klass)
Returns all objects of a specific type - will work correctly with sqlalchemy inheritance models. You should normally use the model's base_query() instead of this function; it's kept for backwards-compatibility purposes. :param klass: :param db_session: :return:
entailment
def base_query(cls, db_session=None):
    """
    returns base query for specific service

    :param db_session:
    :return: query
    """
    db_session = get_db_session(db_session)
    return db_session.query(cls.model)
returns base query for specific service :param db_session: :return: query
entailment
def on(event, *args, **kwargs):
    """
    Event method wrapper for bot mixins. When a bot is constructed,
    its metaclass inspects all members of all base classes, and
    looks for methods marked with an event attribute which is
    assigned via this wrapper. It then stores all the methods in a
    dict that maps event names to lists of these methods, which are
    each called when the event occurs.
    """
    def wrapper(func):
        # map positional args into kwargs by index
        for i, arg in enumerate(args):
            kwargs[i] = arg
        func.event = Event(event, kwargs)
        return func
    return wrapper
Event method wrapper for bot mixins. When a bot is constructed, its metaclass inspects all members of all base classes, and looks for methods marked with an event attribute which is assigned via this wrapper. It then stores all the methods in a dict that maps event names to lists of these methods, which are each called when the event occurs.
entailment
def get_db_session(session=None, obj=None):
    """
    Utility function that attempts to return a sqlalchemy session that
    could have been created/passed in one of a few ways:

    * it first tries to read the session attached to the instance, if
      the object argument was passed
    * then it tries to return the session passed as argument
    * finally it tries to read a pylons-like threadlocal called DBSession
    * if all of this fails an exception is thrown

    :param session:
    :param obj:
    :return:
    """
    # try to read the session from instance
    from ziggurat_foundations import models
    if obj:
        return sa.orm.session.object_session(obj)
    # try passed session
    elif session:
        return session
    # try global pylons-like session then
    elif models.DBSession:
        return models.DBSession
    raise ZigguratSessionException("No Session found")
Utility function that attempts to return a sqlalchemy session that could have been created/passed in one of a few ways: * it first tries to read the session attached to the instance, if the object argument was passed * then it tries to return the session passed as argument * finally it tries to read a pylons-like threadlocal called DBSession * if all of this fails an exception is thrown :param session: :param obj: :return:
entailment
def get_dict(self, exclude_keys=None, include_keys=None):
    """
    return dictionary of keys and values corresponding to this model's
    data - if include_keys is null the function will return all keys

    :param exclude_keys: (optional) is a list of columns from model
        that should not be returned by this function
    :param include_keys: (optional) is a list of columns from model
        that should be returned by this function
    :return:
    """
    d = {}
    exclude_keys_list = exclude_keys or []
    include_keys_list = include_keys or []
    for k in self._get_keys():
        if k not in exclude_keys_list and (
            k in include_keys_list or not include_keys
        ):
            d[k] = getattr(self, k)
    return d
return dictionary of keys and values corresponding to this model's data - if include_keys is null the function will return all keys :param exclude_keys: (optional) is a list of columns from model that should not be returned by this function :param include_keys: (optional) is a list of columns from model that should be returned by this function :return:
entailment
def get_appstruct(self):
    """
    return a list of (key, value) tuples corresponding to this
    model's data
    """
    result = []
    for k in self._get_keys():
        result.append((k, getattr(self, k)))
    return result
return a list of (key, value) tuples corresponding to this model's data
entailment
def populate_obj(self, appstruct, exclude_keys=None, include_keys=None):
    """
    updates instance properties *for column names that exist*
    for this model and are keys present in passed dictionary

    :param appstruct: (dictionary)
    :param exclude_keys: (optional) is a list of columns from model
        that should not be updated by this function
    :param include_keys: (optional) is a list of columns from model
        that should be updated by this function
    :return:
    """
    exclude_keys_list = exclude_keys or []
    include_keys_list = include_keys or []
    for k in self._get_keys():
        if (
            k in appstruct
            and k not in exclude_keys_list
            and (k in include_keys_list or not include_keys)
        ):
            setattr(self, k, appstruct[k])
updates instance properties *for column names that exist* for this model and are keys present in passed dictionary :param appstruct: (dictionary) :param exclude_keys: (optional) is a list of columns from model that should not be updated by this function :param include_keys: (optional) is a list of columns from model that should be updated by this function :return:
entailment
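A hedged usage sketch with a hypothetical model exposing the same _get_keys() contract (FakeUser and its fields are invented for illustration):

    class FakeUser(object):
        def __init__(self):
            self.name = None
            self.email = None
            self.admin = False

        def _get_keys(self):
            return ["name", "email", "admin"]

        def populate_obj(self, appstruct, exclude_keys=None, include_keys=None):
            # same filtering logic as the method above
            exclude = exclude_keys or []
            include = include_keys or []
            for k in self._get_keys():
                if k in appstruct and k not in exclude and (k in include or not include_keys):
                    setattr(self, k, appstruct[k])

    u = FakeUser()
    u.populate_obj({"name": "anna", "email": "a@example.com", "admin": True},
                   exclude_keys=["admin"])
    print(u.name, u.email, u.admin)  # anna a@example.com False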
def populate_obj_from_obj(self, instance, exclude_keys=None, include_keys=None):
    """
    updates instance properties *for column names that exist*
    for this model and are properties present in passed instance

    :param instance:
    :param exclude_keys: (optional) is a list of columns from model
        that should not be updated by this function
    :param include_keys: (optional) is a list of columns from model
        that should be updated by this function
    :return:
    """
    exclude_keys_list = exclude_keys or []
    include_keys_list = include_keys or []
    for k in self._get_keys():
        if (
            hasattr(instance, k)
            and k not in exclude_keys_list
            and (k in include_keys_list or not include_keys)
        ):
            setattr(self, k, getattr(instance, k))
updates instance properties *for column names that exist* for this model and are properties present in passed instance :param instance: :param exclude_keys: (optional) is a list of columns from model that should not be updated by this function :param include_keys: (optional) is a list of columns from model that should be updated by this function :return:
entailment
def delete(self, db_session=None):
    """
    Deletes the object via session, this will permanently delete the
    object from storage on commit

    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, self)
    db_session.delete(self)
Deletes the object via session, this will permanently delete the object from storage on commit :param db_session: :return:
entailment
def power_down(self):
    """
    Turn off the HX711.

    :return: always True
    :rtype: bool
    """
    GPIO.output(self._pd_sck, False)
    GPIO.output(self._pd_sck, True)
    time.sleep(0.01)
    return True
Turn off the HX711. :return: always True :rtype: bool
entailment
def power_up(self):
    """
    Power up the HX711.

    :return: always True
    :rtype: bool
    """
    GPIO.output(self._pd_sck, False)
    time.sleep(0.01)
    return True
Power up the HX711. :return: always True :rtype: bool
entailment
def reset(self):
    """
    Reset the HX711 and prepare it for the next reading.

    :return: True on success
    :rtype: bool
    :raises GenericHX711Exception:
    """
    logging.debug("power down")
    self.power_down()
    logging.debug("power up")
    self.power_up()
    logging.debug("read some raw data")
    result = self.get_raw_data(6)
    if result is False:
        raise GenericHX711Exception("failed to reset HX711")
    else:
        return True
Reset the HX711 and prepare it for the next reading. :return: True on success :rtype: bool :raises GenericHX711Exception:
entailment
def _validate_measure_count(self, times):
    """
    check if "times" is within the borders defined in the class

    :param times: "times" to check
    :type times: int
    """
    if not self.min_measures <= times <= self.max_measures:
        raise ParameterValidationError(
            "{times} is not within the borders defined in the class".format(
                times=times
            )
        )
check if "times" is within the borders defined in the class :param times: "times" to check :type times: int
entailment
def _validate_gain_A_value(self, gain_A):
    """
    validate a given value for gain_A

    :type gain_A: int
    :raises: ParameterValidationError
    """
    if gain_A not in self._valid_gains_for_channel_A:
        raise ParameterValidationError(
            "{gain_A} is not a valid gain".format(gain_A=gain_A)
        )
validate a given value for gain_A :type gain_A: int :raises: ParameterValidationError
entailment
def _ready(self):
    """
    Check if there is some data ready to be read.

    :return: True if there is some data
    :rtype: bool
    """
    # if DOUT pin is low, data is ready for reading
    _is_ready = GPIO.input(self._dout) == 0
    logging.debug("check data ready for reading: {result}".format(
        result="YES" if _is_ready is True else "NO"
    ))
    return _is_ready
Check if there is some data ready to be read. :return: True if there is some data :rtype: bool
entailment
def _set_channel_gain(self, num):
    """
    Finish data transmission from HX711 by setting next required
    gain and channel. Only called from the _read function.

    :param num: how often to do the set (1...3)
    :type num: int
    :return: True on success
    :rtype: bool
    """
    if not 1 <= num <= 3:
        raise AttributeError(
            '"num" has to be in the range of 1 to 3'
        )
    for _ in range(num):
        logging.debug("_set_channel_gain called")
        start_counter = time.perf_counter()  # start timer now
        GPIO.output(self._pd_sck, True)   # set high
        GPIO.output(self._pd_sck, False)  # set low
        end_counter = time.perf_counter()  # stop timer
        time_elapsed = float(end_counter - start_counter)
        # check if HX711 did not turn off...
        # if pd_sck pin is HIGH for 60 µs and more the HX711 enters power down mode.
        if time_elapsed >= 0.00006:
            logging.warning(
                'setting gain and channel took more than 60µs. '
                'Time elapsed: {:0.8f}'.format(time_elapsed)
            )
            # hx711 has turned off. First few readings are inaccurate.
            # Despite this reading was ok and data can be used.
            result = self.get_raw_data(times=6)  # set for the next reading.
            if result is False:
                raise GenericHX711Exception("channel was not set properly")
    return True
Finish data transmission from HX711 by setting next required gain and channel. Only called from the _read function. :param num: how often to do the set (1...3) :type num: int :return: True on success :rtype: bool
entailment
def _read(self, max_tries=40):
    """
    - read the bit stream from HX711 and convert to an int value.
    - validates the acquired data

    :param max_tries: how often to try to get data
    :type max_tries: int
    :return: raw data
    :rtype: int
    """
    # start by setting the pd_sck to false
    GPIO.output(self._pd_sck, False)
    # init the counter
    ready_counter = 0
    # loop until HX711 is ready
    # halt when maximum number of tries is reached
    while self._ready() is False:
        time.sleep(0.01)    # sleep for 10 ms before next try
        ready_counter += 1  # increment counter
        # check loop count and stop when defined maximum is reached
        if ready_counter >= max_tries:
            logging.debug('self._read() not ready after {} tries\n'.format(max_tries))
            return False
    data_in = 0  # 2's complement data from hx 711
    # read first 24 bits of data
    for i in range(24):
        start_counter = time.perf_counter()  # start timer
        # request next bit from HX711
        GPIO.output(self._pd_sck, True)
        GPIO.output(self._pd_sck, False)
        end_counter = time.perf_counter()  # stop timer
        time_elapsed = float(end_counter - start_counter)
        # check if the hx 711 did not turn off:
        # if pd_sck pin is HIGH for 60 us or more, the HX711 enters power down mode.
        if time_elapsed >= 0.00006:
            logging.debug('Reading data took longer than 60µs. Time elapsed: {:0.8f}'.format(time_elapsed))
            return False
        # Shift the bits as they come to data_in variable.
        # Left shift by one bit then bitwise OR with the new bit.
        data_in = (data_in << 1) | GPIO.input(self._dout)
    if self.channel == 'A' and self.channel_a_gain == 128:
        self._set_channel_gain(num=1)  # send one bit
    elif self.channel == 'A' and self.channel_a_gain == 64:
        self._set_channel_gain(num=3)  # send three bits
    else:
        self._set_channel_gain(num=2)  # send two bits
    logging.debug('Binary value as it has come: ' + str(bin(data_in)))
    # check if data is valid
    # 0x800000 is the lowest,
    # 0x7fffff is the highest possible value from HX711
    if data_in == 0x7fffff or data_in == 0x800000:
        logging.debug('Invalid data detected: ' + str(data_in))
        return False
    # calculate int from 2's complement
    if data_in & 0x800000:
        # 0b1000 0000 0000 0000 0000 0000 - the sign bit is 1: negative number
        signed_data = -((data_in ^ 0xffffff) + 1)  # convert from 2's complement to int
    else:
        # the value is a positive number
        signed_data = data_in
    logging.debug('Converted 2\'s complement value: ' + str(signed_data))
    return signed_data
- read the bit stream from HX711 and convert to an int value. - validates the acquired data :param max_tries: how often to try to get data :type max_tries: int :return: raw data :rtype: int
entailment
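The sign handling at the end is plain 24-bit two's complement; the conversion in isolation:

    def from_twos_complement_24bit(data_in):
        # If bit 23 is set, the value is negative: invert the 24 bits and add one.
        if data_in & 0x800000:
            return -((data_in ^ 0xffffff) + 1)
        return data_in

    print(from_twos_complement_24bit(0x000005))  # 5
    print(from_twos_complement_24bit(0xfffffb))  # -5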
def get_raw_data(self, times=5):
    """
    Do several readings and return the list of measured values.

    :param times: how many values to measure
    :type times: int
    :return: the list of measured values
    :rtype: list
    """
    self._validate_measure_count(times)
    data_list = []
    while len(data_list) < times:
        data = self._read()
        if data not in [False, -1]:
            data_list.append(data)
    return data_list
Do several readings and return the list of measured values. :param times: how many values to measure :type times: int :return: the list of measured values :rtype: list
entailment
def from_resource_deeper(
    self, resource_id=None, limit_depth=1000000, db_session=None, *args, **kwargs
):
    """
    Returns the subtree of ordered objects relative to the start
    resource_id (currently only implemented in postgresql).

    :param resource_id:
    :param limit_depth:
    :param db_session:
    :return:
    """
    return self.service.from_resource_deeper(
        resource_id=resource_id,
        limit_depth=limit_depth,
        db_session=db_session,
        *args,
        **kwargs
    )
Returns the subtree of ordered objects relative to the start resource_id (currently only implemented in postgresql) :param resource_id: :param limit_depth: :param db_session: :return:
entailment
def delete_branch(self, resource_id=None, db_session=None, *args, **kwargs):
    """
    Deletes the whole branch, with children, starting from resource_id.

    :param resource_id:
    :param db_session:
    :return:
    """
    return self.service.delete_branch(
        resource_id=resource_id, db_session=db_session, *args, **kwargs
    )
Deletes the whole branch, with children, starting from resource_id :param resource_id: :param db_session: :return:
entailment
def from_parent_deeper(
    self, parent_id=None, limit_depth=1000000, db_session=None, *args, **kwargs
):
    """
    Returns the subtree of ordered objects relative to the start
    parent_id (currently only implemented in postgresql).

    :param parent_id:
    :param limit_depth:
    :param db_session:
    :return:
    """
    return self.service.from_parent_deeper(
        parent_id=parent_id,
        limit_depth=limit_depth,
        db_session=db_session,
        *args,
        **kwargs
    )
Returns the subtree of ordered objects relative to the start parent_id (currently only implemented in postgresql) :param parent_id: :param limit_depth: :param db_session: :return:
entailment
def build_subtree_strut(self, result, *args, **kwargs):
    """
    Returns a dictionary in form of
    {node: Resource, children: {node_id: Resource}}

    :param result:
    :return:
    """
    return self.service.build_subtree_strut(result=result, *args, **kwargs)
Returns a dictionary in form of {node:Resource, children:{node_id: Resource}} :param result: :return:
entailment
def path_upper(
    self, object_id, limit_depth=1000000, db_session=None, *args, **kwargs
):
    """
    Returns the path to the root node starting from object_id
    (currently only implemented in postgresql).

    :param object_id:
    :param limit_depth:
    :param db_session:
    :return:
    """
    return self.service.path_upper(
        object_id=object_id,
        limit_depth=limit_depth,
        db_session=db_session,
        *args,
        **kwargs
    )
Returns the path to the root node starting from object_id (currently only implemented in postgresql) :param object_id: :param limit_depth: :param db_session: :return:
entailment
def move_to_position(
    self, resource_id, to_position, new_parent_id=noop, db_session=None, *args, **kwargs
):
    """
    Moves node to new location in the tree

    :param resource_id: resource to move
    :param to_position: new position
    :param new_parent_id: new parent id
    :param db_session:
    :return:
    """
    return self.service.move_to_position(
        resource_id=resource_id,
        to_position=to_position,
        new_parent_id=new_parent_id,
        db_session=db_session,
        *args,
        **kwargs
    )
Moves node to new location in the tree :param resource_id: resource to move :param to_position: new position :param new_parent_id: new parent id :param db_session: :return:
entailment
def shift_ordering_down(
    self, parent_id, position, db_session=None, *args, **kwargs
):
    """
    Shifts ordering to "close gaps" after node deletion or being
    moved to another branch, begins the shift from given position

    :param parent_id:
    :param position:
    :param db_session:
    :return:
    """
    return self.service.shift_ordering_down(
        parent_id=parent_id,
        position=position,
        db_session=db_session,
        *args,
        **kwargs
    )
Shifts ordering to "close gaps" after node deletion or being moved to another branch, begins the shift from given position :param parent_id: :param position: :param db_session: :return:
entailment
def shift_ordering_up(self, parent_id, position, db_session=None, *args, **kwargs):
    """
    Shifts ordering to "open a gap" for node insertion,
    begins the shift from given position

    :param parent_id:
    :param position:
    :param db_session:
    :return:
    """
    return self.service.shift_ordering_up(
        parent_id=parent_id,
        position=position,
        db_session=db_session,
        *args,
        **kwargs
    )
Shifts ordering to "open a gap" for node insertion, begins the shift from given position :param parent_id: :param position: :param db_session: :return:
entailment
def set_position(self, resource_id, to_position, db_session=None, *args, **kwargs):
    """
    Sets node position for new node in the tree

    :param resource_id: resource to move
    :param to_position: new position
    :param db_session:
    :return:
    """
    return self.service.set_position(
        resource_id=resource_id,
        to_position=to_position,
        db_session=db_session,
        *args,
        **kwargs
    )
Sets node position for new node in the tree :param resource_id: resource to move :param to_position: new position :param db_session: :return:
entailment
def check_node_parent(
    self, resource_id, new_parent_id, db_session=None, *args, **kwargs
):
    """
    Checks if parent destination is valid for node

    :param resource_id:
    :param new_parent_id:
    :param db_session:
    :return:
    """
    return self.service.check_node_parent(
        resource_id=resource_id,
        new_parent_id=new_parent_id,
        db_session=db_session,
        *args,
        **kwargs
    )
Checks if parent destination is valid for node :param resource_id: :param new_parent_id: :param db_session: :return:
entailment
def count_children(self, resource_id, db_session=None, *args, **kwargs):
    """
    Counts children of resource node

    :param resource_id:
    :param db_session:
    :return:
    """
    return self.service.count_children(
        resource_id=resource_id, db_session=db_session, *args, **kwargs
    )
Counts children of resource node :param resource_id: :param db_session: :return:
entailment
def check_node_position(
    self, parent_id, position, on_same_branch, db_session=None, *args, **kwargs
):
    """
    Checks if node position for given parent is valid, raises
    exception if this is not the case

    :param parent_id:
    :param position:
    :param on_same_branch: indicates that we are checking same branch
    :param db_session:
    :return:
    """
    return self.service.check_node_position(
        parent_id=parent_id,
        position=position,
        on_same_branch=on_same_branch,
        db_session=db_session,
        *args,
        **kwargs
    )
Checks if node position for given parent is valid, raises exception if this is not the case :param parent_id: :param position: :param on_same_branch: indicates that we are checking same branch :param db_session: :return:
entailment
def flatten_list(l: List[list]) -> list:
    """ takes a list of lists, l and returns a flat list """
    return [v for inner_l in l for v in inner_l]
takes a list of lists, l and returns a flat list
entailment
def read_nem_file(file_path: str) -> NEMFile:
    """ Read in NEM file and return meter readings named tuple

    :param file_path: The NEM file to process
    :returns: The parsed NEMFile object
    """
    _, file_extension = os.path.splitext(file_path)
    if file_extension.lower() == '.zip':
        with zipfile.ZipFile(file_path, 'r') as archive:
            for csv_file in archive.namelist():
                with archive.open(csv_file) as csv_text:
                    # Zip file is open in binary mode
                    # So decode then convert back to list
                    nmi_file = csv_text.read().decode('utf-8').splitlines()
                    reader = csv.reader(nmi_file, delimiter=',')
                    return parse_nem_rows(reader, file_name=csv_file)
    with open(file_path) as nmi_file:
        return parse_nem_file(nmi_file)
Read in NEM file and return meter readings named tuple :param file_path: The NEM file to process :returns: The parsed NEMFile object
entailment
def parse_nem_file(nem_file) -> NEMFile:
    """ Parse NEM file and return meter readings named tuple """
    reader = csv.reader(nem_file, delimiter=',')
    return parse_nem_rows(reader, file_name=nem_file)
Parse NEM file and return meter readings named tuple
entailment
def parse_nem_rows(nem_list: Iterable, file_name=None) -> NEMFile:
    """ Parse NEM row iterator and return meter readings named tuple """
    header = HeaderRecord(None, None, None, None, file_name)
    readings = dict()  # readings nested by NMI then channel
    trans = dict()  # transactions nested by NMI then channel
    nmi_d = None  # current NMI details block that readings apply to
    for i, row in enumerate(nem_list):
        record_indicator = int(row[0])
        if i == 0 and record_indicator != 100:
            raise ValueError("NEM Files must start with a 100 row")
        if record_indicator == 100:
            header = parse_100_row(row, file_name)
            if header.version_header not in ['NEM12', 'NEM13']:
                raise ValueError("Invalid NEM version {}".format(
                    header.version_header))
        elif record_indicator == 900:
            for nmi in readings:
                for suffix in readings[nmi]:
                    readings[nmi][suffix] = flatten_list(readings[nmi][suffix])
            break  # End of file
        elif header.version_header == 'NEM12' and record_indicator == 200:
            try:
                nmi_details = parse_200_row(row)
            except ValueError:
                logging.error('Error parsing 200 row:')
                logging.error(row)
                raise
            nmi_d = nmi_details
            if nmi_d.nmi not in readings:
                readings[nmi_d.nmi] = {}
            if nmi_d.nmi_suffix not in readings[nmi_d.nmi]:
                readings[nmi_d.nmi][nmi_d.nmi_suffix] = []
            if nmi_d.nmi not in trans:
                trans[nmi_d.nmi] = {}
            if nmi_d.nmi_suffix not in trans[nmi_d.nmi]:
                trans[nmi_d.nmi][nmi_d.nmi_suffix] = []
        elif header.version_header == 'NEM12' and record_indicator == 300:
            num_intervals = int(24 * 60 / nmi_d.interval_length)
            assert len(row) > num_intervals, "Incomplete 300 Row in {}".format(
                file_name)
            interval_record = parse_300_row(row, nmi_d.interval_length, nmi_d.uom)
            # don't flatten the list of interval readings at this stage,
            # as they may need to be adjusted by a 400 row
            readings[nmi_d.nmi][nmi_d.nmi_suffix].append(
                interval_record.interval_values)
        elif header.version_header == 'NEM12' and record_indicator == 400:
            event_record = parse_400_row(row)
            readings[nmi_d.nmi][nmi_d.nmi_suffix][-1] = update_reading_events(
                readings[nmi_d.nmi][nmi_d.nmi_suffix][-1], event_record)
        elif header.version_header == 'NEM12' and record_indicator == 500:
            b2b_details = parse_500_row(row)
            trans[nmi_d.nmi][nmi_d.nmi_suffix].append(b2b_details)
        elif header.version_header == 'NEM13' and record_indicator == 550:
            b2b_details = parse_550_row(row)
            trans[nmi_d.nmi][nmi_d.nmi_suffix].append(b2b_details)
        elif header.version_header == 'NEM13' and record_indicator == 250:
            basic_data = parse_250_row(row)
            reading = calculate_manual_reading(basic_data)
            nmi_d = basic_data
            if basic_data.nmi not in readings:
                readings[nmi_d.nmi] = {}
            if nmi_d.nmi_suffix not in readings[nmi_d.nmi]:
                readings[nmi_d.nmi][nmi_d.nmi_suffix] = []
            if nmi_d.nmi not in trans:
                trans[nmi_d.nmi] = {}
            if nmi_d.nmi_suffix not in trans[nmi_d.nmi]:
                trans[nmi_d.nmi][nmi_d.nmi_suffix] = []
            readings[nmi_d.nmi][nmi_d.nmi_suffix].append([reading])
        else:
            logging.warning(
                "Record indicator %s not supported and was skipped",
                record_indicator)
    return NEMFile(header, readings, trans)
Parse NEM row iterator and return meter readings named tuple
entailment
def calculate_manual_reading(basic_data: BasicMeterData) -> Reading:
    """ Calculate the interval between two manual readings """
    t_start = basic_data.previous_register_read_datetime
    t_end = basic_data.current_register_read_datetime
    read_start = basic_data.previous_register_read
    read_end = basic_data.current_register_read
    value = basic_data.quantity
    uom = basic_data.uom
    quality_method = basic_data.current_quality_method
    return Reading(t_start, t_end, value, uom, quality_method, "", "",
                   read_start, read_end)
Calculate the interval between two manual readings
entailment
def parse_100_row(row: list, file_name: str) -> HeaderRecord:
    """ Parse header record (100) """
    return HeaderRecord(
        row[1],
        parse_datetime(row[2]),
        row[3],
        row[4],
        file_name,
    )
Parse header record (100)
entailment
def parse_200_row(row: list) -> NmiDetails:
    """ Parse NMI data details record (200) """
    return NmiDetails(row[1], row[2], row[3], row[4], row[5], row[6],
                      row[7], int(row[8]), parse_datetime(row[9]))
Parse NMI data details record (200)
entailment
def parse_250_row(row: list) -> BasicMeterData:
    """ Parse basic meter data record (250) """
    return BasicMeterData(row[1], row[2], row[3], row[4], row[5], row[6], row[7],
                          float(row[8]), parse_datetime(row[9]), row[10], row[11],
                          row[12], float(row[13]), parse_datetime(row[14]),
                          row[15], row[16], row[17], float(row[18]), row[19],
                          row[20], parse_datetime(row[21]), parse_datetime(row[22]))
Parse basic meter data record (250)
entailment
def parse_300_row(row: list, interval: int, uom: str) -> IntervalRecord:
    """ Interval data record (300) """
    num_intervals = int(24 * 60 / interval)
    interval_date = parse_datetime(row[1])
    last_interval = 2 + num_intervals
    quality_method = row[last_interval]
    interval_values = parse_interval_records(
        row[2:last_interval], interval_date, interval, uom, quality_method)
    return IntervalRecord(interval_date, interval_values,
                          row[last_interval + 0], row[last_interval + 1],
                          row[last_interval + 2],
                          parse_datetime(row[last_interval + 3]),
                          parse_datetime(row[last_interval + 4]))
Interval data record (300)
entailment
def parse_interval_records(interval_record, interval_date, interval, uom,
                           quality_method) -> List[Reading]:
    """ Convert interval values into tuples with datetime """
    interval_delta = timedelta(minutes=interval)
    return [
        Reading(
            t_start=interval_date + (i * interval_delta),
            t_end=interval_date + (i * interval_delta) + interval_delta,
            read_value=parse_reading(val),
            uom=uom,
            quality_method=quality_method,
            event_code="",  # event is unknown at time of reading
            event_desc="",  # event is unknown at time of reading
            read_start=None,
            read_end=None   # no before and after readings for intervals
        )
        for i, val in enumerate(interval_record)
    ]
Convert interval values into tuples with datetime
entailment
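For a 30-minute interval length, a day's 300 row carries 48 values whose windows tile the day; the timestamp arithmetic in isolation:

    from datetime import datetime, timedelta

    interval_date = datetime(2024, 1, 1)
    interval_delta = timedelta(minutes=30)
    windows = [(interval_date + i * interval_delta,
                interval_date + (i + 1) * interval_delta)
               for i in range(48)]
    print(windows[0])   # 2024-01-01 00:00 to 2024-01-01 00:30
    print(windows[-1])  # 2024-01-01 23:30 to 2024-01-02 00:00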
def parse_reading(val: str) -> Optional[float]:
    """ Convert reading value to float (if possible) """
    try:
        return float(val)
    except ValueError:
        logging.warning('Reading of "%s" is not a number', val)
        return None
Convert reading value to float (if possible)
entailment
def parse_400_row(row: list) -> tuple:
    """ Interval event record (400) """
    return EventRecord(int(row[1]), int(row[2]), row[3], row[4], row[5])
Interval event record (400)
entailment
def update_reading_events(readings, event_record):
    """ Updates readings from a 300 row to reflect any events found
    in a subsequent 400 row
    """
    # event intervals are 1-indexed
    for i in range(event_record.start_interval - 1, event_record.end_interval):
        readings[i] = Reading(
            t_start=readings[i].t_start,
            t_end=readings[i].t_end,
            read_value=readings[i].read_value,
            uom=readings[i].uom,
            quality_method=event_record.quality_method,
            event_code=event_record.reason_code,
            event_desc=event_record.reason_description,
            read_start=readings[i].read_start,
            read_end=readings[i].read_end)
    return readings
Updates readings from a 300 row to reflect any events found in a subsequent 400 row
entailment
def parse_datetime(record: str) -> Optional[datetime]:
    """ Parse a datetime string into a python datetime object """
    # NEM defines Date8, DateTime12 and DateTime14
    format_strings = {8: '%Y%m%d', 12: '%Y%m%d%H%M', 14: '%Y%m%d%H%M%S'}
    if record == '':
        return None
    return datetime.strptime(record.strip(),
                             format_strings[len(record.strip())])
Parse a datetime string into a python datetime object
entailment
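The three NEM widths (Date8, DateTime12, DateTime14) in action, reusing the function exactly as written:

    from datetime import datetime
    from typing import Optional

    def parse_datetime(record: str) -> Optional[datetime]:
        format_strings = {8: '%Y%m%d', 12: '%Y%m%d%H%M', 14: '%Y%m%d%H%M%S'}
        if record == '':
            return None
        return datetime.strptime(record.strip(), format_strings[len(record.strip())])

    print(parse_datetime('20240101'))        # 2024-01-01 00:00:00
    print(parse_datetime('202401011330'))    # 2024-01-01 13:30:00
    print(parse_datetime('20240101133045'))  # 2024-01-01 13:30:45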
def parse_args(self):
    """
    Called from ``gnotty.server.run`` and parses any CLI args
    provided. Also handles loading settings from the Python module
    specified with the ``--conf-file`` arg. CLI args take precedence
    over any settings defined in the Python module defined by
    ``--conf-file``.
    """
    options, _ = parser.parse_args()
    file_settings = {}
    if options.CONF_FILE:
        execfile(options.CONF_FILE, {}, file_settings)
    for option in self.option_list:
        if option.dest:
            file_value = file_settings.get("GNOTTY_%s" % option.dest, None)
            # optparse doesn't seem to provide a way to determine if
            # an option's value was provided as a CLI arg, or if the
            # default is being used, so we manually check sys.argv,
            # since provided CLI args should take precedence over
            # any settings defined in a conf module.
            flags = option._short_opts + option._long_opts
            in_argv = set(flags) & set(sys.argv)
            options_value = getattr(options, option.dest)
            if file_value and not in_argv:
                self[option.dest] = file_value
            elif in_argv:
                self[option.dest] = options_value
            else:
                self[option.dest] = self.get(option.dest, options_value)
    self.set_max_message_length()
    self["STATIC_URL"] = "/static/"
    self["LOG_LEVEL"] = getattr(logging, self["LOG_LEVEL"])
Called from ``gnotty.server.run`` and parses any CLI args provided. Also handles loading settings from the Python module specified with the ``--conf-file`` arg. CLI args take precedence over any settings defined in the Python module defined by ``--conf-file``.
entailment
def color(nickname):
    """
    Provides a consistent color for a nickname. Uses first 6 chars
    of nickname's md5 hash, and then slightly darkens the rgb values
    for use on a light background.
    """
    _hex = md5(nickname).hexdigest()[:6]
    darken = lambda s: str(int(round(int(s, 16) * .7)))
    return "rgb(%s)" % ",".join([darken(_hex[i:i+2]) for i in range(6)[::2]])
Provides a consistent color for a nickname. Uses first 6 chars of nickname's md5 hash, and then slightly darkens the rgb values for use on a light background.
entailment
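The snippet above is Python 2 (md5() is fed a str directly); a Python 3 sketch of the same idea, darkening each channel by 30% so it stays readable on a light background:

    from hashlib import md5

    def color(nickname):
        # First 6 hex chars of the md5 digest, one byte per rgb channel.
        _hex = md5(nickname.encode("utf-8")).hexdigest()[:6]
        darken = lambda s: str(int(round(int(s, 16) * .7)))
        return "rgb(%s)" % ",".join(darken(_hex[i:i + 2]) for i in range(0, 6, 2))

    print(color("alice"))                    # deterministic "rgb(r,g,b)" string
    print(color("alice") == color("alice"))  # True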
def on_welcome(self, connection, event):
    """
    Join the channel once connected to the IRC server.
    """
    connection.join(self.channel, key=settings.IRC_CHANNEL_KEY or "")
Join the channel once connected to the IRC server.
entailment
def on_nicknameinuse(self, connection, event):
    """
    Increment a digit on the nickname if it's in use, and re-connect.
    """
    digits = ""
    while self.nickname[-1].isdigit():
        digits = self.nickname[-1] + digits
        self.nickname = self.nickname[:-1]
    digits = 1 if not digits else int(digits) + 1
    self.nickname += str(digits)
    self.connect(self.host, self.port, self.nickname)
Increment a digit on the nickname if it's in use, and re-connect.
entailment
def message_channel(self, message):
    """
    Nicer shortcut for sending a message to a channel. Also irclib
    doesn't handle unicode, so we bypass its privmsg -> send_raw
    methods and use its socket directly.
    """
    data = "PRIVMSG %s :%s\r\n" % (self.channel, message)
    self.connection.socket.send(data.encode("utf-8"))
Nicer shortcut for sending a message to a channel. Also irclib doesn't handle unicode so we bypass its privmsg -> send_raw methods and use its socket directly.
entailment
def emit_message(self, message):
    """
    Send a message to the channel. We also emit the message back to
    the sender's WebSocket.
    """
    try:
        nickname_color = self.nicknames[self.nickname]
    except KeyError:
        # Only accept messages if we've joined.
        return
    message = message[:settings.MAX_MESSAGE_LENGTH]
    # Handle IRC commands.
    if message.startswith("/"):
        self.connection.send_raw(message.lstrip("/"))
        return
    self.message_channel(message)
    self.namespace.emit("message", self.nickname, message, nickname_color)
Send a message to the channel. We also emit the message back to the sender's WebSocket.
entailment
def emit_nicknames(self):
    """
    Send the nickname list to the WebSocket. Called whenever the
    nicknames list changes.
    """
    nicknames = [{"nickname": name, "color": color(name)}
                 for name in sorted(self.nicknames.keys())]
    self.namespace.emit("nicknames", nicknames)
Send the nickname list to the WebSocket. Called whenever the nicknames list changes.
entailment
def on_namreply(self, connection, event):
    """
    Initial list of nicknames received - remove op/voice prefixes,
    and send the list to the WebSocket.
    """
    for nickname in event.arguments()[-1].split():
        nickname = nickname.lstrip("@+")
        self.nicknames[nickname] = color(nickname)
    self.emit_nicknames()
Initial list of nicknames received - remove op/voice prefixes, and send the list to the WebSocket.
entailment
def on_join(self, connection, event):
    """
    Someone joined the channel - send the nicknames list to the
    WebSocket.
    """
    # from time import sleep; sleep(10)  # Simulate a slow connection
    nickname = self.get_nickname(event)
    nickname_color = color(nickname)
    self.nicknames[nickname] = nickname_color
    self.namespace.emit("join")
    self.namespace.emit("message", nickname, "joins", nickname_color)
    self.emit_nicknames()
Someone joined the channel - send the nicknames list to the WebSocket.
entailment
def on_nick(self, connection, event):
    """
    Someone changed their nickname - send the nicknames list to the
    WebSocket.
    """
    old_nickname = self.get_nickname(event)
    old_color = self.nicknames.pop(old_nickname)
    new_nickname = event.target()
    message = "is now known as %s" % new_nickname
    self.namespace.emit("message", old_nickname, message, old_color)
    new_color = color(new_nickname)
    self.nicknames[new_nickname] = new_color
    self.emit_nicknames()
    if self.nickname == old_nickname:
        self.nickname = new_nickname
Someone changed their nickname - send the nicknames list to the WebSocket.
entailment
def on_quit(self, connection, event):
    """
    Someone left the channel - send the nicknames list to the
    WebSocket.
    """
    nickname = self.get_nickname(event)
    nickname_color = self.nicknames[nickname]
    del self.nicknames[nickname]
    self.namespace.emit("message", nickname, "leaves", nickname_color)
    self.emit_nicknames()
Someone left the channel - send the nicknames list to the WebSocket.
entailment
def on_pubmsg(self, connection, event):
    """
    Messages received in the channel - send them to the WebSocket.
    """
    for message in event.arguments():
        nickname = self.get_nickname(event)
        nickname_color = self.nicknames[nickname]
        self.namespace.emit("message", nickname, message, nickname_color)
Messages received in the channel - send them to the WebSocket.
entailment
def get(cls, resource_id, db_session=None):
    """
    Fetch row using primary key - will use existing object in
    session if already present

    :param resource_id:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    return db_session.query(cls.model).get(resource_id)
Fetch row using primary key - will use existing object in session if already present :param resource_id: :param db_session: :return:
entailment
def perms_for_user(cls, instance, user, db_session=None):
    """
    returns all permissions that given user has for this resource
    from groups and directly set ones too

    :param instance:
    :param user:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    query = db_session.query(
        cls.models_proxy.GroupResourcePermission.group_id.label("owner_id"),
        cls.models_proxy.GroupResourcePermission.perm_name,
        sa.literal("group").label("type"),
    )
    query = query.filter(
        cls.models_proxy.GroupResourcePermission.group_id.in_(
            [gr.id for gr in user.groups]
        )
    )
    query = query.filter(
        cls.models_proxy.GroupResourcePermission.resource_id == instance.resource_id
    )
    query2 = db_session.query(
        cls.models_proxy.UserResourcePermission.user_id.label("owner_id"),
        cls.models_proxy.UserResourcePermission.perm_name,
        sa.literal("user").label("type"),
    )
    query2 = query2.filter(
        cls.models_proxy.UserResourcePermission.user_id == user.id
    )
    query2 = query2.filter(
        cls.models_proxy.UserResourcePermission.resource_id == instance.resource_id
    )
    query = query.union(query2)
    groups_dict = dict([(g.id, g) for g in user.groups])
    perms = [
        PermissionTuple(
            user,
            row.perm_name,
            row.type,
            groups_dict.get(row.owner_id) if row.type == "group" else None,
            instance,
            False,
            True,
        )
        for row in query
    ]
    # include all perms if user is the owner of this resource
    if instance.owner_user_id == user.id:
        perms.append(
            PermissionTuple(
                user, ALL_PERMISSIONS, "user", None, instance, True, True
            )
        )
    groups_dict = dict([(g.id, g) for g in user.groups])
    if instance.owner_group_id in groups_dict:
        perms.append(
            PermissionTuple(
                user,
                ALL_PERMISSIONS,
                "group",
                groups_dict.get(instance.owner_group_id),
                instance,
                True,
                True,
            )
        )
    return perms
returns all permissions that the given user has for this resource, from groups and directly set ones too :param instance: :param user: :param db_session: :return:
entailment
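The returned PermissionTuples can be inspected field by field. Judging from the positional construction above, the tuple carries the user, the permission name, the source type, the group (or None), the resource, plus owner and allowed flags; the field names below are assumptions based on that ordering:

# Sketch with assumed names (ResourceService, session, and the
# PermissionTuple field names perm_name / type / owner).
for perm in ResourceService.perms_for_user(resource, user, db_session=session):
    print(perm.perm_name, "via", perm.type, "owner:", perm.owner)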
def direct_perms_for_user(cls, instance, user, db_session=None):
    """
    returns permissions that the given user has for this resource
    without ones inherited from groups that the user belongs to

    :param instance:
    :param user:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    query = db_session.query(
        cls.models_proxy.UserResourcePermission.user_id,
        cls.models_proxy.UserResourcePermission.perm_name,
    )
    query = query.filter(
        cls.models_proxy.UserResourcePermission.user_id == user.id
    )
    query = query.filter(
        cls.models_proxy.UserResourcePermission.resource_id
        == instance.resource_id
    )
    perms = [
        PermissionTuple(user, row.perm_name, "user", None, instance, False, True)
        for row in query
    ]

    # include all perms if user is the owner of this resource
    if instance.owner_user_id == user.id:
        perms.append(
            # pass the full 7-field tuple (owner=True, allowed=True) to
            # match the PermissionTuple construction used elsewhere
            PermissionTuple(
                user, ALL_PERMISSIONS, "user", None, instance, True, True
            )
        )
    return perms
returns permissions that the given user has for this resource without ones inherited from groups that the user belongs to :param instance: :param user: :param db_session: :return:
entailment
def group_perms_for_user(cls, instance, user, db_session=None):
    """
    returns permissions that the given user has for this resource
    that are inherited from groups

    :param instance:
    :param user:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    perms = resource_permissions_for_users(
        cls.models_proxy,
        ANY_PERMISSION,
        resource_ids=[instance.resource_id],
        user_ids=[user.id],
        db_session=db_session,
    )
    perms = [p for p in perms if p.type == "group"]

    # include all perms if user is the owner of this resource
    groups_dict = dict([(g.id, g) for g in user.groups])
    if instance.owner_group_id in groups_dict:
        perms.append(
            PermissionTuple(
                user,
                ALL_PERMISSIONS,
                "group",
                groups_dict.get(instance.owner_group_id),
                instance,
                True,
                True,
            )
        )
    return perms
returns permissions that the given user has for this resource that are inherited from groups :param instance: :param user: :param db_session: :return:
entailment
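The two methods above split perms_for_user's result by origin - direct grants versus group-inherited ones. A quick sketch, using the same assumed names as before:

# Direct grants only vs. group-inherited grants only; `type` is the
# assumed PermissionTuple field name for the source type.
direct = ResourceService.direct_perms_for_user(resource, user, db_session=session)
via_groups = ResourceService.group_perms_for_user(resource, user, db_session=session)
assert all(p.type == "user" for p in direct)
assert all(p.type == "group" for p in via_groups)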
def users_for_perm(
    cls,
    instance,
    perm_name,
    user_ids=None,
    group_ids=None,
    limit_group_permissions=False,
    skip_group_perms=False,
    db_session=None,
):
    """
    return PermissionTuples for users AND groups that have the given
    permission for the resource; if perm_name is __any_permission__
    then users with any permission will be listed

    :param instance:
    :param perm_name:
    :param user_ids: limits the permissions to specific user ids
    :param group_ids: limits the permissions to specific group ids
    :param limit_group_permissions: should be used if we do not want to have
        user objects returned for group permissions, this might cause
        performance issues for big groups
    :param skip_group_perms: do not attach group permissions to the resultset
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    users_perms = resource_permissions_for_users(
        cls.models_proxy,
        [perm_name],
        [instance.resource_id],
        user_ids=user_ids,
        group_ids=group_ids,
        limit_group_permissions=limit_group_permissions,
        skip_group_perms=skip_group_perms,
        db_session=db_session,
    )
    if instance.owner_user_id:
        users_perms.append(
            PermissionTuple(
                instance.owner, ALL_PERMISSIONS, "user", None, instance, True, True
            )
        )
    if instance.owner_group_id and not skip_group_perms:
        for user in instance.owner_group.users:
            users_perms.append(
                PermissionTuple(
                    user,
                    ALL_PERMISSIONS,
                    "group",
                    instance.owner_group,
                    instance,
                    True,
                    True,
                )
            )
    return users_perms
return PermissionTuples for users AND groups that have the given permission for the resource; if perm_name is __any_permission__ then users with any permission will be listed :param instance: :param perm_name: :param user_ids: limits the permissions to specific user ids :param group_ids: limits the permissions to specific group ids :param limit_group_permissions: should be used if we do not want to have user objects returned for group permissions, this might cause performance issues for big groups :param skip_group_perms: do not attach group permissions to the resultset :param db_session: :return:
entailment
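Listing everyone who can touch a resource uses the __any_permission__ wildcard mentioned in the docstring; a sketch with the same assumed names:

# Who holds any permission at all on this resource?
everyone = ResourceService.users_for_perm(
    resource, "__any_permission__", db_session=session
)
for perm in everyone:
    # user / perm_name / type are assumed PermissionTuple field names
    print(perm.user, perm.perm_name, perm.type)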
def by_resource_id(cls, resource_id, db_session=None):
    """
    fetch the resource by id

    :param resource_id:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    query = db_session.query(cls.model).filter(
        cls.model.resource_id == int(resource_id)
    )
    return query.first()
fetch the resource by id :param resource_id: :param db_session: :return:
entailment
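Unlike get() above, this variant always emits a query rather than consulting the identity map, and it coerces the id with int(), so a string id taken straight from a URL route works too. Hypothetical usage with the same assumed names:

# A string id from a URL is fine: the method casts it before filtering.
resource = ResourceService.by_resource_id("42", db_session=session)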
def perm_by_group_and_perm_name(
    cls, resource_id, group_id, perm_name, db_session=None
):
    """
    fetch permissions by group and permission name

    :param resource_id:
    :param group_id:
    :param perm_name:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    query = db_session.query(cls.models_proxy.GroupResourcePermission)
    query = query.filter(
        cls.models_proxy.GroupResourcePermission.group_id == group_id
    )
    query = query.filter(
        cls.models_proxy.GroupResourcePermission.perm_name == perm_name
    )
    query = query.filter(
        cls.models_proxy.GroupResourcePermission.resource_id == resource_id
    )
    return query.first()
fetch permissions by group and permission name :param resource_id: :param group_id: :param perm_name: :param db_session: :return:
entailment
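A sketch of looking up one specific grant row, e.g. before revoking it; names are assumed as before, and the ids are placeholders:

# Returns None when the (resource, group, perm_name) triple does not exist.
perm = ResourceService.perm_by_group_and_perm_name(
    resource_id=42, group_id=7, perm_name="edit", db_session=session
)
if perm is not None:
    session.delete(perm)  # e.g. revoke the grant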
def groups_for_perm(
    cls,
    instance,
    perm_name,
    group_ids=None,
    limit_group_permissions=False,
    db_session=None,
):
    """
    return PermissionTuples for groups that have the given permission
    for the resource; if perm_name is __any_permission__ then groups
    with any permission will be listed

    :param instance:
    :param perm_name:
    :param group_ids: limits the permissions to specific group ids
    :param limit_group_permissions: should be used if we do not want to have
        user objects returned for group permissions, this might cause
        performance issues for big groups
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    group_perms = resource_permissions_for_users(
        cls.models_proxy,
        [perm_name],
        [instance.resource_id],
        group_ids=group_ids,
        limit_group_permissions=limit_group_permissions,
        skip_user_perms=True,
        db_session=db_session,
    )
    if instance.owner_group_id:
        for user in instance.owner_group.users:
            group_perms.append(
                PermissionTuple(
                    user,
                    ALL_PERMISSIONS,
                    "group",
                    instance.owner_group,
                    instance,
                    True,
                    True,
                )
            )
    return group_perms
return PermissionTuples for groups that have the given permission for the resource; if perm_name is __any_permission__ then groups with any permission will be listed :param instance: :param perm_name: :param group_ids: limits the permissions to specific group ids :param limit_group_permissions: should be used if we do not want to have user objects returned for group permissions, this might cause performance issues for big groups :param db_session: :return:
entailment
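This is the group-only counterpart of users_for_perm; per the docstring, limit_group_permissions=True avoids expanding every member of large groups into user objects. Sketch with assumed names:

# List group grants without materializing each member's user object.
group_perms = ResourceService.groups_for_perm(
    resource,
    "__any_permission__",
    limit_group_permissions=True,
    db_session=session,
)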
def lock_resource_for_update(cls, resource_id, db_session):
    """
    Selects the resource for update - locking the row against
    other transactions

    :param resource_id:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    query = db_session.query(cls.model)
    query = query.filter(cls.model.resource_id == resource_id)
    query = query.with_for_update()
    return query.first()
Selects the resource for update - locking the row against other transactions :param resource_id: :param db_session: :return:
entailment
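A SELECT ... FOR UPDATE lock lasts only for the life of the surrounding transaction, so the typical shape is lock, mutate, commit. Sketch with assumed names:

# Lock the row, mutate it, and release the lock by committing.
resource = ResourceService.lock_resource_for_update(42, db_session=session)
resource.resource_name = "renamed"  # resource_name assumed from the model
session.commit()  # releases the FOR UPDATE lock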
def get(cls, user_id, db_session=None):
    """
    Fetch row using primary key -
    will use existing object in session if already present

    :param user_id:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    return db_session.query(cls.model).get(user_id)
Fetch row using primary key - will use existing object in session if already present :param user_id: :param db_session: :return:
entailment
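Same identity-map behavior as the resource-side get() shown earlier, only keyed by user id; UserService is an assumed class name for illustration:

# Hypothetical usage with assumed names.
user = UserService.get(user_id=1, db_session=session)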