Search is not available for this dataset
text
stringlengths
75
104k
def arrange_all(self): """ Arrange the components of the node using Graphviz. """ # FIXME: Circular reference avoidance. import godot.dot_data_parser import godot.graph graph = godot.graph.Graph( ID="g", directed=True ) self.conn = "->" graph.edges.append( self ) xdot_data = graph.create( format="xdot" ) # print "XDOT DATA:", xdot_data parser = godot.dot_data_parser.GodotDataParser() ndata = xdot_data.replace('\\\n','') tokens = parser.dotparser.parseString(ndata)[0] for element in tokens[3]: cmd = element[0] if cmd == "add_edge": cmd, src, dest, opts = element self.set( **opts )
def _parse_xdot_directive(self, name, new):
    """ Handles parsing Xdot drawing directives.

    :param name: which directive is being parsed: "_draw_" (the edge
        spline) or "_hdraw_" (the arrowhead)
    :param new: the raw xdot drawing-directive string
    """
    parser = XdotAttrParser()
    components = parser.parse_xdot_data(new)

    # The absolute coordinate of the drawing container wrt graph origin.
    x1 = min( [c.x for c in components] )
    y1 = min( [c.y for c in components] )

    # Components are positioned relative to their container. This loop
    # positions the bottom-left corner of the components at their origin
    # rather than relative to the graph.
    for c in components:
        if isinstance(c, Ellipse):
            # BUG FIX: the original referenced the undefined name
            # 'component' here; the loop variable is 'c'.
            c.x_origin -= x1
            c.y_origin -= y1
        elif isinstance(c, (Polygon, BSpline)):
            c.points = [ (t[0] - x1, t[1] - y1) for t in c.points ]
        elif isinstance(c, Text):
            c.text_x, c.text_y = c.x - x1, c.y - y1

    container = Container(auto_size=True, position=[ x1, y1 ],
        bgcolor="yellow")
    container.add( *components )

    if name == "_draw_":
        self.drawing = container
    elif name == "_hdraw_":
        self.arrowhead_drawing = container
    else:
        # BUG FIX: a bare 'raise' with no active exception is a
        # RuntimeError; raise something meaningful instead.
        raise ValueError("unexpected xdot directive: %r" % name)
def _on_drawing(self, object, name, old, new):
    """ Handles the containers of drawing components being set.

    Repositions the shared component so that it encloses both the edge
    drawing and the arrowhead drawing, keeping the new child positioned
    relative to the container's (new) origin.
    """
    attrs = [ "drawing", "arrowhead_drawing" ]
    # The other drawing container(s) already assigned, if any.
    others = [getattr(self, a) for a in attrs \
        if (a != name) and (getattr(self, a) is not None)]

    x, y = self.component.position
    print "POS:", x, y, self.component.position

    # Existing drawings' positions translated to graph coordinates.
    abs_x = [d.x + x for d in others]
    abs_y = [d.y + y for d in others]
    print "ABS:", abs_x, abs_y

    # Assume that the new drawing is positioned relative to graph origin.
    # (x1, y1) is the bottom-left corner of the union of all drawings.
    x1 = min( abs_x + [new.x] )
    y1 = min( abs_y + [new.y] )

    print "DRAW:", new.position
    # Re-base the new drawing relative to the container origin.
    new.position = [ new.x - x1, new.y - y1 ]
    print "DRAW:", new.position

#        for i, b in enumerate( others ):
#            self.drawing.position = [100, 100]
#            self.drawing.request_redraw()
#            print "OTHER:", b.position, abs_x[i] - x1
#            b.position = [ abs_x[i] - x1, abs_y[i] - y1 ]
#            b.x = 50
#            b.y = 50
#            print "OTHER:", b.position, abs_x[i], x1

#        for attr in attrs:
#            if attr != name:
#                if getattr(self, attr) is not None:
#                    drawing = getattr(self, attr)
#                    drawing.position = [50, 50]

    if old is not None:
        self.component.remove( old )
    if new is not None:
        self.component.add( new )

    print "POS NEW:", self.component.position
    # Move the container itself to the union's bottom-left corner.
    self.component.position = [ x1, y1 ]
    print "POS NEW:", self.component.position
    self.component.request_redraw()
    print "POS NEW:", self.component.position
def node_factory(**row_factory_kw):
    """ Give new nodes a unique ID.

    When called from a table editor, the ID is made unique against the
    IDs already present in the edited graph; otherwise a short random
    hex identifier is used.
    """
    table_editor = row_factory_kw.pop("__table_editor__", None)
    if table_editor is None:
        return godot.node.Node(uuid.uuid4().hex[:6])

    graph = table_editor.object
    new_id = make_unique_name("n", [node.ID for node in graph.nodes])
    return godot.node.Node(new_id)
def edge_factory(**row_factory_kw):
    """ Give new edges a unique ID.

    When invoked from a table editor, connects the new edge between the
    graph's first two nodes, creating placeholder nodes as necessary.

    :return: a new Edge, or None outside of a table-editor context
    """
    if "__table_editor__" not in row_factory_kw:
        return None

    graph = row_factory_kw["__table_editor__"].object
    # NOTE: an unused edge ID was computed here in the original; removed.
    n_nodes = len(graph.nodes)
    IDs = [v.ID for v in graph.nodes]

    if n_nodes == 0:
        # No nodes yet: fabricate both endpoints.
        # NOTE(review): both placeholders draw from the same IDs list,
        # so they may receive identical IDs — confirm make_unique_name
        # guards against this.
        tail_node = godot.Node(ID=make_unique_name("n", IDs))
        head_node = godot.Node(ID=make_unique_name("n", IDs))
    elif n_nodes == 1:
        tail_node = graph.nodes[0]
        head_node = godot.Node(ID=make_unique_name("n", IDs))
    else:
        tail_node = graph.nodes[0]
        head_node = graph.nodes[1]

    return godot.edge.Edge(tail_node, head_node, _nodes=graph.nodes)
def start(self, context):
    """Initialize the database connection."""
    self.config['alias'] = self.alias

    # Copy the config and strip the host so credentials embedded in the
    # connection URI are never written to the log.
    safe_config = dict(self.config)
    del safe_config['host']

    log.info("Connecting MongoEngine database layer.", extra=dict(
            uri = redact_uri(self.config['host']),
            # BUG FIX: log the redacted copy; the original logged
            # self.config, defeating the safe_config redaction above.
            config = safe_config,
        ))

    self.connection = connect(**self.config)
def prepare(self, context):
    """Attach this connection's default database to the context using our alias."""
    # Expose the connection on the request context keyed by alias,
    # wrapped in a proxy object.
    context.db[self.alias] = MongoEngineProxy(self.connection)
def _component_default(self):
    """ Trait initialiser. """
    # Auto-sizing container drawn with a green background;
    # fit_window=False keeps it from stretching to the enclosing window.
    component = Container(fit_window=False, auto_size=True,
        bgcolor="green")#, position=list(self.pos) )
    # Allow the component to be dragged around the canvas.
    component.tools.append( MoveTool(component) )
#        component.tools.append( TraitsTool(component) )
    return component
def _vp_default(self):
    """ Trait initialiser. """
    # Viewport onto the diagram with zooming enabled and click-drag
    # panning provided by ViewportPanTool.
    vp = Viewport(component=self.component)
    vp.enable_zoom=True
#        vp.view_position = [-10, -10]
    vp.tools.append(ViewportPanTool(vp))
    return vp
def arrange_all(self): """ Arrange the components of the node using Graphviz. """ # FIXME: Circular reference avoidance. import godot.dot_data_parser import godot.graph graph = godot.graph.Graph(ID="g") graph.add_node(self) print "GRAPH DOT:\n", str(graph) xdot_data = graph.create( format = "xdot" ) print "XDOT DATA:\n", xdot_data parser = godot.dot_data_parser.GodotDataParser() # parser.parse_dot_data(xdot_data) flat_data = xdot_data.replace('\\\n','') tokens = parser.dotparser.parseString(flat_data)[0] for element in tokens[3]: print "TOK:", element cmd = element[0] if cmd == 'add_node': cmd, nodename, opts = element assert nodename == self.ID print "OPTIONS:", opts self.set( **opts )
def parse_xdot_drawing_directive(self, new):
    """ Parses the drawing directive, updating the node components.

    :param new: the raw xdot drawing-directive string
    """
    components = XdotAttrParser().parse_xdot_data(new)

    # NOTE: the original also computed max bounds here, but the values
    # were never used (their only consumer was commented out); removed.

    # Bottom-left corner of the drawing in graph coordinates.
    pos_x = min( [c.x for c in components] )
    pos_y = min( [c.y for c in components] )

    # Shift the components so they are positioned relative to their
    # own container rather than the graph origin.
    move_to_origin(components)

    # Position the container relative to the node's Graphviz position.
    container = Container(auto_size=True,
        position=[pos_x-self.pos[0], pos_y-self.pos[1]],
        bgcolor="blue")
    container.add( *components )

    self.drawing = container
def parse_xdot_label_directive(self, new):
    """ Parses the label drawing directive, updating the label
    components.
    """
    label_parts = XdotAttrParser().parse_xdot_data(new)

    # Bottom-left corner of the label drawing in graph coordinates.
    origin_x = min([part.x for part in label_parts])
    origin_y = min([part.y for part in label_parts])

    # Re-base the parts so they are relative to their own container.
    move_to_origin(label_parts)

    # The container itself is positioned relative to the node position.
    offset = [origin_x - self.pos[0], origin_y - self.pos[1]]
    container = Container(auto_size=True, position=offset, bgcolor="red")
    container.add(*label_parts)

    self.label_drawing = container
def _drawing_changed(self, old, new):
    """ Handles the container of drawing components changing. """
    if old is not None:
        self.component.remove( old )
    if new is not None:
#            new.bgcolor="pink"
        self.component.add( new )
        # Centre the component on the Graphviz position: 'pos' is the
        # node centre, component.position its lower-left corner.
        w, h = self.component.bounds
        self.component.position = [ self.pos[0] - (w/2),
                                    self.pos[1] - (h/2) ]
#            self.component.position = [ self.pos[0], self.pos[1] ]
        self.component.request_redraw()
def _on_position_change(self, new): """ Handles the poition of the component changing. """ w, h = self.component.bounds self.pos = tuple([ new[0] + (w/2), new[1] + (h/2) ])
def _pos_changed(self, new): """ Handles the Graphviz position attribute changing. """ w, h = self.component.bounds self.component.position = [ new[0] - (w/2), new[1] - (h/2) ] # self.component.position = list( new ) self.component.request_redraw()
def normal_right_down(self, event):
    """ Handles the right mouse button being clicked when the tool is in
    the 'normal' state.

    If the event occurred on this tool's component (or any contained
    component of that component), the method opens a context menu with
    menu items from any tool of the parent component that implements
    MenuItemTool interface i.e. has a get_item() method.
    """
    x = event.x
    y = event.y

    # First determine what component or components we are going to hittest
    # on. If our component is a container, then we add its non-container
    # components to the list of candidates.
#        candidates = []
    component = self.component
#        if isinstance(component, Container):
#            candidates = get_nested_components(self.component)
#        else:
#            # We don't support clicking on unrecognized components
#            return
#
#        # Hittest against all the candidate and take the first one
#        item = None
#        for candidate, offset in candidates:
#            if candidate.is_in(x-offset[0], y-offset[1]):
#                item = candidate
#                break

    # NOTE(review): the loop variable 'tool' is unused and the same
    # assignment runs once per registered tool — this looks like
    # unfinished menu handling; confirm intent before relying on it.
    for tool in component.tools:
        component.active_tool = self

    # Do it
    event.handled = True
    component.active_tool = None
    component.request_redraw()
    return
def highlight_info(ctx, style):
    """Outputs the CSS which can be customized for highlighted code"""
    available = list(pygments.styles.get_all_styles())
    click.secho("The following styles are available to choose from:",
                fg="green")
    click.echo(available)
    click.echo()
    click.secho(
        f'The following CSS for the "{style}" style can be customized:',
        fg="green",
    )
    formatter = pygments.formatters.HtmlFormatter(style=style)
    click.echo(formatter.get_style_defs())
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
    """ Draws a closed polygon """
    gc.save_state()
    try:
#            self._draw_bounds(gc)
        # A polygon needs at least two points to be drawable.
        if len(self.points) >= 2:
            # Set the drawing parameters.
            gc.set_fill_color(self.pen.fill_color_)
            gc.set_stroke_color(self.pen.color_)
            gc.set_line_width(self.pen.line_width)

            # Draw the path.
            gc.begin_path()
#                x0 = self.points[0][0] - self.x
#                y0 = self.points[0][1] + self.y
#                gc.move_to(x0, y0)
#                offset_points = [(x-self.x, y+self.y) for x, y in self.points]
            gc.lines(self.points)

            gc.close_path()
            if self.filled:
                # NOTE(review): draw_path normally takes a draw-mode
                # constant; passing self.inside_rule_ assumes the mapped
                # inside-rule value doubles as one — confirm.
                gc.draw_path(self.inside_rule_)
            else:
                gc.stroke_path()
    finally:
        # Always restore graphics state, even if drawing raised.
        gc.restore_state()
def is_in(self, point_x, point_y):
    """ Test if a point is within this polygonal region.

    Uses either the even-odd or winding fill rule, depending on the
    component's inside_rule setting.
    """
    candidate = array([(point_x, point_y)])
    polygon = array(self.points)
    use_winding = (self.inside_rule == "winding")
    hits = points_in_polygon(candidate, polygon, use_winding)
    return hits[0]
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
    """ Draws the Bezier component """
    # Nothing to draw without control points.
    if not self.points:
        return

    gc.save_state()
    try:
        gc.set_fill_color(self.pen.fill_color_)
        gc.set_line_width(self.pen.line_width)
        gc.set_stroke_color(self.pen.color_)

        gc.begin_path()
        start_x, start_y = self.points[0]
        gc.move_to(start_x, start_y)
        # After the start point, control points arrive in groups of
        # three: two control handles plus the segment end point.
        for triple in nsplit(self.points[1:], 3):
            x1, y1 = triple[0]
            x2, y2 = triple[1]
            end_x, end_y = triple[2]
            gc.curve_to(x1, y1, x2, y2, end_x, end_y)
            # One point overlap
            gc.move_to(end_x, end_y)
        gc.stroke_path()
    finally:
        gc.restore_state()
def _connect(self, context):
    """Initialize the database connection."""
    if __debug__:
        # Log the engine name (the scheme before the ':') with
        # credentials redacted from the URI.
        log.info("Connecting " + self.engine.partition(':')[0] + " database layer.", extra=dict(
                uri = redact_uri(self.uri, self.protect),
                config = self.config,
                alias = self.alias,
            ))

    # Cache the live connection and expose it on the context by alias.
    self.connection = context.db[self.alias] = self._connector(self.uri, **self.config)
def _handle_event(self, event, *args, **kw): """Broadcast an event to the database connections registered.""" for engine in self.engines.values(): if hasattr(engine, event): getattr(engine, event)(*args, **kw)
def run(self):
    """ Method that gets run when the Worker thread is started.

    Repeatedly pulls an item from in_queue, applies func to it, and
    pushes the result onto out_queue, until the stopper event is set.
    """
    while True:
        if self.stopper.is_set():
            return
        # Use a timeout so the stop flag is rechecked periodically even
        # when the input queue stays empty.
        try:
            work_item = self.in_queue.get(timeout=5)
        except queue.Empty:
            continue
        # Items that func cannot handle are silently dropped.
        try:
            outcome = self.func(work_item)
        except TypeError:
            continue
        self.out_queue.put(outcome)
def get_full_page_url(self, page_number, scheme=None):
    """Get the full, external URL for this page, optionally with the
    passed in URL scheme.
    """
    url_kwargs = dict(request.view_args)
    url_kwargs['_external'] = True
    if scheme is not None:
        url_kwargs['_scheme'] = scheme
    # Page 1 is the canonical, parameter-less form of the URL.
    if page_number != 1:
        url_kwargs['page'] = page_number
    return url_for(request.endpoint, **url_kwargs)
def render_prev_next_links(self, scheme=None):
    """Render the rel=prev and rel=next links to a Markup object for
    injection into a template.
    """
    links = []
    if self.has_prev:
        links.append('<link rel="prev" href="{}" />\n'.format(
            self.get_full_page_url(self.prev, scheme=scheme)))
    if self.has_next:
        links.append('<link rel="next" href="{}" />\n'.format(
            self.get_full_page_url(self.next, scheme=scheme)))
    return Markup(''.join(links))
def render_seo_links(self, scheme=None):
    """Render the rel=canonical, rel=prev and rel=next links to a Markup
    object for injection into a template.
    """
    markup = self.render_prev_next_links(scheme=scheme)
    # The canonical link is only emitted for single-page result sets.
    if self.total_pages == 1:
        markup += self.render_canonical_link(scheme=scheme)
    return markup
def last_item_number(self):
    """
    :return: The last "item number", used when displaying messages to the
        user like "Displaying items 1 to 10 of 123" - in this example 10
        would be returned
    """
    # The final page may be only partially filled, so cap at the total.
    candidate = self.first_item_number + self.page_size - 1
    return min(candidate, self.total_items)
def _content_type_matches(candidate, pattern): """Is ``candidate`` an exact match or sub-type of ``pattern``?""" def _wildcard_compare(type_spec, type_pattern): return type_pattern == '*' or type_spec == type_pattern return ( _wildcard_compare(candidate.content_type, pattern.content_type) and _wildcard_compare(candidate.content_subtype, pattern.content_subtype) )
def select_content_type(requested, available):
    """Selects the best content type.

    :param requested: a sequence of :class:`.ContentType` instances
    :param available: a sequence of :class:`.ContentType` instances
        that the server is capable of producing

    :returns: the selected content type (from ``available``) and the
        pattern that it matched (from ``requested``)
    :rtype: :class:`tuple` of :class:`.ContentType` instances
    :raises: :class:`.NoMatch` when a suitable match was not found

    This function implements the *Proactive Content Negotiation*
    algorithm as described in sections 3.4.1 and 5.3 of :rfc:`7231`.
    The input is the `Accept`_ header as parsed by
    :func:`.parse_http_accept_header` and a list of parsed
    :class:`.ContentType` instances.  The ``available`` sequence should
    be a sequence of content types that the server is capable of
    producing.  The selected value should ultimately be used as the
    `Content-Type`_ header in the generated response.

    .. _Accept: http://tools.ietf.org/html/rfc7231#section-5.3.2
    .. _Content-Type: http://tools.ietf.org/html/rfc7231#section-3.1.1.5

    """

    class Match(object):
        """Sorting assistant.

        Sorting matches is a tricky business.  We need a way to
        prefer content types by *specificity*.  The definition of
        *more specific* is a little less than clear.  This class
        treats the strength of a match as the most important thing.
        Wild cards are less specific in all cases.  This is tracked
        by the ``match_type`` attribute.

        If the candidate and pattern differ only by parameters, then
        the strength is based on the number of pattern parameters
        that match parameters from the candidate.  The easiest way
        to track this is to count the number of candidate parameters
        that are matched by the pattern.  This is what
        ``parameter_distance`` tracks.

        The final key to the solution is to order the result set
        such that the most specific matches are first in the list.
        This is done by carefully choosing values for ``match_type``
        such that full matches bubble up to the front.  We also need
        a scheme of counting matching parameters that pushes stronger
        matches to the front of the list.  The ``parameter_distance``
        attribute starts at the number of candidate parameters and
        decreases for each matching parameter - the lesser the value,
        the stronger the match.

        """
        # Lower values sort first, so full matches beat partial
        # wildcards, which beat the */* wildcard.
        WILDCARD, PARTIAL, FULL_TYPE, = 2, 1, 0

        def __init__(self, candidate, pattern):
            self.candidate = candidate
            self.pattern = pattern
            if pattern.content_type == pattern.content_subtype == '*':
                self.match_type = self.WILDCARD
            elif pattern.content_subtype == '*':
                self.match_type = self.PARTIAL
            else:
                self.match_type = self.FULL_TYPE
            # Start at "no parameters matched" and decrease for every
            # agreeing parameter; a disagreeing value pushes the match
            # further away.
            self.parameter_distance = len(self.candidate.parameters)
            for key, value in candidate.parameters.items():
                if key in pattern.parameters:
                    if pattern.parameters[key] == value:
                        self.parameter_distance -= 1
                    else:
                        self.parameter_distance += 1

    def extract_quality(obj):
        # Absent quality is treated as the maximum (1.0) per RFC 7231.
        return getattr(obj, 'quality', 1.0)

    matches = []
    # Consider highest-quality patterns first.
    for pattern in sorted(requested, key=extract_quality, reverse=True):
        for candidate in sorted(available):
            if _content_type_matches(candidate, pattern):
                if candidate == pattern:  # exact match!!!
                    if extract_quality(pattern) == 0.0:
                        raise errors.NoMatch  # quality of 0 means NO
                    return candidate, pattern
                matches.append(Match(candidate, pattern))

    if not matches:
        raise errors.NoMatch

    # Most specific (lowest match_type, then lowest parameter_distance)
    # match wins.
    matches = sorted(matches,
                     key=attrgetter('match_type', 'parameter_distance'))
    return matches[0].candidate, matches[0].pattern
def rewrite_url(input_url, **kwargs):
    """
    Create a new URL from `input_url` with modifications applied.

    :param str input_url: the URL to modify

    :keyword str fragment: if specified, this keyword sets the
        fragment portion of the URL.  A value of :data:`None` will
        remove the fragment portion of the URL.
    :keyword str host: if specified, this keyword sets the host
        portion of the network location.  A value of :data:`None` will
        remove the network location portion of the URL.
    :keyword str password: if specified, this keyword sets the
        password portion of the URL.  A value of :data:`None` will
        remove the password from the URL.
    :keyword str path: if specified, this keyword sets the path
        portion of the URL.  A value of :data:`None` will remove the
        path from the URL.
    :keyword int port: if specified, this keyword sets the port
        portion of the network location.  A value of :data:`None` will
        remove the port from the URL.
    :keyword query: if specified, this keyword sets the query portion
        of the URL.  See the comments for a description of this
        parameter.
    :keyword str scheme: if specified, this keyword sets the scheme
        portion of the URL.  A value of :data:`None` will remove the
        scheme.  Note that this will make the URL relative and may
        have unintended consequences.
    :keyword str user: if specified, this keyword sets the user
        portion of the URL.  A value of :data:`None` will remove the
        user and password portions.
    :keyword bool enable_long_host: if this keyword is specified and
        it is :data:`True`, then the host name length restriction from
        :rfc:`3986#section-3.2.2` is relaxed.
    :keyword bool encode_with_idna: if this keyword is specified and
        it is :data:`True`, then the ``host`` parameter will be encoded
        using IDN.  If this value is provided as :data:`False`, then
        the percent-encoding scheme is used instead.  If this parameter
        is omitted or included with a different value, then the
        ``host`` parameter is processed using :data:`IDNA_SCHEMES`.

    :return: the modified URL
    :raises ValueError: when a keyword parameter is given an invalid
        value

    If the `host` parameter is specified and not :data:`None`, then
    it will be processed as an Internationalized Domain Name (IDN)
    if the scheme appears in :data:`IDNA_SCHEMES`.  Otherwise, it
    will be encoded as UTF-8 and percent encoded.

    The handling of the `query` parameter requires some additional
    explanation.  You can specify a query value in three different
    ways - as a *mapping*, as a *sequence* of pairs, or as a *string*.
    This flexibility makes it possible to meet the wide range of
    finicky use cases.

    *If the query parameter is a mapping*, then the key + value pairs
    are *sorted by the key* before they are encoded.  Use this method
    whenever possible.

    *If the query parameter is a sequence of pairs*, then each pair
    is encoded *in the given order*.  Use this method if you require
    that parameter order is controlled.

    *If the query parameter is a string*, then it is *used as-is*.
    This form SHOULD BE AVOIDED since it can easily result in broken
    URLs since *no URL escaping is performed*.  This is the obvious
    pass through case that is almost always present.

    """
    scheme, netloc, path, query, fragment = parse.urlsplit(input_url)
    if 'scheme' in kwargs:
        scheme = kwargs['scheme']

    # NOTE(review): parse.splituser/splitpasswd/splitnport are
    # deprecated internal urllib helpers — confirm the supported
    # Python versions still provide them.
    ident, host_n_port = parse.splituser(netloc)
    user, password = parse.splitpasswd(ident) if ident else (None, None)
    if 'user' in kwargs:
        user = kwargs['user']
    elif user is not None:
        # Decode the existing percent-encoded user before re-encoding.
        user = parse.unquote_to_bytes(user).decode('utf-8')
    if 'password' in kwargs:
        password = kwargs['password']
    elif password is not None:
        password = parse.unquote_to_bytes(password).decode('utf-8')
    ident = _create_url_identifier(user, password)

    host, port = parse.splitnport(host_n_port, defport=None)
    if 'host' in kwargs:
        host = kwargs['host']
    if host is not None:
        host = _normalize_host(
            host,
            enable_long_host=kwargs.get('enable_long_host', False),
            encode_with_idna=kwargs.get('encode_with_idna', None),
            scheme=scheme,
        )
    if 'port' in kwargs:
        port = kwargs['port']
        if port is not None:
            port = int(kwargs['port'])
            if port < 0:
                raise ValueError('port is required to be non-negative')

    # Reassemble host:port, dropping the network location entirely when
    # there is no host.
    if host is None or host == '':
        host_n_port = None
    elif port is None:
        host_n_port = host
    else:
        host_n_port = '{0}:{1}'.format(host, port)

    if 'path' in kwargs:
        path = kwargs['path']
        if path is None:
            path = '/'
        else:
            path = parse.quote(path.encode('utf-8'), safe=PATH_SAFE_CHARS)

    netloc = '{0}@{1}'.format(ident, host_n_port) if ident else host_n_port

    if 'query' in kwargs:
        new_query = kwargs['query']
        if new_query is None:
            query = None
        else:
            params = []
            # Mapping case: encode key/value pairs sorted by key.
            try:
                for param in sorted(new_query.keys()):
                    params.append((param, new_query[param]))
            except AttributeError:  # arg is None or not a dict
                pass
            if not params:  # maybe a sequence of tuples?
                try:
                    params = [(param, value) for param, value in new_query]
                except ValueError:  # guess not...
                    pass
            if params:
                query = parse.urlencode(params)
            else:
                # String case: used as-is, no escaping performed.
                query = new_query

    if 'fragment' in kwargs:
        fragment = kwargs['fragment']
        if fragment is not None:
            fragment = parse.quote(fragment.encode('utf-8'),
                                   safe=FRAGMENT_SAFE_CHARS)

    # The following is necessary to get around some interesting special
    # case code in urllib.parse._coerce_args in Python 3.4.  Setting
    # scheme to None causes urlunsplit to assume that all non-``None``
    # parameters with be byte strings....
    if scheme is None:
        scheme = ''

    return parse.urlunsplit((scheme, netloc, path, query, fragment))
def remove_url_auth(url):
    """
    Removes the user & password and returns them along with a new url.

    :param str url: the URL to sanitize
    :return: a :class:`tuple` containing the authorization portion and
        the sanitized URL.  The authorization is a simple user &
        password :class:`tuple`.

    >>> auth, sanitized = remove_url_auth('http://foo:bar@example.com')
    >>> auth
    ('foo', 'bar')
    >>> sanitized
    'http://example.com'

    The return value from this function is a simple named tuple with the
    following fields:

    - *auth* the username and password as a tuple
    - *username* the username portion of the URL or :data:`None`
    - *password* the password portion of the URL or :data:`None`
    - *url* the sanitized URL

    >>> result = remove_url_auth('http://me:secret@example.com')
    >>> result.username
    'me'
    >>> result.password
    'secret'
    >>> result.url
    'http://example.com'

    """
    parts = parse.urlsplit(url)
    auth = (parts.username or None, parts.password)
    sanitized = rewrite_url(url, user=None, password=None)
    return RemoveUrlAuthResult(auth=auth, url=sanitized)
def _create_url_identifier(user, password):
    """
    Generate the user+password portion of a URL.

    :param str user: the user name or :data:`None`
    :param str password: the password or :data:`None`

    Returns ``None`` when no user is given; the password is included
    only when it is non-empty.
    """
    if user is None:
        return None
    quoted_user = parse.quote(user.encode('utf-8'),
                              safe=USERINFO_SAFE_CHARS)
    if not password:
        return quoted_user
    quoted_password = parse.quote(password.encode('utf-8'),
                                  safe=USERINFO_SAFE_CHARS)
    return '{0}:{1}'.format(quoted_user, quoted_password)
def _normalize_host(host, enable_long_host=False, encode_with_idna=None,
                    scheme=None):
    """
    Normalize a host for a URL.

    :param str host: the host name to normalize
    :keyword bool enable_long_host: if this keyword is specified and it
        is :data:`True`, then the host name length restriction from
        :rfc:`3986#section-3.2.2` is relaxed.
    :keyword bool encode_with_idna: if this keyword is specified and it
        is :data:`True`, then the ``host`` parameter will be encoded
        using IDN.  If this value is provided as :data:`False`, then the
        percent-encoding scheme is used instead.  If this parameter is
        omitted or included with a different value, then the ``host``
        parameter is processed using :data:`IDNA_SCHEMES`.
    :keyword str scheme: if this keyword is specified, then it is used
        to determine whether to apply IDN rules or not.  This parameter
        is ignored if `encode_with_idna` is not :data:`None`.

    :return: the normalized and encoded string ready for inclusion into
        a URL
    """
    # Explicit request wins; otherwise IDN applies only for schemes
    # registered in IDNA_SCHEMES.
    if encode_with_idna is not None:
        use_idna = encode_with_idna
    else:
        use_idna = scheme.lower() in IDNA_SCHEMES if scheme else False

    if use_idna:
        try:
            host = '.'.join(label.encode('idna').decode()
                            for label in host.split('.'))
        except UnicodeError as exc:
            raise ValueError('host is invalid - {0}'.format(exc))
    else:
        host = parse.quote(host.encode('utf-8'), safe=HOST_SAFE_CHARS)

    if len(host) > 255 and not enable_long_host:
        raise ValueError('host too long')

    return host
def transform(self, X):
    '''Add random noise to ``X``.

    :X: numpy ndarray
    '''
    # Draw noise with the same shape as the input.
    sampled = self._noise_func(*self._args, size=X.shape)
    noisy = X + sampled
    # Record how large the injected noise was, relative to the data.
    self.relative_noise_size_ = self.relative_noise_size(X, noisy)
    return noisy
def relative_noise_size(self, data, noise):
    '''Mean cosine distance between matching rows of noise and data.

    :data: original data as numpy matrix
    :noise: noise matrix as numpy matrix
    '''
    distances = [
        sci_dist.cosine(noise_row / la.norm(noise_row),
                        data_row / la.norm(data_row))
        for noise_row, data_row in zip(noise, data)
    ]
    return np.mean(distances)
def general(*dargs, **dkwargs):
    """ Wrap the decorated function as a :class:`click.core.Command`.

    This decorator adds no extra behaviour of its own; it simply
    registers the decorated function as a ``mohand`` sub-command.

    It is used as an ordinary decorator (e.g. ``@hand.general``).

    .. note::
        This decorator is registered into :data:`.hands.hand` before
        the plugin system loads external plugins.

    ``general`` supports being called both with and without arguments.

    :param int log_level: log level for this sub-command,
        default: ``logging.INFO``
    :return: the wrapped function
    :rtype: function
    """
    # Used without parentheses iff the sole positional argument is the
    # function being decorated.
    invoked = bool(len(dargs) == 1 and not dkwargs and callable(dargs[0]))
    if invoked:
        func = dargs[0]

    def wrapper(func):
        @hand._click.command(
            name=func.__name__.lower(), help=func.__doc__)
        def _wrapper(*args, **kwargs):
            # NOTE(review): pop mutates the shared decorator kwargs, so
            # after the first call log_level silently falls back to
            # logging.INFO — confirm this is intended.
            log_level = dkwargs.pop('log_level', logging.INFO)
            log.setLevel(log_level)
            log.debug("decrator param: {} {}".format(dargs, dkwargs))
            log.debug("function param: {} {}".format(args, kwargs))
            func(*args, **kwargs)
        return _wrapper

    return wrapper if not invoked else wrapper(func)
def discover_modules(directory):
    """
    Attempts to list all of the modules and submodules found within a
    given directory tree.  This function searches the top-level of the
    directory tree for potential python modules and returns a list of
    candidate names.

    **Note:** This function returns a list of strings representing
    discovered module names, not the actual, loaded modules.

    :param directory: the directory to search for modules.
    """
    if not os.path.isdir(directory):
        return []

    candidates = []
    for name in os.listdir(directory):
        # An entry qualifies as a module only if it has an __init__.py.
        init_path = os.path.join(directory, name, MODULE_INIT_FILE)
        if os.path.isfile(init_path):
            candidates.append(name)
    return candidates
def rdiscover_modules(directory):
    """
    Attempts to list all of the modules and submodules found within a
    given directory tree.  This function recursively searches the
    directory tree for potential python modules and returns a list of
    candidate names.

    **Note:** This function returns a list of strings representing
    discovered module names, not the actual, loaded modules.

    :param directory: the directory to search for modules.
    """
    if not os.path.isdir(directory):
        return []

    discovered = []
    for name in os.listdir(directory):
        subdir = os.path.join(directory, name)
        # Only descend into real packages (those with an __init__.py).
        if os.path.isfile(os.path.join(subdir, MODULE_INIT_FILE)):
            discovered.extend(_search_for_modules(subdir, True, name))
    return discovered
def rlist_modules(mname):
    """
    Attempts to list the submodules under a module recursively.  This
    function works for modules located in the default path as well as
    extended paths via the sys.meta_path hooks.

    This function carries the expectation that the hidden module
    variable '__path__' has been set correctly.

    :param mname: the module name to descend into
    """
    module = import_module(mname)
    if not module:
        raise ImportError('Unable to load module {}'.format(mname))

    if _should_use_module_path(module):
        search_path = module.__path__[0]
    else:
        # Fall back to scanning sys.path for the module's directory.
        search_path = _scan_paths_for(mname, sys.path)

    submodules = []
    if search_path:
        for sub in _search_for_modules(search_path, recursive=True):
            submodules.append(MODULE_PATH_SEP.join((mname, sub)))
    return submodules
def list_classes(mname, cls_filter=None):
    """
    Attempts to list all of the classes within a specified module.  This
    function works for modules located in the default path as well as
    extended paths via the sys.meta_path hooks.

    If a class filter is set, it will be called with each class as its
    parameter.  This filter's return value must be interpretable as a
    boolean.  Results that evaluate as True will include the type in the
    list of returned classes.  Results that evaluate as False will
    exclude the type in the list of returned classes.

    :param mname: of the module to descend into
    :param cls_filter: a function to call to determine what classes
        should be included.
    """
    found = []
    module = import_module(mname)
    if inspect.ismodule(module):
        # Idiom fix: extend the list directly instead of abusing a list
        # comprehension for its append side effects.
        found.extend(_list_classes(module, cls_filter))
    return found
def rlist_classes(module, cls_filter=None):
    """
    Attempts to list all of the classes within a given module namespace.
    This method, unlike list_classes, will recurse into discovered
    submodules.

    If a type filter is set, it will be called with each class as its
    parameter.  This filter's return value must be interpretable as a
    boolean.  Results that evaluate as True will include the type in the
    list of returned classes.  Results that evaluate as False will
    exclude the type in the list of returned classes.

    :param module: name of the module to descend into
    :param cls_filter: a function to call to determine what classes
        should be included.
    """
    found = []
    # Idiom fix: extend the list directly instead of abusing a list
    # comprehension for its append side effects.
    for mname in rlist_modules(module):
        found.extend(list_classes(mname, cls_filter))
    return found
def rgb_to_hsl(r, g, b):
    """
    Converts an RGB color value to HSL.

    :param r: The red color value (0-255)
    :param g: The green color value (0-255)
    :param b: The blue color value (0-255)
    :return: The HSL representation as an (h, s, l) tuple, each value
        rounded to two decimal places.  Hue is in degrees [0, 360);
        saturation and lightness are in [0, 1].
    """
    r = float(r) / 255.0
    g = float(g) / 255.0
    b = float(b) / 255.0

    max_value = max(r, g, b)
    min_value = min(r, g, b)
    l = (max_value + min_value) / 2
    d = max_value - min_value  # chroma

    if d == 0:
        # achromatic
        h = 0
        s = 0
    else:
        s = d / (1 - abs(2 * l - 1))
        # BUG FIX: the red-dominant case previously computed
        # 60 * ((g - b) % 6), omitting the division by the chroma d.
        # The standard formula is 60 * (((g - b) / d) mod 6), which
        # also makes the old "if b > g: h += 360" wrap-around hack
        # unnecessary.  The branches are now mutually exclusive
        # (elif) so a later test cannot clobber an earlier result.
        if max_value == r:
            h = 60 * (((g - b) / d) % 6)
        elif max_value == g:
            h = 60 * ((b - r) / d + 2)
        else:
            h = 60 * ((r - g) / d + 4)

    return round(h, 2), round(s, 2), round(l, 2)
def html_color_to_rgba(html_colour, alpha):
    """
    :param html_colour: Colour string like FF0088
    :param alpha: Alpha value (opacity)
    :return: RGBA semitransparent version of colour for use in css
    """
    hex_digits = html_colour.upper()
    if hex_digits[0] == '#':
        hex_digits = hex_digits[1:]

    # Decode the three two-digit hex channels.
    red, green, blue = (int(hex_digits[i:i + 2], 16) for i in (0, 2, 4))
    return 'rgba(%s, %s, %s, %s)' % (red, green, blue, alpha)
def blend_html_colour_to_white(html_colour, alpha):
    """
    :param html_colour: Colour string like FF552B or #334455
    :param alpha: Alpha value
    :return: Html colour alpha blended onto white
    """
    hex_digits = html_colour.upper()
    has_hash = hex_digits[0] == '#'
    if has_hash:
        hex_digits = hex_digits[1:]

    channels = [int(hex_digits[i:i + 2], 16) for i in (0, 2, 4)]
    # Blend each channel towards white (255) by the given alpha.
    blended = [int(alpha * value + (1 - alpha) * 255) for value in channels]

    out = '{:02X}{:02X}{:02X}'.format(*blended)
    return '#' + out if has_hash else out
def fit(self, X, y):
    '''Fit the transformer.

    :X: list of dict
    :y: labels

    Computes per-feature averages for the reference label and returns
    self for sklearn-style chaining.
    '''
    self._avgs = average_by_label(X, y, self.reference_label)
    return self
def transform(self, X, y=None):
    '''Rename feature keys via self.names, dropping unknown features.

    :X: list of dict
    :y: unused; accepted for sklearn pipeline compatibility
    '''
    # Keys are matched case-insensitively: features whose lowercased name
    # is not present in self.names are filtered out by if_func, the rest
    # are renamed through the self.names lookup.
    return map_dict_list(
        X,
        key_func=lambda k, v: self.names[k.lower()],
        if_func=lambda k, v: k.lower() in self.names)
def format_price_commas(price):
    """
    Format a price in pounds with thousands separators and two decimal
    places (e.g. 1234.5 -> &pound;1,234.50). Negative prices render with
    the minus sign before the pound symbol.

    Note: despite an earlier docstring claim, values are NOT rounded to
    whole pounds -- the format spec is ``{:,.2f}``.

    :param price: numeric price in pounds, or None
    :return: jinja2.Markup string, or None when price is None
    """
    if price is None:
        return None
    if price >= 0:
        return jinja2.Markup('&pound;{:,.2f}'.format(price))
    else:
        return jinja2.Markup('-&pound;{:,.2f}'.format(-price))
def format_multiline_html(text):
    """ Turns a string like 'a\nb\nc' into 'a<br>b<br>c' and marks it as Markup.

    Each line is HTML-escaped before being joined with <br> tags, so the
    result is safe to render directly in a template.

    Note: Will remove all \r characters from output (if present)

    :param text: plain text possibly containing newlines, or None
    :return: flask.Markup, a plain str when there are no newlines, or None
    """
    if text is None:
        return None
    if '\n' not in text:
        return text.replace('\r', '')
    parts = text.replace('\r', '').split('\n')
    out = flask.Markup()
    for part in parts:
        # Only insert <br> between lines, not before the first one.
        if out:
            out += flask.Markup('<br>')
        out += flask.escape(part)
    return out
def ensure_dir(path):
    """Ensure that a needed directory exists, creating it if it doesn't"""
    try:
        log.info('Ensuring directory exists: %s' % path)
        os.makedirs(path)
    except OSError:
        # makedirs raises when the directory already exists (possibly
        # created concurrently) -- that is fine; only re-raise when the
        # path still isn't a directory, i.e. a genuine failure.
        if not os.path.isdir(path):
            raise
def make_csv_response(csv_data, filename):
    """
    :param csv_data: A string with the contents of a csv file in it
    :param filename: The filename that the file will be saved as when it is
                     downloaded by the user
    :return: The response to return to the web connection
    """
    resp = make_response(csv_data)
    # octet-stream + attachment forces the browser to download rather than
    # display the data inline.
    resp.headers['Content-Type'] = 'application/octet-stream'
    # NOTE(review): filename is interpolated unquoted; if it can ever be
    # user-supplied, it should be sanitised/quoted to avoid header issues.
    resp.headers['Content-Disposition'] = 'attachment; filename=%s' % filename
    return resp
def to_base62(n):
    """
    Converts a number to base 62
    :param n: The number to convert
    :return: Base 62 representation of number (string)
    """
    # Collect digits least-significant first, then reverse. The first digit
    # is always emitted, so n == 0 yields the single zero digit.
    digits = [BASE62_MAP[n % 62]]
    n //= 62
    while n > 0:
        digits.append(BASE62_MAP[n % 62])
        n //= 62
    return ''.join(reversed(digits))
def from_base62(s):
    """
    Convert a base62 String back into a number
    :param s: The base62 encoded String
    :return: The number encoded in the String (integer)
    :raises Exception: if s contains a character outside BASE62_MAP
    """
    result = 0
    for c in s:
        # Bug fix: the error message previously said "base64" even though
        # this function decodes base62.
        if c not in BASE62_MAP:
            raise Exception('Invalid base62 string: %s' % s)
        # NOTE(review): .index() is a linear scan per character; a
        # precomputed char->value dict would be O(1) if this gets hot.
        result = result * 62 + BASE62_MAP.index(c)
    return result
def list_dataset_uris(cls, base_uri, config_path):
    """Return list containing URIs with base URI.

    :param base_uri: URI whose netloc names the Azure storage account
    :param config_path: path to the config used to build the blob service
    :returns: list of dataset URIs, one per container in the account
    """
    storage_account_name = generous_parse_uri(base_uri).netloc
    blobservice = get_blob_service(storage_account_name, config_path)
    # Each container represents one dataset; its metadata carries the
    # dataset's name and uuid used to build the URI.
    containers = blobservice.list_containers(include_metadata=True)
    uri_list = []
    for c in containers:
        admin_metadata = c.metadata
        uri = cls.generate_uri(
            admin_metadata['name'],
            admin_metadata['uuid'],
            base_uri
        )
        uri_list.append(uri)
    return uri_list
def list_overlay_names(self):
    """Return list of overlay names.

    Overlay blobs live under ``overlays_key_prefix`` and are named
    ``<overlay_name>.<ext>``; this strips the prefix path and the final
    extension.
    """
    overlay_names = []
    for blob in self._blobservice.list_blobs(
        self.uuid,
        prefix=self.overlays_key_prefix
    ):
        overlay_file = blob.name.rsplit('/', 1)[-1]
        # Bug fix: two-value unpacking of split('.') raised ValueError for
        # overlay names containing a dot; rsplit('.', 1) strips only the
        # final extension.
        overlay_name = overlay_file.rsplit('.', 1)[0]
        overlay_names.append(overlay_name)
    return overlay_names
def add_item_metadata(self, handle, key, value):
    """Store the given key:value pair for the item associated with handle.

    :param handle: handle for accessing an item before the dataset is
                   frozen
    :param key: metadata key
    :param value: metadata value (must be JSON-serialisable)
    """
    identifier = generate_identifier(handle)
    # Blob naming scheme: <fragments_prefix><identifier>.<key>.json --
    # get_item_metadata later parses the key back out of this name.
    metadata_blob_suffix = "{}.{}.json".format(identifier, key)
    metadata_blob_name = self.fragments_key_prefix + metadata_blob_suffix
    self._blobservice.create_blob_from_text(
        self.uuid,
        metadata_blob_name,
        json.dumps(value)
    )
    # Tag the blob so other listings can distinguish metadata fragments
    # from item content blobs.
    self._blobservice.set_blob_metadata(
        container_name=self.uuid,
        blob_name=metadata_blob_name,
        metadata={
            "type": "item_metadata"
        }
    )
def put_text(self, key, contents):
    """Store the given text contents so that they are later retrievable by
    the given key.

    :param key: blob name within the dataset's container
    :param contents: text to store
    """
    self._blobservice.create_blob_from_text(
        self.uuid,
        key,
        contents
    )
def get_item_abspath(self, identifier):
    """Return absolute path at which item content can be accessed.

    Downloads the blob into a local cache directory on first access;
    subsequent calls return the cached file.

    :param identifier: item identifier
    :returns: absolute path from which the item content can be accessed
    """
    admin_metadata = self.get_admin_metadata()
    uuid = admin_metadata["uuid"]
    # Create directory for the specific dataset.
    dataset_cache_abspath = os.path.join(self._azure_cache_abspath, uuid)
    mkdir_parents(dataset_cache_abspath)
    # The blob's metadata carries the original relpath; keep its extension
    # so downstream tools can sniff the file type.
    metadata = self._blobservice.get_blob_metadata(
        self.uuid,
        identifier
    )
    relpath = metadata['relpath']
    _, ext = os.path.splitext(relpath)
    local_item_abspath = os.path.join(
        dataset_cache_abspath,
        identifier + ext
    )
    if not os.path.isfile(local_item_abspath):
        # Download to a .tmp name first and rename into place so a partial
        # download is never visible at the final path.
        tmp_local_item_abspath = local_item_abspath + ".tmp"
        self._blobservice.get_blob_to_path(
            self.uuid,
            identifier,
            tmp_local_item_abspath
        )
        os.rename(tmp_local_item_abspath, local_item_abspath)
    return local_item_abspath
def iter_item_handles(self):
    """Yield the handle (relpath) of every blob tagged as an item."""
    blobs = self._blobservice.list_blobs(
        self.uuid,
        include='metadata'
    )
    for blob in blobs:
        # Only blobs whose metadata marks them as item content count;
        # metadata fragments and other blobs are skipped.
        if blob.metadata.get('type') == 'item':
            yield blob.metadata['relpath']
def get_item_metadata(self, handle):
    """Return dictionary containing all metadata associated with handle.

    In other words all the metadata added using the ``add_item_metadata``
    method.

    :param handle: handle for accessing an item before the dataset is
                   frozen
    :returns: dictionary containing item metadata
    """
    metadata = {}
    identifier = generate_identifier(handle)
    # Fragment blobs are named "<prefix><identifier>.<key>.json", so
    # listing by this prefix returns only this item's metadata.
    prefix = self.fragments_key_prefix + '{}'.format(identifier)
    blob_generator = self._blobservice.list_blobs(
        self.uuid,
        include='metadata',
        prefix=prefix
    )
    for blob in blob_generator:
        # The metadata key is the second-to-last dot-separated piece of
        # the blob name (the last piece is the "json" extension).
        metadata_key = blob.name.split('.')[-2]
        value_as_string = self.get_text(blob.name)
        value = json.loads(value_as_string)
        metadata[metadata_key] = value
    return metadata
def file_md5sum(filename):
    """
    Compute the MD5 hex digest of a file, read in 4 KiB chunks so large
    files don't need to fit in memory.

    :param filename: The filename of the file to process
    :returns: The MD5 hash of the file (lowercase hex string)
    """
    digest = hashlib.md5()
    with open(filename, 'rb') as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def luhn_check(card_number):
    """Return True when card_number passes the Luhn mod-10 checksum.

    :param card_number: card number as a string of digits
    """
    total = 0
    # Parity of the length decides which positions (counting from the
    # right) get doubled.
    parity = len(card_number) & 1
    for index, char in enumerate(card_number):
        digit = int(char)
        if not ((index & 1) ^ parity):
            digit *= 2
            # Doubling a digit > 4 gives a two-digit number; subtracting 9
            # is equivalent to summing its digits.
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0
def get_git_version():
    """
    Return the current git commit hash as a string, or 'Unknown' when git
    or the repository is unavailable.

    Apparently someone got this from numpy's setup.py. It has since been
    modified a few times.
    """
    def _run_git(cmd):
        # Run git with a minimal, language-neutral environment so the
        # output does not depend on the caller's locale.
        env = {}
        for var in ('SYSTEMROOT', 'PATH'):
            value = os.environ.get(var)
            if value is not None:
                env[var] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        with open(os.devnull, 'w') as err_out:
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=err_out,  # maybe debug later?
                                    env=env)
            return proc.communicate()[0]

    try:
        repo_dir = os.path.dirname(os.path.realpath(__file__))
        raw = _run_git(['git', '-C', repo_dir, 'rev-parse', 'HEAD'])
        return raw.strip().decode('ascii')
    except OSError:
        return 'Unknown'
def load_hands(): """ 加载hand扩展插件 :return: 返回hand注册字典(单例) :rtype: HandDict """ # 优先进行自带 hand 的注册加载 import mohand.decorator # noqa # 注册hand插件 mgr = stevedore.ExtensionManager( namespace=env.plugin_namespace, invoke_on_load=True) def register_hand(ext): _hand = ext.obj.register() if hasattr(hand, _hand.__name__): raise HandDuplicationOfNameError(_hand.__name__) hand[_hand.__name__] = _hand _pkg, _ver = ext.obj.version() env.version[_pkg] = _ver try: mgr.map(register_hand) except stevedore.exception.NoMatches: pass return hand
def partial_fit(self, X, y):
    """
    Incrementally fit on only the samples matching the reference label.

    :X: {array-like, sparse matrix}, shape [n_samples, n_features]
        The data used to compute the mean and standard deviation
        used for later scaling along the features axis.
    :y: Healthy 'h' or 'sick_name'
    :return: self, for sklearn-style chaining
    """
    # Restrict the batch to reference-label samples before delegating to
    # the parent scaler's incremental statistics update.
    X, y = filter_by_label(X, y, self.reference_label)
    super().partial_fit(X, y)
    return self
def load_module(self, module_name):
    """
    Loads a module's code and sets the module's expected hidden variables.
    For more information on these variables and what they are for, please
    see PEP302.

    :param module_name: the full name of the module to load
    :raises LoaderError: if module_name is not the module this loader was
        created for
    """
    if module_name != self.module_name:
        raise LoaderError(
            'Requesting a module that the loader is unaware of.')

    # Per PEP302, return the already-imported module if present.
    if module_name in sys.modules:
        return sys.modules[module_name]

    module = self.load_module_py_path(module_name, self.load_target)

    if self.is_pkg:
        # Packages need __path__ for submodule resolution and are their
        # own __package__.
        module.__path__ = [self.module_path]
        module.__package__ = module_name
    else:
        # Plain modules belong to their parent package.
        module.__package__ = module_name.rpartition('.')[0]

    sys.modules[module_name] = module
    return module
def add_path(self, path):
    """
    Register *path* as an additional module search root. Duplicate
    registrations are ignored, so calling this repeatedly is harmless.

    :param path: the path to add to the list of searchable paths
    """
    if path in self.paths:
        return
    self.paths.append(path)
def find_module(self, module_name, path=None):
    """
    Searches the paths for the required module.

    :param module_name: the full name of the module to find
    :param path: set to None when the module in being searched for is a
        top-level module - otherwise this is set to
        package.__path__ for submodules and subpackages (unused)
    :returns: a ModuleLoader for the first match, or None if not found
    """
    # Translate the dotted module name into a relative filesystem path.
    module_path = os.path.join(*module_name.split(MODULE_PATH_SEP))
    for search_root in self.paths:
        target_path = os.path.join(search_root, module_path)
        is_pkg = False
        # If the target references a directory, try to load it as
        # a module by referencing the __init__.py file, otherwise
        # append .py and attempt to resolve it.
        if os.path.isdir(target_path):
            target_file = os.path.join(target_path, '__init__.py')
            is_pkg = True
        else:
            target_file = '{}.py'.format(target_path)
        if os.path.exists(target_file):
            return ModuleLoader(
                target_path, module_name, target_file, is_pkg)
    # Not found in any registered search root.
    return None
def tag_to_text(tag):
    """
    Recursively flatten a BeautifulSoup tag into plain text.

    :param tag: Beautiful soup tag
    :return: Flattened text (children joined with single spaces)
    """
    pieces = []
    for child in tag.contents:
        # Children with a name are nested tags; anything else is a
        # string node and is used verbatim.
        if child.name:
            pieces.append(tag_to_text(child))
        else:
            pieces.append(child)
    return ' '.join(pieces)
def split_line(line, min_line_length=30, max_line_length=100):
    """
    Split a long line at a space, preserving leading indentation on the
    wrapped parts.

    This is designed to work with prettified output from Beautiful Soup
    which indents with a single space.

    :param line: The line to split
    :param min_line_length: The minimum desired line length
    :param max_line_length: The maximum desired line length
    :return: A list of lines
    """
    if len(line) <= max_line_length:
        # No need to split!
        return [line]

    # First work out the indentation on the beginning of the line.
    # Bug fix: the bounds check must come before the index, otherwise a
    # whitespace-only line raised IndexError.
    indent = 0
    while indent < len(line) and line[indent] == ' ':
        indent += 1

    if indent == len(line):
        # Whitespace-only line: any split would recurse forever because the
        # continuation line re-gains the full indent.
        return [line]

    # Try to split the line.
    # Start looking for a space at character max_line_length working
    # backwards.
    i = max_line_length
    split_point = None
    while i > min_line_length:
        if line[i] == ' ':
            split_point = i
            break
        i -= 1

    if split_point is None:
        # We didn't find a split point - search beyond the end of the line
        # (an over-long first part beats not splitting at all).
        i = max_line_length + 1
        while i < len(line):
            if line[i] == ' ':
                split_point = i
                break
            i += 1

    if split_point is None:
        # There is nowhere to split the line!
        return [line]

    # Split it! The continuation keeps the original indentation, and we
    # recurse in case the remainder is still too long.
    line1 = line[:split_point]
    line2 = ' ' * indent + line[split_point + 1:]
    return [line1] + split_line(line2, min_line_length, max_line_length)
def pretty_print(html, max_line_length=110, tab_width=4):
    """
    Pretty print HTML, splitting it into lines of a reasonable length (if
    possible). This probably needs a whole lot more testing!

    :param html: The HTML to format
    :param max_line_length: The desired maximum line length. Will not be
        strictly adhered to
    :param tab_width: Essentially, the tabs that indent the code will be
        treated as this many spaces when counting the length of each line
    :return: Beautifully formatted HTML
    """
    if tab_width < 2:
        raise ValueError('tab_width must be at least 2 (or bad things would happen!)')
    # Double curly brackets to avoid problems with .format()
    html = html.replace('{', '{{').replace('}', '}}')
    soup = BeautifulSoup(html, 'lxml')
    # lxml wraps the fragment in html/body tags; unwrap to discard them.
    soup.html.unwrap()
    soup.body.unwrap()
    unformatted_tag_list = []
    # Here we are taking the tags out of the content and replacing them
    # with placeholders, and adding the tags to a list. I didn't come up
    # with this...
    for i, tag in enumerate(soup.find_all(INLINE_TAGS)):
        unformatted_tag_list.append(str(tag))
        tag.replace_with('{' + 'unformatted_tag_list[{0}]'.format(i) + '}')
    # If we prettify this, there will still be some weird indentation going
    # on, based on the original markup, so we need to convert it into a
    # string again, and then parse it again
    processed_html = str(soup)
    soup2 = BeautifulSoup(processed_html, 'lxml')
    soup2.html.unwrap()
    soup2.body.unwrap()
    # Prettify it, substitute in the unformatted tags
    pretty_markup = soup2.prettify().format(unformatted_tag_list=unformatted_tag_list)
    # Convert indentations to a tab width of tab_width (prettify indents
    # with a single space per level; multiply the captured indent).
    pretty_markup = re.sub(r'^(\s+)', r'\1' * tab_width, pretty_markup, flags=re.MULTILINE)
    # Final step - pass over the formatted html, convert the indentations
    # into tabs and cut the lines to length
    lines = pretty_markup.splitlines()
    out = ''
    for line in lines:
        for line_part in split_line(line, max_line_length=max_line_length):
            out += line_part
            out += '\n'
    # Final final step! Convert space indentations into tabs
    return out.replace(' ' * tab_width, '\t')
def transform(self, X, y=None):
    '''Collapse grouped features into Fisher's-method p-value features.

    :X: list of dict
    :y: unused; accepted for sklearn pipeline compatibility
    :return: list of dict, one combined p-value per feature group; groups
        with no features present in a sample are omitted for that sample
    '''
    return [{
        new_feature: self._fisher_pval(x, old_features)
        for new_feature, old_features in self.feature_groups.items()
        if len(set(x.keys()) & set(old_features))
    } for x in X]
def _filtered_values(self, x: dict, feature_set: list=None):
    '''
    :x: dict which contains feature names and values
    :return: two-element list [misses, hits]: counts of the features for
        which filter_func evaluates false / true
    '''
    # Fall back to all of x's features when no explicit set is given (an
    # empty feature_set also falls through, matching `or` semantics).
    names = feature_set or x
    # Features absent from x are skipped when counting hits but still
    # contribute to the total used for the miss count.
    hits = sum(self.filter_func(x[name]) for name in names if name in x)
    return [len(names) - hits, hits]
def print_location(**kwargs):
    """
    Print the caller's file, line and function for debugging.

    :param kwargs: Pass in the arguments to the function and they will be
        printed too!
    """
    # stack()[1] is the immediate caller; indices 1-3 are its filename,
    # line number and function name.
    stack = inspect.stack()[1]
    debug_print('{}:{} {}()'.format(stack[1], stack[2], stack[3]))
    for k, v in kwargs.items():
        lesser_debug_print('{} = {}'.format(k, v))
def remove_namespaces(root):
    """Call this on an lxml.etree document to remove all namespaces.

    Element tags like ``{http://ns}tag`` are rewritten in place to plain
    ``tag``, and objectify's type annotations/namespace declarations are
    cleaned up afterwards.
    """
    # iter() replaces getiterator(), which is deprecated and removed in
    # recent lxml / ElementTree releases; behaviour is the same
    # document-order traversal.
    for elem in root.iter():
        # Comments and processing instructions have a callable tag with no
        # find(); skip them.
        if not hasattr(elem.tag, 'find'):
            continue
        i = elem.tag.find('}')
        if i >= 0:
            # Strip the '{namespace}' prefix from the qualified tag name.
            elem.tag = elem.tag[i + 1:]
    objectify.deannotate(root, cleanup_namespaces=True)
def consistency(self, desired_version=None, include_package=False,
                strictness=None):
    """Checks that the versions are consistent

    Parameters
    ----------
    desired_version: str
        optional; the version that all of these should match
    include_package: bool
        whether to check the special 'package' version for consistency
        (default False)
    strictness: str
        comparison strictness; defaults to self.strictness

    Returns
    -------
    str
        empty string when consistent, otherwise one "Error: ..." line per
        mismatching source
    """
    keys_to_check = list(self.versions.keys())
    if not include_package and 'package' in keys_to_check:
        keys_to_check.remove('package')
    if desired_version is None:
        # if we have to guess, we trust setup.py; failing that, the first
        # remaining source wins.
        try:
            desired_version = self.versions['setup.py']
        except KeyError:
            desired_version = self.versions[keys_to_check[0]]
    if strictness is None:
        strictness = self.strictness
    desired = self._version(desired_version, strictness)
    error_keys = []
    for key in keys_to_check:
        test = self._version(self.versions[key], strictness)
        if test != desired:
            error_keys += [key]
    # make the error message
    msg = ""
    for key in error_keys:
        msg += "Error: desired {d} != {v} ({k})\n".format(
            d=str(desired), v=str(self.versions[key]), k=str(key)
        )
    return msg
def setup_is_release(setup, expected=True):
    """
    Check whether the setup module's IS_RELEASE flag matches expectation.

    Returns
    -------
    bool or None : None if we can't tell; empty string when the flag
    matches *expected*; otherwise an error message string.
    """
    try:
        flag = setup.IS_RELEASE
    except AttributeError:
        # No IS_RELEASE attribute at all -- we can't tell.
        return None
    # Empty string means "matches expectation"; anything else is an error
    # message for the caller to report.
    if bool(flag) == bool(expected):
        return ""
    return ("Unexpected value of setup.py IS_RELEASE. Found "
            + str(flag) + ".\n")
def from_yaml(cls, **kwargs):
    """Creates a new instance of a rule in relation to the config file.

    This updates the dictionary of the class with the added details,
    which allows for flexibility in the configuation file. Only called
    when parsing the default configuation file.

    :param kwargs: configuration keys; each becomes an instance attribute
    :return: new instance with all kwargs set as attributes
    """
    ret = cls()
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3 (which the rest of this codebase targets); items() works
    # on both.
    for key, value in kwargs.items():
        ret.__dict__[key] = value
    return ret
def merge(self, new_dict):
    """Merges a dictionary into the Rule object.

    The "actions" entry is consumed (popped from new_dict) and each action
    registered via add_action; every remaining key is copied onto the
    instance's __dict__.
    """
    for action in new_dict.pop("actions"):
        self.add_action(action)
    # Remaining keys become plain attributes.
    self.__dict__.update(new_dict)
def execute_actions(self, cwd):
    """Iterates over the actions and executes them in order.

    :param cwd: working directory the actions are executed from
    """
    self._execute_globals(cwd)
    for action in self.actions:
        logger.info("executing {}".format(action))
        # NOTE(review): shell=True runs each action string through the
        # shell; actions come from configuration, so config sources must
        # be trusted. Actions run sequentially (wait() blocks).
        p = subprocess.Popen(action, shell=True, cwd=cwd)
        p.wait()
def from_yaml(cls, defaults, **kwargs): """Creates a new instance of a rule by merging two dictionaries. This allows for independant configuration files to be merged into the defaults.""" # TODO: I hate myself for this. Fix it later mmkay? if "token" not in defaults: kwargs["token"] = None defaults = copy.deepcopy(defaults) return cls( defaults=defaults, token=kwargs.pop("token"), directory=kwargs.pop("directory"), **kwargs )
def parse_address(formatted_address):
    """
    :param formatted_address: A string like "email@address.com" or
        "My Email <email@address.com>"
    :return: Tuple: (address, name); name is None for a bare address
    :raises ValueError: if the string matches neither accepted form
    """
    if email_regex.match(formatted_address):
        # Just a raw address
        return (formatted_address, None)
    match = formatted_address_regex.match(formatted_address)
    if match:
        # Group 1 is the display name, group 2 the address proper.
        (name, email) = match.group(1, 2)
        return email.strip(), name.strip()
    raise ValueError('"{}" is not a valid formatted address'.format(formatted_address))
def send_mail(recipient_list, subject, body, html=False, from_address=None):
    """
    :param recipient_list: List of recipients i.e. ['testing@fig14.com',
        'Stephen Brown <steve@fig14.com>']
    :param subject: The subject
    :param body: The email body
    :param html: Is this a html email? Defaults to False
    :param from_address: From email address or name and address i.e.
        'Test System <errors@test.com>'; falls back to the module default
    :raises Exception: if the mailer module hasn't been configured
    """
    if not _configured:
        raise Exception('LFS Mailer hasn\'t been configured')
    if from_address is None:
        from_address = default_email_from
    mime_type = 'html' if html else 'plain'
    log.debug('Sending {} mail to {}: {}'.format(mime_type, ', '.join(recipient_list), subject))
    if dump_email_body:
        log.info(body)
    s = smtplib.SMTP(host, port)
    if use_tls:
        # EHLO before and after STARTTLS as required by the SMTP protocol.
        s.ehlo()
        s.starttls()
        s.ehlo()
    if username:
        s.login(username, password)
    if email_to_override:
        # Redirect all mail to the override address (for test environments),
        # recording the original recipients in the subject line.
        subject = '[to %s] %s' % (', '.join(recipient_list), subject)
        recipient_list = [email_to_override]
        log.info('Using email override: %s' % ', '.join(recipient_list))
    msg = MIMEText(body, mime_type, 'utf-8')
    msg['To'] = ', '.join(recipient_list)
    msg['Subject'] = subject
    msg['From'] = from_address
    msg['Date'] = email.utils.formatdate()
    s.sendmail(from_address, recipient_list, msg.as_string())
    s.quit()
def add_details(self, message):
    """ Add extra details to the message. Separate so that it can be
    overridden.

    Best-effort: appends the current Flask request details and session to
    the message; any failure (e.g. no request context) is printed and
    swallowed so error reporting itself never raises.

    :param message: the base error message text
    :return: message with request/session details appended when available
    """
    msg = message
    # Try to append Flask request details
    try:
        from flask import request
        url = request.url
        method = request.method
        endpoint = request.endpoint
        # Obscure password field and prettify a little bit
        form_dict = dict(request.form)
        for key in form_dict:
            if key.lower() in _error_reporting_obscured_fields:
                form_dict[key] = '******'
            elif len(form_dict[key]) == 1:
                # Single-value fields are unwrapped from their list for
                # readability.
                form_dict[key] = form_dict[key][0]
        form = pprint.pformat(form_dict).replace('\n', '\n ')
        msg = '%s\nRequest:\n\nurl: %s\nmethod: %s\nendpoint: %s\nform: %s\n' % \
            (msg, url, method, endpoint, form)
    except Exception:
        traceback.print_exc()
    # Try to append the session
    try:
        from flask import session
        from flask.json import JSONEncoder
        session_str = json.dumps(
            dict(**session),
            indent=2,
            cls=JSONEncoder
        )
        msg = '%s\nSession:\n\n%s\n' % (msg, session_str)
    except Exception:
        traceback.print_exc()
    return msg
def emit(self, record):
    """
    Emit a record.

    Format the record and send it to the specified addressees, subject to
    a sliding one-minute rate limit (max_sends_per_minute); records over
    the limit are logged locally instead of emailed.
    """
    try:
        # First, remove all records from the rate limiter list that are
        # over a minute old
        now = timetool.unix_time()
        one_minute_ago = now - 60
        new_rate_limiter = [x for x in self.rate_limiter if x > one_minute_ago]
        log.debug('Rate limiter %s -> %s' % (len(self.rate_limiter), len(new_rate_limiter)))
        self.rate_limiter = new_rate_limiter
        # Now, get the number of emails sent in the last minute. If it's
        # less than the threshold, add another entry to the rate limiter
        # list
        recent_sends = len(self.rate_limiter)
        send_email = recent_sends < self.max_sends_per_minute
        if send_email:
            self.rate_limiter.append(now)
        msg = self.format(record)
        msg = self.add_details(msg)
        # Finally send the message!
        if send_email:
            if DEBUG_ERROR_EMAIL_SENDING:
                log.info('@@@> ! Sending error email to {} !'.format(self.toaddrs))
            send_text_mail(self.toaddrs, self.subject, msg, self.fromaddr)
        else:
            log.info('!! WARNING: Not sending email as too many emails have been sent in the past minute !!')
            log.info(msg)
    except (KeyboardInterrupt, SystemExit):
        # Never swallow interpreter shutdown signals.
        raise
    except Exception:
        # Standard logging.Handler contract: report, don't raise.
        self.handleError(record)
def get_context(self, value):
    """Ensure `image_rendition` is added to the global context.

    Falls back to the 'original' rendition when none is configured on
    self.rendition.
    """
    context = super(RenditionAwareStructBlock, self).get_context(value)
    context['image_rendition'] = self.rendition.\
        image_rendition or 'original'
    return context
def log_attempt(self, key):
    """
    Log an attempt against key, incrementing the number of attempts for
    that key and potentially adding a lock to the lock table

    :param key: identifier being rate-limited (e.g. username or IP)
    """
    with self.lock:
        # Count this attempt.
        if key not in self.attempts:
            self.attempts[key] = 1
        else:
            self.attempts[key] += 1
        # Threshold reached: lock the key until now + lock_duration (UTC).
        if self.attempts[key] >= self.max_attempts:
            log.info('Account %s locked due to too many login attempts' % key)
            # lock account
            self.locks[key] = datetime.datetime.utcnow() + datetime.timedelta(seconds=self.lock_duration)
def service(self):
    """
    Decrease the countdowns, and remove any expired locks. Should be
    called once every <decrease_every> seconds.
    """
    with self.lock:
        # Decrement / remove all attempts. Iterate over a snapshot of the
        # keys because entries are deleted during iteration.
        for key in list(self.attempts.keys()):
            log.debug('Decrementing count for %s' % key)
            if key in self.attempts:
                if self.attempts[key] <= 1:
                    del self.attempts[key]
                else:
                    self.attempts[key] -= 1
        # Remove expired locks (lock expiry timestamps are UTC).
        now = datetime.datetime.utcnow()
        for key in list(self.locks.keys()):
            if key in self.locks and self.locks[key] < now:
                log.info('Expiring login lock for %s' % key)
                del self.locks[key]
def add_to_queue(self, url):
    """
    Adds an URL to the download queue.

    Refuses (with an error log, no exception) when either the music or
    storage service has not been initialised yet.

    :param str url: URL to the music service track
    """
    if self.connection_handler.current_music is None:
        log.error('Music service is not initialized. URL was not added to queue.')
    elif self.connection_handler.current_storage is None:
        log.error('Drive service is not initialized. URL was not added to queue.')
    else:
        self.queues['download'].put(url)
def use_music_service(self, service_name, api_key=None):
    """
    Sets the current music service to service_name.

    Thin delegate to the connection handler.

    :param str service_name: Name of the music service
    :param str api_key: Optional API key if necessary
    """
    self.connection_handler.use_music_service(service_name, api_key=api_key)
def use_storage_service(self, service_name, custom_path=None):
    """
    Sets the current storage service to service_name and attempts to
    connect to it.

    Thin delegate to the connection handler.

    :param str service_name: Name of the storage service
    :param str custom_path: Custom path where to download tracks for local
        storage (optional, and must already exist, use absolute paths only)
    """
    self.connection_handler.use_storage_service(service_name, custom_path=custom_path)
def start_workers(self, workers_per_task=1):
    """
    Creates and starts the workers, as well as attaching a handler to
    terminate them gracefully when a SIGINT signal is received.

    No-op if workers have already been created.

    :param int workers_per_task: Number of workers to create for each task
        in the pipeline
    """
    if not self.workers:
        # Four-stage pipeline: download -> convert -> upload -> delete;
        # each worker consumes its input queue and feeds the next queue.
        for _ in range(workers_per_task):
            self.workers.append(Worker(self._download, self.queues['download'], self.queues['convert'], self.stopper))
            self.workers.append(Worker(self._convert, self.queues['convert'], self.queues['upload'], self.stopper))
            self.workers.append(Worker(self._upload, self.queues['upload'], self.queues['delete'], self.stopper))
            self.workers.append(Worker(self._delete, self.queues['delete'], self.queues['done'], self.stopper))
        # Ctrl+C triggers the shared stopper so workers shut down cleanly.
        self.signal_handler = SignalHandler(self.workers, self.stopper)
        signal.signal(signal.SIGINT, self.signal_handler)
        for worker in self.workers:
            worker.start()
def set(self, k, v):
    """Add or update a key, value pair to the database

    :param k: key; a leading '/' is stripped
    :param v: value; stored as str(v)
    :raises KVStoreError: when the PUT fails or Consul reports failure
    """
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    r = requests.put(url, data=str(v))
    # Consul's KV PUT returns JSON true on success; anything else counts
    # as a failure.
    if r.status_code != 200 or r.json() is not True:
        raise KVStoreError('PUT returned {}'.format(r.status_code))
def get(self, k, wait=False, wait_index=False, timeout='5m'):
    """Get the value of a given key

    :param k: key; a leading '/' is stripped
    :param wait: when True, perform a blocking (long-poll) read against
        wait_index with the given timeout
    :param wait_index: Consul index to block against (see index())
    :param timeout: long-poll timeout string, e.g. '5m'
    :raises KeyDoesNotExist: when Consul returns 404
    :raises KVStoreError: on any other non-200 response
    """
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    if wait:
        params['index'] = wait_index
        params['wait'] = timeout
    r = requests.get(url, params=params)
    if r.status_code == 404:
        raise KeyDoesNotExist("Key " + k + " does not exist")
    if r.status_code != 200:
        raise KVStoreError('GET returned {}'.format(r.status_code))
    try:
        # Consul base64-encodes stored values in its JSON response.
        return base64.b64decode(r.json()[0]['Value'])
    except TypeError as e:
        # Value was empty and wild None appeared
        # NOTE(review): this returns str "" while the success path returns
        # bytes on Python 3 -- callers must tolerate both; confirm intent.
        return ""
def recurse(self, k, wait=False, wait_index=None, timeout='5m'):
    """Recursively get the tree below the given key

    :param k: key prefix; a leading '/' is stripped
    :param wait: when True, perform a blocking (long-poll) read
    :param wait_index: Consul index to block against; when falsy the
        current index of the subtree is fetched first
    :param timeout: long-poll timeout string, e.g. '5m'
    :returns: dict mapping full key paths to their (decoded) values;
        empty values map to ''
    :raises KeyDoesNotExist: when Consul returns 404
    :raises KVStoreError: on any other non-200 response
    """
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    params['recurse'] = 'true'
    if wait:
        params['wait'] = timeout
        if not wait_index:
            params['index'] = self.index(k, recursive=True)
        else:
            params['index'] = wait_index
    r = requests.get(url, params=params)
    if r.status_code == 404:
        raise KeyDoesNotExist("Key " + k + " does not exist")
    if r.status_code != 200:
        raise KVStoreError('GET returned {}'.format(r.status_code))
    entries = {}
    for e in r.json():
        # Consul base64-encodes values; null/empty values become ''.
        if e['Value']:
            entries[e['Key']] = base64.b64decode(e['Value'])
        else:
            entries[e['Key']] = ''
    return entries
def index(self, k, recursive=False):
    """Get the current index of the key or the subtree. This is needed for
    later creating long polling requests

    :param k: key (or prefix with recursive=True); leading '/' stripped
    :param recursive: query the whole subtree below k
    :returns: the X-Consul-Index response header value
    """
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    if recursive:
        params['recurse'] = ''
    r = requests.get(url, params=params)
    # NOTE(review): no status-code check here -- a 404/500 response would
    # raise KeyError on the header lookup below; confirm acceptable.
    return r.headers['X-Consul-Index']
def delete(self, k, recursive=False):
    """Delete a given key or recursively delete the tree below it

    :param k: key (or prefix with recursive=True); leading '/' stripped
    :param recursive: delete the whole subtree below k
    :raises KVStoreError: on any non-200 response
    """
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    if recursive:
        params['recurse'] = ''
    r = requests.delete(url, params=params)
    if r.status_code != 200:
        raise KVStoreError('DELETE returned {}'.format(r.status_code))
def internal_error(exception, template_path, is_admin, db=None):
    """
    Render an "internal error" page.

    The following variables will be populated when rendering the template:
    title: The page title
    message: The body of the error message to display to the user
    preformat: Boolean stating whether to wrap the error message in a pre

    As well as rendering the error message to the user, this will also log
    the exception. Detailed tracebacks are only shown in TEST_MODE or to
    admin users; everyone else gets a generic 500 message.

    :param exception: The exception that was caught
    :param template_path: The template to render (i.e. "main/error.html")
    :param is_admin: Can the logged in user always view detailed error
        reports?
    :param db: The Flask-SQLAlchemy instance
    :return: Flask Response
    """
    if db:
        # Roll back any half-finished transaction so the error page's own
        # queries don't fail; best-effort only.
        try:
            db.session.rollback()
        except:  # noqa: E722
            pass
    title = str(exception)
    message = traceback.format_exc()
    preformat = True
    log.error('Exception caught: {}\n{}'.format(title, message))
    if current_app.config.get('TEST_MODE'):
        show_detailed_error = True
        message = 'Note: You are seeing this error message because the server is in test mode.\n\n{}'.format(message)
    elif is_admin:
        show_detailed_error = True
        message = 'Note: You are seeing this error message because you are a member of staff.\n\n{}'.format(message)
    else:
        title = '500 Internal Server Error'
        message = 'Something went wrong while processing your request.'
        preformat = False
        show_detailed_error = False
    try:
        return render_template(template_path,
                               title=title,
                               message=message,
                               preformat=preformat,
                               exception=exception,
                               is_admin=is_admin,
                               show_detailed_error=show_detailed_error), 500
    except:  # noqa: E722
        # Even the error template failed -- fall back to plain text so the
        # client still gets a 500 response.
        log.exception('Error rendering error page!')
        return '500 Internal Server Error', 500
def plot_heatmap(X, y, top_n=10, metric='correlation', method='complete'):
    '''
    Plot heatmap which shows features with classes.

    :param X: list of dict
    :param y: labels
    :param top_n: most important n feature
    :param metric: metric which will be used for clustering
    :param method: method which will be used for clustering
    :return: the seaborn ClusterGrid for the rendered clustermap
    '''
    sns.set(color_codes=True)
    # Rank features and keep only the top_n most important ones.
    df = feature_importance_report(X, y)
    df_sns = pd.DataFrame().from_records(X)[df[:top_n].index].T
    df_sns.columns = y
    # One colour per class label for the column colour strip.
    color_mapping = dict(zip(set(y), sns.mpl_palette("Set2", len(set(y)))))
    # z_score=0 normalises each feature (row) before clustering.
    return sns.clustermap(df_sns, figsize=(22, 22), z_score=0,
                          metric=metric,
                          method=method,
                          col_colors=[color_mapping[i] for i in y])
def get_setup_version():
    """
    Build the packaging version string, following the official PyPI
    recommended version scheme (PEP 440).

    :return: version string for PyPI packaging
    :rtype: str
    """
    ver = '.'.join(map(str, VERSION[:3]))
    # If the suffix descriptor is None, return the bare X.Y.Z version.
    if not VERSION[3]:
        return ver
    # Otherwise append the version suffix. dev/post suffixes are joined
    # with a dot (e.g. 1.2.3.dev1); other suffixes such as a/b/rc are
    # appended directly (e.g. 1.2.3rc1), per PEP 440.
    hyphen = ''
    suffix = hyphen.join(map(str, VERSION[-2:]))
    if VERSION[3] in [VERSION_SUFFIX_DEV, VERSION_SUFFIX_POST]:
        hyphen = '.'
    ver = hyphen.join([ver, suffix])
    return ver
def get_cli_version():
    """
    Get the CLI version string. If a VERSION file exists next to this
    module its contents are used; otherwise fall back to
    :meth:`.get_setup_version`.

    :return: CLI version string
    :rtype: str
    """
    directory = os.path.dirname(os.path.abspath(__file__))
    version_path = os.path.join(directory, 'VERSION')
    if os.path.exists(version_path):
        with open(version_path) as f:
            ver = f.read()
        # NOTE(review): the file contents are returned un-stripped, so a
        # trailing newline in VERSION ends up in the version string --
        # confirm this is intended.
        return ver
    return get_setup_version()