code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if ok_did(uri):
    return uri
if uri.startswith('did:sov:'):
    rv = uri[8:]
    if ok_did(rv):
        return rv
raise BadIdentifier('Bad specification {} does not correspond to a sovrin DID'.format(uri))
def canon_did(uri: str) -> str
Convert a URI into a DID if need be, left-stripping 'did:sov:' if present. Return input if already a DID. Raise BadIdentifier for invalid input.

:param uri: input URI or DID
:return: corresponding DID
6.531617
4.772577
1.368572
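A brief usage sketch for canon_did; the import path is hypothetical, since the module is not named in this record:

from canon_util import canon_did, BadIdentifier  # hypothetical import path

print(canon_did('did:sov:LjgpST2rjsoxYegQDRm7EL'))  # -> 'LjgpST2rjsoxYegQDRm7EL'
print(canon_did('LjgpST2rjsoxYegQDRm7EL'))          # already a DID: returned unchanged
try:
    canon_did('https://example.com')                 # not a sovrin DID
except BadIdentifier as e:
    print(e)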
if not ok_did(did):
    raise BadIdentifier('Bad DID {} cannot act as DID document identifier'.format(did))
if ok_did(ref):  # e.g., LjgpST2rjsoxYegQDRm7EL
    return 'did:sov:{}'.format(did)
if ok_did(resource(ref, delimiter)):  # e.g., LjgpST2rjsoxYegQDRm7EL#keys-1
    return 'did:sov:{}'.format(ref)
if ref.startswith('did:sov:'):  # e.g., did:sov:LjgpST2rjsoxYegQDRm7EL, did:sov:LjgpST2rjsoxYegQDRm7EL#3
    rv = ref[8:]
    if ok_did(resource(rv, delimiter)):
        return ref
    raise BadIdentifier('Bad URI {} does not correspond to a sovrin DID'.format(ref))
if urlparse(ref).scheme:  # e.g., https://example.com/messages/8377464
    return ref
return 'did:sov:{}{}{}'.format(did, delimiter if delimiter else '#', ref)
def canon_ref(did: str, ref: str, delimiter: str = None)
Given a reference in a DID document, return it in its canonical form of a URI.

:param did: DID acting as the identifier of the DID document
:param ref: reference to canonicalize, either a DID or a fragment pointing to a location in the DID doc
:param delimiter: delimiter character marking fragment (default '#') or introducing identifier (';') against DID resource
3.865269
3.629482
1.064964
if not tags:
    return True
depth = 0
queue = [(i, depth + 1) for i in tags.values() if isinstance(i, dict)]
max_depth = 0
while queue and max_depth < 2:
    sub, depth = queue.pop()
    max_depth = max(max_depth, depth)
    queue = queue + [(i, depth + 1) for i in sub.values() if isinstance(i, dict)]
return max_depth < 2 and all(isinstance(k, str) and isinstance(tags[k], str) for k in tags)
def ok_tags(tags: dict) -> bool
Whether input tags dict is OK as an indy-sdk tags structure (depth=1, string values).
2.85114
2.521218
1.130858
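A quick illustration of the depth rule, as a hedged sketch (assumes ok_tags is importable from this record's module):

assert ok_tags({'~plain': 'a', 'encr': 'b'})   # flat dict of strings: OK
assert not ok_tags({'meta': {'nested': 'x'}})  # nested dict exceeds depth 1: rejected
assert not ok_tags({'count': 3})               # non-string value: rejected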
if not StorageRecord.ok_tags(val):
    LOGGER.debug('StorageRecord.__init__ <!< Tags %s must map strings to strings', val)
    raise BadRecord('Tags {} must map strings to strings'.format(val))
self._tags = val or {}
def tags(self, val: dict) -> None
Accessor for record tags (metadata).

:param val: record tags
12.680481
10.931378
1.160008
return {t: self.tags[t] for t in (self.tags or {}) if t.startswith('~')} or None
def clear_tags(self) -> dict
Accessor for record tags (metadata) stored in the clear. :return: record tags stored in the clear
10.19972
9.023088
1.130402
return {t: self._tags[t] for t in self.tags or {} if not t.startswith('~')} or None
def encr_tags(self) -> dict
Accessor for record tags (metadata) stored encrypted. :return: record tags stored encrypted
12.550588
12.686605
0.989279
try:
    response = requests.get(
        url=API_ENDPOINT.format(prefix),
        headers={'User-Agent': USER_AGENT},
        timeout=getattr(
            settings,
            'PWNED_PASSWORDS_API_TIMEOUT',
            REQUEST_TIMEOUT,
        ),
    )
    response.raise_for_status()
except requests.RequestException as e:
    # Gracefully handle timeouts and HTTP error response codes.
    log.warning('Skipped Pwned Passwords check due to error: %r', e)
    return None
results = {}
for line in response.text.splitlines():
    line_suffix, _, times = line.partition(':')
    results[line_suffix] = int(times)
return results
def _get_pwned(prefix)
Fetches a dict of all hash suffixes from Pwned Passwords for a given SHA-1 prefix.
3.430573
3.116034
1.100942
if not isinstance(password, text_type):
    raise TypeError('Password values to check must be Unicode strings.')
password_hash = hashlib.sha1(password.encode('utf-8')).hexdigest().upper()
prefix, suffix = password_hash[:5], password_hash[5:]
results = _get_pwned(prefix)
if results is None:
    # Gracefully handle timeouts and HTTP error response codes.
    return None
return results.get(suffix, 0)
def pwned_password(password)
Checks a password against the Pwned Passwords database.
4.298088
4.009541
1.071965
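A hedged usage sketch; the import path is an assumption and a network call is involved:

from pwned_passwords_django.api import pwned_password  # hypothetical import path

count = pwned_password('correct horse battery staple')
if count is None:
    print('check skipped (API error or timeout)')
elif count:
    print('seen in {} breaches'.format(count))
else:
    print('not found in Pwned Passwords')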
# if proxfun is a string, grab the corresponding function from operators.py
if isinstance(proxfun, str):
    try:
        proxfun_name = proxfun.split(None, 1)[0]  # Ignore everything after white space
        op = getattr(operators, proxfun_name)
        self.objectives.append(lambda theta, rho: op(theta.copy(), float(rho), **kwargs))
    except AttributeError as e:
        print(str(e) + '\n' + 'Could not find the function ' + proxfun + ' in the operators module!')
# if proxfun is a function, add it as its own proximal operator
elif hasattr(proxfun, '__call__'):
    self.objectives.append(lambda theta, rho: proxfun(theta.copy(), float(rho)))
# type of proxfun must be a string or a function
else:
    raise TypeError('The argument "proxfun" must be a string or a function!')
def add_regularizer(self, proxfun, **kwargs)
Add a regularizer from the operators module to the list of objectives

Parameters
----------
proxfun : string or function
    If a string, then it must be the name of a corresponding function in the `operators` module. If a function, then it must apply a proximal update given an initial point x0, momentum parameter rho, and optional arguments given in `**kwargs`.
\*\*kwargs : keyword arguments
    Any optional arguments required for the given function
3.37551
3.067006
1.100588
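A hedged usage sketch, assuming the instance holding add_regularizer is named opt (an assumption) and that the operators module defines sparse(x0, rho, gamma) as shown later in this section:

opt.add_regularizer('sparse', gamma=0.1)            # looked up as operators.sparse; gamma is forwarded
opt.add_regularizer(lambda x, rho: x / (1. + rho))  # any callable proximal operator works too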
# clear existing operators
if clear:
    self.clear()
# add new regularizers
for proxfun in regularizers:
    self.add_regularizer(proxfun, **regularizers[proxfun])
def set_regularizers(self, regularizers, clear=True)
Adds a set of regularizers

Parameters
----------
regularizers : dict
    Each key is the name of a corresponding proximal operator, and the value associated with that key is a set of keyword arguments
clear : boolean, optional
    Whether or not to clear the existing regularizers. (Default: True)
6.878461
5.784634
1.189092
# get list of objectives for this parameter
num_obj = len(self.objectives)
assert num_obj >= 1, "There must be at least one objective!"

# initialize lists of primal and dual variable copies, one for each objective
orig_shape = theta_init.shape
primals = [theta_init.flatten() for _ in range(num_obj)]
duals = [np.zeros(theta_init.size) for _ in range(num_obj)]
theta_avg = np.mean(primals, axis=0).ravel()

# initialize penalty parameter
tau = namedtuple('tau', ('init', 'inc', 'dec'))(*tau)
rho = tau.init

# store cumulative runtimes of each iteration, starting now
tstart = time.time()

# clear metadata
self.metadata = defaultdict(list)

# run ADMM iterations
self.converged = False
for cur_iter in range(max_iter):

    # store the parameters from the previous iteration
    theta_prev = theta_avg

    # update each primal variable copy by taking a proximal step via each objective
    for varidx, dual in enumerate(duals):
        primals[varidx] = self.objectives[varidx]((theta_prev - dual).reshape(orig_shape), rho).ravel()

    # average primal copies
    theta_avg = np.mean(primals, axis=0)

    # update the dual variables (after primal update has finished)
    for varidx, primal in enumerate(primals):
        duals[varidx] += primal - theta_avg

    # compute primal and dual residuals
    primal_resid = float(np.sum([np.linalg.norm(primal - theta_avg) for primal in primals]))
    dual_resid = num_obj * rho ** 2 * np.linalg.norm(theta_avg - theta_prev)

    # update penalty parameter according to primal and dual residuals
    # (see sect. 3.4.1 of the Boyd and Parikh ADMM paper)
    if primal_resid > tau.init * dual_resid:
        rho *= float(tau.inc)
    elif dual_resid > tau.init * primal_resid:
        rho /= float(tau.dec)

    # update metadata for this iteration
    self.metadata['Primal resid'].append(primal_resid)
    self.metadata['Dual resid'].append(dual_resid)
    self.metadata['Time (s)'].append(time.time() - tstart)
    self.metadata['rho'].append(rho)

    # invoke the callback function with the current parameters and history
    if callback is not None:
        # get the metadata from this iteration
        data = valmap(last, self.metadata)
        callback(theta_avg.reshape(orig_shape), data)

    # update the display
    self.update_display(cur_iter + 1, disp)

    # check for convergence
    if (primal_resid <= tol) & (dual_resid <= tol):
        self.converged = True
        break

# clean up display
self.update_display(-1, disp)

# store and return final parameters
self.theta = theta_avg.reshape(orig_shape)
return self.theta
def minimize(self, theta_init, max_iter=50, callback=None, disp=0, tau=(10., 2., 2.), tol=1e-3)
Minimize a list of objectives using a proximal consensus algorithm

Parameters
----------
theta_init : ndarray
    Initial parameter vector (numpy array)
max_iter : int, optional
    Maximum number of iterations to run (default: 50)
callback : function, optional
    a function that gets called on each iteration with the following arguments: the current parameter value (ndarray), and a dictionary that contains information about the status of the algorithm
disp : int, optional
    determines how much information to display when running. Ranges from 0 (nothing) to 3 (lots of information)

Returns
-------
theta : ndarray
    The parameters found after running the optimization procedure

Other Parameters
----------------
tau : (float, float, float), optional
    initial, increment and decrement parameters for the momentum scheduler (default: (10, 2, 2))
tol : float, optional
    residual tolerance for assessing convergence. if both the primal and dual residuals are less than this value, then the algorithm has converged (default: 1e-3)
3.339645
3.246763
1.028608
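A minimal end-to-end sketch of the consensus ADMM loop above; the Optimizer name and its constructor arguments are assumptions, since only the methods appear in this section:

import numpy as np

opt = Optimizer('squared_error', x_obs=np.random.randn(10, 5))  # hypothetical constructor
opt.set_regularizers({'sparse': {'gamma': 0.1}, 'nucnorm': {'gamma': 1.0}})
theta_hat = opt.minimize(np.zeros((10, 5)), max_iter=100, tol=1e-4)
print(opt.converged, theta_hat.shape)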
# exit and print nothing if disp_level is zero
if disp_level == 0:
    return
else:
    # simple update, no table
    if disp_level == 1 and iteration >= 0:
        print('[Iteration %i]' % iteration)

    # fancy table updates
    if disp_level > 1:
        # get the metadata from this iteration
        data = valmap(last, self.metadata)

        # choose what keys to use
        keys = ['Time (s)', 'Primal resid', 'Dual resid', 'rho']

        # initial update. print out table headers
        if iteration == 1:
            print(tableprint.header(keys, width=col_width))

        # print data
        print(tableprint.row([data[k] for k in keys], width=col_width, format_spec='4g'))

        if iteration == -1:
            print(tableprint.bottom(len(keys), width=col_width) + '\n')

    # print convergence statement
    if iteration == -1 and self.converged:
        print('Converged after %i iterations!' % len(self.metadata['Primal resid']))
def update_display(self, iteration, disp_level, col_width=12)  # pragma: no cover
Prints information about the optimization procedure to standard output

Parameters
----------
iteration : int
    The current iteration. Must be either a positive integer or -1, which indicates the end of the algorithm
disp_level : int
    An integer which controls how much information to display, ranging from 0 (nothing) to 3 (lots of stuff)
col_width : int
    The width of each column in the data table, used if disp_level > 1
5.631881
5.527504
1.018883
assert type(x) == Tensor, "Input array must be a Tensor"
while True:
    # proximal operator for the Fro. norm
    x = squared_error(x, rho, x_obs)
    # sequential singular value thresholding
    for ix, penalty in enumerate(penalties):
        x = x.unfold(ix).svt(penalty / rho).fold()
    yield x
def susvd(x, x_obs, rho, penalties)
Sequential unfolding SVD

Parameters
----------
x : Tensor
x_obs : array_like
rho : float
penalties : array_like
    penalty for each unfolding of the input tensor
12.066865
11.350965
1.06307
stream = BytesIO()
self.build_stream(obj, stream, context)
return stream.getvalue()
def build(self, obj, context=None) -> bytes
Build bytes from the python object.

:param obj: Python object to build bytes from.
:param context: Optional context dictionary.
4.762589
5.987102
0.795475
stream = BytesIO(data)
return self.parse_stream(stream, context)
def parse(self, data: bytes, context=None)
Parse some python object from the data.

:param data: Data to be parsed.
:param context: Optional context dictionary.
6.149755
9.111869
0.674917
if context is None:
    context = Context()
if not isinstance(context, Context):
    context = Context(context)
try:
    self._build_stream(obj, stream, context)
except Error:
    raise
except Exception as exc:
    raise BuildingError(str(exc))
def build_stream(self, obj, stream: BytesIO, context=None) -> None
Build bytes from the python object into the stream.

:param obj: Python object to build bytes from.
:param stream: A ``io.BytesIO`` instance to write bytes into.
:param context: Optional context dictionary.
3.586201
3.749485
0.956452
if context is None:
    context = Context()
if not isinstance(context, Context):
    context = Context(context)
try:
    return self._parse_stream(stream, context)
except Error:
    raise
except Exception as exc:
    raise ParsingError(str(exc))
def parse_stream(self, stream: BytesIO, context=None)
Parse some python object from the stream.

:param stream: Stream from which the data is read and parsed.
:param context: Optional context dictionary.
3.12683
3.336013
0.937296
if context is None:
    context = Context()
if not isinstance(context, Context):
    context = Context(context)
try:
    return self._sizeof(context)
except Error:
    raise
except Exception as exc:
    raise SizeofError(str(exc))
def sizeof(self, context=None) -> int
Return the size of the construct in bytes. :param context: Optional context dictionary.
3.1237
3.459349
0.902973
# set the current parameter value of SFO to the given value
optimizer.set_theta(x0, float(rho))
# set the previous ADMM location as the flattened parameter array
optimizer.theta_admm_prev = optimizer.theta_original_to_flat(x0)
# run the optimizer for n steps
return optimizer.optimize(num_steps=num_steps)
def sfo(x0, rho, optimizer, num_steps=50)
Proximal operator for an arbitrary function minimized via the Sum-of-Functions optimizer (SFO)

Notes
-----
SFO is a function optimizer for the case where the target function breaks into a sum over minibatches, or a sum over contributing functions. It is described in more detail in [1]_.

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
optimizer : SFO instance
    Instance of the SFO object in `SFO_admm.py`
num_steps : int, optional
    Number of SFO steps to take

Returns
-------
theta : array_like
    The parameter vector found after running `num_steps` iterations of the SFO optimizer

References
----------
.. [1] Jascha Sohl-Dickstein, Ben Poole, and Surya Ganguli. Fast large-scale optimization by unifying stochastic gradient and quasi-Newton methods. International Conference on Machine Learning (2014). `arXiv preprint arXiv:1311.2115 (2013) <http://arxiv.org/abs/1311.2115>`_.
7.793672
9.020455
0.864
# objective and gradient of the Poisson negative log-likelihood
n = float(x.shape[0])
f = lambda w: np.mean(np.exp(x.dot(w)) - y * x.dot(w))
df = lambda w: (x.T.dot(np.exp(x.dot(w))) - x.T.dot(y)) / n
# bfgs (defined below) expects a single function returning (objective, gradient)
f_df = lambda w: (f(w), df(w))
# minimize via BFGS
return bfgs(x0, rho, f_df)
def poissreg(x0, rho, x, y)
Proximal operator for Poisson regression

Computes the proximal operator of the negative log-likelihood loss assuming a Poisson noise distribution.

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
x : (n, k) array_like
    A design matrix consisting of n examples of k-dimensional features (or input).
y : (n,) array_like
    A vector containing the responses (output) to the n features given in x.

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step
3.479497
4.244838
0.819701
# keep track of the original shape
orig_shape = x0.shape

# specify the objective function and gradient for the proximal operator
def f_df_augmented(x):
    xk = x.reshape(orig_shape)
    obj, grad = f_df(xk)
    g = obj + (rho / 2.) * np.sum((xk - x0) ** 2)
    dg = (grad + rho * (xk - x0)).ravel()
    return g, dg

# minimize via BFGS
options = {'maxiter': maxiter, 'disp': False}
return opt.minimize(f_df_augmented, x0.ravel(), method=method, jac=True, options=options).x.reshape(orig_shape)
def bfgs(x0, rho, f_df, maxiter=50, method='BFGS')
Proximal operator for minimizing an arbitrary function using BFGS

Uses the BFGS algorithm to find the proximal update for an arbitrary function, `f`, whose gradient is known.

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
f_df : function
    The objective function and gradient
maxiter : int, optional
    Maximum number of iterations to take (default: 50)
method : str, optional
    Which scipy.optimize algorithm to use (default: 'BFGS')

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step
3.380836
3.482492
0.97081
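A hedged sanity check: for f(x) = 0.5*||x||^2 the proximal point has the closed form rho*x0/(1+rho), which bfgs should recover (assumes numpy and this module's bfgs):

import numpy as np

x0 = np.array([1.0, -2.0, 3.0])
f_df = lambda x: (0.5 * np.sum(x ** 2), x)       # objective and its gradient
x = bfgs(x0, rho=2.0, f_df=f_df)
print(np.allclose(x, 2.0 * x0 / 3.0, atol=1e-5))  # prox = rho*x0/(1+rho)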
# Apply Laplacian smoothing
n = x0.shape[axis]
lap_op = spdiags([(2 + rho / gamma) * np.ones(n), -1 * np.ones(n), -1 * np.ones(n)],
                 [0, -1, 1], n, n, format='csc')
x_out = np.rollaxis(spsolve(gamma * lap_op, rho * np.rollaxis(x0, axis, 0)), axis, 0)
return x_out
def smooth(x0, rho, gamma, axis=0)
Proximal operator for a smoothing function enforced via the discrete laplacian operator

Notes
-----
Currently only works with matrices (2-D arrays) as input

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
    A constant that weights how strongly to enforce the constraint

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step
3.145752
3.587426
0.876883
# compute SVD
u, s, v = np.linalg.svd(x0, full_matrices=False)
# soft threshold the singular values
sthr = np.maximum(s - (gamma / float(rho)), 0)
# reconstruct
x_out = (u.dot(np.diag(sthr)).dot(v))
return x_out
def nucnorm(x0, rho, gamma)
Proximal operator for the nuclear norm (sum of the singular values of a matrix)

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
    A constant that weights how strongly to enforce the constraint

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step
3.714139
4.151677
0.894612
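A small numeric check of the singular value soft-thresholding above (hedged; assumes nucnorm and numpy are available):

import numpy as np

x0 = np.outer([0., 1., 2.], [0., 1., 2., 3.])   # rank-1, largest singular value ~8.37
x = nucnorm(x0, rho=1.0, gamma=1.0)
print(np.linalg.matrix_rank(x))                  # 1: shrinking singular values cannot raise rank
print(np.linalg.svd(x, compute_uv=False)[0])     # ~7.37: original value minus gamma/rho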
try:
    from skimage.restoration import denoise_tv_bregman
except ImportError:
    print('Error: scikit-image not found. TVD will not work.')
    return x0
return denoise_tv_bregman(x0, rho / gamma)
def tvd(x0, rho, gamma)
Proximal operator for the total variation denoising penalty

Requires scikit-image be installed

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
    A constant that weights how strongly to enforce the constraint

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step

Notes
-----
If scikit-image cannot be imported, an error message is printed and x0 is returned unchanged rather than raising ImportError.
3.482772
3.285349
1.060092
lmbda = float(gamma) / rho
# soft thresholding: shrink values toward zero by lmbda; entries with |x0| <= lmbda become 0
return (x0 - lmbda) * (x0 >= lmbda) + (x0 + lmbda) * (x0 <= -lmbda)
def sparse(x0, rho, gamma)
Proximal operator for the l1 norm (induces sparsity)

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
    A constant that weights how strongly to enforce the constraint

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step
3.559844
6.486281
0.548827
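A quick numeric check of the soft-thresholding rule above (a hedged sketch; assumes sparse is importable from the operators module):

import numpy as np

x0 = np.array([-2.0, -0.1, 0.0, 0.3, 1.5])
print(sparse(x0, rho=1.0, gamma=0.5))
# expected: [-1.5, 0.0, 0.0, 0.0, 1.0] -- entries within +/-0.5 are zeroed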
return np.linalg.solve(rho * np.eye(q.shape[0]) + P, rho * x0.copy() + q)
def linsys(x0, rho, P, q)
Proximal operator for the linear approximation Ax = b

Minimizes the function:

.. math:: f(x) = (1/2)||Ax-b||_2^2 = (1/2)x^TA^TAx - (b^TA)x + (1/2)b^Tb

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step
rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)
P : array_like
    The symmetric matrix A^TA, where we are trying to approximate Ax=b
q : array_like
    The vector A^Tb, where we are trying to approximate Ax=b

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step
3.998657
7.028327
0.568934
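The one-line solve follows from setting the gradient of f(x) + (rho/2)||x - x0||^2 to zero, giving (P + rho*I) x = q + rho*x0. A hedged numeric check (assumes linsys and numpy):

import numpy as np

A = np.random.randn(20, 5)
b = np.random.randn(20)
P, q = A.T.dot(A), A.T.dot(b)
x = linsys(np.zeros(5), rho=1e-8, P=P, q=q)  # tiny rho: prox ~ least-squares solution
print(np.allclose(P.dot(x), q, atol=1e-5))   # normal equations hold: A^TA x = A^T b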
status = _HTTP_STATUS_LINES.get(self._status_code)
return str(status or ('{} Unknown'.format(self._status_code)))
def status(self)
The HTTP status line as a string (e.g. ``404 Not Found``).
9.246833
6.336024
1.459406
if 'Content-Type' not in self.headers:
    self.headers.add_header('Content-Type', self.default_content_type)
if self._cookies:
    for c in self._cookies.values():
        self.headers.add_header('Set-Cookie', c.OutputString())
return self.headers.items()
def headerlist(self)
WSGI-conformant list of (header, value) tuples.
2.686768
2.333904
1.151191
self.assertIn(
    response.status_code,
    self.redirect_codes,
    self._get_redirect_assertion_message(response),
)
if expected_url:
    location_header = response._headers.get('location', None)
    self.assertEqual(
        location_header,
        ('Location', str(expected_url)),
        'Response should redirect to {0}, but it redirects to {1} instead'.format(
            expected_url,
            location_header[1],
        )
    )
def assert_redirect(self, response, expected_url=None)
assertRedirects from Django's TestCase follows the redirect chain; this assertion does not, which is more like real unit testing.
3.172963
2.984926
1.062996
return _InstanceContext(
    self.assert_instance_does_not_exist,
    self.assert_instance_exists,
    model_class,
    **kwargs
)
def assert_instance_created(self, model_class, **kwargs)
Checks if a model instance was created in the database. For example::

    >>> with self.assert_instance_created(Article, slug='lorem-ipsum'):
    ...     Article.objects.create(slug='lorem-ipsum')
4.591779
6.888742
0.666563
return _InstanceContext(
    self.assert_instance_exists,
    self.assert_instance_does_not_exist,
    model_class,
    **kwargs
)
def assert_instance_deleted(self, model_class, **kwargs)
Checks if the model instance was deleted from the database. For example::

    >>> with self.assert_instance_deleted(Article, slug='lorem-ipsum'):
    ...     Article.objects.get(slug='lorem-ipsum').delete()
4.718481
6.350738
0.742982
if ';' in x:
    content_type, priority = x.split(';')
    casted_priority = float(priority.split('=')[1])
else:
    content_type, casted_priority = x, 1.0
content_type = content_type.strip()  # e.g. ' text/html' -> 'text/html'
return content_type, casted_priority
def _split_into_mimetype_and_priority(x)
Split an accept header item into mimetype and priority.

>>> _split_into_mimetype_and_priority('text/*')
('text/*', 1.0)
>>> _split_into_mimetype_and_priority('application/json;q=0.5')
('application/json', 0.5)
3.065468
3.331691
0.920094
return sorted([_split_into_mimetype_and_priority(x) for x in accept_header.split(',')], key=lambda x: x[1], reverse=True)
def _parse_and_sort_accept_header(accept_header)
Parse and sort the accept header items.

>>> _parse_and_sort_accept_header('application/json;q=0.5, text/*')
[('text/*', 1.0), ('application/json', 0.5)]
4.066489
4.710294
0.86332
for mimetype_pattern, _ in _parse_and_sort_accept_header(accept_header):
    matched_types = fnmatch.filter(mimetypes, mimetype_pattern)
    if matched_types:
        return matched_types[0]
return mimetypes[0]
def accept_best_match(accept_header, mimetypes)
Return the mimetype that best matches the accept header.

>>> accept_best_match('application/json, text/html', ['application/json', 'text/plain'])
'application/json'
>>> accept_best_match('application/json;q=0.5, text/*', ['application/json', 'text/plain'])
'text/plain'
3.562397
4.412205
0.807396
typed_url_vars = {}
try:
    for k, v in url_vars.items():
        arg_type = type_hints.get(k)
        if arg_type and arg_type != str:
            typed_url_vars[k] = arg_type(v)
        else:
            typed_url_vars[k] = v
except ValueError:
    return False, {}
return True, typed_url_vars
def match_url_vars_type(url_vars, type_hints)
Match types of url vars.

>>> match_url_vars_type({'user_id': '1'}, {'user_id': int})
(True, {'user_id': 1})
>>> match_url_vars_type({'user_id': 'foo'}, {'user_id': int})
(False, {})
2.089617
2.292449
0.911522
split_rule = split_by_slash(rule)
split_path = split_by_slash(path)
url_vars = {}
if len(split_rule) != len(split_path):
    return False, {}
for r, p in zip(split_rule, split_path):
    if r.startswith('{') and r.endswith('}'):
        url_vars[r[1:-1]] = p
        continue
    if r != p:
        return False, {}
return True, url_vars
def match_path(rule, path)
Match path.

>>> match_path('/foo', '/foo')
(True, {})
>>> match_path('/foo', '/bar')
(False, {})
>>> match_path('/users/{user_id}', '/users/1')
(True, {'user_id': '1'})
>>> match_path('/users/{user_id}', '/users/not-integer')
(True, {'user_id': 'not-integer'})
2.023793
2.322536
0.871372
if path != '/':
    path = path.rstrip('/')
method = method.upper()
status = 404
for p, n, m in self.endpoints:
    matched, url_vars = match_path(p, path)
    if not matched:  # path: not matched
        continue
    if method not in m:  # path: matched, method: not matched
        status = 405
        raise HTTPError(status=status, body=f'Method not found: {path} {method}')  # it has security issue??
    callback, type_hints = m[method]
    type_matched, typed_url_vars = match_url_vars_type(url_vars, type_hints)
    if not type_matched:  # path: not matched (types are different)
        continue
    return callback, typed_url_vars
raise HTTPError(status=status, body=f'Not found: {path}')
def match(self, path, method)
Get callback and url_vars.

>>> from kobin import Response
>>> r = Router()
>>> def view(user_id: int) -> Response:
...     return Response(f'You are {user_id}')
...
>>> r.add('/users/{user_id}', 'GET', 'user-detail', view)
>>> callback, url_vars = r.match('/users/1', 'GET')
>>> url_vars
{'user_id': 1}
>>> response = callback(**url_vars)
>>> response.body
[b'You are 1']
>>> callback, url_vars = r.match('/notfound', 'GET')
Traceback (most recent call last):
...
kobin.responses.HTTPError
4.552688
4.545883
1.001497
if rule != '/':
    rule = rule.rstrip('/')
method = method.upper()
for i, e in enumerate(self.endpoints):
    r, n, callbacks = e
    if r == rule:
        assert name == n and n is not None, (
            "The same path must use the same name for reverse routing."
        )
        callbacks[method] = (callback, get_type_hints(callback))
        self.endpoints[i] = (r, name, callbacks)
        break
else:
    e = (rule, name, {method: (callback, get_type_hints(callback))})
    self.endpoints.append(e)
def add(self, rule, method, name, callback)
Add a new rule or replace the target for an existing rule.

>>> from kobin import Response
>>> r = Router()
>>> def view(user_id: int) -> Response:
...     return Response(f'You are {user_id}')
...
>>> r.add('/users/{user_id}', 'GET', 'user-detail', view)
>>> path, name, methods = r.endpoints[0]
>>> path
'/users/{user_id}'
>>> name
'user-detail'
>>> callback, type_hints = methods['GET']
>>> view == callback
True
>>> type_hints['user_id'] == int
True
3.972716
4.084863
0.972546
for p, n, _ in self.endpoints:
    if name == n:
        return p.format(**kwargs)
def reverse(self, name, **kwargs)
Reverse routing.

>>> from kobin import Response
>>> r = Router()
>>> def view(user_id: int) -> Response:
...     return Response(f'You are {user_id}')
...
>>> r.add('/users/{user_id}', 'GET', 'user-detail', view)
>>> r.reverse('user-detail', user_id=1)
'/users/1'
9.026961
13.889907
0.649894
'''
A decorator which acquires a lock before attempting to execute its wrapped
function. Releases the lock in a finally clause.

:param fn: The function to wrap.
'''
lock = threading.Lock()

@functools.wraps(fn)
def decorated(*args, **kwargs):
    lock.acquire()
    try:
        return fn(*args, **kwargs)
    finally:
        lock.release()

return decorated
def synchronized(fn)
A decorator which acquires a lock before attempting to execute its wrapped function. Releases the lock in a finally clause.

:param fn: The function to wrap.
3.045427
1.763492
1.72693
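A hedged usage sketch (assumes synchronized is importable from this module):

import threading

counter = {'n': 0}

@synchronized
def increment():
    counter['n'] += 1  # the read-modify-write is serialized across threads

threads = [threading.Thread(target=increment) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter['n'])  # 100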
'''
Atomically sets the value to `value`.

:param value: The value to set.
'''
with self._lock.exclusive:
    self._value = value
    return value
def set(self, value)
Atomically sets the value to `value`. :param value: The value to set.
6.022813
4.420111
1.362593
'''
Atomically sets the value to `value` and returns the old value.

:param value: The value to set.
'''
with self._lock.exclusive:
    oldval = self._value
    self._value = value
    return oldval
def get_and_set(self, value)
Atomically sets the value to `value` and returns the old value. :param value: The value to set.
3.963571
3.195276
1.240447
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.

:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the current value.
'''
with self._lock.exclusive:
    if self._value == expect:
        self._value = update
        return True
    return False
def compare_and_set(self, expect, update)
Atomically sets the value to `update` if the current value is equal to `expect`.

:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the current value.
3.331143
2.112452
1.576908
'''
Atomically adds `delta` to the current value.

:param delta: The delta to add.
'''
with self._lock.exclusive:
    self._value += delta
    return self._value
def add_and_get(self, delta)
Atomically adds `delta` to the current value. :param delta: The delta to add.
5.038958
3.889558
1.295509
'''
Atomically adds `delta` to the current value and returns the old value.

:param delta: The delta to add.
'''
with self._lock.exclusive:
    oldval = self._value
    self._value += delta
    return oldval
def get_and_add(self, delta)
Atomically adds `delta` to the current value and returns the old value. :param delta: The delta to add.
4.30987
3.019706
1.427248
'''
Atomically subtracts `delta` from the current value.

:param delta: The delta to subtract.
'''
with self._lock.exclusive:
    self._value -= delta
    return self._value
def subtract_and_get(self, delta)
Atomically subtracts `delta` from the current value. :param delta: The delta to subtract.
5.11192
3.565198
1.433839
'''
Atomically subtracts `delta` from the current value and returns the old value.

:param delta: The delta to subtract.
'''
with self._lock.exclusive:
    oldval = self._value
    self._value -= delta
    return oldval
def get_and_subtract(self, delta)
Atomically subtracts `delta` from the current value and returns the old value. :param delta: The delta to subtract.
4.512388
3.025818
1.491295
'''
Passes `oldval` and `newval` to each `fn` in the watches dictionary,
passing along its respective key and the reference to this object.

:param oldval: The old value which will be passed to the watch.
:param newval: The new value which will be passed to the watch.
'''
watches = self._watches.copy()
for k in watches:
    fn = watches[k]
    # collections.Callable was removed in Python 3.10; use collections.abc.Callable
    if isinstance(fn, collections.abc.Callable):
        fn(k, self, oldval, newval)
def notify_watches(self, oldval, newval)
Passes `oldval` and `newval` to each `fn` in the watches dictionary, passing along its respective key and the reference to this object.

:param oldval: The old value which will be passed to the watch.
:param newval: The new value which will be passed to the watch.
3.55031
1.617369
2.195115
'''
Given a mutator `fn`, calls `fn` with the atom's current state, `args`,
and `kwargs`. The return value of this invocation becomes the new value
of the atom. Returns the new value.

:param fn: A function which will be passed the current state. Should return a new state. This absolutely *MUST NOT* mutate the reference to the current state! If it does, this function may loop indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
'''
while True:
    oldval = self.deref()
    newval = fn(oldval, *args, **kwargs)
    if self._state.compare_and_set(oldval, newval):
        self.notify_watches(oldval, newval)
        return newval
def swap(self, fn, *args, **kwargs)
Given a mutator `fn`, calls `fn` with the atom's current state, `args`, and `kwargs`. The return value of this invocation becomes the new value of the atom. Returns the new value.

:param fn: A function which will be passed the current state. Should return a new state. This absolutely *MUST NOT* mutate the reference to the current state! If it does, this function may loop indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
4.237253
1.607051
2.636663
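A brief sketch of the compare-and-set retry loop in use; the Atom constructor is an assumption based only on the methods shown here:

atom = Atom(0)                            # hypothetical constructor wrapping the state
print(atom.swap(lambda n: n + 1))         # 1; retried via compare_and_set on contention
print(atom.swap(lambda n, k: n + k, 10))  # 11; extra args are forwarded to the mutator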
'''
Resets the atom's value to `newval`, returning `newval`.

:param newval: The new value to set.
'''
oldval = self._state.get()
self._state.set(newval)
self.notify_watches(oldval, newval)
return newval
def reset(self, newval)
Resets the atom's value to `newval`, returning `newval`. :param newval: The new value to set.
4.74829
2.962466
1.602817
'''
Given `oldval` and `newval`, sets the atom's value to `newval` if and
only if `oldval` is the atom's current value. Returns `True` upon
success, otherwise `False`.

:param oldval: The old expected value.
:param newval: The new value which will be set if and only if `oldval` equals the current value.
'''
ret = self._state.compare_and_set(oldval, newval)
if ret:
    self.notify_watches(oldval, newval)
return ret
def compare_and_set(self, oldval, newval)
Given `oldval` and `newval`, sets the atom's value to `newval` if and only if `oldval` is the atom's current value. Returns `True` upon success, otherwise `False`.

:param oldval: The old expected value.
:param newval: The new value which will be set if and only if `oldval` equals the current value.
3.504154
1.773548
1.975788
'''
Atomically sets the value to `value`.

:param value: The value to set.
'''
with self._reference.get_lock():
    self._reference.value = value
    return value
def set(self, value)
Atomically sets the value to `value`. :param value: The value to set.
5.666901
4.386539
1.291885
'''
Atomically sets the value to `value` and returns the old value.

:param value: The value to set.
'''
with self._reference.get_lock():
    oldval = self._reference.value
    self._reference.value = value
    return oldval
def get_and_set(self, value)
Atomically sets the value to `value` and returns the old value. :param value: The value to set.
3.622559
3.031595
1.194935
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.

:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the current value.
'''
with self._reference.get_lock():
    if self._reference.value == expect:
        self._reference.value = update
        return True
    return False
def compare_and_set(self, expect, update)
Atomically sets the value to `update` if the current value is equal to `expect`.

:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the current value.
3.125544
2.035957
1.535172
'''
Atomically adds `delta` to the current value.

:param delta: The delta to add.
'''
with self._reference.get_lock():
    self._reference.value += delta
    return self._reference.value
def add_and_get(self, delta)
Atomically adds `delta` to the current value. :param delta: The delta to add.
4.410401
3.642921
1.210677
'''
Atomically adds `delta` to the current value and returns the old value.

:param delta: The delta to add.
'''
with self._reference.get_lock():
    oldval = self._reference.value
    self._reference.value += delta
    return oldval
def get_and_add(self, delta)
Atomically adds `delta` to the current value and returns the old value. :param delta: The delta to add.
3.84036
2.812952
1.365242
'''
Atomically subtracts `delta` from the current value.

:param delta: The delta to subtract.
'''
with self._reference.get_lock():
    self._reference.value -= delta
    return self._reference.value
def subtract_and_get(self, delta)
Atomically subtracts `delta` from the current value. :param delta: The delta to subtract.
4.509558
3.39104
1.329845
'''
Atomically subtracts `delta` from the current value and returns the old value.

:param delta: The delta to subtract.
'''
with self._reference.get_lock():
    oldval = self._reference.value
    self._reference.value -= delta
    return oldval
def get_and_subtract(self, delta)
Atomically subtracts `delta` from the current value and returns the old value. :param delta: The delta to subtract.
4.075436
2.853676
1.428135
i18n = {
    "Use a few words, avoid common phrases": _("Use a few words, avoid common phrases"),
    "No need for symbols, digits, or uppercase letters": _("No need for symbols, digits, or uppercase letters"),
    "Add another word or two. Uncommon words are better.": _("Add another word or two. Uncommon words are better."),
    "Straight rows of keys are easy to guess": _("Straight rows of keys are easy to guess"),
    "Short keyboard patterns are easy to guess": _("Short keyboard patterns are easy to guess"),
    "Use a longer keyboard pattern with more turns": _("Use a longer keyboard pattern with more turns"),
    'Repeats like "aaa" are easy to guess': _('Repeats like "aaa" are easy to guess'),
    'Repeats like "abcabcabc" are only slightly harder to guess than "abc"': _('Repeats like "abcabcabc" are only slightly harder to guess than "abc"'),
    "Avoid repeated words and characters": _("Avoid repeated words and characters"),
    'Sequences like "abc" or "6543" are easy to guess': _('Sequences like "abc" or "6543" are easy to guess'),
    "Avoid sequences": _("Avoid sequences"),
    "Recent years are easy to guess": _("Recent years are easy to guess"),
    "Avoid recent years": _("Avoid recent years"),
    "Avoid years that are associated with you": _("Avoid years that are associated with you"),
    "Dates are often easy to guess": _("Dates are often easy to guess"),
    "Avoid dates and years that are associated with you": _("Avoid dates and years that are associated with you"),
    "This is a top-10 common password": _("This is a top-10 common password"),
    "This is a top-100 common password": _("This is a top-100 common password"),
    "This is a very common password": _("This is a very common password"),
    "This is similar to a commonly used password": _("This is similar to a commonly used password"),
    "A word by itself is easy to guess": _("A word by itself is easy to guess"),
    "Names and surnames by themselves are easy to guess": _("Names and surnames by themselves are easy to guess"),
    "Common names and surnames are easy to guess": _("Common names and surnames are easy to guess"),
    "Capitalization doesn't help very much": _("Capitalization doesn't help very much"),
    "All-uppercase is almost as easy to guess as all-lowercase": _("All-uppercase is almost as easy to guess as all-lowercase"),
    "Reversed words aren't much harder to guess": _("Reversed words aren't much harder to guess"),
    "Predictable substitutions like '@' instead of 'a' don't help very much": _("Predictable substitutions like '@' instead of 'a' don't help very much"),
}
translated_text = i18n.get(text)
if translated_text is None:
    # zxcvbn is inconsistent: sometimes there is a trailing dot, sometimes not
    translated_text = i18n.get(text[:-1])
if translated_text is None:
    LOGGER.warning(
        "No translation for '%s' or '%s', update the generatei18ndict command.",
        text, text[:-1],
    )
    return text
return translated_text
def translate_zxcvbn_text(text)
This PR would make it cleaner, but it will take a long time to be integrated into python-zxcvbn and we want this to work now: https://github.com/dropbox/zxcvbn/pull/124
2.344877
2.305357
1.017143
USER_HOME = os.path.expanduser('~')
CONDA_HOME = os.environ.get('CONDA_HOME', '')
PROGRAMDATA = os.environ.get('PROGRAMDATA', '')

# Search common install paths and the system PATH
search_paths = [
    # Windows
    join(PROGRAMDATA, 'miniconda2', 'scripts'),
    join(PROGRAMDATA, 'miniconda3', 'scripts'),
    join(USER_HOME, 'miniconda2', 'scripts'),
    join(USER_HOME, 'miniconda3', 'scripts'),
    join(CONDA_HOME, 'scripts'),
    # Linux
    join(USER_HOME, 'miniconda2', 'bin'),
    join(USER_HOME, 'miniconda3', 'bin'),
    join(CONDA_HOME, 'bin'),
    # TODO: OSX
] + os.environ.get("PATH", "").split(";" if IS_WIN else ":")  # was: 'win' in sys.path, which never matches

cmd = 'conda.exe' if IS_WIN else 'conda'
for conda_path in search_paths:
    conda = join(conda_path, cmd)
    if exists(conda):
        return sh.Command(conda)

# Try to let the system find it
return sh.conda
def find_conda()
Try to find conda on the system
3.024407
2.999127
1.008429
print("[DEBUG]: -> copying {} to {}".format(src, dst)) if os.path.isfile(src): if not exists(dirname(dst)): os.makedirs(dirname(dst)) shutil.copy(src, dst) else: copy_tree(src, dst)
def cp(src, dst)
Like cp -R src dst
3.259579
3.373625
0.966195
cmds = []
for subclass in cls.__subclasses__():
    cmds.append(subclass)
    cmds.extend(find_commands(subclass))
return cmds
def find_commands(cls)
Finds commands by finding the subclasses of Command
2.923439
2.377253
1.229755
# Check if a custom linker exists to handle linking this package
#for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
#    if ep.name.replace("-", '_') == pkg.replace("-", '_'):
#        linker = ep.load()
#        print("Custom linker {} found for '{}'. Linking...".format(linker, pkg))
#        if linker(self.ctx, path):
#            return

#: Use the default builtin linker script
if exists(join(path, pkg, 'build.gradle')):
    print(Colors.BLUE + "[INFO] Linking {}/build.gradle".format(pkg) + Colors.RESET)
    self.link_android(path, pkg)
if exists(join(path, pkg, 'Podfile')):
    print(Colors.BLUE + "[INFO] Linking {}/Podfile".format(pkg) + Colors.RESET)
    self.link_ios(path, pkg)
def link(self, path, pkg)
Link the package in the current directory.
4.382955
4.419465
0.991739
for line in source.split("\n"):
    if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
        return True
return False
def is_settings_linked(source, pkg)
Returns true if the "include ':<project>'" line exists in the file
4.613045
2.983601
1.546133
for line in source.split("\n"):
    if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg), line):
        return True
return False
def is_build_linked(source, pkg)
Returns true if the "compile project(':<project>')" line exists in the file
8.743882
5.889483
1.48466
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
    for filename in fnmatch.filter(filenames, '*Package.java'):
        #: Open and make sure it's an EnamlPackage somewhere
        with open(join(folder, filename)) as f:
            if "implements EnamlPackage" in f.read():
                package = os.path.relpath(folder, root)
                matches.append(os.path.join(package, filename))
return matches
def find_packages(path)
Find all java files matching the "*Package.java" pattern within the given enaml package directory relative to the java source path.
4.29827
3.447031
1.246948
for line in source.split("\n"):
    if java_package in line:
        return True
return False
def is_app_linked(source, pkg, java_package)
Returns true if the compile project line exists in the file
4.134443
2.9489
1.402029
print(Colors.BLUE+"[INFO] Unlinking {}...".format( args.names)+Colors.RESET) for name in args.names: self.unlink(Link.package_dir, name)
def run(self, args=None)
The name IS required here.
13.238237
11.526819
1.148473
#: Check if a custom unlinker exists to handle unlinking this package
for ep in pkg_resources.iter_entry_points(group="enaml_native_unlinker"):
    if ep.name.replace("-", '_') == pkg.replace("-", '_'):
        unlinker = ep.load()
        print("Custom unlinker {} found for '{}'. "
              "Unlinking...".format(unlinker, pkg))
        if unlinker(self.ctx, path):
            return
if exists(join(path, 'android', pkg, 'build.gradle')):
    print("[Android] unlinking {}".format(pkg))
    self.unlink_android(path, pkg)
for target in ['iphoneos', 'iphonesimulator']:
    if exists(join(path, target, pkg, 'Podfile')):
        print("[iOS] unlinking {}".format(pkg))
        self.unlink_ios(path, pkg)
def unlink(self, path, pkg)
Unlink the package in the current directory.
4.446556
4.380933
1.014979
server = self
import tornado.ioloop
import tornado.web
import tornado.websocket
ioloop = tornado.ioloop.IOLoop.current()

class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        super(DevWebSocketHandler, self).open()
        server.on_open(self)

    def on_message(self, message):
        server.on_message(self, message)

    def on_close(self):
        super(DevWebSocketHandler, self).on_close()
        server.on_close(self)

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write(server.index_page)

#: Set the call later method
server.call_later = ioloop.call_later
server.add_callback = ioloop.add_callback

app = tornado.web.Application([
    (r"/", MainHandler),
    (r"/dev", DevWebSocketHandler),
])
app.listen(self.port)
print("Tornado Dev server started on {}".format(self.port))
ioloop.start()
def run_tornado(self, args)
Tornado dev server implementation
2.234092
2.17772
1.025886
server = self
from twisted.internet import reactor
from twisted.web import resource
from twisted.web.static import File
from twisted.web.server import Site
from autobahn.twisted.websocket import (WebSocketServerFactory,
                                        WebSocketServerProtocol)
from autobahn.twisted.resource import WebSocketResource

class DevWebSocketHandler(WebSocketServerProtocol):
    def onConnect(self, request):
        super(DevWebSocketHandler, self).onConnect(request)
        server.on_open(self)

    def onMessage(self, payload, isBinary):
        server.on_message(self, payload)

    def onClose(self, wasClean, code, reason):
        super(DevWebSocketHandler, self).onClose(wasClean, code, reason)
        server.on_close(self)

    def write_message(self, message, binary=False):
        self.sendMessage(message, binary)

#: Set the call later method
server.call_later = reactor.callLater
server.add_callback = reactor.callFromThread

factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
factory.protocol = DevWebSocketHandler

class MainHandler(resource.Resource):
    def render_GET(self, req):
        return str(server.index_page)

root = resource.Resource()
root.putChild("", MainHandler())
root.putChild("dev", WebSocketResource(factory))
reactor.listenTCP(self.port, Site(root))
print("Twisted Dev server started on {}".format(self.port))
reactor.run()
def run_twisted(self, args)
Twisted dev server implementation
2.27904
2.24236
1.016358
if self.remote_debugging:
    #: Forward to other clients
    for h in self.handlers:
        if h != handler:
            h.write_message(msg, True)
else:
    print(msg)
def on_message(self, handler, msg)
In remote debugging mode this simply acts as a forwarding proxy for the two clients.
7.75821
5.632235
1.377466
if not self.handlers:
    return  #: Client not connected
for h in self.handlers:
    h.write_message(msg)
def send_message(self, msg)
Send a message to the client. This should not be used in remote debugging mode.
8.11923
7.457364
1.088753
commands = [c() for c in find_commands(Command)]

#: Get commands installed via entry points
for ep in pkg_resources.iter_entry_points(group="enaml_native_command"):
    c = ep.load()
    if not issubclass(c, Command):
        print("Warning: entry point {} did not return a valid enaml "
              "cli command! This command will be ignored!".format(ep.name))
        continue  # skip it, as the warning promises
    commands.append(c())
return commands
def _default_commands(self)
Build the list of CLI commands by finding subclasses of the Command class. Also allows commands to be installed using the "enaml_native_command" entry point. This entry point should return a Command subclass.
5.556754
4.382113
1.268054
if not self.in_app_directory:
    print("Warning: {} does not exist. Using the default.".format(self.package))
    ctx = {}
else:
    with open(self.package) as f:
        ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))

if self.in_app_directory:
    # Update the env for each platform
    excluded = list(ctx.get('excluded', []))
    for env in [ctx['ios'], ctx['android']]:
        if 'python_build_dir' not in env:
            env['python_build_dir'] = expanduser(abspath('build/python'))
        if 'conda_prefix' not in env:
            env['conda_prefix'] = os.environ.get(
                'CONDA_PREFIX', expanduser(abspath('venv')))
        # Join the shared and local exclusions
        env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx
def _default_ctx(self)
Return the package config or context and normalize some of the values
4.709142
4.466484
1.054329
parser = ArgumentParser(prog='enaml-native')

#: Build commands by name
cmds = {c.title: c for c in self.commands}

#: Build parser, prepare commands
subparsers = parser.add_subparsers()
for c in self.commands:
    p = subparsers.add_parser(c.title, help=c.help)
    c.parser = p
    for (flags, kwargs) in c.args:
        p.add_argument(*flags.split(), **kwargs)
    p.set_defaults(cmd=c)
    c.ctx = self.ctx
    c.cmds = cmds
    c.cli = self
return parser
def _default_parser(self)
Generate a parser using the command list
4.29426
4.15775
1.032833
self.check_dependencies()
self.args = self.parser.parse_args()

# Python 3 doesn't set the cmd if no args are given
if not hasattr(self.args, 'cmd'):
    self.parser.print_help()
    return

cmd = self.args.cmd
try:
    if cmd.app_dir_required and not self.in_app_directory:
        raise EnvironmentError(
            "'enaml-native {}' must be run within an app root "
            "directory not: {}".format(cmd.title, os.getcwd()))
    cmd.run(self.args)
except sh.ErrorReturnCode as e:
    raise
def start(self)
Run the commands
5.65273
5.472051
1.033018
for (path, directories, filenames) in os.walk(folder):
    for filename in filenames:
        yield os.path.join('..', path, filename)
def find_data(folder)
Include everything in the folder
2.939379
2.776193
1.05878
return joiner.join(choice(words) for each in range(number))
def generate(number=4, choice=SystemRandom().choice, words=words, joiner=" ")
Generate a random passphrase from the GSL.
4.169101
4.306958
0.967992
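A hedged usage sketch (assumes this module's generate and its bundled GSL word list):

print(generate())                       # e.g. 'water about think people' (four words)
print(generate(number=6, joiner='-'))   # six words joined with hyphens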
"Worker method for writing meta info" write_varlen(self.data, 0) # tick write_byte(self.data, byte1) write_byte(self.data, byte2) write_varlen(self.data, len(data)) write_chars(self.data, data)
def write_meta_info(self, byte1, byte2, data)
Worker method for writing meta info
4.181903
3.61535
1.156708
return ((choices ** length) / (speed * optimism))
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000 * 1000, optimism=2)
How long might it take to guess a password?

@param length: the number of words that we're going to choose.
@type length: L{int}

@param choices: the number of words we might choose between.
@type choices: L{int}

@param speed: the speed of our hypothetical password guesser, in guesses per second.
@type speed: L{int}

@param optimism: When we start guessing all the options, we probably won't have to guess I{all} of them to get a hit. This assumes that the guesser will have to guess only C{1/optimism} of the total number of possible options before it finds a hit.
16.910307
54.380287
0.310964
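A worked example of the estimate; the 2048-entry word list is purely illustrative, not the GSL's real size (assumes how_long from this module):

seconds = how_long(length=4, choices=2048, speed=10**12, optimism=2)
print(seconds)  # 2048**4 / (1e12 * 2) = 8.796093022208 seconds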
result = []
value = initial_value
for divisor, label in factors:
    if not divisor:
        remainder = value
        if not remainder:
            break
    else:
        value, remainder = divmod(value, divisor)
        if not value and not remainder:
            break
    if remainder == 1:
        # depluralize
        label = label[:-1]
    if six.PY2:
        addition = unicode(remainder) + ' ' + unicode(label)
    else:
        addition = str(remainder) + ' ' + str(label)
    result.insert(0, addition)
if len(result) > 1:
    result[-1] = "and " + result[-1]
if result:
    return ', '.join(result)
else:
    return "instantly"
def redivmod(initial_value, factors)
Chop up C{initial_value} according to the list of C{factors} and return a formatted string.
3.201208
3.112067
1.028644
return self.__class__(p1 % p2 for p1, p2 in zip(self, other))
def zip(self, other)
zips two sequences unifying the corresponding points.
6.112164
4.912226
1.244276
from sebastian.core.transforms import lilypond
seq = HSeq(self) | lilypond()
lily_output = write_lilypond.lily_format(seq)
if not lily_output.strip():
    # In the case of empty lily outputs, return self to get a textual display
    return self

if format == "png":
    suffix = ".preview.png"
    args = ["lilypond", "--png", "-dno-print-pages", "-dpreview"]
elif format == "svg":
    suffix = ".preview.svg"
    args = ["lilypond", "-dbackend=svg", "-dno-print-pages", "-dpreview"]

f = tempfile.NamedTemporaryFile(suffix=suffix)
basename = f.name[:-len(suffix)]
args.extend(["-o" + basename, "-"])

# Pass shell=True so that if your $PATH contains ~ it will get expanded.
# This also changes the way the arguments get passed in.
# To work correctly, pass them as a string.
p = sp.Popen(" ".join(args), stdin=sp.PIPE, shell=True)
stdout, stderr = p.communicate("{ %s }" % lily_output)
if p.returncode != 0:
    # there was an error
    #raise IOError("Lilypond execution failed: %s%s" % (stdout, stderr))
    return None

if not ipython:
    return f.read()
if format == "png":
    return Image(data=f.read(), filename=f.name, format="png")
else:
    return SVG(data=f.read(), filename=f.name)
def display(self, format="png")
Return an object that can be used to display this sequence. This is used for IPython Notebook.

:param format: "png" or "svg"
4.995382
4.992111
1.000655
point = Point(point)
self._elements.append(point)
def append(self, point)
appends a copy of the given point to this sequence
7.832922
6.807663
1.150604
x = HSeq()
for i in range(count):
    x = x.concatenate(self)
return x
def repeat(self, count)
repeat sequence given number of times to produce a new sequence
6.246914
5.31355
1.175657
from sebastian.core import DURATION_64

def subseq_iter(start_offset, end_offset):
    cur_offset = 0
    for point in self._elements:
        try:
            cur_offset += point[DURATION_64]
        except KeyError:
            raise ValueError("HSeq.subseq requires all points to have a %s attribute" % DURATION_64)
        # Skip until start
        if cur_offset < start_offset:
            continue
        # Yield points start_offset <= point < end_offset
        if end_offset is None or cur_offset < end_offset:
            yield point
        else:
            return  # PEP 479: raising StopIteration inside a generator is an error

return HSeq(subseq_iter(start_offset, end_offset))
def subseq(self, start_offset=0, end_offset=None)
Return a subset of the sequence starting at start_offset (defaulting to the beginning) ending at end_offset (None representing the end, which is the default).

Raises ValueError if duration_64 is missing on any element.
4.494763
3.920361
1.146518
point['sequence'] = HSeq(point['sequence'][i] for i in pattern)
return point
def arpeggio(pattern, point)
turns each subsequence into an arpeggio matching the given ``pattern``.
13.690904
15.588294
0.878281
point['sequence'] = point['sequence'] * (point[DURATION_64] / (8 * duration)) | add({DURATION_64: duration})
return point
def fill(duration, point)
fills the subsequence of the point with repetitions of its subsequence and sets the ``duration`` of each point.
20.430494
18.03808
1.132631
expanse = []
for point in sequence:
    if 'sequence' in point:
        expanse.extend(expand(point['sequence']))
    else:
        expanse.append(point)
return sequence.__class__(expanse)
def expand(sequence)
expands a tree of sequences into a single, flat sequence, recursively.
3.503271
3.022978
1.158881
points = []
for i, p in enumerate(sequence):
    copy = Point(p)
    copy['index'] = i
    points.append(copy)
return sequence.__class__(points)
def debug(sequence)
adds information to the sequence for better debugging, currently only an index property on each point in the sequence.
5.2238
3.249908
1.607368
@wraps(f)
def wrapper(*args, **kwargs):
    # The arguments here are the arguments passed to the transform,
    # ie, there will be no "point" argument.
    # Send a function to seq.map_points with all of its arguments applied except point.
    return lambda seq: seq.map_points(partial(f, *args, **kwargs))
return wrapper
def transform_sequence(f)
A decorator to take a function operating on a point and turn it into a function returning a callable operating on a sequence. The functions passed to this decorator must define a kwarg called "point", or have point be the last positional argument
9.932734
8.833548
1.124433
def _(sequence):
    return sequence.subseq(start_offset, end_offset)
return _
def subseq(start_offset=0, end_offset=None)
Return a portion of the input sequence
5.853618
7.09188
0.825397
# If lilypond already computed, leave as is
if "lilypond" in point:
    return point

# Defaults:
pitch_string = ""
octave_string = ""
duration_string = ""
preamble = ""
dynamic_string = ""

if "pitch" in point:
    octave = point["octave"]
    pitch = point["pitch"]
    if octave > 4:
        octave_string = "'" * (octave - 4)
    elif octave < 4:
        octave_string = "," * (4 - octave)
    else:
        octave_string = ""
    m = modifiers(pitch)
    if m > 0:
        modifier_string = "is" * m
    elif m < 0:
        modifier_string = "es" * -m
    else:
        modifier_string = ""
    pitch_string = letter(pitch).lower() + modifier_string

if DURATION_64 in point:
    duration = point[DURATION_64]
    if duration > 0:
        if duration % 3 == 0:  # dotted note
            duration_string = str(192 // (2 * duration)) + "."
        else:
            duration_string = str(64 // duration)
    # TODO: for now, if we have a duration but no pitch, show a 'c' with an x note
    if duration_string:
        if not pitch_string:
            pitch_string = "c"
            octave_string = "'"
            preamble = r'\xNote '

if "dynamic" in point:
    dynamic = point["dynamic"]
    if dynamic == "crescendo":
        dynamic_string = "\\<"
    elif dynamic == "diminuendo":
        dynamic_string = "\\>"
    else:
        dynamic_string = "\\%s" % (dynamic,)

point["lilypond"] = "%s%s%s%s%s" % (preamble, pitch_string, octave_string, duration_string, dynamic_string)
return point
def lilypond(point)
Generate lilypond representation for a point
3.480459
3.455701
1.007164
def _(sequence):
    if start in _dynamic_markers_to_velocity:
        start_velocity = _dynamic_markers_to_velocity[start]
        start_marker = start
    else:
        raise ValueError("Unknown start dynamic: %s, must be in %s" % (start, _dynamic_markers_to_velocity.keys()))

    if end is None:
        end_velocity = start_velocity
        end_marker = start_marker
    elif end in _dynamic_markers_to_velocity:
        end_velocity = _dynamic_markers_to_velocity[end]
        end_marker = end
    else:
        raise ValueError("Unknown end dynamic: %s, must be in %s" % (end, _dynamic_markers_to_velocity.keys()))

    retval = sequence.__class__([Point(point) for point in sequence._elements])
    velocity_interval = (float(end_velocity) - float(start_velocity)) / (len(retval) - 1) if len(retval) > 1 else 0
    velocities = [int(start_velocity + velocity_interval * pos) for pos in range(len(retval))]

    # insert dynamics markers for lilypond
    if start_velocity > end_velocity:
        retval[0]["dynamic"] = "diminuendo"
        retval[-1]["dynamic"] = end_marker
    elif start_velocity < end_velocity:
        retval[0]["dynamic"] = "crescendo"
        retval[-1]["dynamic"] = end_marker
    else:
        retval[0]["dynamic"] = start_marker

    for point, velocity in zip(retval, velocities):
        point["velocity"] = velocity
    return retval
return _
def dynamics(start, end=None)
Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics.

You can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', 'ffff']

Args:
    start: beginning dynamic marker, if no end is specified all notes will get this marker
    end: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker

Example usage:
    s1 | dynamics('p')        # play a sequence in piano
    s2 | dynamics('p', 'ff')  # crescendo from p to ff
    s3 | dynamics('ff', 'p')  # diminuendo from ff to p
2.692121
2.541314
1.059342
logger.debug("Getting blocks for {0}".format(user_text)) doc = session.get(action='query', list='blocks', bkusers=user_text, bkprop=['id', 'timestamp']) return [mwtypes.Timestamp(b['timestamp']) for b in doc['query']['blocks']]
def user_blocks(user_text, session)
Returns a list of blocks for a single user
6.698046
5.998682
1.116586
logger.debug("Getting user_blocks for {0}".format(user_text)) doc = session.get(action='query', list='blocks', bkusers=user_text, bkprop=['id']) return doc['query']['blocks']
def get_user_blocks(session, user_text)
Returns a list of blocks for a single user
6.326558
5.941597
1.064791
doc = session.get(action='query', prop='revisions', revids=revids, **params)
for page_doc in doc['query'].get('pages', {}).values():
    revisions = page_doc.get('revisions', [])
    if 'revisions' in page_doc:
        del page_doc['revisions']
    for revision_doc in revisions:
        revision_doc['page'] = page_doc
        yield revision_doc
def query_revisions_by_revids(session, revids, **params)
Gets a set of revisions by their IDs by repeatedly querying in batches. If an ID cannot be found, it is ignored.
2.82549
2.9273
0.965221
env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(templates_path),
    lstrip_blocks=True,
    trim_blocks=True
)

def norm_alg_filename(alg_name):
    if alg_name in variables['globals']['algorithm_filename_parts']:
        return variables['globals']['algorithm_filename_parts'][alg_name]
    else:
        raise KeyError("{0} not found in globals.algorithm_filename_parts"
                       .format(alg_name))

env.globals.update(norm_alg_filename=norm_alg_filename)
template = env.from_string(main_template)
return template.render(variables) + "\n"
def generate(variables, templates_path, main_template)
:Parameters:
    variables : dict
        Template parameters, passed through.
    templates_path : str
        Root directory for transclusions.
    main_template : str
        Contents of the main template.

Returns the rendered output.
2.800968
2.857562
0.980195
try:
    with open(filepath, 'r', encoding='utf-8') as file:
        return email.message_from_file(file)
except FileNotFoundError:
    logger.warning("Category file %s not found", filepath)
    orm.delete(c for c in model.Category if c.file_path == filepath)
    orm.commit()
    return None
def load_metafile(filepath)
Load a metadata file from the filesystem
4.247115
4.281512
0.991966