Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
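Each record below follows that three-column layout: a code sample, its SHA-1 digest, and an integer id. As a minimal sketch of how a dump with this schema could be iterated, assuming the records are stored as JSON lines with these exact field names (the file name is hypothetical):

import hashlib
import json

# Iterate a hypothetical JSON-lines export with fields: content (str), sha1 (str), id (int).
with open("code_samples.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # Recompute the digest, assuming the stored sha1 was taken over the raw content bytes.
        digest = hashlib.sha1(record["content"].encode("utf-8")).hexdigest()
        if digest != record["sha1"]:
            print(f"id {record['id']}: stored sha1 differs from the recomputed digest")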
def gen_rsa(): """ Generate an RSA Key Pair for digital signature this is designed to be called once per user TODO maybe this belongs in server-specific code since server will need to know public and private keys """ pkey = PKey() pkey.generate_key(TYPE_RSA, RSA_BITS) pkey.check() return pkey
ea6566c6486eaf524f026092f41adb414161ed69
3,649,100
import sys def get_token_type_str(token): """Return a string representation of the type of the given token object. Parameters ---- token : Token Returns ---- str : str """ token_type = token.type if token_type == TOKEN_TYPE_IF: return '(keyword)if' elif token_type == TOKEN_TYPE_ELIF: return '(keyword)elif' elif token_type == TOKEN_TYPE_ELSE: return '(keyword)else' elif token_type == TOKEN_TYPE_FOR: return '(keyword)for' elif token_type == TOKEN_TYPE_IN: return '(keyword)in' elif token_type == TOKEN_TYPE_WHILE: return '(keyword)while' elif token_type == TOKEN_TYPE_BREAK: return '(keyword)break' elif token_type == TOKEN_TYPE_NOT: return '(keyword)not' elif token_type == TOKEN_TYPE_AND: return '(keyword)and' elif token_type == TOKEN_TYPE_OR: return '(keyword)or' elif token_type == TOKEN_TYPE_RETURN: return '(keyword)return' elif token_type == TOKEN_TYPE_IMPORT: return '(keyword)import' elif token_type == TOKEN_TYPE_FUN: return '(keyword)fun' elif token_type == TOKEN_TYPE_CLASS: return '(keyword)class' elif token_type == TOKEN_TYPE_LET: return '(keyword)let' elif token_type == TOKEN_TYPE_GLOBAL: return '(keyword)global' elif token_type == TOKEN_TYPE_TRUE: return '(keyword)True' elif token_type == TOKEN_TYPE_FALSE: return '(keyword)False' elif token_type == TOKEN_TYPE_CONTINUE: return '(keyword)continue' elif token_type == TOKEN_TYPE_DEL: return '(keyword)del' elif token_type == TOKEN_TYPE_ADD: return '(add)+' elif token_type == TOKEN_TYPE_SUB: return '(sub)-' elif token_type == TOKEN_TYPE_MUL: return '(mul)*' elif token_type == TOKEN_TYPE_DIV: return '(div)/' elif token_type == TOKEN_TYPE_MOD: return '(mod)%' elif token_type == TOKEN_TYPE_POWER: return '(power)**' elif token_type == TOKEN_TYPE_EQU: return '(equ)==' elif token_type == TOKEN_TYPE_NEQU: return '(nequ)!=' elif token_type == TOKEN_TYPE_GT: return '(gt)>' elif token_type == TOKEN_TYPE_LT: return '(lt)<' elif token_type == TOKEN_TYPE_GE: return '(ge)>=' elif token_type == TOKEN_TYPE_LE: return '(le)<=' elif token_type == TOKEN_TYPE_ASSIGN: return '(assign)=' elif token_type == TOKEN_TYPE_LOGIC_AND: return '(logic_and)&' elif token_type == TOKEN_TYPE_LOGIC_OR: return '(logic_or)|' elif token_type == TOKEN_TYPE_LOGIC_NOT: return '(logic_not)~' elif token_type == TOKEN_TYPE_LOGIC_XOR: return '(logic_xor)^' elif token_type == TOKEN_TYPE_LOGIC_SHL: return '(logic_shl)<<' elif token_type == TOKEN_TYPE_LOGIC_SHR: return '(logic_shr)>>' elif token_type == TOKEN_TYPE_NUM: return '(num)' + token.str elif token_type == TOKEN_TYPE_STR: return '(str)' + token.str elif token_type == TOKEN_TYPE_COMMA: return '(comma),' elif token_type == TOKEN_TYPE_POINT: return '(point).' 
elif token_type == TOKEN_TYPE_COLON: return '(colon):' elif token_type == TOKEN_TYPE_SEMICOLON: return '(semicolon);' elif token_type == TOKEN_TYPE_LEFT_PARENT: return '(left_parent)(' elif token_type == TOKEN_TYPE_RIGHT_PARENT: return '(right_parent))' elif token_type == TOKEN_TYPE_LEFT_BRACKET: return '(left_bracket)[' elif token_type == TOKEN_TYPE_RIGHT_BRACKET: return '(right_bracket)]' elif token_type == TOKEN_TYPE_LEFT_BRACE: return '(left_brace){' elif token_type == TOKEN_TYPE_RIGHT_BRACE: return '(right_brace)}' elif token_type == TOKEN_TYPE_DOUBLE_QUOTATION: return '(double_quotation)"' elif token_type == TOKEN_TYPE_SINGLE_QUOTE: return "(single_quote)'" elif token_type == TOKEN_TYPE_ID: return '(id)' + token.str elif token_type == TOKEN_TYPE_STR_LINES: return '(str_line)' + token.str elif token_type == TOKEN_TYPE_SELF: return '(keyword)this' elif token_type == TOKEN_TYPE_UNKNOWN: return '(unknown)UNKNOWN' elif token_type == TOKEN_TYPE_EOF: return '(eof)EOF' print("Token '%s' doesn't exist!" % token.str) sys.exit(1)
66d3c811eaf32ca891e0d130bdb7eaed35d9c5c4
3,649,101
import os def _( sklearn_model: ensemble.GradientBoostingRegressor, path: os.PathLike, ) -> tf.keras.Model: """Converts a gradient boosting regression model into a TFDF model.""" if isinstance(sklearn_model.init_, dummy.DummyRegressor): # If the initial estimator is a DummyRegressor, then it predicts a constant # which can be passed to GradientBoostedTreeBuilder as a bias. init_pytree = None bias = sklearn_model.init_.constant_[0][0] elif isinstance(sklearn_model.init_, tree.DecisionTreeRegressor): # If the initial estimator is a DecisionTreeRegressor, we add it as the # first tree in the ensemble and set the bias to zero. We could also support # other tree-based initial estimators (e.g. RandomForest), but this seems # like a niche enough use case that we don't for the moment. init_pytree = convert_sklearn_tree_to_tfdf_pytree(sklearn_model.init_) bias = 0.0 elif sklearn_model.init_ == "zero": init_pytree = None bias = 0.0 else: raise ValueError("The initial estimator must be either a DummyRegressor" "or a DecisionTreeRegressor, but got" f"{type(sklearn_model.init_)}.") gbt_builder = tfdf.builder.GradientBoostedTreeBuilder( path=path, objective=tfdf.py_tree.objective.RegressionObjective(label="label"), bias=bias, ) if init_pytree: gbt_builder.add_tree(init_pytree) for weak_learner in sklearn_model.estimators_.ravel(): gbt_builder.add_tree(convert_sklearn_tree_to_tfdf_pytree( weak_learner, weight=sklearn_model.learning_rate, )) gbt_builder.close() return tf.keras.models.load_model(path)
d40ae1ff430cf0a5096611b97ff4f7fee40fe8bf
3,649,102
import queue def task_checkqueue(storage): """ Task that watches a queue for messages and acts on them when received. """ # Get the queue object from the storage dictionary thequeue = storage.get("queue") try: # Use a timeout so it blocks for at-most 0.5 seconds while waiting for a message. Smaller values can be used to # increase the cycling of the task and responsiveness to Threadify control signals (like pause) if desired. msg = thequeue.get(block=True, timeout=.5) except queue.Empty: print("_", end="") else: if msg == "QUIT": return False # Print received message print("{:s}".format(msg), end="") return True
3c7e8cfda53abb0551916894719e66b3d27886e9
3,649,103
import torch def to_sparse(x): """ converts dense tensor x to sparse format """ x_typename = torch.typename(x).split('.')[-1] sparse_tensortype = getattr(torch.sparse, x_typename) indices = torch.nonzero(x) if len(indices.shape) == 0: # if all elements are zeros return sparse_tensortype(*x.shape) indices = indices.t() values = x[tuple(indices[i] for i in range(indices.shape[0]))] return sparse_tensortype(indices, values, x.size())
b9af99c3c6e41e4f6a73ad213f58338110329dbc
3,649,104
def get_top_article_categories(): """ Get the list of top-level article categories. Custom template tag. """ return Category.objects.filter(level=1)
88ed0aefe81b3190590974a38c9363f862b8db6c
3,649,105
def filter_variants_top_k(log, k, parameters=None): """ Keeps the top-k variants of the log Parameters ------------- log Event log k Number of variants that should be kept parameters Parameters Returns ------------- filtered_log Filtered log """ if parameters is None: parameters = {} variants = variants_get.get_variants_count(log, parameters=parameters) variant_count = [] for variant in variants: variant_count.append([variant, variants[variant]]) variant_count = sorted(variant_count, key=lambda x: (x[1], x[0]), reverse=True) variant_count = variant_count[:min(k, len(variant_count))] variants_to_filter = [x[0] for x in variant_count] return apply(log, variants_to_filter, parameters=parameters)
20e273f5d3ac88e3bc9d0795566b2536c10b5703
3,649,106
def get_weighted_average(embedding, x, w): """ Compute the weighted average vectors :param embedding: embedding[i,:] is the vector for word i :param x: x[i, :] are the indices of the words in sentence i :param w: w[i, :] are the weights for the words in sentence i :return: emb[i, :] are the weighted average vector for sentence i """ n_samples = x.shape[0] emb = np.zeros((n_samples, embedding.shape[1])) for i in range(n_samples): emb[i, :] = w[i, :].dot(embedding[x[i, :], :]) / np.count_nonzero(w[i, :]) return emb
e5cd9984e49075530f981c8600c7e6d86de3c113
3,649,107
from glyphsLib import glyphdata # Expensive import def _build_gdef(ufo): """Build a table GDEF statement for ligature carets.""" bases, ligatures, marks, carets = set(), set(), set(), {} category_key = GLYPHLIB_PREFIX + 'category' subCategory_key = GLYPHLIB_PREFIX + 'subCategory' for glyph in ufo: has_attaching_anchor = False for anchor in glyph.anchors: name = anchor.name if name and not name.startswith('_'): has_attaching_anchor = True if name and name.startswith('caret_') and 'x' in anchor: carets.setdefault(glyph.name, []).append(round(anchor['x'])) lib = glyph.lib glyphinfo = glyphdata.get_glyph(glyph.name) # first check glyph.lib for category/subCategory overrides; else use # global values from GlyphData category = lib.get(category_key) if category is None: category = glyphinfo.category subCategory = lib.get(subCategory_key) if subCategory is None: subCategory = glyphinfo.subCategory # Glyphs.app assigns glyph classes like this: # # * Base: any glyph that has an attaching anchor # (such as "top"; "_top" does not count) and is neither # classified as Ligature nor Mark using the definitions below; # # * Ligature: if subCategory is "Ligature" and the glyph has # at least one attaching anchor; # # * Mark: if category is "Mark" and subCategory is either # "Nonspacing" or "Spacing Combining"; # # * Compound: never assigned by Glyphs.app. # # https://github.com/googlei18n/glyphsLib/issues/85 # https://github.com/googlei18n/glyphsLib/pull/100#issuecomment-275430289 if subCategory == 'Ligature' and has_attaching_anchor: ligatures.add(glyph.name) elif category == 'Mark' and (subCategory == 'Nonspacing' or subCategory == 'Spacing Combining'): marks.add(glyph.name) elif has_attaching_anchor: bases.add(glyph.name) if not any((bases, ligatures, marks, carets)): return None lines = ['table GDEF {', ' # automatic'] glyphOrder = ufo.lib[PUBLIC_PREFIX + 'glyphOrder'] glyphIndex = lambda glyph: glyphOrder.index(glyph) fmt = lambda g: ('[%s]' % ' '.join(sorted(g, key=glyphIndex))) if g else '' lines.extend([ ' GlyphClassDef', ' %s, # Base' % fmt(bases), ' %s, # Liga' % fmt(ligatures), ' %s, # Mark' % fmt(marks), ' ;']) for glyph, caretPos in sorted(carets.items()): lines.append(' LigatureCaretByPos %s %s;' % (glyph, ' '.join(unicode(p) for p in sorted(caretPos)))) lines.append('} GDEF;') return '\n'.join(lines)
2163971557a8908cce5f142f2e9dfc7fe360f190
3,649,108
def winningRate2(r, s, X, Y): """ revised version, now we want to investigate how value of X and Y will affect. r: int = remaining round of game s: int = current score X: int = points winning for X-head Y: int = points wining for Y-head (assuming X and Y are both fair, and we always assume Y > X) """ if X > Y: X, Y = Y, X def rec(r, s): if (r, s) not in cache: if r < 1: raise (TypeError("r can not be smaller than 1.")) if r == 1: if s <= -Y: # only Y head for the win. cache[(r, s)] = 0 return cache[(r, s)] if s >= (-Y + 1) and s <= X: # play X or Y shall be the same cache[(r, s)] = 0.5 return cache[(r, s)] if s > X: # play X, guarenteed win cache[(r, s)] = 1 return cache[(r, s)] cache[(r, s)] = max( (rec(r - 1, s + X) + rec(r - 1, s - X)) / 2, (rec(r - 1, s + Y) + rec(r - 1, s - Y)) / 2, ) return cache[(r, s)] return rec(r, s)
d33b05aa429044cb76b33842e33b99c1d1d6de7f
3,649,109
import json import os def handler(event, context): """Deletes all user content based on username provided in body, only accessible from authenticated users with the custom:group=admin""" logger.info(f"Received event: {json.dumps(event)}") try: if event["requestContext"]["authorizer"]["claims"]["custom:group"] != "admin": logger.error("User does not have permissions to call this function") retval = { "body": "ERROR: User does not have permissions to call this function", "headers": httpHeaders, "statusCode": 200, } return retval except KeyError: logger.error("custom:group field not found in token") retval = { "body": "ERROR: custom:group field not found in token", "headers": httpHeaders, "statusCode": 200, } return retval username = json.loads(event["body"])["username"] user_pool_id = os.environ["USER_POOL_ID"] table = ddb.Table(os.environ["USER_TABLE"]) # Query user and return contents of assets response = table.query(KeyConditionExpression=Key("userName").eq(username)) if len(response["Items"]) == 1: if response["Items"][0]["assets"] == None: # User exists but no assets have been created. Only delete the Cognito user AWS_delete.cognito_user(username, user_pool_id) logger.info(f"INFO: User: {username} delete from Cognito, no other assets found") else: assets = response["Items"][0]["assets"] # Remove dispenser from DispenserTable (and entry into to event table) AWS_delete.clean_dispenser_tables(assets["iot"]["thingName"]) # Detach Cognito identity from IoT policy AWS_delete.cognito_identity_iot_policy( cognito_identity_id = assets["cognito"]["principalId"], iot_policy=assets["cognito"]["iotPolicy"] ) # Delete AWS thing, cert AWS_delete.iot_thing_certificate( assets["iot"]["certificateArn"], assets["iot"]["thingName"] ) AWS_delete.cloud9(environment_id=assets["cloud9"]["environmentId"]) # Delete Cognito AWS_delete.cognito_user(username, user_pool_id) # Delete IAM user last AWS_delete.iam_user(username) try: # Delete User's DynamoDB record response = table.delete_item(Key={"userName": username}) except ClientError as e: if e.response["Error"]["Code"] == "ConditionalCheckFailedException": print(e.response["Error"]["Message"]) else: raise logger.info(f"INFO: User: {username} assets and entry deleted") retval = { "body": f"INFO: User: {username} assets and entry deleted", "headers": httpHeaders, "statusCode": 200, } else: retval = { "body": f"WARNING: User: {username} not found, no action taken", "headers": httpHeaders, "statusCode": 200, } return retval
392fdd6a2a67b94909d095e15bfe7870ba349f42
3,649,110
def DayOfWeek(year,month,day): """DayOfWeek returns the day of week 1-7, 1 being Monday for the given year, month and day""" num=year*365 num=num+year//4+1 num=num-(year//100+1) num=num+year//400+1 if month<3 and LeapYear(year): num=num-1 return (num+MONTH_OFFSETS[month-1]+day+4)%7+1
41c974e1342e65d553702d0610e8dc9c671538a6
3,649,111
import os def get_stand_exe() -> str: """Get the path to standexe Returns: Path to standexe Raises: ValueError: If STAND_EXE is not found in environment variables. """ stand_exe = os.environ.get('STAND_EXE') if stand_exe: return stand_exe raise ValueError('STAND_EXE environment variable is not found.')
da44d23239060874965617c24ab0bd678c9535b9
3,649,112
def chef_execute_cli_commands(configuration): """ API to generate sonic cli commands with the provided configuration :param configuration: :return: """ if not configuration: return False commands = "" action_run = "action:run" for module in configuration: if module == "vlans": member_commands = config_cmd = member_config_cmd = "" for action in configuration[module]: if action == "add": module_action = "vlan_add" member_action = "vlan_member_add" elif action == "del": module_action = "vlan_del" member_action = "vlan_member_del" commands += "execute '{}' do\n".format(module_action) member_commands += "execute '{}' do\n".format(member_action) for vlan_id in configuration[module][action]: config_cmd += "config vlan {} {}".format(action, vlan_id) + " && " if "members" in configuration[module][action][vlan_id]: for member in configuration[module][action][vlan_id]["members"]: untag = "" if member["tagged"] or member["tagged"] == "True" else "-u" member_config_cmd += "config vlan member {} {} {} {}".format(action, vlan_id, member["port"], untag).strip() + " && " else: member_commands = "" config_cmd = config_cmd.rstrip(" &") member_config_cmd = member_config_cmd.rstrip(" &") commands += " command '{}'\n".format(config_cmd) member_commands += " command '{}'\n".format(member_config_cmd) commands += " {}\n".format(action_run) commands += "end\n\n" if member_commands: member_commands += " {}\n".format(action_run) member_commands += "end\n\n" commands += member_commands if module == "fdbs": for action in configuration[module]: config_cmd = "" if action == "add": module_action = "fdb_add" elif action == "del": module_action = "fdb_del" commands += "execute '{}' do\n".format(module_action) for entry in configuration[module][action]: mac = entry["mac"] if "mac" in entry else "" vlan_id = entry["vlan_id"] if "vlan_id" in entry else "" port = entry["port"] if "port" in entry else "" if action == "del": config_cmd += "config mac {} {} {}".format(action, mac, vlan_id)+" && " else: config_cmd += "config mac {} {} {} {}".format(action, mac, vlan_id, port)+" && " config_cmd = config_cmd.rstrip(" && ") commands += " command '{}'\n".format(config_cmd) commands += " {}\n".format(action_run) commands += "end\n\n" if module == "lags": member_commands = "" for action in configuration[module]: fallback = min_links = config_cmd = member_config_cmd = "" if action == "add": module_action = "lag_add" member_action = "lag_member_add" elif action == "del": module_action = "lag_del" member_action = "lag_member_del" commands += "execute '{}' do\n".format(module_action) member_commands += "execute '{}' do\n".format(member_action) for portchannel in configuration[module][action]: portchannel_config = configuration[module][action][portchannel] if "fallback" in portchannel_config and ( portchannel_config["fallback"] or portchannel_config["fallback"] == "True"): fallback = "--fallback true" if "min-links" in portchannel_config: min_links = "--min-links {}".format(portchannel_config["min-links"]) config_cmd += "config portchannel {} {} {} {}".format(action, portchannel, fallback, min_links).strip() + " && " if "links" in configuration[module][action][portchannel]: for member in configuration[module][action][portchannel]["links"]: member_config_cmd += "config portchannel member {} {} {}".format(action, portchannel, member) + " && " else: member_commands = "" config_cmd = config_cmd.rstrip(" && ") member_config_cmd = member_config_cmd.rstrip(" && ") member_commands += " command '{}'\n".format(member_config_cmd) commands += " command 
'{}'\n".format(config_cmd) commands += " {}\n".format(action_run) commands += "end\n\n" if member_commands: member_commands += " {}\n".format(action_run) member_commands += "end\n\n" commands += member_commands if module == "interfaces": config_cmd = "" commands += "execute 'interface' do\n" for interface in configuration[module]: if "admin_status" in configuration[module][interface]: operation = "shutdown" if configuration[module][interface]["admin_status"] == "down" else "startup" config_cmd += "config interface {} {}".format(operation, interface) + " && " if "speed" in configuration[module][interface]: config_cmd += "config interface {} speed {}".format(interface, configuration[module][interface][ "speed"]) + " && " config_cmd = config_cmd.rstrip(" && ") commands += " command '{}'\n".format(config_cmd) commands += " {}\n".format(action_run) commands += "end\n\n" st.log("complete_command: \n{}".format(commands)) return commands
439b7310015a9707ea6796ea3d24577a5dec069f
3,649,113
import numpy as np def _normalize(vector): """Returns a normalized version of a numpy vector.""" return vector / np.sqrt(np.dot(vector, vector))
42942ea19af176f6c9fa0ad39b7e060dd518c086
3,649,114
def load_ext(ext_name, name, func=None, endpoint=None): """ Load an external module. Example: ``load_ext("distkv_ext","owfs","model")`` loads …/distkv_ext/owfs/model.py and returns its global dict. When "ep" is given it returns the entry point. Any additional keywords are added to the module dictionary. TODO: This doesn't yet return a proper module. Don't use this with modules that are also loaded the regular way. """ if ext_name not in _ext_cache: _cache_ext(ext_name) n = f"{ext_name}.{name}" past = this_load.set(n) try: if endpoint is None: return load_one(ext_name, name, endpoint=func) else: return load_one(n, func, endpoint=endpoint) finally: this_load.reset(past)
1c47abb64af732f1820e80eb4da714b338ec3e50
3,649,115
def get_chol_factor(lower_tri_vals): """ Args: lower_tri_vals: numpy array, shaped as the number of lower triangular elements, number of observations. The values ordered according to np.tril_indices(p) where p is the dimension of the multivariate normal distn Returns: Nxpxp numpy array, with the lower triangle filled in. The diagonal is exponentiated. """ lower_size, N = lower_tri_vals.shape # solve p(p+3)/2 = lower_size to get the # number of dimensions. p = (-1 + (1 + 8 * lower_size) ** 0.5) / 2 p = int(p) if not isinstance(lower_tri_vals, np.ndarray): lower_tri_vals = np.array(lower_tri_vals) L = np.zeros((N, p, p)) for par_ind, (k, l) in enumerate(zip(*np.tril_indices(p))): if k == l: # Add a small number to avoid singular matrices. L[:, k, l] = np.exp(lower_tri_vals[par_ind, :]) + 1e-6 else: L[:, k, l] = lower_tri_vals[par_ind, :] return L
fa27afefb49a87bdeac8bceee9f95b34e6c01d3f
3,649,116
def get_angles_gram_mask(gram, mask): """ Input: (gram) square numpy array, (mask) square numpy array where 1 = select, 0 = do not select Output: (angles) numpy array or angles in mask in degrees """ angles = gram * mask angles = angles[angles != 0] angles = np.degrees(np.arccos(angles)) return angles
3303e318f42b2a7c3a15b4d267f07b7618026b25
3,649,117
def _expectation(p, kern1, feat1, kern2, feat2, nghp=None): """ Compute the expectation: expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n) - Ka_{.,.}, Kb_{.,.} :: Linear kernels Ka and Kb as well as Z1 and Z2 can differ from each other, but this is supported only if the Gaussian p is Diagonal (p.cov NxD) and Ka, Kb have disjoint active_dims in which case the joint expectations simplify into a product of expectations :return: NxMxM """ if kern1.on_separate_dims(kern2) and isinstance(p, DiagonalGaussian): # no joint expectations required eKxz1 = expectation(p, (kern1, feat1)) eKxz2 = expectation(p, (kern2, feat2)) return eKxz1[:, :, None] * eKxz2[:, None, :] if kern1 != kern2 or feat1 != feat2: raise NotImplementedError("The expectation over two kernels has only an " "analytical implementation if both kernels are equal.") kern = kern1 feat = feat1 with params_as_tensors_for(kern, feat): # use only active dimensions Xcov = kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov) Z, Xmu = kern._slice(feat.Z, p.mu) N = tf.shape(Xmu)[0] var_Z = kern.variance * Z tiled_Z = tf.tile(tf.expand_dims(var_Z, 0), (N, 1, 1)) # NxMxD XX = Xcov + tf.expand_dims(Xmu, 1) * tf.expand_dims(Xmu, 2) # NxDxD return tf.matmul(tf.matmul(tiled_Z, XX), tiled_Z, transpose_b=True)
105a6445f1a37e2208c65c8bcbcfe76227516991
3,649,118
def rotX(angle): """ ----------------------------------------------------------------------- Purpose: Calculate the matrix that represents a 3d rotation around the X axis. Input: Rotation angle in degrees Returns: A 3x3 matrix representing the rotation about angle around X axis. Reference: Diebel, J. 2006, Stanford University, Representing Attitude: Euler angles, Unit Quaternions and Rotation Vectors. http://ai.stanford.edu/~diebel/attitude.html Notes: Return the rotation matrix for a rotation around the X axis. This is a rotation in the YZ plane. Note that we construct a new vector with: xnew = R1.x In the literature, this rotation is usually called R1 ----------------------------------------------------------------------- """ a = d2r(angle) v = n.asmatrix(n.zeros((3,3), 'd')) cosa = n.cos(a) sina = n.sin(a) v[0,0] = 1.0; v[0,1] = 0.0; v[0,2] = 0.0; v[1,0] = 0.0; v[1,1] = cosa; v[1,2] = sina; v[2,0] = 0.0; v[2,1] = -sina; v[2,2] = cosa; return v
b1dd62497cf9db137edbd90f1ff4f6fdb36d54d5
3,649,119
import requests import logging def extract_url(url): """ extract the real url from yahoo rss feed item """ _url = None if '*' in url: # old style yahoo redirect link _url = "http" + url.split("*http")[-1] elif url.startswith("http://finance.yahoo.com/r/"): # new style yahoo redirect link headers = { "User-Agent": "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; en-gb) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5", "From": "http://finance.yahoo.com" } res = requests.get(url, headers=headers) if res.status_code == 200: page_source = res.text if page_source.startswith("<script src="): # yahoo now uses javascript to make page redirection _url = page_source.split("URL=\'")[-1].split("\'")[0] else: _url = url # TODO: is this correct? else: logging.warning("%sabnormal http status code [%s] url=%s%s", Back.RED, res.status_code, url, Style.RESET_ALL) else: _url = url # if _url is not None: # if "=yahoo" in _url: # ignore redirect tracking parameters # _url = "{0}://{1}{2}".format(*urlparse.urlparse(_url)) return _url
0707aaaf5677fe33542f14a4b0335fef98711cf2
3,649,120
import re def LF_positive_MeshTerm(report): """ Looking for positive mesh terms """ for idx in range(1,len(categories)): reg_pos = re.compile(categories[idx],re.IGNORECASE) reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}'+categories[idx],re.IGNORECASE) for s in report.report_text.text.split("."): if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)): return ABNORMAL_VAL return ABSTAIN_VAL
391d5775cf109c9f4b0d254162b93937882605ee
3,649,121
def user_ratio_shuffle_split_with_targets(X, train_ratio=0.8, n_valid_users=1000, n_test_users=1000, minimum_interaction=3, rand_state=None): """ Split given test / valid user records into subsets User records are splitted proportionally per user as same as `user_ratio_shuffle_split`. However, split is only made for randomly selected test / valid user population. Inputs: X (scipy.sparse.csr_matrix): user-item matrix train_ratio (float): ratio of training records per user n_valid_users (int): number of validation users n_test_users (int): number of testing users minimum_interaction (int): minimum interaction of user to be considered. if it's smaller than this, put all records to the training set rand_state (bool or int): random state seed number or None Returns: scipy.sparse.csr_matrix: training matrix scipy.sparse.csr_matrix: validation matrix scipy.sparse.csr_matrix: testing matrix """ # first draw valid / test users rnd_idx = np.random.permutation(X.shape[0]) valid_users = rnd_idx[:n_valid_users] test_users = rnd_idx[n_valid_users:n_valid_users + n_test_users] train_users = rnd_idx[n_valid_users + n_test_users:] # split records for valid / test users Xvl, Xvl_vl, Xvl_ts = user_ratio_shuffle_split(X[valid_users], train_ratio, 0.5, # valid_ratio minimum_interaction, rand_state) # merge them, as this scheme does not need within user validation set Xvl_ts = Xvl_vl + Xvl_ts Xts, Xts_vl, Xts_ts = user_ratio_shuffle_split(X[test_users], train_ratio, 0.5, # valid ratio minimum_interaction, rand_state) Xts_ts = Xts_vl + Xts_ts # merge # assign them back to the original data Xtr = X[train_users] Xtr_ = sp.vstack([Xvl, Xts, Xtr]) Xts_ = sp.vstack([Xvl_ts, Xts_ts, Xtr]) # un-shuffle reverse_idx = {j:i for i, j in enumerate(rnd_idx)} reverse_idx = [reverse_idx[i] for i in range(X.shape[0])] Xtr_ = Xtr_[reverse_idx] Xts_ = Xts_[reverse_idx] return Xtr_, Xts_, (train_users, valid_users, test_users)
0f6bc42e94caff49c0be29215b9a6ec4f2203ff7
3,649,122
import torch def slice_core(core_tensor, inputs): """ Get matrix slices by indexing or contracting inputs, depending on input dtype """ assert isinstance(core_tensor, torch.Tensor) assert isinstance(inputs, torch.Tensor) if is_int_type(inputs): return core_tensor[:, inputs, :] else: return torch.einsum("jak,ba->jbk", core_tensor, inputs)
01a70a678286977b3a36ca24a2f67dce4dbc01fe
3,649,123
import argparse def start_with_strategy(args, strategy, ants): """Reads command-line arguments and starts a game with those options.""" parser = argparse.ArgumentParser(description="Play Ants vs. SomeBees") parser.add_argument('-d', type=str, metavar='DIFFICULTY', help='sets difficulty of game (test/easy/normal/hard/extra-hard)') parser.add_argument('-w', '--water', action='store_true', help='loads a full layout with water') parser.add_argument('--food', type=int, help='number of food to start with when testing', default=2) args = parser.parse_args() assault_plan = make_normal_assault_plan(ants) layout = ants.dry_layout tunnel_length = 10 num_tunnels = 3 food = args.food if args.water: layout = ants.wet_layout if args.d in ['t', 'test']: assault_plan = make_test_assault_plan(ants) num_tunnels = 1 elif args.d in ['e', 'easy']: assault_plan = make_easy_assault_plan(ants) num_tunnels = 2 elif args.d in ['n', 'normal']: assault_plan = make_normal_assault_plan(ants) num_tunnels = 3 elif args.d in ['h', 'hard']: assault_plan = make_hard_assault_plan(ants) num_tunnels = 4 elif args.d in ['i', 'extra-hard']: assault_plan = make_extra_hard_assault_plan(ants) num_tunnels = 4 beehive = ants.Hive(assault_plan) dimensions = (num_tunnels, tunnel_length) return ants.GameState(strategy, beehive, ants.ant_types(), layout, dimensions, food).simulate()
bbcd0e01dd86b19110cf3ab83e5d4d2ad6f17710
3,649,124
import math def geo2xy(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG): """ transforms geographic coordinates into Cartesian coordinates :param ff_lat_pto: latitude in degrees :param ff_lng_pto: longitude in degrees :param ff_lat_ref: latitude of the reference point :param ff_lng_ref: longitude of the reference point :returns: Cartesian coordinates of the point (x, y in NM) """ # logger M_LOG.info(">> geo2xy") # check input assert -90. <= ff_lat_pto <= 90. assert -180. <= ff_lng_pto <= 180. assert -90. <= ff_lat_ref <= 90. assert -180. <= ff_lng_ref <= 180. # convert from geographic to polar lf_azim, lf_dist = geo2pol(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref) # convert from polar to Cartesian lf_x = lf_dist * math.sin(math.radians(lf_azim)) lf_y = lf_dist * math.cos(math.radians(lf_azim)) # correction of the X and Y coordinates due to the effect of magnetic declination # lf_x, lf_y = decl_xyz(lf_x, lf_y, lf_z, f_ref.f_dcl_mag) # return return lf_x, lf_y
93dbe8a41aecb3029d5ac6fa3e68c740c625a486
3,649,125
def size(e): """ :rtype: Column """ return col(Size(parse(e)))
06b904583dc25a9e40b97ca6655bb0e5dcb28304
3,649,126
def b64pad(b64data): """Pad a base64 string with '=' to a length that is a multiple of 4 (no padding is added if the length already is). """ return b64data + '=' * (-len(b64data) % 4)
bdc14821bfbdbf220ff371fbe5e486d3e682337b
3,649,127
def get_peak_electric_demand(points_on_line): """ Initialize Power Demand :param points_on_line: information about every node in study case :type points_on_line: GeoDataFrame :returns: - **dict_peak_el**: Value is the ELECTRIC peak demand depending on thermally connected or disconnected. :rtype: dict[node index][thermally connected bool] """ dict_peak_el = {} dict_peak_el['thermally_conn_peak_el'] = {} dict_peak_el['thermally_disconn_peak_el'] = {} for idx_node, node in points_on_line.iterrows(): if not np.isnan(node['GRID0_kW']): thermally_conn_peak_el = (node['Eal0_kW'] + node['Edata0_kW'] + node['Epro0_kW'] + node['Eaux0_kW'] + node['E_ww0_kW']) thermally_disconn_peak_el = (thermally_conn_peak_el + node['E_hs0_kW'] + node['E_cs0_kW']) dict_peak_el['thermally_conn_peak_el'][idx_node] = thermally_conn_peak_el / (S_BASE * 10 ** 3) # kW/MW dict_peak_el['thermally_disconn_peak_el'][idx_node] = thermally_disconn_peak_el / ( S_BASE * 10 ** 3) # kW / MW return dict_peak_el
015fe10835f49f060e24681335d77a79586e31ea
3,649,128
def _GetDatabaseLookupFunction(filename, flaps, omega_hat, thrust_coeff): """Produces a lookup function from an aero database file.""" db = load_database.AeroDatabase(filename) def _Lookup(alpha, beta, dflaps=None, domega=None): if dflaps is None: dflaps = np.zeros((system_types.kNumFlaps,)) if domega is None: domega = np.zeros((3,)) return db.CalcFMCoeff(alpha, beta, flaps + dflaps, omega_hat + domega, thrust_coeff) return _Lookup, db.format
5b1e7a4636aa824466e791b54dd7a67a4208962e
3,649,129
def parse_copy_core_dump(raw_result): """ Parse the 'parse_copy_core_dump' command raw output. :param str raw_result: copy core-dump raw result string. :rtype: dict :return: The parsed result of the copy core-dump to server: :: { 0:{ 'status': 'success' 'reason': 'core dump copied' } } """ if "Error code " in raw_result: return {"status": "failed", "reason": "Error found while coping"} if "No coredump found for" in raw_result: return {"status": "failed", "reason": "no core dump found"} if "Failed to validate instance ID" in raw_result: return {"status": "failed", "reason": "instance ID not valid"} if "ssh: connect to host" in raw_result: return {"status": "failed", "reason": "ssh-connection issue for SFTP"} if ( "copying ..." in raw_result and "Sent " in raw_result and "bytes" in raw_result and "seconds" in raw_result ): return {"status": "success", "reason": "core dump copied"} else: return {"status": "failed", "reason": "undefined error"}
4ce168c9bc8c462ecc36beba889adb36cc64135d
3,649,130
def _handle_requirements(hass, component, name): """Install the requirements for a component.""" if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'): return True for req in component.REQUIREMENTS: if not pkg_util.install_package(req, target=hass.config.path('lib')): _LOGGER.error('Not initializing %s because could not install ' 'dependency %s', name, req) return False return True
efa0c371150a9aee9f26136a17ad1e33b9760340
3,649,131
def ticket_id_url(workspace, number): """ The url for a specific ticket in a specific workspace :param workspace: The workspace :param number: The number of the ticket :return: The url to fetch that specific ticket """ return basic_url + ' spaces/' + workspace + '/tickets/' + number + '.json'
a0ffeb53062c635f74feb011af793a2c00c361c4
3,649,132
import os def get_cifar10(data_path): """Returns cifar10 dataset. Args: data_path: dataset location. Returns: tuple (training instances, training labels, testing instances, testing labels) Instances of dimension # of instances X dimension. """ x_train = np.zeros((50000, 3072)) y_train = np.zeros((50000,), dtype=int) x_val = np.zeros((10000, 3072)) y_val = np.zeros((10000,), dtype=int) cur = 0 for batch_index in range(1, 6): with tf.gfile.Open( os.path.join(data_path, "cifar-10-batches-py/data_batch_%d" % batch_index), "rb") as fo: batch_data = cPickle.load(fo) m = batch_data["data"].shape[0] x_train[cur:cur + m, :] = batch_data["data"].astype(np.float32) y_train[cur:cur + m] = np.array(batch_data["labels"]) cur += m assert cur == 50000 with tf.gfile.Open( os.path.join(data_path, "cifar-10-batches-py/test_batch"), "rb") as fo: batch_data = cPickle.load(fo) x_val = batch_data["data"].astype(np.float32) y_val = np.array(batch_data["labels"]) x_train /= 255.0 x_val /= 255.0 return (x_train, y_train, x_val, y_val)
93f29763ebd2a2cd3a5631ac1c3072ef8451fc25
3,649,133
def compute_lifting_parameter(lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff): """One way to compute a per-particle "4D" offset in terms of an adjustable lamb and constant per-particle parameters. Notes ----- (ytz): this initializes the 4th dimension to a fixed plane adjust by an offset followed by a scaling by cutoff. lambda_plane_idxs are typically 0 or 1 and allows us to turn off an interaction independent of the lambda value. lambda_offset_idxs are typically 0 and 1, and allows us to adjust the w coordinate in a lambda-dependent way. """ w = cutoff * (lambda_plane_idxs + lambda_offset_idxs * lamb) return w
a9455ed67fcb21bcf1382fe66a77e0563f467421
3,649,134
import re import json def create_controller(): """ 1. Check the token 2. Call the worker method """ minimum_buffer_min = 3 if views.ds_token_ok(minimum_buffer_min): # 2. Call the worker method # More data validation would be a good idea here # Strip anything other than characters listed pattern = re.compile('([^\w \-\@\.\,])+') signer_email = pattern.sub('', request.form.get('signer_email')) signer_name = pattern.sub('', request.form.get('signer_name')) cc_email = pattern.sub('', request.form.get('cc_email')) cc_name = pattern.sub('', request.form.get('cc_name')) envelope_args = { 'signer_email': signer_email, 'signer_name': signer_name, 'cc_email': cc_email, 'cc_name': cc_name, 'status': 'sent', } args = { 'account_id': session['ds_account_id'], 'base_path': session['ds_base_path'], 'ds_access_token': session['ds_access_token'], 'envelope_args': envelope_args } try: results = worker(args) except ApiException as err: error_body_json = err and hasattr(err, 'body') and err.body # we can pull the DocuSign error code and message from the response body error_body = json.loads(error_body_json) error_code = error_body and 'errorCode' in error_body and error_body['errorCode'] error_message = error_body and 'message' in error_body and error_body['message'] # In production, may want to provide customized error messages and # remediation advice to the user. return render_template('error.html', err=err, error_code=error_code, error_message=error_message ) if results: session["envelope_id"] = results["envelope_id"] # Save for use by other examples # which need an envelopeId return render_template('example_done.html', title="Envelope sent", h1="Envelope sent", message=f"""The envelope has been created and sent!<br/> Envelope ID {results["envelope_id"]}.""" ) else: flash('Sorry, you need to re-authenticate.') # We could store the parameters of the requested operation # so it could be restarted automatically. # But since it should be rare to have a token issue here, # we'll make the user re-enter the form data after # authentication. session['eg'] = url_for(eg) return redirect(url_for('ds_must_authenticate'))
1aa777b66f110d575ea16531ca4bd72e0117e0b0
3,649,135
def reproduce_load_profile(neural_model, simulation_model: CHPP_HWT, input_data, logger): """ Tries to follow a real load profile """ # make sure the random seeds are different in each process #np.random.seed(int.from_bytes(os.urandom(4), byteorder='little')) temperature, powers, heat_demand = input_data time_step_count = powers.shape[0] # save initial states to restore them later result = {} result['temp_offset'] = max(-min(temperature) + 60, 0) temperature += result['temp_offset'] # determine the initial state simulation_model.eval() # sample with eval() setting simulation_model.chpp.mode = 0 if powers[0] > -3000 else 1 simulation_model.chpp.min_off_time = 900 simulation_model.chpp.min_on_time = 900 simulation_model.chpp.dwell_time = 900 simulation_model.hwt.temperature = temperature[0] simulation_model.demand.demand = heat_demand[0] simulation_model.demand.forecast_series = heat_demand[1:].reshape(-1,1) neural_model.load_state(simulation_model.state) simulation_model.train() # strict constraints (which the ANN should have learned) # do a forecast in order to predetermine the external input and the mask required to update inputs sampling_parameters = {} forecast, forecast_mask = simulation_model.forecast(time_step_count, **sampling_parameters) result['infeasible_at'] = time_step_count result['classified_infeasible_at'] = time_step_count delta_temp_ann = [] delta_temp_sim = [] for step, power in enumerate(powers): ann_feasible = neural_model.feasible_actions sim_feasible = simulation_model.feasible_actions delta_temp_ann.append(neural_model.state[-2] - temperature[step]) delta_temp_sim.append(simulation_model.state[-2] - temperature[step]) # identify the correct action to follow if power > -3000: # off action_choice = simulation_model.chpp.state_matrix[simulation_model.chpp.mode][0][0] else: # on action_choice = simulation_model.chpp.state_matrix[simulation_model.chpp.mode][1][0] if not np.isin(action_choice, sim_feasible) and result['infeasible_at'] >= time_step_count: # infeasible action and therefore an infeasible load profile # an entry smaller than time_step_count means it has already been detected as infeasible result['infeasible_at'] = step if not np.isin(action_choice, ann_feasible) and result['classified_infeasible_at'] >= time_step_count: # action deemed infeasible # an entry smaller than time_step_count means it has already been detected as infeasible result['classified_infeasible_at'] = step # keep going to see whether the simulation model can reproduce the schedule or not # while a not detected infeasibility is actually an error at this moment, # the remaining load schedule could still provide further indications that it is actually infeasible # (proceeding like this is also required for comparability with Bremer2015) state, interaction = neural_model.transition(action_choice) simulation_model.transition(action_choice) if step + 1 < time_step_count: # post processing to incorporate forecasts neural_model.state = state * (1-forecast_mask[step+1]) + forecast_mask[step+1] * forecast[step+1] #else: # reached final step without stopping due to a detected infeasibility result['delta_temp'] = delta_temp_ann result['[delta_temp]'] = delta_temp_sim return result
078fa837c0c82ea17c42ca949ba9bb33cdaeaaa0
3,649,136
def session_pca(imgs, mask_img, parameters, n_components=20, confounds=None, memory_level=0, memory=Memory(cachedir=None), verbose=0, copy=True): """Filter, mask and compute PCA on Niimg-like objects This is an helper function whose first call `base_masker.filter_and_mask` and then apply a PCA to reduce the number of time series. Parameters ---------- imgs: list of Niimg-like objects See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg. List of subject data mask_img: Niimg-like object See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg. Mask to apply on the data parameters: dictionary Dictionary of parameters passed to `filter_and_mask`. Please see the documentation of the `NiftiMasker` for more informations. confounds: CSV file path or 2D matrix This parameter is passed to signal.clean. Please see the corresponding documentation for details. n_components: integer, optional Number of components to be extracted by the PCA memory_level: integer, optional Integer indicating the level of memorization. The higher, the more function calls are cached. memory: joblib.Memory Used to cache the function calls. verbose: integer, optional Indicate the level of verbosity (0 means no messages). copy: boolean, optional Whether or not data should be copied """ data, affine = cache( filter_and_mask, memory, memory_level=memory_level, func_memory_level=2, ignore=['verbose', 'memory', 'memory_level', 'copy'])( imgs, mask_img, parameters, memory_level=memory_level, memory=memory, verbose=verbose, confounds=confounds, copy=copy) if n_components <= data.shape[0] // 4: U, S, _ = randomized_svd(data.T, n_components) else: U, S, _ = linalg.svd(data.T, full_matrices=False) U = U.T[:n_components].copy() S = S[:n_components] return U, S
f175ac8d9c39e133c34a4db215b5e87288bdafd4
3,649,137
def numpy_napoleon(prnt_doc, child_doc): """Behaves identically to the 'numpy' style, but abides by the docstring sections specified by the "Napoleon" standard. For more info regarding the Napoleon standard, see: http://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#docstring-sections Example: - parent's docstring: ''' Parent's line Keyword Arguments ----------------- x: int description of x y: Union[None, int] description of y Raises ------ NotImplemented Error''' - child's docstring: ''' Child's line Returns ------- int Notes ----- notes blah blah''' - docstring that is ultimately inherited: ''' Child's line Keyword Arguments ----------------- x: int description of x y: Union[None, int] description of y Returns ------- int Notes ----- notes blah blah''' """ return merge_numpy_napoleon_docs(prnt_doc, child_doc)
1795ddd1cfeeb8aee07cb17a369c6043b5fda52f
3,649,138
def search_unique_identities_slice(db, term, offset, limit): """Look for unique identities using slicing. This function returns those unique identities which match with the given `term`. The term will be compared with name, email, username and source values of each identity. When an empty term is given, all unique identities will be returned. The results are limited by `offset` (starting on 0) and `limit`. Along with the list of unique identities, this function returns the total number of unique identities that match the given `term`. :param db: database manager :param term: term to match with unique identities data :param offset: return results starting on this position :param limit: maximum number of unique identities to return :raises InvalidValueError: raised when either the given value of `offset` or `limit` is lower than zero """ uidentities = [] pattern = '%' + term + '%' if term else None if offset < 0: raise InvalidValueError('offset must be greater than 0 - %s given' % str(offset)) if limit < 0: raise InvalidValueError('limit must be greater than 0 - %s given' % str(limit)) with db.connect() as session: query = session.query(UniqueIdentity).\ join(Identity).\ filter(UniqueIdentity.uuid == Identity.uuid) if pattern: query = query.filter(Identity.name.like(pattern) | Identity.email.like(pattern) | Identity.username.like(pattern) | Identity.source.like(pattern)) query = query.group_by(UniqueIdentity).\ order_by(UniqueIdentity.uuid) # Get the total number of unique identities for that search nuids = query.count() start = offset end = offset + limit uidentities = query.slice(start, end).all() # Detach objects from the session session.expunge_all() return uidentities, nuids
1cfed05995eb90427a0f4a9475cfd8f7737d7e59
3,649,139
def get_change_description(req_sheet, row_num): """ Accessor for Change Description Args: req_sheet: A variable holding an Excel Workbook sheet in memory. row_num: A variable holding the row # of the data being accessed. Returns: A string value of the Change Description """ return (req_sheet['B' + str(row_num)].value)
7d3f286fb2586bf7bed64de8bb0cbf156e1ff954
3,649,140
def reduce_pca(data_df, n_components=None): """ Uses PCA to reduce dimension. Parameters: data_df (DataFrame): The input data in DataFrame format n_components (float): The number of components or to reduce to. If the number if between 0 and 1, n_components is the % of the principal components will be kept. Default is all components. returns: DataFrame: returns the data in the reduced dimension """ new_df = data_df.reset_index(drop=True) data_np = new_df.to_numpy() #Standardize the data by removing the mean and scaling to unit variance pca_np = StandardScaler().fit_transform(data_np) pca = PCA(n_components) embedded = pca.fit_transform(pca_np) return(pd.DataFrame(embedded, index=data_df.index))
b4b8db256b5996ddf3101a7737cb1396bc5abd06
3,649,141
def create_disjoint_intervals(draw, dtype, n_intervals=10, dt=1, time_range=(0, 100), channel_range=(2000, 2119), length_range=(1, 1), ): """ Function which generates a hypothesis strategy for a fixed number of disjoint intervals :param dtype: Can be any strax-like dtype either with endtime or dt and length field. :param n_intervals: How many disjoint intervals should be returned. :param dt: Sampling field, only needed for length + dt fields. :param time_range: Time range in which random numbers will be generated. :param channel_range: Range of channels for which the disjoint intervals will be generated. For a single channel set min/max equal. :param length_range: Range how long time intervals can be. :return: hypothesis strategy which can be used in @given Note: You can use create_disjoint_intervals().example() to see an example. If you do not want to specify the bounds for any of the "_range" parameters set the corresponding bound to None. Somehow hypothesis complains that the creation of these events takes too long ~2 s for 50 intervals. You can disable the corresponding healt checks via:" @settings( suppress_health_check=[hypothesis.HealthCheck.large_base_example, hypothesis.HealthCheck.too_slow])" """ n = 0 if not hasattr(dtype, 'fields'): # Convert dtype into numpy dtype dtype = np.dtype(dtype) is_dt = True if 'endtime' in dtype.fields: # Check whether interval uses dt fields or endtime is_dt = False stratgey_example = np.zeros(n_intervals, dtype) if is_dt: stratgey_example['dt'] = dt while n < n_intervals: # Create interval values: time = draw(hst.integers(*time_range)) channel = draw(hst.integers(*channel_range)) length = draw(hst.integers(*length_range)) # Check if objects are disjoint: if _test_disjoint(stratgey_example[:n], time, length, channel, dt): stratgey_example[n]['time'] = time stratgey_example[n]['channel'] = channel if is_dt: stratgey_example[n]['length'] = length else: stratgey_example[n]['endtime'] = time + int(length * dt) n += 1 return stratgey_example
67ed49bd8d94067cb6647164fa44beb4f8d91314
3,649,142
def get_deleted_resources(): """Get a list of resources that failed to be deleted in OVN. Get a list of resources that have been deleted from neutron but not in OVN. Once a resource is deleted in Neutron the ``standard_attr_id`` foreign key in the ovn_revision_numbers table will be set to NULL. Upon successfully deleting the resource in OVN the entry in the ovn_revision_number should also be deleted but if something fails the entry will be kept and returned in this list so the maintenance thread can later fix it. """ sort_order = sa.case(value=models.OVNRevisionNumbers.resource_type, whens=ovn_const.MAINTENANCE_DELETE_TYPE_ORDER) session = db_api.get_reader_session() with session.begin(): return session.query(models.OVNRevisionNumbers).filter_by( standard_attr_id=None).order_by(sort_order).all()
6a37fd84933ceee3a2a537aee8a01315f5869200
3,649,143
def load_base_schema(base_schema=None, verbose=False): """Load base schema, schema contains base classes for sub-classing in user schemas. """ _base = base_schema or BASE_SCHEMA or [] _base_schema = [] if "schema.org" in _base: _base_schema.append( load_schemaorg(verbose=verbose) ) if "bioschemas" in _base: _base_schema.append( load_bioschemas(verbose=verbose) ) _base_schema = merge_schema(*_base_schema) return _base_schema
18fe2b7045aa6d8e7382c37093be053b619ec216
3,649,144
def ShowX86UserStack(thread, user_lib_info = None): """ Display user space stack frame and pc addresses. params: thread: obj referencing thread value returns: Nothing """ iss = Cast(thread.machine.iss, 'x86_saved_state_t *') abi = int(iss.flavor) user_ip = 0 user_frame = 0 user_abi_ret_offset = 0 if abi == 0xf: debuglog("User process is 64 bit") user_ip = iss.uss.ss_64.isf.rip user_frame = iss.uss.ss_64.rbp user_abi_ret_offset = 8 user_abi_type = "uint64_t" else: debuglog("user process is 32 bit") user_ip = iss.uss.ss_32.eip user_frame = iss.uss.ss_32.ebp user_abi_ret_offset = 4 user_abi_type = "uint32_t" if user_ip == 0: print "This activation does not appear to have a valid user context." return False cur_ip = user_ip cur_frame = user_frame debuglog("ip= 0x%x , fr = 0x%x " % (cur_ip, cur_frame)) frameformat = "{0:d} FP: 0x{1:x} PC: 0x{2:x}" if user_lib_info is not None: frameformat = "{0:d} {3: <30s} 0x{2:x}" print frameformat.format(0, cur_frame, cur_ip, GetBinaryNameForPC(cur_ip, user_lib_info)) print kern.Symbolicate(cur_ip) frameno = 0 while True: frameno = frameno + 1 frame = GetUserDataAsString(thread.task, unsigned(cur_frame), user_abi_ret_offset*2) cur_ip = _ExtractDataFromString(frame, user_abi_ret_offset, user_abi_type) cur_frame = _ExtractDataFromString(frame, 0, user_abi_type) if not cur_frame or cur_frame == 0x0000000800000008: break print frameformat.format(frameno, cur_frame, cur_ip, GetBinaryNameForPC(cur_ip, user_lib_info)) print kern.Symbolicate(cur_ip) return
254c8797a16e560b23d92cde15d4a572f3f1660a
3,649,145
def endgame_score_connectfour(board, is_current_player_maximizer) : """Given an endgame board, returns 1000 if the maximizer has won, -1000 if the minimizer has won, or 0 in case of a tie.""" chains_1 = board.get_all_chains(current_player=is_current_player_maximizer) chains_2 = board.get_all_chains(current_player= not(is_current_player_maximizer)) for chain in chains_1: if len(chain) == 4: return 1000 for chain in chains_2: if len(chain) == 4: return -1000 return 0
bcb37381a9633377cb3405fbae45123e2a391df9
3,649,146
import os import glob def identify(path_or_file): """ Accepts a single file or list of files, Returns a list of Image file names :param path_or_file: :return: list of Image file names """ files = [] # Included capitalized formats supported_formats = set( IMAGE_FORMATS[0] + tuple(map(lambda x: x.upper(), IMAGE_FORMATS[0]))) if os.path.isdir(path_or_file): for img_format in supported_formats: files.extend( glob.iglob(os.path.join(path_or_file, '*.%s' % img_format))) elif os.path.isfile(path_or_file): # If its a single file, ignoring file extensions files = [path_or_file] if files: return files raise IOError( "%s: No image files have been scheduled for processing" % path_or_file)
9d14cd52d2e0aba09b54dc00a7823832bf742f08
3,649,147
def add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs): """Add a vertical color bar to an image plot. Taken from https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph """ divider = axes_grid1.make_axes_locatable(im.axes) width = axes_grid1.axes_size.AxesY(im.axes, aspect=1.0 / aspect) pad = axes_grid1.axes_size.Fraction(pad_fraction, width) current_ax = plt.gca() cax = divider.append_axes("right", size=width, pad=pad) plt.sca(current_ax) return im.axes.figure.colorbar(im, cax=cax, **kwargs)
acb0b21139d10393c2605bc94671fc774ada3800
3,649,148
import time def wait_procs(procs, timeout, callback=None): """Convenience function which waits for a list of processes to terminate. Return a (gone, alive) tuple indicating which processes are gone and which ones are still alive. The gone ones will have a new 'retcode' attribute indicating process exit status (may be None). 'callback' is a callable function which gets called every time a process terminates (a Process instance is passed as callback argument). Function will return as soon as all processes terminate or when timeout occurs. Tipical use case is: - send SIGTERM to a list of processes - give them some time to terminate - send SIGKILL to those ones which are still alive Example: >>> def on_terminate(proc): ... print("process {} terminated".format(proc)) ... >>> for p in procs: ... p.terminate() ... >>> gone, still_alive = wait_procs(procs, 3, callback=on_terminate) >>> for p in still_alive: ... p.kill() """ def assert_gone(proc, timeout): try: retcode = proc.wait(timeout=timeout) except TimeoutExpired: pass else: if retcode is not None or not proc.is_running(): proc.retcode = retcode gone.add(proc) if callback is not None: callback(proc) timer = getattr(time, 'monotonic', time.time) gone = set() alive = set(procs) if callback is not None and not callable(callback): raise TypeError("callback %r is not a callable" % callable) deadline = timer() + timeout while alive: if timeout <= 0: break for proc in alive: # Make sure that every complete iteration (all processes) # will last max 1 sec. # We do this because we don't want to wait too long on a # single process: in case it terminates too late other # processes may disappear in the meantime and their PID # reused. try: max_timeout = 1.0 / (len(alive) - len(gone)) except ZeroDivisionError: max_timeout = 1.0 # one alive remaining timeout = min((deadline - timer()), max_timeout) if timeout <= 0: break assert_gone(proc, timeout) alive = alive - gone if alive: # Last attempt over processes survived so far. # timeout == 0 won't make this function wait any further. for proc in alive: assert_gone(proc, 0) alive = alive - gone return (list(gone), list(alive))
624a6a1286a662a6f9e2d0680898e89b71585a7a
3,649,149
def select(sel, truecase, falsecase): """ Multiplexer returning falsecase for select==0, otherwise truecase. :param WireVector sel: used as the select input to the multiplexer :param WireVector falsecase: the WireVector selected if select==0 :param WireVector truecase: the WireVector selected if select==1 Example of mux as "ternary operator" to take the max of 'a' and 5: select( a<5, truecase=a, falsecase=5) """ sel, f, t = (as_wires(w) for w in (sel, falsecase, truecase)) f, t = match_bitwidth(f, t) outwire = WireVector(bitwidth=len(f)) net = LogicNet(op='x', op_param=None, args=(sel, f, t), dests=(outwire,)) working_block().add_net(net) # this includes sanity check on the mux return outwire
134e62fa84a16560e16f72294c9d01b3118c80e4
3,649,150
def fish_collision(sprite1, sprite2): """Algorithm for determining if there is a collision between the sprites.""" if sprite1 == sprite2: return False else: return collide_circle(sprite1, sprite2)
846024639f971c755b9ae88f8db43695d1e7c5e2
3,649,151
def normalizePeriodList(periods): """ Normalize the list of periods by merging overlapping or consecutive ranges and sorting the list by each periods start. @param list: a list of tuples of L{Period}. The list is changed in place. """ # First sort the list def sortPeriods(p1, p2): """ Compare two periods. Sort by their start and then end times. A period is a L{Period}. @param p1: first period @param p2: second period @return: 1 if p1>p2, 0 if p1==p2, -1 if p1<p2 """ assert isinstance(p1, Period), "Period is not a Period: %r" % (p1,) assert isinstance(p2, Period), "Period is not a Period: %r" % (p2,) if p1.getStart() == p2.getStart(): cmp1 = p1.getEnd() cmp2 = p2.getEnd() else: cmp1 = p1.getStart() cmp2 = p2.getStart() return compareDateTime(cmp1, cmp2) for period in periods: period.adjustToUTC() periods.sort(cmp=sortPeriods) # Now merge overlaps and consecutive periods index = None p = None pe = None for i in xrange(len(periods)): if p is None: index = i p = periods[i] pe = p.getEnd() continue ie = periods[i].getEnd() if (pe >= periods[i].getStart()): if ie > pe: periods[index] = Period(periods[index].getStart(), ie) pe = ie periods[i] = None else: index = i p = periods[i] pe = p.getEnd() periods[:] = [x for x in periods if x]
d178123e8ef65b88e46130db24f96aa86b444b11
3,649,152
from typing import Callable from typing import Optional from typing import Any import inspect from typing import Dict def assemble(the_type: Callable[..., TypeT], profile: Optional[str] = None, **kwargs: Any) -> TypeT: """Create an instance of a certain type, using constructor injection if needed.""" ready_result = _create(the_type, profile) if ready_result is not None: return ready_result signature = inspect.signature(the_type) parameters = _get_parameters(signature) arguments: Dict[str, Any] = kwargs uses_manual_args = False for parameter_name, parameter_type in parameters.items(): if parameter_name in arguments: uses_manual_args = True continue if _is_list_type(parameter_type): parameter_components = _get_components( _get_list_type_elem_type(parameter_type), profile) arguments[parameter_name] = list(map(assemble, map(lambda comp: comp.get_type(), parameter_components))) else: parameter_component = _get_component(parameter_type, profile) param_factory = _get_factory(parameter_type, profile) if parameter_component is not None: arguments[parameter_name] = assemble( parameter_component.get_type(), profile) # parameter_type? elif param_factory: arguments[parameter_name] = param_factory.get_instance() result = the_type(**arguments) stored_component = _get_component(the_type, profile) if stored_component and not uses_manual_args: stored_component.set_instance_if_singleton(result) return result
e8a39d61ddcb8834daf45089f597754a2860a334
3,649,153
def coord_ijk_to_xyz(affine, coords):
    """
    Converts voxel `coords` in cartesian space to `affine` space

    Parameters
    ----------
    affine : (4, 4) array-like
        Affine matrix
    coords : (N,) list of list
        Image coordinate values, where each entry is a length three list
        of int denoting ijk coordinates in cartesian space

    Returns
    -------
    xyz : (N, 3) numpy.ndarray
        Provided `coords` in `affine` space
    """
    coords = _check_coord_inputs(coords)
    mni_coords = np.dot(affine, coords)[:3].T
    return mni_coords
c7099a588df3bd85a3a5a85451e15812564aae2f
3,649,154
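A minimal numpy sketch of the underlying math: the ijk voxel indices are promoted to homogeneous coordinates and multiplied by the affine, which is what the function above does via its `_check_coord_inputs` helper (not shown in this snippet). The affine values and voxel indices below are made up for illustration.

import numpy as np

# A made-up 2 mm isotropic affine with an arbitrary origin offset.
affine = np.array([[2., 0., 0.,  -90.],
                   [0., 2., 0., -126.],
                   [0., 0., 2.,  -72.],
                   [0., 0., 0.,    1.]])

ijk = np.array([[0, 0, 0],
                [45, 63, 36]])

# Append a column of ones (homogeneous coordinates), apply the affine,
# then drop the homogeneous row again.
homogeneous = np.column_stack([ijk, np.ones(len(ijk))])
xyz = (affine @ homogeneous.T)[:3].T
print(xyz)   # [[-90. -126. -72.], [0. 0. 0.]]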
def _get_create_statement(server, temp_datadir, frm_file, version, options, quiet=False): """Get the CREATE statement for the .frm file This method attempts to read the CREATE statement by copying the .frm file, altering the storage engine in the .frm file to MEMORY and issuing a SHOW CREATE statement for the table/view. If this method returns None, the operation was successful and the CREATE statement was printed. If a string is returned, there was at least one error (which will be printed) and the .frm file was not readable. The returned frm file path can be used to tell the user to use the diagnostic mode for reading files byte-by-byte. See the method read_frm_files_diagnostic() above. server[in] Server instance temp_datadir[in] New data directory frm_file[in] Tuple containing (db, table, path) for .frm file version[in] Version string for the current server options[in] Options from user Returns string - None on success, path to frm file on error """ verbosity = int(options.get("verbosity", 0)) quiet = options.get("quiet", False) new_engine = options.get("new_engine", None) frm_dir = options.get("frm_dir", ".{0}".format(os.sep)) user = options.get('user', 'root') if not quiet: print "#\n# Reading the %s.frm file." % frm_file[1] try: # 1) copy the file db = frm_file[0] if not db or db == ".": db = "test" db_name = db + "_temp" new_path = os.path.normpath(os.path.join(temp_datadir, db_name)) if not os.path.exists(new_path): os.mkdir(new_path) new_frm = os.path.join(new_path, frm_file[1] + ".frm") # Check name for decoding and decode try: if requires_decoding(frm_file[1]): new_frm_file = decode(frm_file[1]) frm_file = (frm_file[0], new_frm_file, frm_file[2]) shutil.copy(frm_file[2], new_path) # Check name for encoding and encode elif requires_encoding(frm_file[1]): new_frm_file = encode(frm_file[1]) + ".frm" new_frm = os.path.join(new_path, new_frm_file) shutil.copy(frm_file[2], new_frm) else: shutil.copy(frm_file[2], new_path) except: _, e, _ = sys.exc_info() print("ERROR: {0}".format(e)) # Set permissons on copied file if user context in play if user_change_as_root(options): subprocess.call(['chown', '-R', user, new_path]) subprocess.call(['chgrp', '-R', user, new_path]) server.exec_query("CREATE DATABASE IF NOT EXISTS %s" % db_name) frm = FrmReader(db_name, frm_file[1], new_frm, options) frm_type = frm.get_type() server.exec_query("FLUSH TABLES") if frm_type == "TABLE": # 2) change engine if it is a table current_engine = frm.change_storage_engine() # Abort read if restricted engine found if current_engine[1].upper() in _CANNOT_READ_ENGINE: print ("ERROR: Cannot process tables with the %s storage " "engine. Please use the diagnostic mode to read the " "%s file." % (current_engine[1].upper(), frm_file[1])) return frm_file[2] # Check server version server_version = None if version and len(current_engine) > 1 and current_engine[2]: server_version = (int(current_engine[2][0]), int(current_engine[2][1:3]), int(current_engine[2][3:])) if verbosity > 1 and not quiet: print ("# Server version in file: %s.%s.%s" % server_version) if not server.check_version_compat(server_version[0], server_version[1], server_version[2]): versions = (server_version[0], server_version[1], server_version[2], version[0], version[1], version[2]) print ("ERROR: The server version for this " "file is too low. It requires a server version " "%s.%s.%s or higher but your server is version " "%s.%s.%s. Try using a newer server or use " "diagnostic mode." 
% versions) return frm_file[2] # 3) show CREATE TABLE res = server.exec_query("SHOW CREATE TABLE `%s`.`%s`" % (db_name, frm_file[1])) create_str = res[0][1] if new_engine: create_str = create_str.replace("ENGINE=MEMORY", "ENGINE=%s" % new_engine) elif not current_engine[1].upper() == "MEMORY": create_str = create_str.replace("ENGINE=MEMORY", "ENGINE=%s" % current_engine[1]) if frm_file[0] and not frm_file[0] == ".": create_str = create_str.replace("CREATE TABLE ", "CREATE TABLE `%s`." % frm_file[0]) # if requested, generate the new .frm with the altered engine if new_engine: server.exec_query("ALTER TABLE `{0}`.`{1}` " "ENGINE={2}".format(db_name, frm_file[1], new_engine)) new_frm_file = os.path.join(frm_dir, "{0}.frm".format(frm_file[1])) if os.path.exists(new_frm_file): print("#\n# WARNING: Unable to create new .frm file. " "File exists.") else: try: shutil.copyfile(new_frm, new_frm_file) print("# Copy of .frm file with new storage " "engine saved as {0}.".format(new_frm_file)) except (IOError, OSError, shutil.Error) as e: print("# WARNING: Unable to create new .frm file. " "Error: {0}".format(e)) elif frm_type == "VIEW": # 5) show CREATE VIEW res = server.exec_query("SHOW CREATE VIEW %s.%s" % (db_name, frm_file[1])) create_str = res[0][1] if frm_file[0]: create_str = create_str.replace("CREATE VIEW ", "CREATE VIEW `%s`." % frm_file[0]) # Now we must replace the string for storage engine! print "#\n# CREATE statement for %s:\n#\n" % frm_file[2] print create_str print if frm_type == "TABLE" and options.get("show_stats", False): frm.show_statistics() except: print ("ERROR: Failed to correctly read the .frm file. Please try " "reading the file with the --diagnostic mode.") return frm_file[2] return None
953b97df9f7f01540d5f61ef8099282d4aab26d6
3,649,155
def delete_driver_vehicle(driver): """delete driver""" try: driver.vehicle = None driver.save() return driver, "success" except Exception as err: logger.error("deleteVehicleForDriverRecord@error") logger.error(err) return None, str(err)
e6de3c9d1ae0ce0ac2022fe8ce38c2eccbe3b8df
3,649,156
def passivity(s: npy.ndarray) -> npy.ndarray:
    """
    Passivity metric for a multi-port network.

    A metric which is proportional to the amount of power lost in a
    multiport network, depending on the excitation port. Specifically,
    this returns a matrix whose diagonals are equal to the total
    power received at all ports, normalized to the power at a single
    excitation port.

    Mathematically, this is a test for unitary-ness of the
    s-parameter matrix [#]_.

    For a two-port this is

    .. math::

            \\sqrt( |S_{11}|^2 + |S_{21}|^2 \\, , \\, |S_{22}|^2 + |S_{12}|^2)

    in general it is

    .. math::

            \\sqrt( S^H \\cdot S)

    where :math:`H` is conjugate transpose of S, and :math:`\\cdot`
    is dot product.

    Note
    ----
    The total amount of power dissipated in a network depends on the
    port matches. For example, given a matched attenuator, this metric
    will yield the attenuation value. However, if the attenuator is
    cascaded with a mismatch, the power dissipated will not be equivalent
    to the attenuator value, nor equal for each excitation port.

    Returns
    -------
    passivity : :class:`numpy.ndarray` of shape fxnxn

    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Lossless_networks
    """
    if s.shape[-1] == 1:
        raise (ValueError('Doesn\'t exist for one ports'))

    pas_mat = s.copy()
    for f in range(len(s)):
        pas_mat[f, :, :] = npy.sqrt(npy.dot(s[f, :, :].conj().T, s[f, :, :]))

    return pas_mat
9b3629aae603d8de97982113333b87bb021972e4
3,649,157
def sample_distance(sampleA, sampleB, sigma): """ I know this isn't the best distance measure, alright. """ # RBF! gamma = 1 / (2 * sigma**2) similarity = np.exp(-gamma*(np.linalg.norm(sampleA - sampleB)**2)) distance = 1 - similarity return distance
1f1bb56d8e1876c9c9ab6b1d1db26ff549d86b81
3,649,158
def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding, offset=0): """Read and return a single sequence item, i.e. a Dataset""" seq_item_tell = fp.tell() + offset if is_little_endian: tag_length_format = "<HHL" else: tag_length_format = ">HHL" try: bytes_read = fp.read(8) group, element, length = unpack(tag_length_format, bytes_read) except BaseException: raise IOError("No tag to read at file position " "{0:05x}".format(fp.tell() + offset)) tag = (group, element) if tag == SequenceDelimiterTag: # No more items, time to stop reading logger.debug( "{0:08x}: {1}".format(fp.tell() - 8 + offset, "End of Sequence")) if length != 0: logger.warning("Expected 0x00000000 after delimiter, found 0x%x, " "at position 0x%x" % ( length, fp.tell() - 4 + offset)) return None if tag != ItemTag: logger.warning("Expected sequence item with tag %s at file position " "0x%x" % (ItemTag, fp.tell() - 4 + offset)) else: logger.debug("{0:08x}: {1} Found Item tag (start of item)".format( fp.tell() - 4 + offset, bytes2hex(bytes_read))) if length == 0xFFFFFFFF: ds = read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None, parent_encoding=encoding) ds.is_undefined_length_sequence_item = True else: ds = read_dataset(fp, is_implicit_VR, is_little_endian, length, parent_encoding=encoding) ds.is_undefined_length_sequence_item = False logger.debug("%08x: Finished sequence item" % (fp.tell() + offset,)) ds.seq_item_tell = seq_item_tell return ds
fb0eeca700fa1d66e1289c2cfd106934ba54ca1a
3,649,159
from typing import Dict from typing import Set def get_filters(query_metadata: QueryMetadataTable) -> Dict[VertexPath, Set[FilterInfo]]: """Get the filters at each VertexPath.""" filters: Dict[VertexPath, Set[FilterInfo]] = {} for location, _ in query_metadata.registered_locations: filter_infos = query_metadata.get_filter_infos(location) filters.setdefault(_get_location_vertex_path(location), set()).update(filter_infos) return filters
79ef7accd1c8e1d100f48eb7086c842429a4a513
3,649,160
import numpy


def auxiliary_equations(*, F, T_degC, I_sc_A_0, I_rs_1_A_0, n_1_0, I_rs_2_0_A, n_2_0, R_s_Ohm_0,
                        G_p_S_0, E_g_eV_0, N_s, T_degC_0=T_degC_stc):
    """
    Computes the auxiliary equations at F and T_degC for the 8-parameter DDM-G.

    Inputs (any broadcast-compatible combination of scalars and numpy arrays):
        Same as current_sum_at_diode_node().

    Outputs (device-level, at each combination of broadcast inputs, return type is numpy.float64 for all scalar inputs):
        dict containing:
            I_ph_A photocurrent
            I_rs_1_A first diode reverse-saturation current
            n_1 first diode ideality factor
            I_rs_2_A second diode reverse-saturation current
            n_2 second diode ideality factor
            R_s_Ohm series resistance
            G_p_S parallel conductance
            N_s integer number of cells in series in each parallel string
            T_degC temperature
    """
    # Temperatures must be in Kelvin.
    T_K = convert_temperature(T_degC, 'Celsius', 'Kelvin')
    T_K_0 = convert_temperature(T_degC_0, 'Celsius', 'Kelvin')

    # Optimization.
    V_therm_factor_V_0 = (N_s * k_B_J_per_K * T_K_0) / q_C

    # Compute variables at operating condition.

    # Compute band gap (constant).
    E_g_eV = E_g_eV_0

    # Compute first diode ideality factor (constant).
    n_1 = n_1_0

    # Compute first reverse-saturation current at T_degC (this is independent of F, I_sc_A_0, R_s_Ohm_0, and G_p_S_0).
    I_rs_1_A = I_rs_1_A_0 * (T_K / T_K_0)**3 * numpy.exp(E_g_eV / (n_1 * k_B_eV_per_K) * (1 / T_K_0 - 1 / T_K))

    # Compute second diode ideality factor (constant).
    n_2 = n_2_0

    # Compute second reverse-saturation current at T_degC (this is independent of F, I_sc_A_0, R_s_Ohm_0, and G_p_S_0).
    I_rs_2_A = I_rs_2_0_A * (T_K / T_K_0)**(5/2) * numpy.exp(E_g_eV / (n_2 * k_B_eV_per_K) * (1 / T_K_0 - 1 / T_K))

    # Compute series resistance (constant).
    R_s_Ohm = R_s_Ohm_0

    # Compute parallel conductance (constant).
    G_p_S = G_p_S_0

    # Compute parallel conductance (photo-conductive shunt).
    # G_p_S = F * G_p_S_0

    # Compute photo-generated current at F and T_degC (V=0 with I=Isc for this).
    expr1 = I_sc_A_0 * F
    expr2 = expr1 * R_s_Ohm
    I_ph_A = expr1 + I_rs_1_A * numpy.expm1(expr2 / (V_therm_factor_V_0 * n_1)) + \
        I_rs_2_A * numpy.expm1(expr2 / (V_therm_factor_V_0 * n_2)) + G_p_S * expr2

    return {'I_ph_A': I_ph_A, 'I_rs_1_A': I_rs_1_A, 'n_1': n_1, 'I_rs_2_A': I_rs_2_A, 'n_2': n_2,
            'R_s_Ohm': R_s_Ohm, 'G_p_S': G_p_S, 'N_s': N_s, 'T_degC': T_degC}
5bbb988a7e4415f59a56985c2c867ec1a0dc5df2
3,649,161
import time
import os


def detector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,
             lab_o_proc, window, frame_shift, play_speed, but_start, but_pause) -> str:
    """This function searches for motion in a given area of the current file.

    name_file - Name of the file passed in for processing
    chk_video_det - Flag for showing the playback window during the search
    xy_coord - List of coordinates of the search zone
    frame_zoom - Video scaling factor for display
    size_detect - Size of the object to detect
    lab_o_proc - Reference to the label used to display progress
    window - Reference to the window
    frame_shift - Frame offset used when detecting motion
    play_speed - Number of frames to skip for speed-up
    but_start - Start button
    but_pause - Pause button
    """
    # Button labels come from a Russian GUI: 'Старт' = Start, 'Продолжить' = Continue.
    if but_start['text'] == 'Старт':
        return "OK"
    none_frame: int = 0  # Counter for checking empty frames
    start_detect = time.time()  # Time when processing of the video file started
    cap = cv2.VideoCapture(name_file)  # Capture video from the file
    # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'))
    off_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # Get the total number of frames
    frame_width_det = (cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # Get the size of the source video
    frame_height_det = (cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    output = cv2.VideoWriter(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:],
                             cv2.VideoWriter_fourcc('H', '2', '6', '4'), 20,
                             (int(frame_width_det), int(frame_height_det)))  # Output encoding parameters MJPG PIM1 XVID
    if chk_video_det:
        cv2.namedWindow(name_file, 0)  # Define the output window
        _, x_win, y_win = window.geometry().split('+')
        cv2.moveWindow(name_file, int(x_win) + 350, int(y_win))
    while True:  # Frames are processed in a loop
        if but_pause['text'] == 'Продолжить':
            cap.release()
            output.release()
            cv2.destroyAllWindows()
            return 'Pause'
        if but_start['text'] == 'Старт':
            cap.release()
            output.release()
            cv2.destroyAllWindows()
            break
        ret1, frame1 = cap.read()
        # This offset helps group the outlines of a moving object
        for _ in range(frame_shift):
            cap.read()
        ret2, frame2 = cap.read()
        # This offset is used to speed things up
        for _ in range(play_speed):
            cap.read()
        if cap.get(cv2.CAP_PROP_POS_FRAMES) == off_frames:
            break
        if not ret1 * ret2:
            none_frame += 1
            if none_frame > 10:
                print('The allowed number of empty frames was exceeded. File recovery started.')
                output.release()  # Close the output file
                cv2.destroyAllWindows()
                os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')  # Delete it
                return 'Correct'  # Return a flag indicating that recovery must be started
            continue
        # frame1=frame1[y1_search:y2_search,x1_search:x2_search]  # Cropping the frame to the required size. May come in handy
        # frame2=frame2[y1_search:y2_search,x1_search:x2_search]
        # Progress output in percent
        lab_o_proc["text"] = str(cap.get(cv2.CAP_PROP_POS_FRAMES) * 100 // off_frames + 1) + " %"
        window.update()  # Update the window to redraw the progress
        if ret2:
            if chk_video_det:
                # Method for visualizing the frame array
                frame1 = algorithm_detector_1(frame1, frame2, xy_coord, frame_zoom, size_detect, output)
                cv2.imshow(name_file, frame1)
                cv2.resizeWindow(name_file, int(frame_width_det) // 2,
                                 int(frame_height_det) // 2)  # Set the size of the output window
        else:
            break
        if chk_video_det and cv2.getWindowProperty(name_file, 1) == 1:  # Exit the program when the window is closed
            break
        if cv2.waitKey(2) == 27:  # Exit on ESC
            break
    cap.release()
    output.release()
    # Check the number of saved frames
    output = cv2.VideoCapture(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:])
    frames_output = int(output.get(cv2.CAP_PROP_FRAME_COUNT))
    output.release()
    cv2.destroyAllWindows()
    if frames_output == 0:  # If no frames were saved, delete the file
        os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')  # Delete it
    end_detect = time.time()  # Time when processing of the video file finished
    # Print the time spent processing the file
    print(name_file, '->', str(time.strftime("%M:%S", time.localtime(end_detect - start_detect))))
    return 'OK'
d0dafa738421fdf41479327cd7aa76774235e0c3
3,649,162
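algorithm_detector_1 is not included in this snippet, so the following is only a hedged sketch of the frame-differencing approach such a detector typically uses with OpenCV: diff two frames, threshold, and keep contours larger than size_detect. The function name, blur kernel, and threshold values are assumptions, not the original implementation.

import cv2

def sketch_motion_regions(frame1, frame2, size_detect):
    """Return bounding boxes of regions that changed between two BGR frames."""
    # Difference of the two frames, reduced to a blurred grayscale image.
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)

    # Binarize and thicken the changed pixels so nearby blobs merge.
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)

    # Keep only contours bigger than the requested detection size
    # (two-value return signature of OpenCV 4.x).
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) >= size_detect]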
from typing import List
from typing import Dict


def get_highest_confidence_transcript_for_each_session(
    transcripts: List[db_models.Transcript],
) -> List[db_models.Transcript]:
    """
    Filter down a list of transcript documents to just a single transcript
    per session taking the highest confidence transcript document.

    Parameters
    ----------
    transcripts: List[db_models.Transcript]
        List of transcript database documents.

    Returns
    -------
    transcripts: List[db_models.Transcript]
        Filtered list of transcript database documents where only a single
        transcript exists for each referenced session.
    """
    # We can't use pandas groupby because sessions objects can't be naively compared
    # Instead we create a Dict of session id to document model
    # We update as we iterate through list of all transcripts
    selected_transcripts: Dict[str, db_models.Transcript] = {}
    for transcript in transcripts:
        referenced_session_id = transcript.session_ref.ref.id
        if referenced_session_id not in selected_transcripts:
            selected_transcripts[referenced_session_id] = transcript

        # Multiple transcripts for a single session
        # pick the higher confidence
        elif (
            transcript.confidence
            > selected_transcripts[referenced_session_id].confidence
        ):
            selected_transcripts[referenced_session_id] = transcript

    return list(selected_transcripts.values())
fccf657c5c670d8b3e275641d411ede34af91e41
3,649,163
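The same keep-the-best-per-key pattern, reduced to a self-contained sketch with a stand-in record type, since db_models.Transcript is not defined in this snippet.

from dataclasses import dataclass

@dataclass
class FakeTranscript:           # stand-in for db_models.Transcript
    session_id: str
    confidence: float

transcripts = [
    FakeTranscript("s1", 0.80),
    FakeTranscript("s1", 0.95),  # higher confidence, should win for s1
    FakeTranscript("s2", 0.60),
]

best = {}
for t in transcripts:
    # Keep the first record seen, then replace it whenever a higher-confidence one appears.
    if t.session_id not in best or t.confidence > best[t.session_id].confidence:
        best[t.session_id] = t

print(list(best.values()))   # s1 keeps the 0.95 transcript, s2 keeps 0.6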
def get_groups_data(): """ Get all groups, get all users for each group and sort groups by users :return: """ groups = [group["name"] for group in jira.get_groups(limit=200)["groups"]] groups_and_users = [get_all_users(group) for group in groups] groups_and_users = [sort_users_in_group(group) for group in groups_and_users] return groups_and_users
9ec0d3772b438f10edde4a4bad591f249709de98
3,649,164
import builtins import os def get_sep(): """Returns the appropriate filepath separator char depending on OS and xonsh options set """ if ON_WINDOWS and builtins.__xonsh__.env.get("FORCE_POSIX_PATHS"): return os.altsep else: return os.sep
7b9431a05ac61ae49680e26939dd91d85df99de5
3,649,165
def hindu_lunar_holiday(l_month, l_day, g_year): """Return the list of fixed dates of occurrences of Hindu lunar month, month, day, day, in Gregorian year, g_year.""" l_year = hindu_lunar_year( hindu_lunar_from_fixed(gregorian_new_year(g_year))) date1 = hindu_date_occur(l_month, l_day, l_year) date2 = hindu_date_occur(l_month, l_day, l_year + 1) return list_range([date1, date2], gregorian_year_range(g_year))
fa9bafead696b177a137c12b7544c8e71c4f2f43
3,649,166
import copy def identify_all_failure_paths(network_df_in,edge_failure_set,flow_dataframe,path_criteria): """Identify all paths that contain an edge Parameters --------- network_df_in - Pandas DataFrame of network edge_failure_set - List of string edge ID's flow_dataframe - Pandas DataFrame of list of edge paths path_criteria - String name of column of edge paths in flow dataframe Outputs ------- network_df - Pandas DataFrame of network With removed edges edge_path_index - List of integer indexes Of locations of paths in flow dataframe """ edge_path_index = [] network_df = copy.deepcopy(network_df_in) for edge in edge_failure_set: network_df = network_df[network_df.edge_id != edge] edge_path_index += flow_dataframe.loc[flow_dataframe[path_criteria].str.contains( "'{}'".format(edge))].index.tolist() edge_path_index = list(set(edge_path_index)) return network_df, edge_path_index
db2da6ad20a4ae547c309ac63b6e68a17c3874e7
3,649,167
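A small pandas sketch of the path lookup above, using a made-up flow table: edge paths are stored as strings of quoted edge IDs, so str.contains with the quoted edge finds every path that traverses a failed edge.

import pandas as pd

flow_dataframe = pd.DataFrame({
    "edge_path": ["['e1', 'e2', 'e3']", "['e4', 'e5']", "['e2', 'e6']"],
    "flow": [10, 5, 7],
})

failed_edge = "e2"
# Quote the edge ID so 'e2' does not also match 'e20', 'e21', ...
hit_index = flow_dataframe.loc[
    flow_dataframe["edge_path"].str.contains("'{}'".format(failed_edge))
].index.tolist()
print(hit_index)   # [0, 2]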
def wikipedia_wtap_setup(): """ A commander has 5 tanks, 2 aircraft and 1 sea vessel and is told to engage 3 targets with values 5,10,20 ... """ tanks = ["tank-{}".format(i) for i in range(5)] aircrafts = ["aircraft-{}".format(i) for i in range(2)] ships = ["ship-{}".format(i) for i in range(1)] weapons = tanks + aircrafts + ships target_values = {1: 5, 2: 10, 3: 20} tank_probabilities = [ (1, 0.3), (2, 0.2), (3, 0.5), ] aircraft_probabilities = [ (1, 0.1), (2, 0.6), (3, 0.5), ] sea_vessel_probabilities = [ (1, 0.4), (2, 0.5), (3, 0.4) ] category_and_probabilities = [ (tanks, tank_probabilities), (aircrafts, aircraft_probabilities), (ships, sea_vessel_probabilities) ] probabilities = [] for category, probs in category_and_probabilities: for vehicle in category: for prob in probs: probabilities.append((vehicle,) + prob) g = Graph(from_list=probabilities) return g, weapons, target_values
828746bef74b88bde1a9c72a79338ec05591721a
3,649,168
def allowed_file(filename: str) -> bool: """Determines whether filename is allowable Parameters ---------- filename : str a filename Returns ------- bool True if allowed """ return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
fd05abc21025c9eb49f7e3426b4e183b178361c4
3,649,169
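A quick usage sketch, assuming allowed_file above is in scope; ALLOWED_EXTENSIONS is not defined in the snippet, so a hypothetical whitelist is supplied here.

ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg", "gif"}   # hypothetical whitelist

print(allowed_file("photo.JPG"))       # True  (extension check is case-insensitive)
print(allowed_file("archive.tar.gz"))  # False (only the last suffix is considered)
print(allowed_file("README"))          # False (no dot, no extension)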
def share_is_mounted(details): """Check if dev/share/etc is mounted, returns bool.""" mounted = False if PLATFORM == 'Darwin': # Weak and naive text search proc = run_program(['mount'], check=False) for line in proc.stdout.splitlines(): if f'{details["Address"]}/{details["Share"]}' in line: mounted = True break elif PLATFORM == 'Linux': cmd = [ 'findmnt', '--list', '--json', '--invert', '--types', ( 'autofs,binfmt_misc,bpf,cgroup,cgroup2,configfs,debugfs,devpts,' 'devtmpfs,hugetlbfs,mqueue,proc,pstore,securityfs,sysfs,tmpfs' ), '--output', 'SOURCE', ] mount_data = get_json_from_command(cmd) for row in mount_data.get('filesystems', []): if row['source'] == f'//{details["Address"]}/{details["Share"]}': mounted = True break #elif PLATFORM == 'Windows': # Done return mounted
43ca415779d133d7a90c2a8f26521e0a5cca2fcb
3,649,170
def update_bitweights(realization, asgn, tileids, tg_ids, tg_ids2idx, bitweights): """ Update bit weights for assigned science targets """ for tileid in tileids: try: # Find which targets were assigned adata = asgn.tile_location_target(tileid) for loc, tgid in adata.items(): idx = tg_ids2idx[tgid] bitweights[realization * len(tg_ids) + idx] = True except: pass return bitweights
f1b7e085d43e36b025aa1c61ab1b7156ba1d3ed7
3,649,171
def load_from_input_flags(params, params_source, input_flags): """Update params dictionary with input flags. Args: params: Python dictionary of hyperparameters. params_source: Python dictionary to record source of hyperparameters. input_flags: All the flags with non-null value of overridden hyperparameters. Returns: Python dict of hyperparameters. """ if params is None: raise ValueError( 'Input dictionary is empty. It is expected to be loaded with default ' 'values') if not isinstance(params, dict): raise ValueError( 'The base parameter set must be a Python dict, was: {}'.format( type(params))) for key in params: flag_value = input_flags.get_flag_value(key, None) if flag_value is not None: params[key] = flag_value params_source[key] = 'Command-line flags' return params, params_source
7ec8662f03469f1ed03f29c9f7e9663c49aa7056
3,649,172
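A sketch of how the override logic behaves, assuming load_from_input_flags above is in scope and using a minimal stand-in for the flags object (the real one presumably comes from absl/TensorFlow flags); only get_flag_value is needed.

class FakeFlags:                          # stand-in for the real input_flags object
    def __init__(self, values):
        self._values = values

    def get_flag_value(self, name, default):
        return self._values.get(name, default)

params = {"learning_rate": 0.1, "batch_size": 64}
params_source = {k: "Default" for k in params}
flags = FakeFlags({"learning_rate": 0.01})   # only this flag was set on the command line

params, params_source = load_from_input_flags(params, params_source, flags)
print(params)          # {'learning_rate': 0.01, 'batch_size': 64}
print(params_source)   # learning_rate now marked 'Command-line flags'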
import hal def hal(_module_patch): """Simulated hal module""" return hal
3ab217e0cbce54d6dab01217c829905dc61bf06c
3,649,173
def elements(all_isotopes=True): """ Loads a DataFrame of all elements and isotopes. Scraped from https://www.webelements.com/ Returns ------- pandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent) """ el = pd.read_pickle(pkgrs.resource_filename('latools', 'resources/elements.pkl')) if all_isotopes: return el.set_index('element') else: def wmean(g): return (g.atomic_weight * g.percent).sum() / 100 iel = el.groupby('element').apply(wmean) iel.name = 'atomic_weight' return iel
d706ee5ffaa8c756c9e85f3e143876070f8f81e4
3,649,174
def datas(draw): """TODO expand to include all optional parameters.""" metric = draw(ascii()) filter = draw(filters()) return flow.Data(metric, filter=filter)
8c85202f36a25f871e7235360353f972a56ba06e
3,649,175
import typing


def create_steps_sequence(num_steps: Numeric, axis: str) -> typing.List[typing.Tuple[float, str]]:
    """
    Returns a list of num_steps tuples: [float, str], with the given string parameter,
    and the floating-point parameter increasing linearly from 0 to 1.

    Example:
    >>> create_steps_sequence(5, 'X')
    [(0.0, 'X'), (0.2, 'X'), (0.4, 'X'), (0.6, 'X'), (0.8, 'X')]
    """
    if isinstance(num_steps, float):
        num_steps = int(num_steps)
    if num_steps == 0:
        return []
    sequence = []
    for step in range(num_steps):
        sequence.append((step * 1.0 / num_steps, axis))
    return sequence
3f7e2010a3360c90bec81a02228b1a7590686175
3,649,176
def disable_doze_light(ad): """Force the device not in doze light mode. Args: ad: android device object. Returns: True if device is not in doze light mode. False otherwise. """ ad.adb.shell("dumpsys battery reset") ad.adb.shell("cmd deviceidle disable light") adb_shell_result = ad.adb.shell("dumpsys deviceidle get light").decode( 'utf-8') if not adb_shell_result.startswith(DozeModeStatus.ACTIVE): info = ("dumpsys deviceidle get light: {}".format(adb_shell_result)) print(info) return False return True
d2054ae8f84a45b360ded839badfbd19fea83b11
3,649,177
def jobs(): """ List all jobs """ return jsonify(job.get_jobs())
c7141011c59851586d327185892ea61d7a11ef58
3,649,178
def get_pipelines(): """Get pipelines.""" return PIPELINES
2d770a9fa189dd534528d26794f8887c638723f4
3,649,179
import re def tokenize(s): """ Tokenize on parenthesis, punctuation, spaces and American units followed by a slash. We sometimes give American units and metric units for baking recipes. For example: * 2 tablespoons/30 mililiters milk or cream * 2 1/2 cups/300 grams all-purpose flour The recipe database only allows for one unit, and we want to use the American one. But we must split the text on "cups/" etc. in order to pick it up. """ return filter(None, re.split(r"([,()])?\s+", clump_fractions(normalise(s))))
04575ff78cb73515fafcda541177d53d330bd510
3,649,180
def makeColorMatrix(n, bg_color, bg_alpha, ix=None, fg_color=[228/255.0, 26/255.0, 28/255.0], fg_alpha=1.0): """ Construct the RGBA color parameter for a matplotlib plot. This function is intended to allow for a set of "foreground" points to be colored according to integer labels (e.g. according to clustering output), while "background" points are all colored something else (e.g. light gray). It is used primarily in the interactive plot tools for DeBaCl but can also be used directly by a user to build a scatterplot from scratch using more complicated DeBaCl output. Note this function can be used to build an RGBA color matrix for any aspect of a plot, including point face color, edge color, and line color, despite use of the term "points" in the descriptions below. Parameters ---------- n : int Number of data points. bg_color : list of floats A list with three entries, specifying a color in RGB format. bg_alpha : float Specifies background point opacity. ix : list of ints, optional Identifies foreground points by index. Default is None, which does not distinguish between foreground and background points. fg_color : list of ints or list of floats, optional Only relevant if 'ix' is specified. If 'fg_color' is a list of integers then each entry in 'fg_color' indicates the color of the corresponding foreground point. If 'fg_color' is a list of 3 floats, then all foreground points will be that RGB color. The default is to color all foreground points red. fg_alpha : float, optional Opacity of the foreground points. Returns ------- rgba : 2D numpy array An 'n' x 4 RGBA array, where each row corresponds to a plot point. """ rgba = np.zeros((n, 4), dtype=np.float) rgba[:, 0:3] = bg_color rgba[:, 3] = bg_alpha if ix is not None: if np.array(fg_color).dtype.kind == 'i': palette = Palette() fg_color = palette.applyColorset(fg_color) rgba[ix, 0:3] = fg_color rgba[ix, 3] = fg_alpha return rgba
7ef7a7cfb6cd4a6bcb97086382e6b95e5340ce78
3,649,181
import itertools


def closest_pair(points):
    """
    Closest pair of points, O(N log N)
    Verify: http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=CGL_5_A&lang=ja
    :param list of Point points:
    :rtype: (float, (Point, Point))
    :return: (distance, point pair)
    """
    assert len(points) >= 2

    def _rec(xsorted):
        """
        :param list of Point xsorted:
        :rtype: (float, (Point, Point))
        """
        n = len(xsorted)
        if n <= 2:
            return xsorted[0].dist(xsorted[1]), (xsorted[0], xsorted[1])
        if n <= 3:
            # brute force
            d = INF
            pair = None
            for p, q in itertools.combinations(xsorted, r=2):
                if p.dist(q) < d:
                    d = p.dist(q)
                    pair = p, q
            return d, pair

        # divide and conquer
        # closest pair on each half
        ld, lp = _rec(xsorted[:n // 2])
        rd, rp = _rec(xsorted[n // 2:])
        if ld <= rd:
            d = ld
            ret_pair = lp
        else:
            d = rd
            ret_pair = rp

        mid_x = xsorted[n // 2].x
        # collect the points within d of the dividing line
        mid_points = []
        for p in xsorted:
            # if abs(p.x - mid_x) < d:
            if abs(p.x - mid_x) - d < -EPS:
                mid_points.append(p)

        # update if any pair among them is closer than d
        mid_points.sort(key=lambda p: p.y)
        mid_n = len(mid_points)
        for i in range(mid_n - 1):
            j = i + 1
            p = mid_points[i]
            q = mid_points[j]
            # while q.y - p.y < d
            while (q.y - p.y) - d < -EPS:
                pq_d = p.dist(q)
                if pq_d < d:
                    d = pq_d
                    ret_pair = p, q
                j += 1
                if j >= mid_n:
                    break
                q = mid_points[j]
        return d, ret_pair

    return _rec(list(sorted(points, key=lambda p: p.x)))
fbb189269b6d1fcbf214d8030d49bb0605b375c2
3,649,182
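Point, INF, and EPS are not defined in the snippet above, so the following sketch supplies minimal stand-ins (assuming closest_pair and these stand-ins live in the same module) and cross-checks the divide-and-conquer result against a brute-force scan on random points.

import itertools
import math
import random

INF = float('inf')
EPS = 1e-9

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def dist(self, other):
        return math.hypot(self.x - other.x, self.y - other.y)

random.seed(0)
pts = [Point(random.random(), random.random()) for _ in range(200)]

d, (p, q) = closest_pair(pts)
brute = min(a.dist(b) for a, b in itertools.combinations(pts, 2))
assert abs(d - brute) < EPS   # divide-and-conquer agrees with the O(N^2) scan
print(d, (p.x, p.y), (q.x, q.y))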
def less_equals(l,r): """ | Forms constraint :math:`l \leq r`. :param l: number, :ref:`scalar object<scalar_ref>` or :ref:`multidimensional object<multi_ref>`. :param r: number, :ref:`scalar object<scalar_ref>` or :ref:`multidimensional object<multi_ref>`. :return: :ref:`constraint<constr_obj>` or :ref:`list of constraints<constr_list_obj>`. """ return compare(l,LESS_EQUALS,r)
ba37a090cbbf1d7db99411d67e9eda572c1f0153
3,649,183
def ensureImageMode(tex : Image, mode="RGBA") -> Image: """Ensure the passed image is in a given mode. If it is not, convert it. https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes :param Image tex: The image whose mode to check :param str mode: The mode to ensure and convert to if needed :return: tex if it is of the given mode. tex converted to mode otherwise. :rtype: Image """ return tex if tex.mode == mode else tex.convert(mode)
9b77763fbfea0f66b4b4d7151cdf595f2e2b8aa6
3,649,184
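A quick Pillow usage sketch for the helper above, assuming ensureImageMode is in scope; the 4x4 red test image is made up for illustration.

from PIL import Image

rgb = Image.new("RGB", (4, 4), color=(255, 0, 0))

converted = ensureImageMode(rgb)             # not RGBA, so a converted copy is returned
same = ensureImageMode(converted, "RGBA")    # already RGBA, returned unchanged

print(converted.mode, same is converted)     # RGBA True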
import os import glob def generate_dada_filelist(filename): """ Generate a list of DADA files from start filename Args: filename (str): Path to file. e.g. /data/dprice/2020-07-23-02:33:07.587_0000000000000000.000000.dada Returns: flist (list): A list of all associated files """ bn = os.path.basename(filename) dn = os.path.dirname(filename) bn_root = '_'.join(bn.split('_')[:-1]) # Strips off _000.000.dada bit flist = sorted(glob.glob(os.path.join(dn, bn_root + '_*.dada'))) return flist
55cde1a818e78886ace3aa89ff9535f099033a79
3,649,185
import os import shutil import subprocess def buildWheels(buildDir, requirements): """build wheels :param buildDir: directory to put wheels in (under 'wheelhouse') :type buildDir: string :param requirements: name of file holding names of Python packages :type requirements: string """ wheelhouse = os.path.join(buildDir, 'wheelhouse') if os.path.exists(wheelhouse): shutil.rmtree(wheelhouse) subprocess.check_call(['pip', 'wheel', '--requirement', requirements, '--wheel-dir', wheelhouse]) subprocess.check_call(['pip', 'wheel', 'setuptools==15.2', '--wheel-dir', wheelhouse]) subprocess.check_call(['pip', 'wheel', '.', '--wheel-dir', wheelhouse]) return wheelhouse
81bb1879ee1ce0e711dc36fe55cf0b47ad48f3c7
3,649,186
def code2name(code: int) -> str: """ Convert prefecture code to name """ return __code2name[code]
d2ca1a3977915359afd8254337e14c6fd13db8b3
3,649,187
def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec, order, step_size, time, tol, unitary, upper): """Runs Newton's method to solve the BDF equation.""" initial_guess = tf.reduce_sum( tf1.where( tf.range(MAX_ORDER + 1) <= order, backward_differences[:MAX_ORDER + 1], tf.zeros_like(backward_differences)[:MAX_ORDER + 1]), axis=0) rhs_constant_term = newton_coefficient * tf.reduce_sum( tf1.where( tf.range(1, MAX_ORDER + 1) <= order, RECIPROCAL_SUMS[1:, np.newaxis] * backward_differences[1:MAX_ORDER + 1], tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]), axis=0) next_time = time + step_size step_size_cast = tf.cast(step_size, backward_differences.dtype) real_dtype = tf.abs(backward_differences).dtype def newton_body(iterand): """Performs one iteration of Newton's method.""" next_backward_difference = iterand.next_backward_difference next_state_vec = iterand.next_state_vec rhs = newton_coefficient * step_size_cast * ode_fn_vec( next_time, next_state_vec) - rhs_constant_term - next_backward_difference delta = tf.squeeze( tf.linalg.triangular_solve( upper, tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]), lower=False)) num_iters = iterand.num_iters + 1 next_backward_difference += delta next_state_vec += delta delta_norm = tf.cast(tf.norm(delta), real_dtype) lipschitz_const = delta_norm / iterand.prev_delta_norm # Stop if method has converged. approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm close_to_sol = approx_dist_to_sol < tol delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype)) converged = close_to_sol | delta_norm_is_zero finished = converged # Stop if any of the following conditions are met: # (A) We have hit the maximum number of iterations. # (B) The method is converging too slowly. # (C) The method is not expected to converge. too_slow = lipschitz_const > 1. finished = finished | too_slow if max_num_iters is not None: too_many_iters = tf.equal(num_iters, max_num_iters) num_iters_left = max_num_iters - num_iters num_iters_left_cast = tf.cast(num_iters_left, real_dtype) wont_converge = ( approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol) finished = finished | too_many_iters | wont_converge return [ _NewtonIterand( converged=converged, finished=finished, next_backward_difference=next_backward_difference, next_state_vec=next_state_vec, num_iters=num_iters, prev_delta_norm=delta_norm) ] iterand = _NewtonIterand( converged=False, finished=False, next_backward_difference=tf.zeros_like(initial_guess), next_state_vec=tf.identity(initial_guess), num_iters=0, prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype)) [iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished), newton_body, [iterand]) return (iterand.converged, iterand.next_backward_difference, iterand.next_state_vec, iterand.num_iters)
9a5e6e45357d2d769153bf6854818e22df7639f3
3,649,188
from unittest.mock import Mock import os def mock_handler( handler: RequestHandler, uri: str = 'https://hub.example.com', method: str = 'GET', **settings: dict ) -> RequestHandler: """Instantiate a Handler in a mock application""" application = Application( hub=Mock(base_url='/hub/', server=Mock(base_url='/hub/'),), cookie_secret=os.urandom(32), db=Mock(rollback=Mock(return_value=None)), **settings, ) request = HTTPServerRequest(method=method, uri=uri, connection=Mock(),) handler = RequestHandler(application=application, request=request,) handler._transforms = [] return handler
8615a77bd401b1e6cd9b078ae0b8d9896aecc0d7
3,649,189
import random def input(channel): """ To read the value of a GPIO pin: :param channel: :return: """ return LOW if random.random() < 0.5 else HIGH
838df044dc18c443e2f35f7f67a8e07b8276e1a3
3,649,190
def kml_start(params): """Define basic kml header string""" kmlstart = ''' <Document> <name>%s</name> <open>1</open> <description>%s</description> ''' return kmlstart % (params[0], params[1])
c2fa4c1eeff086dfc3baa41ecd067634920b25b1
3,649,191
def add_item_to_do_list():
    """
    Asks users to keep entering items to add to a new To Do list until they enter the word 'stop'
    :return: to do list with new items
    """
    # One straightforward reading of the docstring: prompt until the user enters 'stop'.
    to_do_list = []
    while True:
        item = input("Enter an item to add to the To Do list (or 'stop' to finish): ")
        if item.lower() == 'stop':
            break
        to_do_list.append(item)
    return to_do_list
4c133ea3c05024a51dda2fb9f01dcc30926f84f4
3,649,192
def parse(tokens): """Currently parse just supports fn, variable and constant definitions.""" context = Context() context.tokens = tokens while tokens: parse_token(context) if context.stack: raise CompileError("after parsing, there are still words on the stack!!:\n{0}".format( context.stack)) return context
89dce5a630dd0bd657963185ac533738bee7d6a5
3,649,193
import sys import numpy def _create_rpc_callback(label, result_counter): """Creates RPC callback function. Args: label: The correct label for the predicted example. result_counter: Counter for the prediction result. Returns: The callback function. """ def _callback(result_future): """Callback function. Calculates the statistics for the prediction result. Args: result_future: Result future of the RPC. """ exception = result_future.exception() if exception: result_counter.inc_error() print(exception) else: sys.stdout.write('.') sys.stdout.flush() response = numpy.array( result_future.result().outputs['scores'].float_val) prediction = numpy.argmax(response) if label != prediction: result_counter.inc_error() result_counter.inc_done() result_counter.dec_active() return _callback
6b3276e9db5d551cb5abdd3f3f9b1f5ce041b02e
3,649,194
def get_file_iterator(options):
    """
    returns a sequence of files
    raises IOError if problematic
    raises ValueError if problematic
    """
    # -------- BUILD FILE ITERATOR/GENERATOR --------
    if options.f is not None:
        files = options.f
    elif options.l is not None:
        try:
            lfile = open(options.l, 'r')
            # make a generator of non-blank lines
            files = (line.strip() for line in lfile if line.strip())
        except IOError:
            msg = "{0} does not exist.".format(options.l)
            raise IOError(msg)
    else:
        msg = "Must provide input files or file list."
        raise ValueError(msg)
    return files
53b16f49d14dc346e404a63415772dd2a1d10f50
3,649,195
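The options object above only needs .f and .l attributes, so an argparse.Namespace works as a stand-in for whatever parser the original code uses; the file names below are made up, and get_file_iterator is assumed to be in scope.

import argparse

# Explicit list of files takes priority...
opts = argparse.Namespace(f=["a.fastq", "b.fastq"], l=None)
print(list(get_file_iterator(opts)))   # ['a.fastq', 'b.fastq']

# ...otherwise non-blank lines are read from a list file.
with open("files.txt", "w") as fh:
    fh.write("one.fastq\n\ntwo.fastq\n")
opts = argparse.Namespace(f=None, l="files.txt")
print(list(get_file_iterator(opts)))   # ['one.fastq', 'two.fastq']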
def entropy_from_CT(SA, CT): """ Calculates specific entropy of seawater. Parameters ---------- SA : array_like Absolute salinity [g kg :sup:`-1`] CT : array_like Conservative Temperature [:math:`^\circ` C (ITS-90)] Returns ------- entropy : array_like specific entropy [J kg :sup:`-1` K :sup:`-1`] Examples -------- >>> import gsw >>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324] >>> CT = [28.8099, 28.4392, 22.7862, 10.2262, 6.8272, 4.3236] >>> gsw.entropy_from_CT(SA, CT) array([ 400.38916315, 395.43781023, 319.86680989, 146.79103279, 98.64714648, 62.79185763]) References ---------- .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of seawater - 2010: Calculation and use of thermodynamic properties. Intergovernmental Oceanographic Commission, Manuals and Guides No. 56, UNESCO (English), 196 pp. See appendix A.10. """ SA = np.maximum(SA, 0) pt0 = pt_from_CT(SA, CT) return -gibbs(n0, n1, n0, SA, pt0, 0)
dfaaeef93ed924bc5e49fb02c30b6cc43ef824e0
3,649,196
def checking_log(input_pdb_path: str, output_log_path: str, properties: dict = None, **kwargs) -> int: """Create :class:`CheckingLog <model.checking_log.CheckingLog>` class and execute the :meth:`launch() <model.checking_log.CheckingLog.launch>` method.""" return CheckingLog(input_pdb_path=input_pdb_path, output_log_path=output_log_path, properties=properties, **kwargs).launch()
c6cb77585920609e12b90f5f783ceb73b58afb8b
3,649,197
from typing import Optional import os import logging import json def _repoint_files_json_dir(filename: str, source_folder: str, target_folder: str, working_folder: str) -> Optional[str]: """ Repoints the DIR entry in the JSON file to the target folder Arguments: filename: the file to load and process source_folder: the source folder to replace with target folder; if empty or None, a best guess is applied target_folder: the target folder for the DIR entries working_folder: the working folder to place the updated file in Return: The name of the adjusted JSON file when successful. Otherwise the None is returned Notes: The new file will be have the same name as the original, but will be in the working folder. If a file by that name already exists in the working folder, it will be overwritten. """ # Check parameters if not os.path.isfile(filename): msg = 'Invalid file specified to repoint files JSON "%s"' % filename logging.warning(msg) return None if not os.path.isdir(working_folder): msg = 'Invalid working folder specified to repoint files JSON "%s"' % working_folder logging.warning(msg) return None # Load the JSON file_json = _load_json_file(filename) if file_json is None: msg = 'Unable to load JSON file when repointing files JSON "%s"' % filename logging.warning(msg) return None if not isinstance(file_json, dict): msg = 'Unknown JSON format when repointing files JSON "%s"' % filename logging.warning(msg) return None if 'FILE_LIST' not in file_json: msg = 'JSON missing FILE_LIST key when repointing files JSON "%s"' % filename logging.warning(msg) return None new_file = os.path.join(working_folder, os.path.basename(filename)) all_files = file_json['FILE_LIST'] if not isinstance(all_files, list) and not isinstance(all_files, tuple) and not isinstance(all_files, set): msg = 'FILE_LIST value is not a list of files for repointing files JSON "%s"' % filename logging.warning(msg) return None try: # Make sure we have a source folder to work with if not source_folder: cur_path = all_files[0]['DIR'] if cur_path[-1:] =='/' or cur_path[-1:] =='\\': cur_path = cur_path[:len(cur_path) - 1] source_folder = os.path.dirname(cur_path) # Run through the files that we have new_files = [] for one_file in all_files: cur_file = {**one_file} if cur_file['DIR'].startswith(source_folder): cur_file['DIR'] = _replace_folder_path(cur_file['DIR'], source_folder, target_folder) new_files.append(cur_file) with open(new_file, 'w', encoding='utf8') as out_file: json.dump({"FILE_LIST": new_files}, out_file, indent=2) except Exception: msg = 'Exception caught while repointing files JSON: "%s"' % filename logging.exception(msg) new_file = None return new_file
974220fb8526cc667fcd1dcaf7c3cdecda06e6b3
3,649,198
def get_registry_by_name(cli_ctx, registry_name, resource_group_name=None): """Returns a tuple of Registry object and resource group name. :param str registry_name: The name of container registry :param str resource_group_name: The name of resource group """ resource_group_name = get_resource_group_name_by_registry_name( cli_ctx, registry_name, resource_group_name) client = cf_acr_registries(cli_ctx) return client.get(resource_group_name, registry_name), resource_group_name
ca4bcee260f035a7921e772dffaace379e0ab115
3,649,199