content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def rubi_integrate(expr, var, showsteps=False):
    """
    Rule based algorithm for integration. Integrates the expression by applying
    transformation rules to the expression.

    Returns `Integrate` if an expression cannot be integrated.

    Parameters
    ==========
    expr : integrand expression
    var : variable of integration

    Returns Integral object if unable to integrate.
    """
    rubi = LoadRubiReplacer().load()
    expr = expr.replace(sym_exp, rubi_exp)
    expr = process_trig(expr)
    expr = rubi_powsimp(expr)
    if isinstance(expr, (int, Integer, float, Float)):
        return S(expr)*var
    if isinstance(expr, Add):
        results = 0
        for ex in expr.args:
            results += rubi.replace(Integral(ex, var))
        return process_final_integral(results)
    results = util_rubi_integrate(Integral(expr, var))
    return process_final_integral(results)
19,400
def daily_price_read(sheet_name):
    """
    Read stock names and stock codes.
    :param sheet_name:
    :return:
    """
    sql = "SELECT * FROM public.%s limit 50000" % sheet_name
    resultdf = pd.read_sql(sql, engine_postgre)
    resultdf['trade_date'] = resultdf['trade_date'].apply(lambda x: x.strftime('%Y-%m-%d'))
    resultdf['code'] = resultdf[['code', 'exchangeCD']].apply(lambda x: str(x[0]).zfill(6) + '.' + x[1], axis=1)
    return resultdf
19,401
def EWT_Boundaries_Completion(boundaries, NT):
    """
    ======================================================================
    boundaries = EWT_Boundaries_Completion(boundaries, NT)

    This function completes the boundaries vector to get a total of NT
    boundaries by equally splitting the last band (highest frequencies).

    Inputs:
      -boundaries: the boundaries vector you want to complete
      -NT: the total number of boundaries wanted

    Output:
      -boundaries: the completed boundaries vector

    Author: Jerome Gilles
    Institution: UCLA - Department of Mathematics
    Year: 2013
    Version: 1.0

    Python Version: Vinícius Rezende Carvalho - vrcarva@ufmg.br
    Universidade Federal de Minas Gerais - Brasil
    Núcleo de Neurociências
    ======================================================================
    """
    Nd = NT - len(boundaries)
    deltaw = (np.pi - boundaries[-1]) / (Nd + 1)
    for k in range(Nd):
        boundaries = np.append(boundaries, boundaries[-1] + deltaw)
    return boundaries
19,402
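A minimal usage sketch for EWT_Boundaries_Completion above, assuming numpy is imported as np and the function is in scope (and returns the completed vector, as its docstring states). The input values are illustrative.

import numpy as np

# Start from two detected boundaries and complete to NT = 4 in total.
boundaries = np.array([0.5, 1.0])
completed = EWT_Boundaries_Completion(boundaries, NT=4)
# Two extra boundaries split the last band [1.0, pi] evenly:
# deltaw = (pi - 1.0) / 3 ~= 0.714, so completed ~= [0.5, 1.0, 1.714, 2.428]
print(completed)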
def _isfloat(string):
    """
    Checks if a string can be converted into a float.

    Parameters
    ----------
    string : str

    Returns
    -------
    bool: True/False if the string can/can not be converted into a float.
    """
    try:
        float(string)
        return True
    except ValueError:
        return False
19,403
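A short usage sketch of _isfloat above; plain Python, no extra dependencies assumed.

print(_isfloat("3.14"))   # True
print(_isfloat("1e-5"))   # True ("1e-5" parses as a float)
print(_isfloat("abc"))    # False (float() raises ValueError)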
def get(url, **kwargs):
    """
    get json data from API
    :param url:
    :param kwargs:
    :return:
    """
    try:
        result = _get(url, **kwargs)
    except (rq.ConnectionError, rq.ReadTimeout):
        result = {}
    return result
19,404
def sample_indep(p, N, T, D):
    """Simulate an independent sampling mask."""
    obs_ind = np.full((N, T, D), -1)
    for n in range(N):
        for t in range(T):
            pi = np.random.binomial(n=1, p=p, size=D)
            ind = np.where(pi == 1)[0]
            count = ind.shape[0]
            obs_ind[n, t, :count] = ind
    return obs_ind
19,405
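A small usage sketch for sample_indep above, assuming numpy is imported as np. The mask entries are random, so only the shape and the -1 padding are deterministic.

import numpy as np

np.random.seed(0)
mask = sample_indep(p=0.5, N=2, T=3, D=4)
print(mask.shape)   # (2, 3, 4)
# Each (n, t) row lists the sampled dimension indices, padded with -1,
# e.g. something like [0 2 -1 -1]; exact values depend on the RNG draws.
print(mask[0, 0])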
def _relabel_targets(y, s, ranks, n_relabels):
    """Compute relabelled targets based on predicted ranks."""
    demote_ranks = set(sorted(ranks[(s == 0) & (y == 1)])[:n_relabels])
    promote_ranks = set(sorted(ranks[(s == 1) & (y == 0)])[-n_relabels:])
    return np.array([
        _relabel(_y, _s, _r, promote_ranks, demote_ranks)
        for _y, _s, _r in zip(y, s, ranks)])
19,406
def exp(x):
    """Take exponential of input x.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "exp", x)
19,407
def check_table(words_in_block, block_width, num_lines_in_block):
    """ Check if a block is a block of tables or of text."""
    # average_words_per_line = 24
    # total_num_words = 0
    ratio_threshold = 0.50
    actual_num_chars = 0
    all_char_ws = []
    cas = []
    # total_num_words += len(line)
    if num_lines_in_block > 0:
        for word in words_in_block:
            if word['word']:
                actual_num_chars += len(word['word'])
                char_w = float(word['r'] - word['l']) / len(word['word'])
                cas.append(round(char_w, 2))
        all_char_ws.extend(cas)
        average_char_width = np.mean(all_char_ws)
        expected_num_chars = (float(block_width) / average_char_width) * num_lines_in_block
        # expected_word_count = average_words_per_line * num_lines_in_block
        ratio = actual_num_chars / expected_num_chars
        if ratio < ratio_threshold:
            return True
        else:
            return False
    else:
        return False
19,408
def upload_data(
        client, crypto_symbol, utc_timestamp, open_price, highest_price,
        lowest_price, closing_price, volume
):
    """composes json that will store all necessary data into a point,
    then writes it into database"""
    json_body = [
        {
            "measurement": crypto_symbol,
            "time": utc_timestamp,
            "fields": {
                "Open price": open_price,
                "Highest price": highest_price,
                "Lowest price": lowest_price,
                "Closing price": closing_price,
                "Volume ": volume
            }
        }]
    client.write_points(json_body)

    # # few prints to help with test if it works on the spot
    # # results = client.query(f'SELECT * FROM {crypto_symbol}')
    # print(results)
    # print(client.get_list_database())
19,409
def defaultSampleFunction(xy1, xy2):
    """
    The sample function compares how similar two curves are.
    If they are exactly the same it will return a value of zero.

    The default function returns the average error between each sample point
    in two arrays of x/y points, xy1 and xy2.

    Parameters
    ----------
    xy1 : array
        The first input 2D x/y array of points.
    xy2 : array
        The second input 2D x/y array of points.

    Returns
    -------
    float
        The average "distance" between each point on the curve.
        The output quantity is unitless.
    """
    x1 = xy1[:, 0]
    x2 = xy2[:, 0]
    y1 = xy1[:, 1]
    y2 = xy2[:, 1]

    diff = ((x1 - x2)**2 + (y1 - y2)**2)**0.5
    return np.sum(diff) / len(x1)
19,410
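A quick usage sketch of defaultSampleFunction above, assuming numpy is imported as np; the sample curves are illustrative.

import numpy as np

xy1 = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
xy2 = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]])
# Every point in xy2 sits exactly 1.0 above the matching point in xy1,
# so the average point-to-point distance is 1.0.
print(defaultSampleFunction(xy1, xy2))   # 1.0
print(defaultSampleFunction(xy1, xy1))   # 0.0 (identical curves)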
def makesubs(formula,intervals,values=None,variables=None,numden=False): """Generates a new formula which satisfies this condition: for all positive variables new formula is nonnegative iff for all variables in corresponding intervals old formula is nonnegative. >>> newproof() >>> makesubs('1-x^2','[0,1]') Substitute $x\to 1 - \frac{1}{a + 1}$ #depend on shiro.display (2*a + 1)/(a**2 + 2*a + 1) >>> makesubs('1-x^2','[0,1]',values='1/2') Substitute $x\to 1 - \frac{1}{b + 1}$ #depend on shiro.display ((2*b + 1)/(b**2 + 2*b + 1), [1]) >>> makesubs('1-x^2','[0,1]',values='1/2',numden=True) Substitute $x\to 1 - \frac{1}{c + 1}$ #depend on shiro.display (2*c + 1, c**2 + 2*c + 1, [1]) """ formula=S(formula) addsymbols(formula) intervals=_smakeiterable2(intervals) if variables: variables=_smakeiterable(variables) else: variables=sorted(formula.free_symbols,key=str) if values!=None: values=_smakeiterable(values) equations=[var-value for var,value in zip(variables,values)] else: equations=[] newvars=[] warn=0 usedvars=set() for var,interval in zip(variables,intervals): end1,end2=interval z=newvar() newvars+=[z] usedvars|={var} if (end1.free_symbols|end2.free_symbols)&usedvars: warn=1 if end1 in {S('-oo'),S('oo')}: end1,end2=end2,end1 if {end1,end2}=={S('-oo'),S('oo')}: sub1=sub2=(z-1/z) elif end2==S('oo'): sub1=sub2=(end1+z) elif end2==S('-oo'): sub1=sub2=end1-z else: sub1=end2+(end1-end2)/z sub2=end2+(end1-end2)/(1+z) formula=formula.subs(var,sub1) shiro.display(shiro.translation['Substitute']+" $"+latex(var)+'\\to '+latex(sub2)+'$') equations=[equation.subs(var,sub1) for equation in equations] num,den=fractioncancel(formula) for var,interval in zip(newvars,intervals): if {interval[0],interval[1]} & {S('oo'),S('-oo')}==set(): num=num.subs(var,var+1) den=den.subs(var,var+1) equations=[equation.subs(var,var+1) for equation in equations] if values: values=ssolve(equations,newvars) if len(values): values=values[0] num,den=expand(num),expand(den) #shiro.display(shiro.translation["Formula after substitution:"],"$$",latex(num/den),'$$') if warn: shiro.warning(shiro.translation[ 'Warning: intervals contain backwards dependencies. Consider changing order of variables and intervals.']) if values and numden: return num,den,values elif values: return num/den,values elif numden: return num,den else: return num/den
19,411
def generateKey(accountSwitchKey=None, keytype=None):
    """ Generate Key"""
    genKeyEndpoint = '/config-media-live/v2/msl-origin/generate-key'
    if accountSwitchKey:
        params = {'accountSwitchKey': accountSwitchKey}
        params["type"] = keytype
        key = prdHttpCaller.getResult(genKeyEndpoint, params)
    else:
        params = {'type': keytype}
        key = prdHttpCaller.getResult(genKeyEndpoint, params)
    return key
19,412
def positions_count_for_one_ballot_item_doc_view(request):
    """
    Show documentation about positionsCountForOneBallotItem
    """
    url_root = WE_VOTE_SERVER_ROOT_URL
    template_values = positions_count_for_one_ballot_item_doc.positions_count_for_one_ballot_item_doc_template_values(
        url_root)
    template_values['voter_api_device_id'] = get_voter_api_device_id(request)
    return render(request, 'apis_v1/api_doc_page.html', template_values)
19,413
def compare_maxima(input_im, gtruth_im, min_distance=10, threshold_abs=20):
    """Compares image maxima

    Compare that the maxima found in an image matches the maxima found in a
    ground truth image. This function is a wrapper around
    `skimage.feature.peak_local_max()`. It calls this function on both images
    that are passed as arguments, and asserts if the resulting maxima arrays
    returned by this function match.
    """
    gtruth_coordinates = _sort_list(peak_local_max(gtruth_im,
                                                   min_distance=min_distance,
                                                   threshold_abs=threshold_abs))
    input_coordinates = _sort_list(peak_local_max(input_im,
                                                  min_distance=min_distance,
                                                  threshold_abs=threshold_abs))
    np.testing.assert_array_equal(gtruth_coordinates, input_coordinates)
19,414
def atcab_sign_base(mode, key_id, signature):
    """
    Executes the Sign command, which generates a signature using the ECDSA algorithm.

    Args:
        mode       Mode determines what the source of the message to be signed (int)
        key_id     Private key slot used to sign the message. (int)
        signature  Signature is returned here. Format is R and S integers in
                   big-endian format. 64 bytes for P256 curve (Expects bytearray)

    Returns:
        Status code
    """
    if not isinstance(signature, bytearray):
        status = Status.ATCA_BAD_PARAM
    else:
        c_signature = create_string_buffer(64)
        status = get_cryptoauthlib().atcab_sign_base(mode, key_id, byref(c_signature))
        signature[0:] = bytes(c_signature.raw)
    return status
19,415
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name

    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem

    y : array-like of shape (n_samples, )
        Response vector to evaluate against

    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    for f in X:
        corr = np.cov(X[f], y)[1, 0] / (np.std(X[f]) * np.std(y))
        fig = px.scatter(x=X[f], y=y)
        fig.update_layout(title=f"{corr}",
                          xaxis_title=f"{f} value",
                          yaxis_title="price")
        fig.write_image(f"{output_path}/{f}.png")
19,416
def load_gs( gs_path: str, src_species: str = None, dst_species: str = None, to_intersect: List[str] = None, ) -> dict: """Load the gene set file (.gs file). Parameters ---------- gs_path : str Path to the gene set file with the following two columns, separated by tab: - 'TRAIT' - 'GENESET': (1) <gene1>,<gene2>,... each gene will be weighted uniformly or (2) <gene1>:<weight1>,<gene2>:<weight2>,... each gene will be weighted by its weight. src_species : str, default=None Source species, must be either 'mmusculus' or 'hsapiens' if not None dst_species : str, default=None Destination species, must be either 'mmusculus' or 'hsapiens' if not None to_intersect : List[str], default None. Gene list to intersect with the input .gs file. Returns ------- dict_gs : dict Dictionary of gene sets: { trait1: (gene_list, gene_weight_list), trait2: (gene_list, gene_weight_list), ... } """ assert (src_species is None) == ( dst_species is None ), "src_species and dst_species must be both None or not None" # Load homolog map dict_map; only needed when src_species and dst_species # are not None and different. if ((src_species is not None) & (dst_species is not None)) and ( src_species != dst_species ): dict_map = load_homolog_mapping(src_species, dst_species) # type: ignore else: dict_map = None # type: ignore # Load gene set file dict_gs = {} df_gs = pd.read_csv(gs_path, sep="\t") for i, (trait, gs) in df_gs.iterrows(): gs_info = [g.split(":") for g in gs.split(",")] if np.all([len(g) == 1 for g in gs_info]): # if all genes are weighted uniformly dict_weights = {g[0]: 1.0 for g in gs_info} elif np.all([len(g) == 2 for g in gs_info]): # if all genes are weighted by their weights dict_weights = {g[0]: float(g[1]) for g in gs_info} else: raise ValueError(f"gene set {trait} contains genes with invalid format") # Convert species if needed # convert gene list to homologs, if gene can not be mapped, remove it # in both gene list and gene weight if dict_map is not None: dict_weights = { dict_map[g]: w for g, w in dict_weights.items() if g in dict_map } # Intersect with other gene sets if to_intersect is not None: to_intersect = set(to_intersect) dict_weights = {g: w for g, w in dict_weights.items() if g in to_intersect} gene_list = list(dict_weights.keys()) dict_gs[trait] = ( gene_list, [dict_weights[g] for g in gene_list], ) return dict_gs
19,417
def create_and_send(ip, port, address, msg):
    """Create a client and send a message"""
    osc = OSCClient(ip, port)
    osc.send_message(address, msg)
19,418
def sample_surface_even(mesh, count, radius=None): """ Sample the surface of a mesh, returning samples which are VERY approximately evenly spaced. This is accomplished by sampling and then rejecting pairs that are too close together. Parameters --------- mesh : trimesh.Trimesh Geometry to sample the surface of count : int Number of points to return radius : None or float Removes samples below this radius Returns --------- samples : (count, 3) float Points in space on the surface of mesh face_index : (count,) int Indices of faces for each sampled point """ from .points import remove_close # guess radius from area if radius is None: radius = np.sqrt(mesh.area / (3 * count)) # get points on the surface points, index = sample_surface(mesh, count * 3) # remove the points closer than radius points, mask = remove_close(points, radius) # we got all the samples we expect if len(points) >= count: return points[:count], index[mask][:count] # warn if we didn't get all the samples we expect util.log.warning('only got {}/{} samples!'.format( len(points), count)) return points, index[mask]
19,419
def allocate_probabilities(results, num_substations, probabilities): """ Allocate cumulative probabilities. Parameters ---------- results : list of dicts All iterations generated in the simulation function. num_substations : list The number of electricity substation nodes we wish to select for each scenario. probabilities : list Contains the cumulative probabilities we wish to use. Returns ------- output : list of dicts Contains all generated results. """ output = [] for nodes in num_substations: ranked_data = add_cp(results, nodes, probabilities) for probability in probabilities: scenario = min( ranked_data, key=lambda x: abs(float(x["cum_probability"]) - probability) ) output.append(scenario) return output
19,420
def import_one_record_sv01(r, m):
    """Import one ODK Site Visit 0.1 record into WAStD.

    Arguments

    r The record as dict, e.g.
        {
            "instanceID": "uuid:cc7224d7-f40f-4368-a937-1eb655e0203a",
            "observation_start_time": "2017-03-08T07:10:43.378Z",
            "reporter": "florianm",
            "photo_start": {
                "filename": "1488957113670.jpg",
                "type": "image/jpeg",
                "url": "https://..."
            },
            "transect": "-31.9966142 115.88456594 0.0 0.0;",
            "photo_finish": {
                "filename": "1488957172832.jpg",
                "type": "image/jpeg",
                "url": "https://..."
            },
            "comments": null,
            "observation_end_time": "2017-03-08T07:13:23.317Z"
        }

    m The mapping of ODK to WAStD choices

    All existing records will be updated.
    Make sure to skip existing records which should be retained unchanged.

    Creates a Survey, e.g.
        {
            'started_on': datetime.datetime(2017, 1, 31, 16, 0, tzinfo=<UTC>),
            'finished_on': datetime.datetime(2017, 2, 4, 16, 0, tzinfo=<UTC>),
            'site_id': 17,
            'source': 'direct',
            'source_id': None,
            'transect': None,
            'comments': '',
        }
    """
    src_id = r["instanceID"]

    new_data = dict(
        source="odk",
        source_id=src_id,
        site_id=17,  # TODO: reconstruct site on Survey if not given
        transect=read_odk_linestring(r["transect"]),
        started_on=parse_datetime(r["observation_start_time"]),
        finished_on=parse_datetime(r["observation_end_time"]),
        # m["users"][r["reporter"]],
        comments=r["comments"]
    )

    if Survey.objects.filter(source_id=src_id).exists():
        logger.debug("Updating unchanged existing record {0}...".format(src_id))
        Survey.objects.filter(source_id=src_id).update(**new_data)
        e = Survey.objects.get(source_id=src_id)
    else:
        logger.debug("Creating new record {0}...".format(src_id))
        e = Survey.objects.create(**new_data)
    e.save()

    # MediaAttachments
    handle_media_attachment(e, r["photo_start"], title="Site conditions at start of survey")
    handle_media_attachment(e, r["photo_finish"], title="Site conditions at end of survey")

    logger.info(" Saved {0}\n".format(e))
    e.save()
    return e
19,421
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[float]], decimal: int):
    """
    usage.scipy: 5
    usage.sklearn: 4
    """
    ...
19,422
def delete_page_groups(request_ctx, group_id, url, **request_kwargs): """ Delete a wiki page :param request_ctx: The request context :type request_ctx: :class:RequestContext :param group_id: (required) ID :type group_id: string :param url: (required) ID :type url: string :return: Delete page :rtype: requests.Response (with Page data) """ path = '/v1/groups/{group_id}/pages/{url}' url = request_ctx.base_api_url + path.format(group_id=group_id, url=url) response = client.delete(request_ctx, url, **request_kwargs) return response
19,423
def envset(name):
    """Return True if the given environment variable is set

    An environment variable is considered set if it is assigned to a value
    other than 'no', 'n', 'false', 'off', '0', or '0.0' (case insensitive)
    """
    return os.environ.get(name, 'no').lower() not in ['no', 'n', 'false', 'off', '0', '0.0']
19,424
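A small usage sketch for envset above; it only needs the standard-library os module, and the variable names are illustrative.

import os

os.environ["FEATURE_X"] = "1"
os.environ["FEATURE_Y"] = "off"

print(envset("FEATURE_X"))   # True  ("1" is not in the falsy list)
print(envset("FEATURE_Y"))   # False ("off" counts as unset)
print(envset("FEATURE_Z"))   # False (missing variables default to 'no')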
def init_email_templates():
    """Initialize email templates."""
    from app.modules.email_templates.models import EmailTemplate
    template = '<p>{{ message | safe }}</p><a href="{{ url }}" target="_blank">点击访问</a>'
    for name in ["default", "confirm", "reset-password"]:
        EmailTemplate.create(name=name, template=template)
19,425
def GLM(args): """ %prog GLM GenoPrefix Pheno Outdir RUN automated GEMMA General Linear Model """ p = OptionParser(GLM.__doc__) p.set_slurm_opts(jn=True) opts, args = p.parse_args(args) if len(args) == 0: sys.exit(not p.print_help()) GenoPrefix, Pheno, Outdir = args meanG, annoG = GenoPrefix+'.mean', GenoPrefix+'.annotation' outprefix = Pheno.split('.')[0] cmd = '%s -g %s -p %s -a %s -lm 4 -outdir %s -o %s' \ %(gemma, meanG, Pheno, annoG, Outdir, outprefix) print('The command running on the local node:\n%s'%cmd) h = Slurm_header header = h%(opts.time, opts.memory, opts.prefix, opts.prefix, opts.prefix) header += cmd f = open('%s.glm.slurm'%outprefix, 'w') f.write(header) f.close() print('slurm file %s.glm.slurm has been created, you can sbatch your job file.'%outprefix)
19,426
def solver(f, p_e, mesh, degree=1): """ Solving the Darcy flow equation on a unit square media with pressure boundary conditions. """ # Creating mesh and defining function space V = FunctionSpace(mesh, 'P', degree) # Defining Dirichlet boundary p_L = Constant(1.0) def boundary_L(x, on_boundary): return on_boundary and near(x[0], 0) bc_L = DirichletBC(V, p_L, boundary_L) p_R = Constant(0.0) def boundary_R(x, on_boundary): return on_boundary and near(x[0], 1) bc_R = DirichletBC(V, p_R, boundary_R) bcs = [bc_L, bc_R] # If p = p_e on the boundary, then use:- #def boundary(x, on_boundary): #return on_boundary #bc = DirichletBC(V, p_e, boundary) # Defining variational problem p = TrialFunction(V) v = TestFunction(V) d = 2 I = Identity(d) M = Expression('fmax(0.10, exp(-pow(10.0*x[1]-1.0*sin(10.0*x[0])-5.0, 2)))', degree=2, domain=mesh) K = M*I a = dot(K*grad(p), grad(v))*dx L = inner(f, v)*dx # Computing Numerical Pressure p = Function(V) solve(a == L, p, bcs) return p
19,427
def set_stretchmatrix(coefX=1.0, coefY=1.0):
    """Stretching matrix

    Args:
        coefX:
        coefY: coefficients (float) for the matrix
               [coefX  0
                0  coefY]

    Returns:
        stretching_matrix: matrix
    """
    return np.array([[coefX, 0], [0, coefY]])
19,428
def double_click(self, br): """ demo: # double click on every spacer to expand column width for spc in br.select('table.ms-crm-List-Header span.ms-crm-List-Row-header-spacer'): spc.double_click(br) """ ActionChains(br).double_click(self).perform()
19,429
def _test(): """Run an example using the Crystal Maze problem.""" constraints = Constraints() domain = VariableDomain.from_range(domain_count=8, domain_start=1, domain_end=8, alpha_names=True) connected = { (domain.from_name("A"), domain.from_name("B")), (domain.from_name("A"), domain.from_name("C")), (domain.from_name("A"), domain.from_name("D")), (domain.from_name("B"), domain.from_name("C")), (domain.from_name("D"), domain.from_name("C")), (domain.from_name("H"), domain.from_name("E")), (domain.from_name("H"), domain.from_name("F")), (domain.from_name("H"), domain.from_name("G")), (domain.from_name("E"), domain.from_name("F")), (domain.from_name("G"), domain.from_name("F")), (domain.from_name("B"), domain.from_name("E")), (domain.from_name("C"), domain.from_name("F")), (domain.from_name("D"), domain.from_name("G")), (domain.from_name("B"), domain.from_name("F")), (domain.from_name("E"), domain.from_name("C")), (domain.from_name("C"), domain.from_name("G")), (domain.from_name("D"), domain.from_name("F")), } for connected_a, connected_b in connected: constraints.add_constraints(bidirectional(AdjacencyConstraint(connected_a, connected_b))) constraints.add_constraints(AllDifferent(*domain.variable_references)) solution = ForwardChecker(constraints).forward_check(domain) if solution: for variable in solution.variables: print("{} = {}".format(variable.pretty(), next(variable.values.__iter__())))
19,430
def fixjsstyle(files=0): """ Fix js files using fixjsstyle to comply with Google coding style """ files = files.split(" ") if not files == 0 else list_js_files() for file in files: with settings(hide("warnings", "running"), warn_only=True): output = local("fixjsstyle --strict --custom_jsdoc_tags function,namespace,constructs,options,augments,static,extend %s" % file, True) if output == "": print fabric.colors.white("CLEAN ", True) + file else: print fabric.colors.green("FIXED ", True) + file print output # ugly patch to indent properly JSDoc com since fixjsstyle does not file = open(file, "r+") lines = file.readlines() idx = 0 while idx < len(lines): if lines[idx].strip()[0:2] == '/*': level = lines[idx].find('/*') idx += 1 while idx < len(lines): lines[idx] = " " * level + lines[idx].strip() + "\n" if lines[idx].find('*/') != -1: break idx += 1 idx += 1 file.seek(0) file.truncate() file.write("".join(lines)) file.close()
19,431
def set_bit(v, index, x):
    """Set the index:th bit of v to 1 if x is truthy, else to 0, and return the new value."""
    mask = 1 << index   # Compute mask, an integer with just bit 'index' set.
    v &= ~mask          # Clear the bit indicated by the mask (if x is False)
    if x:
        v |= mask       # If x was True, set the bit indicated by the mask.
    return v
19,432
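A short usage sketch of set_bit above; plain Python integers, no dependencies.

v = 0b1010
print(bin(set_bit(v, 0, True)))    # 0b1011 - bit 0 switched on
print(bin(set_bit(v, 3, False)))   # 0b10   - bit 3 cleared
print(bin(set_bit(v, 1, True)))    # 0b1010 - bit 1 was already set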
def test_credentials() -> (str, str):
    """
    Read ~/.synapseConfig and retrieve test username and password
    :return: username and password
    """
    config = _get_config()
    return config.get(DEFAULT_CONFIG_AUTH_SECTION, DEFAULT_CONFIG_USERNAME_OPT),\
        config.get(DEFAULT_CONFIG_AUTH_SECTION, DEFAULT_CONFIG_PASSWORD_OPT)
19,433
def BCA_formula_from_str(BCA_str): """ Get chemical formula string from BCA string Args: BCA_str: BCA ratio string (e.g. 'B3C1A1') """ if len(BCA_str)==6 and BCA_str[:3]=='BCA': # format: BCAxyz. suitable for single-digit integer x,y,z funits = BCA_str[-3:] else: # format: BxCyAz. suitable for multi-digit or non-integer x,y,z funits = re.split('[BCA]',BCA_str) funits = [u for u in funits if len(u) > 0] funits components = ['BaO','CaO','Al2O3'] formula = ''.join([f'({c}){n}' for c,n in zip(components, funits)]) return formula
19,434
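A usage sketch for BCA_formula_from_str above, based on the two input formats its comments describe; the example ratios are illustrative.

# Single-digit 'BCAxyz' format:
print(BCA_formula_from_str('BCA311'))    # (BaO)3(CaO)1(Al2O3)1
# Explicit 'BxCyAz' format, which also allows multi-digit or non-integer ratios:
print(BCA_formula_from_str('B3C1A1'))    # (BaO)3(CaO)1(Al2O3)1
print(BCA_formula_from_str('B2.5C1A1'))  # (BaO)2.5(CaO)1(Al2O3)1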
def get_strongly_connected_components(graph): """ Get strongly connected components for a directed graph The returned list of components is in reverse topological order, i.e., such that the nodes in the first component have no dependencies on other components. """ nodes = list(graph.keys()) node_index_by_node = {node: index for index, node in enumerate(nodes)} row_indexes = [] col_indexes = [] for node, targets in graph.items(): row_indexes += [node_index_by_node[node]] * len(targets) col_indexes += [node_index_by_node[target] for target in targets] data = numpy.ones((len(row_indexes)), dtype=int) n_nodes = len(nodes) csgraph = csr_matrix((data, (row_indexes, col_indexes)), shape=(n_nodes, n_nodes)) n_components, labels = connected_components(csgraph, directed=True, connection='strong') sccs = [[] for i in range(n_components)] for index, label in enumerate(labels): sccs[label] += [nodes[index]] return [frozenset(scc) for scc in sccs]
19,435
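A small usage sketch for get_strongly_connected_components above; the graph is a plain dict of node -> list of successor nodes, and numpy/scipy are assumed to be imported as the function body requires.

graph = {
    "a": ["b"],
    "b": ["a", "c"],   # a <-> b form one strongly connected component
    "c": [],           # c has no outgoing edges
}
sccs = get_strongly_connected_components(graph)
# Two components: frozenset({'a', 'b'}) and frozenset({'c'}); per the docstring
# the list is ordered so that dependency-free components come first.
print(sccs)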
def dot(a, b): """ Computes a @ b, for a, b of the same rank (both 2 or both 3). If the rank is 2, then the innermost dimension of `a` must match the outermost dimension of `b`. If the rank is 3, the first dimension of `a` and `b` must be equal and the function computes a batch matmul. Supports both dense and sparse multiplication (including sparse-sparse). :param a: Tensor or SparseTensor with rank 2 or 3. :param b: Tensor or SparseTensor with same rank as b. :return: Tensor or SparseTensor with rank 2 or 3. """ a_ndim = K.ndim(a) b_ndim = K.ndim(b) assert a_ndim == b_ndim, "Expected equal ranks, got {} and {}" "".format( a_ndim, b_ndim ) a_is_sparse = K.is_sparse(a) b_is_sparse = K.is_sparse(b) # Handle cases: rank 2 sparse-dense, rank 2 dense-sparse # In these cases we can use the faster sparse-dense matmul of tf.sparse if a_ndim == 2: if a_is_sparse and not b_is_sparse: return tf.sparse.sparse_dense_matmul(a, b) if not a_is_sparse and b_is_sparse: return ops.transpose( tf.sparse.sparse_dense_matmul(ops.transpose(b), ops.transpose(a)) ) # Handle cases: rank 2 sparse-sparse, rank 3 sparse-dense, # rank 3 dense-sparse, rank 3 sparse-sparse # In these cases we can use the tfsp.CSRSparseMatrix implementation (slower, # but saves memory) if a_is_sparse: a = tfsp.CSRSparseMatrix(a) if b_is_sparse: b = tfsp.CSRSparseMatrix(b) if a_is_sparse or b_is_sparse: out = tfsp.matmul(a, b) if hasattr(out, "to_sparse_tensor"): return out.to_sparse_tensor() else: return out # Handle case: rank 2 dense-dense, rank 3 dense-dense # Here we use the standard dense operation return tf.matmul(a, b)
19,436
def unreshuffle_2d(x, i0, shape):
    """Undo the reshuffle_2d operation."""
    x_flat = unreshuffle_1d(x, i0)
    x_rev = np.reshape(x_flat, shape)
    x_rev[1::2, :] = x_rev[1::2, ::-1]  # reverse all odd rows
    return x_rev
19,437
def findNodeJustBefore(target, nodes): """ Find the node in C{nodes} which appeared immediately before C{target} in the input document. @type target: L{twisted.web.microdom.Element} @type nodes: C{list} of L{twisted.web.microdom.Element} @return: An element from C{nodes} """ result = None for node in nodes: if comparePosition(target, node) < 0: return result result = node return result
19,438
def _format_line(submission, position, rank_change, total_hours): """ Formats info about a single post on the front page for logging/messaging. A single post will look like this: Rank Change Duration Score Flair Id User Slug 13. +1 10h 188 [Episode](gkvlja) <AutoLovepon> <arte_episode_7_discussion> """ line = "{:3}".format(f"{position}.") if rank_change is None: line += " (new) " elif rank_change != 0: line += " {:7}".format(f"{rank_change:+d} {total_hours}h") else: line += " {:7}".format(f"-- {total_hours}h") line += f" {submission.score:>5}" line += " {:>24}".format(f"[{submission.link_flair_text}]({submission.id})") line += f" <{submission.author.name}>" line += f" <{reddit_utils.slug(submission)}>" return line
19,439
def help_menu_message():
    """Display the help menu options."""
    print('\nPlease enter the number for the item you would like help on:\n')
    print('1: <TBC>')
    print('2: <TBC>')
    print('3: <TBC>')
    print('4: <TBC>')
    print('5: Exit Help Menu')
19,440
def train_net(network, rfcn_network, imdb, roidb, valroidb, output_dir, tb_dir, pretrained_model=None, max_iters=40000): """Train a Fast R-CNN network.""" roidb = filter_roidb(roidb) valroidb = filter_roidb(valroidb) tfconfig = tf.ConfigProto(allow_soft_placement=True) tfconfig.gpu_options.allow_growth = True with tf.Session(config=tfconfig) as sess: sw = SolverWrapper(sess, network, rfcn_network, imdb, roidb, valroidb, output_dir, tb_dir, pretrained_model=pretrained_model) print('Solving...') sw.train_model(sess, max_iters) print('done solving')
19,441
def describe_import_tasks(filters=None, maxResults=None, nextToken=None): """ Returns an array of import tasks for your account, including status information, times, IDs, the Amazon S3 Object URL for the import file, and more. See also: AWS API Documentation Exceptions :example: response = client.describe_import_tasks( filters=[ { 'name': 'IMPORT_TASK_ID'|'STATUS'|'NAME', 'values': [ 'string', ] }, ], maxResults=123, nextToken='string' ) :type filters: list :param filters: An array of name-value pairs that you provide to filter the results for the DescribeImportTask request to a specific subset of results. Currently, wildcard values aren\'t supported for filters.\n\n(dict) --A name-values pair of elements you can use to filter the results when querying your import tasks. Currently, wildcards are not supported for filters.\n\nNote\nWhen filtering by import status, all other filter values are ignored.\n\n\nname (string) --The name, status, or import task ID for a specific import task.\n\nvalues (list) --An array of strings that you can provide to match against a specific name, status, or import task ID to filter the results for your import task queries.\n\n(string) --\n\n\n\n\n\n :type maxResults: integer :param maxResults: The maximum number of results that you want this request to return, up to 100. :type nextToken: string :param nextToken: The token to request a specific page of results. :rtype: dict ReturnsResponse Syntax { 'nextToken': 'string', 'tasks': [ { 'importTaskId': 'string', 'clientRequestToken': 'string', 'name': 'string', 'importUrl': 'string', 'status': 'IMPORT_IN_PROGRESS'|'IMPORT_COMPLETE'|'IMPORT_COMPLETE_WITH_ERRORS'|'IMPORT_FAILED'|'IMPORT_FAILED_SERVER_LIMIT_EXCEEDED'|'IMPORT_FAILED_RECORD_LIMIT_EXCEEDED'|'DELETE_IN_PROGRESS'|'DELETE_COMPLETE'|'DELETE_FAILED'|'DELETE_FAILED_LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'importRequestTime': datetime(2015, 1, 1), 'importCompletionTime': datetime(2015, 1, 1), 'importDeletedTime': datetime(2015, 1, 1), 'serverImportSuccess': 123, 'serverImportFailure': 123, 'applicationImportSuccess': 123, 'applicationImportFailure': 123, 'errorsAndFailedEntriesZip': 'string' }, ] } Response Structure (dict) -- nextToken (string) -- The token to request the next page of results. tasks (list) -- A returned array of import tasks that match any applied filters, up to the specified number of maximum results. (dict) -- An array of information related to the import task request that includes status information, times, IDs, the Amazon S3 Object URL for the import file, and more. importTaskId (string) -- The unique ID for a specific import task. These IDs aren\'t globally unique, but they are unique within an AWS account. clientRequestToken (string) -- A unique token used to prevent the same import request from occurring more than once. If you didn\'t provide a token, a token was automatically generated when the import task request was sent. name (string) -- A descriptive name for an import task. You can use this name to filter future requests related to this import task, such as identifying applications and servers that were included in this import task. We recommend that you use a meaningful name for each import task. importUrl (string) -- The URL for your import file that you\'ve uploaded to Amazon S3. status (string) -- The status of the import task. An import can have the status of IMPORT_COMPLETE and still have some records fail to import from the overall request. 
More information can be found in the downloadable archive defined in the errorsAndFailedEntriesZip field, or in the Migration Hub management console. importRequestTime (datetime) -- The time that the import task request was made, presented in the Unix time stamp format. importCompletionTime (datetime) -- The time that the import task request finished, presented in the Unix time stamp format. importDeletedTime (datetime) -- The time that the import task request was deleted, presented in the Unix time stamp format. serverImportSuccess (integer) -- The total number of server records in the import file that were successfully imported. serverImportFailure (integer) -- The total number of server records in the import file that failed to be imported. applicationImportSuccess (integer) -- The total number of application records in the import file that were successfully imported. applicationImportFailure (integer) -- The total number of application records in the import file that failed to be imported. errorsAndFailedEntriesZip (string) -- A link to a compressed archive folder (in the ZIP format) that contains an error log and a file of failed records. You can use these two files to quickly identify records that failed, why they failed, and correct those records. Afterward, you can upload the corrected file to your Amazon S3 bucket and create another import task request. This field also includes authorization information so you can confirm the authenticity of the compressed archive before you download it. If some records failed to be imported we recommend that you correct the records in the failed entries file and then imports that failed entries file. This prevents you from having to correct and update the larger original file and attempt importing it again. Exceptions ApplicationDiscoveryService.Client.exceptions.AuthorizationErrorException ApplicationDiscoveryService.Client.exceptions.InvalidParameterException ApplicationDiscoveryService.Client.exceptions.InvalidParameterValueException ApplicationDiscoveryService.Client.exceptions.ServerInternalErrorException ApplicationDiscoveryService.Client.exceptions.HomeRegionNotSetException :return: { 'nextToken': 'string', 'tasks': [ { 'importTaskId': 'string', 'clientRequestToken': 'string', 'name': 'string', 'importUrl': 'string', 'status': 'IMPORT_IN_PROGRESS'|'IMPORT_COMPLETE'|'IMPORT_COMPLETE_WITH_ERRORS'|'IMPORT_FAILED'|'IMPORT_FAILED_SERVER_LIMIT_EXCEEDED'|'IMPORT_FAILED_RECORD_LIMIT_EXCEEDED'|'DELETE_IN_PROGRESS'|'DELETE_COMPLETE'|'DELETE_FAILED'|'DELETE_FAILED_LIMIT_EXCEEDED'|'INTERNAL_ERROR', 'importRequestTime': datetime(2015, 1, 1), 'importCompletionTime': datetime(2015, 1, 1), 'importDeletedTime': datetime(2015, 1, 1), 'serverImportSuccess': 123, 'serverImportFailure': 123, 'applicationImportSuccess': 123, 'applicationImportFailure': 123, 'errorsAndFailedEntriesZip': 'string' }, ] } :returns: ApplicationDiscoveryService.Client.exceptions.AuthorizationErrorException ApplicationDiscoveryService.Client.exceptions.InvalidParameterException ApplicationDiscoveryService.Client.exceptions.InvalidParameterValueException ApplicationDiscoveryService.Client.exceptions.ServerInternalErrorException ApplicationDiscoveryService.Client.exceptions.HomeRegionNotSetException """ pass
19,442
def prime_divisors(number: int) -> Iterable[int]: """ Returns the prime divisors of the number (sorted in ascending order). E.g. prime_divisors(20) -> 2, 2, 5 """ for divisor in chain([2], count(3, step=2)): while number % divisor == 0: yield(divisor) number //= divisor if divisor > sqrt(number): break # If the original number was prime if number != 1: yield number
19,443
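A short usage sketch for prime_divisors above; it assumes chain/count from itertools and sqrt from math are imported, as the function body already requires.

print(list(prime_divisors(20)))   # [2, 2, 5]
print(list(prime_divisors(97)))   # [97] (97 is prime)
print(list(prime_divisors(360)))  # [2, 2, 2, 3, 3, 5]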
def run_erasure( # pylint: disable = too-many-arguments privacy_request: PrivacyRequest, policy: Policy, graph: DatasetGraph, connection_configs: List[ConnectionConfig], identity: Dict[str, Any], access_request_data: Dict[str, List[Row]], ) -> Dict[str, int]: """Run an erasure request""" traversal: Traversal = Traversal(graph, identity) with TaskResources(privacy_request, policy, connection_configs) as resources: def collect_tasks_fn( tn: TraversalNode, data: Dict[CollectionAddress, GraphTask] ) -> None: """Run the traversal, as an action creating a GraphTask for each traversal_node.""" if not tn.is_root_node(): data[tn.address] = GraphTask(tn, resources) env: Dict[CollectionAddress, Any] = {} traversal.traverse(env, collect_tasks_fn) def termination_fn(*dependent_values: int) -> Tuple[int, ...]: """The dependent_values here is an int output from each task feeding in, where each task reports the output of 'task.rtf(access_request_data)', which is the number of records updated. The termination function just returns this tuple of ints.""" return dependent_values dsk: Dict[CollectionAddress, Any] = { k: (t.erasure_request, access_request_data[str(k)]) for k, t in env.items() } # terminator function waits for all keys dsk[TERMINATOR_ADDRESS] = (termination_fn, *env.keys()) v = dask.delayed(get(dsk, TERMINATOR_ADDRESS)) update_cts: Tuple[int, ...] = v.compute() # we combine the output of the termination function with the input keys to provide # a map of {collection_name: records_updated}: erasure_update_map: Dict[str, int] = dict( zip([str(x) for x in env], update_cts) ) return erasure_update_map
19,444
def write_module_scripts(folder, platform=sys.platform, blog_list=None, default_engine_paths=None, command=None): """ Writes a couple of scripts which allow a user to be faster on some tasks or to easily get information about the module. @param folder where to write the script @param platform platform @param blog_list blog list to follow, should be attribute ``__blog__`` of the module @param command None to generate scripts for all commands or a value in *[blog, doc]*. @param default_engine_paths default engines (or python distributions) @return list of written scripts The function produces the following files: * *auto_rss_list.xml*: list of rss stream to follow * *auto_rss_database.db3*: stores blog posts * *auto_rss_server.py*: runs a server which updates the scripts and runs a server. It also open the default browser. * *auto_rss_server.(bat|sh)*: run *auto_run_server.py*, the file on Linux might be missing if there is an equivalent python script .. faqref:: :title: How to generate auto_rss_server.py? The following code generates the script *auto_rss_local.py* which runs a local server to read blog posts included in the documentation (it uses module `pyrsslocal <http://www.xavierdupre.fr/app/pyrsslocal/helpsphinx/index.html>`_):: from pyquickhelper.pycode import write_module_scripts, __blog__ write_module_scripts(".", blog_list=__blog__, command="blog") """ # delayed import from .build_helper import get_script_module default_set = {"blog", "doc"} if command is not None: if command not in default_set: raise ValueError( # pragma: no cover "command {0} is not available in {1}".format(command, default_set)) commands = {command} else: commands = default_set res = [] for c in commands: sc = get_script_module( c, platform=sys.platform, blog_list=blog_list, default_engine_paths=default_engine_paths) if sc is None: continue # pragma: no cover tobin = os.path.join(folder, "bin") if not os.path.exists(tobin): os.mkdir(tobin) for item in sc: if isinstance(item, tuple): name = os.path.join(folder, "bin", item[0]) with open(name, "w", encoding="utf8") as f: f.write(item[1]) res.append(name) else: # pragma: no cover name = os.path.join( folder, "bin", "auto_run_%s.%s" % (c, get_script_extension())) with open(name, "w") as f: f.write(item) res.append(name) return res
19,445
def _set_stop_area_locality(connection): """ Add locality info based on stops contained within the stop areas. """ # Find stop areas with associated locality codes with connection.begin(): query_stop_areas = connection.execute( db.select([ models.StopArea.code.label("code"), models.StopPoint.locality_ref.label("ref"), db.func.count(models.StopPoint.locality_ref).label("count") ]) .select_from( models.StopArea.__table__ .join(models.StopPoint, models.StopArea.code == models.StopPoint.stop_area_ref) ) .group_by(models.StopArea.code, models.StopPoint.locality_ref) ) stop_areas = query_stop_areas.fetchall() # Find locality for each stop area that contain the most stops areas, ambiguous = _find_stop_area_mode(stop_areas, "locality_ref") # if still ambiguous, measure distance between stop area and each # locality and add to above if ambiguous: add_areas = _find_locality_distance(connection, ambiguous.keys()) areas.extend(add_areas) utils.logger.info("Adding locality codes to stop areas") for a in areas: connection.execute( db.update(models.StopArea) .values({"locality_ref": a["locality_ref"]}) .where(models.StopArea.code == a["code"]) )
19,446
def RichTextBuffer_FindHandlerByName(*args, **kwargs):
    """RichTextBuffer_FindHandlerByName(String name) -> RichTextFileHandler"""
    return _richtext.RichTextBuffer_FindHandlerByName(*args, **kwargs)
19,447
def likelihood(tec, phase, tec_conversion, lik_sigma, K = 2): """ Get the likelihood of the tec given phase data and lik_var variance. tec: tensor B, 1 phase: tensor B, Nf tec_conversion: tensor Nf lik_sigma: tensor B, 1 (Nf) Returns: log_prob: tensor (B,1) """ mu = wrap(tec*tec_conversion[None,:])# B, Nf phase = wrap(phase) #K, B, Nf d = tf.stack([tf.distributions.Normal(mu + tf.convert_to_tensor(k*2*np.pi,float_type), lik_sigma).log_prob(phase) for k in range(-K,K+1,1)], axis=0) #B, Nf -> B log_lik = tf.reduce_sum(tf.reduce_logsumexp(d, axis=0), axis=1) # B, 1 # tec_mu = tf.gather(tec, neighbour) # tec_std = 0.001 * tf.exp(-0.25*neighbour_dist**2) # tec_prior = tf.distributions.Normal(tec_mu, tec_std).log_prob(tec) # sigma_priors = log_normal_solve(0.2,0.1) # #B, 1 # sigma_prior = tf.distributions.Normal( # tf.convert_to_tensor(sigma_priors[0],dtype=float_type), # tf.convert_to_tensor(sigma_priors[1],dtype=float_type)).log_prob(tf.log(lik_sigma)) - tf.log(lik_sigma) #B, 1 log_prob = log_lik[:,None]# + tec_prior # + sigma_prior return -log_prob
19,448
def angle_between(v1, v2):
    """Returns the angle in radians between vectors 'v1' and 'v2'::

    >>> angle_between((1, 0, 0), (0, 1, 0))
    1.5707963267948966
    >>> angle_between((1, 0, 0), (1, 0, 0))
    0.0
    >>> angle_between((1, 0, 0), (-1, 0, 0))
    3.141592653589793
    """
    # https://stackoverflow.com/a/13849249/782170
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    cos_theta = np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)
    # arccos already returns the angle in [0, pi]; the previous "- pi/2"
    # branch for negative cosines contradicted the doctest for opposite vectors.
    return np.arccos(cos_theta)
19,449
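A quick check of angle_between above, mirroring its doctest; unit_vector is assumed to be the usual v / np.linalg.norm(v) helper defined alongside it.

import numpy as np

print(angle_between((1, 0, 0), (0, 1, 0)))   # ~1.5708 (pi / 2)
print(angle_between((1, 0, 0), (1, 0, 0)))   # 0.0
print(angle_between((1, 0, 0), (-1, 0, 0)))  # ~3.1416 (pi)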
def create_substrate_bulk(wf_dict_node): """ Calcfunction to create a bulk structure of a substrate. :params wf_dict: AiiDA dict node with at least keys lattice, host_symbol and latticeconstant (If they are not there, raises KeyError) Lattice key supports only fcc and bcc raises ExitCode 380, ERROR_NOT_SUPPORTED_LATTICE """ from aiida.engine import ExitCode from ase.lattice.cubic import FaceCenteredCubic from ase.lattice.cubic import BodyCenteredCubic wf_dict = wf_dict_node.get_dict() lattice = wf_dict['lattice'] if lattice == 'fcc': structure_factory = FaceCenteredCubic elif lattice == 'bcc': structure_factory = BodyCenteredCubic else: return ExitCode(380, 'ERROR_NOT_SUPPORTED_LATTICE', message='Specified substrate has to be bcc or fcc.') directions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] host_symbol = str(wf_dict['host_symbol']) latticeconstant = float(wf_dict['latticeconstant']) size = (1, 1, 1) structure = structure_factory(directions=directions, symbol=host_symbol, pbc=(1, 1, 1), latticeconstant=latticeconstant, size=size) return StructureData(ase=structure)
19,450
def run_Net_on_multiple(patchCreator, input_to_cnn_depth=1, cnn = None, str_data_selection="all", save_file_prefix="", apply_cc_filtering = False, output_filetype = 'h5', save_prob_map = False): """ run runNetOnSlice() on neighbouring blocks of data. if opt_cnn is not none, it should point to a CNN / Net that will be used. if patchCreator contains a list of 3D data blocks (patchCreator.second_input_data) then it will be used as second input to cnn.output() """ assert str_data_selection in ["all", "train", "test"] MIN = 0 if str_data_selection in ["all", "train"] else patchCreator.training_set_size MAX = patchCreator.training_set_size if str_data_selection =="train" else len(patchCreator.data) second_input_data = None DATA = patchCreator.data timings=[] # if hasattr(patchCreator,"second_input_data"): # second_input_data = patchCreator.second_input_data[opt_list_index] for opt_list_index in range(MIN, MAX): print "-"*30 print "@",opt_list_index+1,"of max.",len(patchCreator.data) postfix = "" if opt_list_index==None else "_" + utilities.extract_filename(patchCreator.file_names[opt_list_index])[1] if isinstance(patchCreator.file_names[0], str) else str(patchCreator.file_names[opt_list_index]) if not isinstance(patchCreator.file_names[opt_list_index], tuple) else utilities.extract_filename(patchCreator.file_names[opt_list_index][0])[1] if opt_list_index is not None: is_training = "_train" if (opt_list_index < patchCreator.training_set_size) else "_test" else: is_training="" this_save_name = save_file_prefix+"prediction"+postfix+"_"+is_training t0 = time.clock() sav = run_Net_on_Block(cnn, DATA[opt_list_index], patchCreator, bool_predicts_on_softmax=1, second_input_data = second_input_data) #this one does all the work t1 = time.clock() timings.append(t1-t0) if apply_cc_filtering: sav = remove_small_conneceted_components(sav) sav = 1 - remove_small_conneceted_components(1 - sav) save_pred(sav, this_save_name, output_filetype, save_prob_map) print 'timings (len',len(timings),')',np.mean(timings),'+-',np.std(timings) return None
19,451
def user_to_janrain_capture_dict(user): """Translate user fields into corresponding Janrain fields""" field_map = getattr(settings, 'JANRAIN', {}).get('field_map', None) if not field_map: field_map = { 'first_name': {'name': 'givenName'}, 'last_name': {'name': 'familyName'}, 'email': {'name': 'email'}, 'username': {'name': 'displayName'}, } result = {} for field in user._meta.fields: if field.name in field_map: fm = field_map[field.name] value = getattr(user, field.name) func = fm.get('function', None) if func: value = func(value) # Plurals are dot delimited parts = fm['name'].split('.') key = parts[0] if len(parts) == 1: result[key] = value else: result.setdefault(key, {}) result[key][parts[1]] = value return result
19,452
def test_promise_thread_safety(): """ Promise tasks should never be executed in a different thread from the one they are scheduled from, unless the ThreadPoolExecutor is used. Here we assert that the pending promise tasks on thread 1 are not executed on thread 2 as thread 2 resolves its own promise tasks. """ event_1 = threading.Event() event_2 = threading.Event() assert_object = {'is_same_thread': True} def task_1(): thread_name = threading.current_thread().getName() def then_1(value): # Enqueue tasks to run later. # This relies on the fact that `then` does not execute the function synchronously when called from # within another `then` callback function. promise = Promise.resolve(None).then(then_2) assert promise.is_pending event_1.set() # Unblock main thread event_2.wait() # Wait for thread 2 def then_2(value): assert_object['is_same_thread'] = (thread_name == threading.current_thread().getName()) promise = Promise.resolve(None).then(then_1) def task_2(): promise = Promise.resolve(None).then(lambda v: None) promise.get() # Drain task queue event_2.set() # Unblock thread 1 thread_1 = threading.Thread(target=task_1) thread_1.start() event_1.wait() # Wait for Thread 1 to enqueue promise tasks thread_2 = threading.Thread(target=task_2) thread_2.start() for thread in (thread_1, thread_2): thread.join() assert assert_object['is_same_thread']
19,453
def weighted(generator: Callable, directed: bool = False, low: float = 0.0, high: float = 1.0, rng: Optional[Generator] = None) -> Callable: """ Takes as input a graph generator and returns a new generator function that outputs weighted graphs. If the generator is dense, the output will be the weighted adjacency matrix. If the generator is sparse, the new function will return a tuple (adj_list, weights). Parameters ---------- generator : Callable A callable that generates graphs directed: bool Whether to generate weights for directed graphs low : float, optional Lower boundary of the sampling distribution interval, i.e., interval in [low, high), by default 0.0 high : float, optional Upper boundary of the sampling distribution interval, i.e., interval in [low, high), by default 1.0 rng : Generator, optional Numpy random number generator, by default None Returns ------- Callable A callable that generates weighted graphs Examples -------- >> weighted(erdos_renyi)(num_nodes=100, prob=0.5) """ if rng is None: rng = default_rng() def weighted_generator(*args, **kwargs): adj = generator(*args, **kwargs) if adj.shape[0] == adj.shape[1]: num_nodes = adj.shape[0] weights = rng.uniform(low=low, high=high, size=(num_nodes, num_nodes)) if not directed: weights = np.triu(weights) weights = weights + weights.T adj = adj.astype(float) * weights return adj weights = rng.uniform(low=low, high=high, size=(adj.shape[0], 1)) return adj, weights return weighted_generator
19,454
def arg_parse(dataset, view, num_shots=2, cv_number=5): """ arguments definition method """ parser = argparse.ArgumentParser(description='Graph Classification') parser.add_argument('--mode', type=str, default='train', choices=['train', 'test']) parser.add_argument('--v', type=str, default=1) parser.add_argument('--data', type=str, default='Sample_dataset', choices = [ f.path[5:] for f in os.scandir("data") if f.is_dir() ]) parser.add_argument('--dataset', type=str, default=dataset, help='Dataset') parser.add_argument('--view', type=int, default=view, help = 'view index in the dataset') parser.add_argument('--num_epochs', type=int, default=1, #50 help='Training Epochs') parser.add_argument('--num_shots', type=int, default=num_shots, #100 help='number of shots') parser.add_argument('--cv_number', type=int, default=cv_number, help='number of validation folds.') parser.add_argument('--NormalizeInputGraphs', default=False, action='store_true', help='Normalize Input adjacency matrices of graphs') parser.add_argument('--evaluation_method', type=str, default='model assessment', help='evaluation method, possible values : model selection, model assessment') parser.add_argument('--threshold', dest='threshold', default='mean', help='threshold the graph adjacency matrix. Possible values: no_threshold, median, mean') parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.') parser.add_argument('--num-classes', dest='num_classes', type=int, default=2, help='Number of label classes') parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.') parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).') parser.add_argument('--hidden', type=int, default=8, help='Number of hidden units.') parser.add_argument('--nb_heads', type=int, default=8, help='Number of head attentions.') parser.add_argument('--dropout', type=float, default=0.8, help='Dropout rate (1 - keep probability).') parser.add_argument('--alpha', type=float, default=0.2, help='Alpha for the leaky_relu.') return parser.parse_args()
19,455
def generate_sampled_graph_and_labels(triplets, sample_size, split_size, num_rels, adj_list, degrees, negative_rate,tables_id, sampler="uniform"): """Get training graph and signals First perform edge neighborhood sampling on graph, then perform negative sampling to generate negative samples """ # perform edge neighbor sampling if sampler == "uniform": edges = sample_edge_uniform(adj_list, degrees, len(triplets), sample_size) elif sampler == "neighbor": edges = sample_edge_neighborhood(adj_list, degrees, len(triplets), sample_size,tables_id) else: raise ValueError("Sampler type must be either 'uniform' or 'neighbor'.") # relabel nodes to have consecutive node ids edges = triplets[edges] src, rel, dst = edges.transpose() # my_graph = nx.Graph() # edges_to_draw = list(set(list(zip(dst, src, rel)))) # edges_to_draw = sorted(edges_to_draw) # # my_graph.add_edges_from(edges_to_draw[:10]) # # for item in edges_to_draw: # my_graph.add_edge(item[1], item[0], weight=item[2]*10) # pos = nx.spring_layout(my_graph) # labels = nx.get_edge_attributes(my_graph, 'weight') # plt.figure() # nx.draw(my_graph, pos, edge_color='black', width=1, linewidths=1, arrows=True, # node_size=100, node_color='red', alpha=0.9, # labels={node: node for node in my_graph.nodes()}) # nx.draw_networkx_edge_labels(my_graph, pos, edge_labels=labels, font_color='red') # plt.axis('off') # plt.show() uniq_v, edges = np.unique((src, dst), return_inverse=True) src, dst = np.reshape(edges, (2, -1)) relabeled_edges = np.stack((src, rel, dst)).transpose() # negative sampling samples, labels = negative_sampling(relabeled_edges, len(uniq_v), negative_rate) #samples, labels = negative_relations(relabeled_edges, len(uniq_v), # negative_rate) # further split graph, only half of the edges will be used as graph # structure, while the rest half is used as unseen positive samples split_size = int(sample_size * split_size) graph_split_ids = np.random.choice(np.arange(sample_size), size=split_size, replace=False) src = src[graph_split_ids] dst = dst[graph_split_ids] rel = rel[graph_split_ids] # build DGL graph print("# sampled nodes: {}".format(len(uniq_v))) print("# sampled edges: {}".format(len(src) * 2)) #g, rel, norm,_ = build_graph_from_triplets_modified(len(uniq_v), num_rels, # (src, rel, dst)) g, rel, norm=build_graph_directly(len(uniq_v), (src, rel, dst)) return g, uniq_v, rel, norm, samples, labels
19,456
def write(path: str, modules: set) -> None:
    """Writes a distill.txt file with module names."""
    with open(path, "w") as f:
        for module in modules:
            f.write(module + "\n")
19,457
def ParseArgs(): """Parses command line options. Returns: An options object as from optparse.OptionsParser.parse_args() """ parser = optparse.OptionParser() parser.add_option('--android-sdk', help='path to the Android SDK folder') parser.add_option('--android-sdk-tools', help='path to the Android SDK platform tools folder') parser.add_option('--R-package', help='Java package for generated R.java') parser.add_option('--R-dir', help='directory to hold generated R.java') parser.add_option('--res-dir', help='directory containing resources') parser.add_option('--crunched-res-dir', help='directory to hold crunched resources') (options, args) = parser.parse_args() if args: parser.error('No positional arguments should be given.') # Check that required options have been provided. required_options = ('android_sdk', 'android_sdk_tools', 'R_package', 'R_dir', 'res_dir', 'crunched_res_dir') for option_name in required_options: if getattr(options, option_name) is None: parser.error('--%s is required' % option_name.replace('_', '-')) return options
19,458
def write_losses_to_log(loss_list, iter_nums, logdir): """Write losses at steps in iter_nums for loss_list to log in logdir. Args: loss_list (list): a list of losses to write out. iter_nums (list): which steps to write the losses for. logdir (str): dir for log file. """ for loss in loss_list: log_fname = os.path.join(logdir, '{}.csv'.format(loss.name)) history = loss.get_history() with tf.io.gfile.GFile(log_fname, 'w' if 0 in iter_nums else 'a') as f: write_list = ['{:d},{:.7f}'.format(iter_num, history[iter_num]) for iter_num in iter_nums] write_str = '\n'.join(write_list) + '\n' if iter_nums[0] == 0: write_str = 'Iter_num,{}\n'.format(loss.name) + write_str f.write(write_str)
19,459
def get_interface_from_model(obj: Base) -> str: """ Transform the passed model object into an dispatcher interface name. For example, a :class:``Label`` model will result in a string with the value `labels` being returned. :param obj: the model object :return: the interface string """ try: return obj.__tablename__ except AttributeError: raise TypeError("Not a transformable model: ", obj)
19,460
def main():
    """Doc"""
    stemmer = PorterStemmer()
    example_words = ["python", "pythoner", "pythoning", "pythoned", "pythonly"]
    for word in example_words:
        print(stemmer.stem(word))

    new_text = "It is important to by very pythonly while you are pythoning with python. All pythoners have pythoned poorly at least once."
    words = word_tokenize(new_text)
    for word in words:
        print(stemmer.stem(word))
19,461
def get_text(string, start, end, bom=True): """This method correctly accesses slices of strings using character start/end offsets referring to UTF-16 encoded bytes. This allows for using character offsets generated by Rosette (and other softwares) that use UTF-16 native string representations under Pythons with UCS-4 support, such as Python 3.3+ (refer to https://www.python.org/dev/peps/pep-0393/). The offsets are adjusted to account for a UTF-16 byte order mark (BOM) (2 bytes) and also that each UTF-16 logical character consumes 2 bytes. 'character' in this context refers to logical characters for the purpose of character offsets; an individual character can consume up to 4 bytes (32 bits for so-called 'wide' characters) and graphemes can consume even more. """ import codecs if not isinstance(string, str): raise ValueError('expected string to be of type str') if not any(((start is None), isinstance(start, int))): raise ValueError('expected start to be of type int or NoneType') if not any(((end is None), isinstance(end, int))): raise ValueError('expected end to be of type int or NoneType') if start is not None: start *= 2 if bom: start += 2 if end is not None: end *= 2 if bom: end += 2 utf_16, _ = codecs.utf_16_encode(string) sliced, _ = codecs.utf_16_decode(utf_16[start:end]) return sliced
19,462
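# Usage sketch for get_text above (illustrative; the sample string is an
# assumption). The emoji is a non-BMP character, so it occupies two UTF-16
# code units and the character offsets skip two positions for it.
sample = "a\U0001F600b"
print(get_text(sample, 0, 1))  # 'a'
print(get_text(sample, 1, 3))  # the emoji (two UTF-16 code units)
print(get_text(sample, 3, 4))  # 'b'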
def train(model: Hidden,
          device: torch.device,
          hidden_config: HiDDenConfiguration,
          train_options: TrainingOptions,
          this_run_folder: str,
          tb_logger):
    """
    Trains the HiDDeN model
    :param model: The model
    :param device: torch.device object, usually this is GPU (if available), otherwise CPU.
    :param hidden_config: The network configuration
    :param train_options: The training settings
    :param this_run_folder: The parent folder for the current training run to store training artifacts/results/logs.
    :param tb_logger: TensorBoardLogger object which is a thin wrapper for TensorboardX logger. Pass None to disable
                      TensorboardX logging
    :return:
    """
    train_data, val_data = utils.get_data_loaders(hidden_config, train_options)
    file_count = len(train_data.dataset)
    if file_count % train_options.batch_size == 0:
        steps_in_epoch = file_count // train_options.batch_size
    else:
        steps_in_epoch = file_count // train_options.batch_size + 1

    print_each = 10
    images_to_save = 8
    saved_images_size = (512, 512)

    for epoch in range(train_options.start_epoch, train_options.number_of_epochs + 1):
        logging.info('\nStarting epoch {}/{}'.format(epoch, train_options.number_of_epochs))
        logging.info('Batch size = {}\nSteps in epoch = {}'.format(train_options.batch_size, steps_in_epoch))
        losses_accu = {}
        epoch_start = time.time()
        step = 1
        for image, _ in train_data:
            image = image.to(device)
            message = torch.Tensor(np.random.choice([0, 1], (image.shape[0], hidden_config.message_length))).to(device)
            losses, _ = model.train_on_batch([image, message])
            if not losses_accu:  # dict is empty, initialize
                for name in losses:
                    losses_accu[name] = AverageMeter()
            for name, loss in losses.items():
                losses_accu[name].update(loss)
            if step % print_each == 0 or step == steps_in_epoch:
                logging.info(
                    'Epoch: {}/{} Step: {}/{}'.format(epoch, train_options.number_of_epochs, step, steps_in_epoch))
                utils.log_progress(losses_accu)
                logging.info('-' * 40)
            step += 1

        train_duration = time.time() - epoch_start
        logging.info('Epoch {} training duration {:.2f} sec'.format(epoch, train_duration))
        logging.info('-' * 40)
        utils.write_losses(os.path.join(this_run_folder, 'train.csv'), losses_accu, epoch, train_duration)
        if tb_logger is not None:
            tb_logger.save_losses(losses_accu, epoch)
            tb_logger.save_grads(epoch)
            tb_logger.save_tensors(epoch)

        first_iteration = True
        # start from fresh accumulators so validation metrics are not mixed with the training metrics above
        losses_accu = {}
        logging.info('Running validation for epoch {}/{}'.format(epoch, train_options.number_of_epochs))
        for image, _ in val_data:
            image = image.to(device)
            message = torch.Tensor(np.random.choice([0, 1], (image.shape[0], hidden_config.message_length))).to(device)
            losses, (encoded_images, noised_images, decoded_messages) = model.validate_on_batch([image, message])
            if not losses_accu:  # dict is empty, initialize
                for name in losses:
                    losses_accu[name] = AverageMeter()
            for name, loss in losses.items():
                losses_accu[name].update(loss)
            if first_iteration:
                if hidden_config.enable_fp16:
                    image = image.float()
                    encoded_images = encoded_images.float()
                utils.save_images(image.cpu()[:images_to_save, :, :, :],
                                  encoded_images[:images_to_save, :, :, :].cpu(),
                                  epoch,
                                  os.path.join(this_run_folder, 'images'), resize_to=saved_images_size)
                first_iteration = False

        utils.log_progress(losses_accu)
        logging.info('-' * 40)
        utils.save_checkpoint(model, train_options.experiment_name, epoch, os.path.join(this_run_folder, 'checkpoints'))
        utils.write_losses(os.path.join(this_run_folder, 'validation.csv'), losses_accu, epoch,
                           time.time() - epoch_start)

        # if epoch % 10 == 0:
        #     sleep_sec = 5 * 60
        #     logging.info(f'\nSleeping for {sleep_sec} seconds to cool down the GPU\n')
        #     time.sleep(sleep_sec)
19,463
def init_distance(graph: dict, s: str) -> dict:
    """
    Initialize the distance of every node other than the source to positive
    infinity, so that later dictionary lookups never hit a missing key.
    """
    distance = {s: 0}
    for vertex in graph:
        if vertex != s:
            distance[vertex] = math.inf
    return distance
19,464
def read_cry_data(path): """ Read a cry file and extract the molecule's geometry. The format should be as follows:: U_xx U_xy U_xz U_yx U_yy U_yz U_zx U_zy U_zz energy (or comment, this is ignored for now) ele0 x0 y0 z0 ele1 x1 y1 z1 ... elen xn yn zn Where the U matrix is made of the unit cell basis vectors as column vectors. Parameters ---------- path : str A path to a file to read Returns ------- val : LazyValues An object storing all the data """ unit = [] coords = [] elements = [] with open(path, 'r') as f: for line in f: parts = line.strip().split() if len(parts) == 3: unit.append([float(x) for x in parts]) if len(parts) == 4: elements.append(parts[0]) coords.append([float(x) for x in parts[1:]]) return LazyValues(elements=elements, coords=coords, unit_cell=unit)
19,465
def get_camera_wireframe(scale: float = 0.3): """ Returns a wireframe of a 3D line-plot of a camera symbol. """ a = 0.5 * torch.tensor([-2, 1.5, 4]) b = 0.5 * torch.tensor([2, 1.5, 4]) c = 0.5 * torch.tensor([-2, -1.5, 4]) d = 0.5 * torch.tensor([2, -1.5, 4]) C = torch.zeros(3) F = torch.tensor([0, 0, 3]) camera_points = [a, b, d, c, a, C, b, d, C, c, C, F] lines = torch.stack([x.float() for x in camera_points]) * scale return lines
19,466
def read_input(file):
    """
    Read an IDX-format binary file (e.g. the MNIST image/label files).

    Args:
        file: path to the IDX binary input file.

    Returns:
        numpy.ndarray: the data reshaped to the dimensions declared in the header.
    """
    with open(file, 'rb') as file:
        # header: two zero bytes, a data-type byte and the number of dimensions
        zero, data_type, n_dims = st.unpack('>HBB', file.read(4))
        # one big-endian uint32 per dimension gives the full shape
        shape = tuple(st.unpack('>I', file.read(4))[0] for _ in range(n_dims))
        return np.frombuffer(file.read(), dtype=np.uint8).reshape(shape)
19,467
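# Hypothetical call of read_input on an MNIST-style IDX file; the file name is
# an assumption and must exist on disk.
images = read_input("train-images-idx3-ubyte")
print(images.shape)  # e.g. (60000, 28, 28) for the MNIST training images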
def maximum_segment_sum(input_list: List): """ Return the maximum sum of the segments of a list Examples:: >>> from pyske.core import PList, SList >>> maximum_segment_sum(SList([-5 , 2 , 6 , -4 , 5 , -6 , -4 , 3])) 9 >>> maximum_segment_sum(PList.from_seq([-33 , 22 , 11 , -44])) 33 >>> maximum_segment_sum(PList.from_seq([-33 , 22 , 0, 1, -3, 11 , -44, 30, -5, -13, 12])) 31 :param input_list: a PySke list of numbers :return: a number, the maximum sum of the segments of a list """ best_sum, _ = input_list.map(int_to_tuple).reduce(max_and_sum, (0, 0)) return best_sum
19,468
def list_files(tag=None, sat_id=None, data_path=None, format_str=None): """Return a Pandas Series of every file for chosen satellite data Parameters ----------- tag : (string or NoneType) Denotes type of file to load. (default=None) sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files Notes ----- Called by pysat. Not intended for direct use by user. """ if data_path is not None: if tag == '': # files are by month, going to add date to monthly filename for # each day of the month. The load routine will load a month of # data and use the appended date to select out appropriate data. if format_str is None: format_str = 'kp{year:2d}{month:02d}.tab' out = pysat.Files.from_os(data_path=data_path, format_str=format_str, two_digit_year_break=94) if not out.empty: out.ix[out.index[-1]+pds.DateOffset(months=1)- pds.DateOffset(days=1)] = out.iloc[-1] out = out.asfreq('D', 'pad') out = out + '_' + out.index.strftime('%Y-%m-%d') return out elif tag == 'forecast': format_str = 'kp_forecast_{year:04d}-{month:02d}-{day:02d}.txt' files = pysat.Files.from_os(data_path=data_path, format_str=format_str) # pad list of files data to include most recent file under tomorrow if not files.empty: files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1] files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1] return files elif tag == 'recent': format_str = 'kp_recent_{year:04d}-{month:02d}-{day:02d}.txt' files = pysat.Files.from_os(data_path=data_path, format_str=format_str) # pad list of files data to include most recent file under tomorrow if not files.empty: files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1] files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1] return files else: raise ValueError('Unrecognized tag name for Space Weather Index Kp') else: raise ValueError ('A data_path must be passed to the loading routine ' + 'for Kp')
19,469
def has_bookmark(uri): """ Returns true if the asset with given URI has been bookmarked by the currently logged in user. Returns false if there is no currently logged in user. """ if is_logged_in(): mongo.db.bookmarks.ensure_index('username') mongo.db.bookmarks.ensure_index('asset.uri') return mongo.db.bookmarks.find_one({ 'username': current_user.username, 'asset.uri': uri }) is not None return False
19,470
def evaluate(f, K, dataiter, num_steps):
    """Evaluates online few-shot episodes.

    Args:
        f: Callable that maps a support batch (x_s, y_s) to features.
        K: Unused in this function; kept to match the caller's signature.
        dataiter: Dataset iterator.
        num_steps: Number of episodes; -1 runs through the whole iterator.
    """
    if num_steps == -1:
        it = six.moves.xrange(len(dataiter))
    else:
        it = six.moves.xrange(num_steps)
    it = tqdm(it, ncols=0)
    results = []
    for i, batch in zip(it, dataiter):
        # Get features.
        h = f(batch['x_s'], batch['y_s'])
        if type(h) is tuple:
            h, (beta, gamma, beta2, gamma2, count) = h
            print('beta/count', np.stack([beta, count], axis=-1))
            batch['beta'] = beta.numpy()
            batch['gamma'] = gamma.numpy()
            batch['beta2'] = beta2.numpy()
            batch['gamma2'] = gamma2.numpy()
            batch['count'] = count.numpy()
        batch['h'] = h.numpy()
        results.append(batch)
    return results
19,471
def test_command_not_implemented(client, es_clear): """Tests a configured action without implemented function.""" # NOTE: recid can be dummy since it won't reach pass the resource view response = client.post( "/mocks/1234-abcd/draft/actions/command", headers=HEADERS ) assert response.status_code == 500
19,472
def find_period_of_function(eq, slopelist, nroots):
    """Find the period of the function, then build a list of x values that are
    one period apart.

    Example Input: find_period_of_function(eq1, [0.947969, 1.278602], 3)
    """
    global tan
    s1 = slopelist[0]
    s2 = slopelist[1]
    if tan == 1:
        T = 3.14159265359
    else:
        T = s2 - s1
    periodlist = []
    for i in range(nroots):
        periodlist.append(s1 + T * i)
    return periodlist
19,473
def merge_date_tags(path, k): """called when encountering only tags in an element ( no text, nor mixed tag and text) Arguments: path {list} -- path of the element containing the tags k {string} -- name of the element containing the tags Returns: whatever type you want -- the value of the element note : if you want """ l=k['#alldata'] #2015/01/01 12:10:30 # if "PubMedPubDate" in path[-1]: if "date" in path[-1].lower(): month=None year=None day=None hour=None minute=None r="" # it should always be a dict with one key, and a subdict as value, containing an "#alldata" key # {'month': {'#alldata': ['09']}} for i in l: # month k = next(iter(i)) # ['09'] ad = i[k]['#alldata'] if k == "Year" and len(ad) == 1 and isinstance (ad[0], str): year=ad[0] elif k == "Month" and len(ad) == 1 and isinstance (ad[0], str): month=ad[0] elif k == "Day" and len(ad) == 1 and isinstance (ad[0], str): day=ad[0] elif k == "Hour" and len(ad) == 1 and isinstance (ad[0], str): hour=ad[0] if len(hour) == 1: hour = "0"+hour elif k == "Minute" and len(ad) == 1 and isinstance (ad[0], str): minute=ad[0] if len(minute) == 1: minute = "0"+minute if year is not None: r=r+year if month is not None: r=r+"/"+month if day is not None: r=r+"/"+day if hour is not None: r=r+ " "+hour if minute is not None: r=r+":"+minute #retrun only if at least "year" is present return r return k
19,474
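# Illustrative input for merge_date_tags: a PubDate-like element whose
# '#alldata' list holds one sub-dict per date component (the values are
# invented for the example).
element = {'#alldata': [
    {'Year': {'#alldata': ['2015']}},
    {'Month': {'#alldata': ['01']}},
    {'Day': {'#alldata': ['07']}},
    {'Hour': {'#alldata': ['9']}},
    {'Minute': {'#alldata': ['5']}},
]}
print(merge_date_tags(['PubmedArticle', 'PubDate'], element))  # '2015/01/07 09:05'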
def pr(): """ Work with pull requests. """ pass
19,475
def ocp_play():
    """Decorator for adding a method as a common playback search handler."""
    def real_decorator(func):
        # Store the flag inside the function
        # This will be used later to identify the method
        if not hasattr(func, 'is_ocp_playback_handler'):
            func.is_ocp_playback_handler = True
        return func

    return real_decorator
19,476
def parse_store(client, path='/tmp/damn', index='damn'):
    """Parse the file descriptions found under ``path`` and bulk-index them
    into the Elasticsearch ``index``, reporting success or failure per document.
    """
    path = dirname(dirname(abspath(__file__))) if path is None else path
    repo_name = basename(path)
    create_store_index(client, index)
    for ok, result in streaming_bulk(
            client,
            parse_file_descriptions(path),
            index=index,
            chunk_size=50  # keep the batch sizes small for appearances only
    ):
        action, result = result.popitem()
        doc_id = '/%s/%s/%s' % (index, result['_type'], result['_id'])
        # process the information from ES whether the document has been
        # successfully indexed
        if not ok:
            print('Failed to %s document %s: %r' % (action, doc_id, result))
        else:
            print(doc_id)
19,477
def check_encoder_decoder_args(args) -> None:
    """
    Check possible encoder-decoder argument conflicts.

    :param args: Arguments as returned by argparse.
    """
    encoder_embed_dropout, decoder_embed_dropout = args.embed_dropout
    encoder_rnn_dropout_inputs, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
    encoder_rnn_dropout_states, decoder_rnn_dropout_states = args.rnn_dropout_states
    if encoder_embed_dropout > 0 and encoder_rnn_dropout_inputs > 0:
        logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    if decoder_embed_dropout > 0 and decoder_rnn_dropout_inputs > 0:
        logger.warning("Setting decoder RNN AND target embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    encoder_rnn_dropout_recurrent, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
    if encoder_rnn_dropout_recurrent > 0 or decoder_rnn_dropout_recurrent > 0:
        check_condition(args.rnn_cell_type == C.LSTM_TYPE,
                        "Recurrent dropout without memory loss only supported for LSTMs right now.")
19,478
def _scatter(x_arr, y_arr, attributes,
             xlabel=None, xlim=None, xlog=False,
             ylabel=None, ylim=None, ylog=False,
             show=True, save=None):
    """Private plotting utility function."""

    # initialise figure and axis settings
    fig = plt.figure()
    ax = plt.gca()

    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(True)
    ax.spines['left'].set_visible(True)

    # draw the scatter points
    plt.scatter(x_arr, y_arr,
                color=attributes['color'],
                alpha=attributes['opacity'],
                label=attributes['label'])

    # ax.set_xticks(bins + 0.5)

    # final axis setting
    ax.set_xlim(xlim)
    ax.set_xlabel(xlabel, color="black")
    ax.set_xscale('log' if xlog else 'linear')
    ax.set_ylim(ylim)
    ax.set_ylabel(ylabel, color="black")
    ax.set_yscale('log' if ylog else 'linear')

    # add legend
    if attributes['label'] is not None:
        legend = ax.legend(loc=0)
        legend.get_frame().set_facecolor('white')
        legend.get_frame().set_edgecolor('lightgrey')

    # save/show figure
    if save is not None:
        plt.savefig(save, bbox_inches='tight')
    if show:
        plt.show(fig, block=False)
    return fig, fig.axes
19,479
def first_item(iterable, default=None):
    """
    Returns the first item of given iterable.

    Parameters
    ----------
    iterable : iterable
        Iterable
    default : object
        Default value if the iterable is empty.

    Returns
    -------
    object
        First iterable item.
    """

    if not iterable:
        return default

    for item in iterable:
        return item

    return default
19,480
def init_config(): """ Init configuration """ # Load the initial config config = os.path.dirname(__file__) + \ '/colorset/config' try: data = load_config(config) for d in data: c[d] = data[d] except: pass # Load user's config rainbow_config = os.path.expanduser("~") + os.sep + '.rainbow_config.json' try: data = load_config(rainbow_config) for d in data: c[d] = data[d] except (IOError, ValueError) as e: c['USER_JSON_ERROR'] = str(e) # Load default theme theme_file = os.path.dirname(__file__) + \ '/colorset/' + c['THEME'] + '.json' try: data = load_config(theme_file) for d in data: c[d] = data[d] except: pass
19,481
def stratification(n_subjects_per_strata, n_groups, block_length=4, seed=None):
    """ Create a randomization list for each strata using Block Randomization.

    If a study has several strata, each strata is separately randomized using
    block randomization.

    Args:
        n_subjects_per_strata: A list of the number of subjects for each strata.
        n_groups: The number of groups to randomize subjects to.
        block_length: The length of the blocks.
        seed: (optional) The seed to provide to the RNG.

    Returns:
        list: a list of length `len(n_subjects_per_strata)` of lists of length
            `n_subjects_per_strata`.  Each sublist is the strata specific
            randomization list.

    Notes:
        The value of `block_length` should be a multiple of `n_groups` to
        ensure proper balance.

    Todo:
        Allow for multiple randomization techniques to be used.
    """
    groups = []

    for n_subjects_per_stratum in n_subjects_per_strata:
        # Adding 52490, a dummy value, to the seed ensures a different list
        # per strata. The use of a 'magic number' here allows for
        # reproducibility
        if seed is not None:
            seed = seed + 52490
        groups.append(block(n_subjects_per_stratum, n_groups,
                            block_length, seed))
    return groups
19,482
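# Sketch of stratification for two strata of 8 and 12 subjects randomized to
# 2 groups in blocks of 4 (seed arbitrary); assumes the block() helper returns
# one group assignment per subject.
strata_lists = stratification([8, 12], n_groups=2, block_length=4, seed=42)
print(len(strata_lists))     # 2 strata
print(len(strata_lists[0]))  # 8 assignments for the first stratum
print(len(strata_lists[1]))  # 12 assignments for the second stratum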
def pytest_configure(): """ Hack the `project_template` dir into an actual project to test against. """ from mezzanine.utils.importing import path_for_import template_path = Path(path_for_import("mezzanine")) / "project_template" shutil.copytree(str(template_path), str(TMP_PATH)) proj_path = TMP_PATH / "project_name" local_settings = (proj_path / "local_settings.py.template").read_text() (proj_path / "test_settings.py").write_text(TEST_SETTINGS + local_settings) # Setup the environment for Django sys.path.insert(0, str(TMP_PATH)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_name.test_settings") django.setup()
19,483
def ensure_binary(s, encoding='utf-8', errors='strict'): """Coerce **s** to six.binary_type. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> encoded to `bytes` - `bytes` -> `bytes` """ if isinstance(s, text_type): return s.encode(encoding, errors) elif isinstance(s, binary_type): return s else: raise TypeError("not expecting type '%s'" % type(s))
19,484
def FileDialog(prompt='ChooseFile', indir=''): """ opens a wx dialog that allows you to select a single file, and returns the full path/name of that file """ dlg = wx.FileDialog(None, message = prompt, defaultDir = indir) if dlg.ShowModal() == wx.ID_OK: outfile = dlg.GetPath() else: outfile = None dlg.Destroy() return outfile
19,485
def my_account(): """ Allows a user to manage their account """ user = get_user(login_session['email']) if request.method == 'GET': return render_template('myAccount.html', user=user) else: new_password1 = request.form.get('userPassword1') new_password2 = request.form.get('userPassword2') if new_password1 != new_password2: flash("Passwords do not match!") return render_template('myAccount.html', user=user) user.hash_password(new_password1) # set the new password hash session.add(user) session.commit() flash("Your password has been changed.") return redirect(url_for('index'))
19,486
def recalculate_cart(request): """ Updates an existing discount code, shipping, and tax when the cart is modified. """ from cartridge.shop import checkout from cartridge.shop.forms import DiscountForm from cartridge.shop.models import Cart # Rebind the cart to request since it's been modified. if request.session.get('cart') != request.cart.pk: request.session['cart'] = request.cart.pk request.cart = Cart.objects.from_request(request) discount_code = request.session.get("discount_code", "") if discount_code: # Clear out any previously defined discount code # session vars. names = ("free_shipping", "discount_code", "discount_total") clear_session(request, *names) discount_form = DiscountForm(request, {"discount_code": discount_code}) if discount_form.is_valid(): discount_form.set_discount() if not request.session.get("free_shipping"): settings.clear_cache() clear_session(request, "shipping_type", "shipping_total") handler = lambda s: import_dotted_path(s) if s else lambda *args: None billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING) tax_handler = handler(settings.SHOP_HANDLER_TAX) try: if request.session["order"]["step"] >= checkout.CHECKOUT_STEP_FIRST: billship_handler(request, None) tax_handler(request, None) except (checkout.CheckoutError, ValueError, KeyError): pass
19,487
def __pad_assertwith_0_array4D(grad: 'np.ndarray', pad_nums) -> 'np.ndarray':
    """
    Pad a 4D gradient array by inserting `pad_nums` zeros between adjacent
    elements along the two spatial axes (H and W).

    :param grad: gradient of shape (N, C, H, W)
    :param pad_nums: number of zeros to insert between neighbouring entries
    :return: the zero-interleaved array
    """
    gN, gC, gH, gW = grad.shape

    init1 = np.zeros((gN, gC, gH + (gH - 1) * pad_nums, gW), dtype=grad.dtype)
    init2 = np.zeros((gN, gC, gH + (gH - 1) * pad_nums, gW + (gW - 1) * pad_nums), dtype=grad.dtype)

    # scatter the original rows, then columns, onto every (pad_nums + 1)-th index
    boolean: List[int] = [(pad_nums + 1) * i for i in range(grad.shape[2])]
    init1[:, :, boolean, :] = grad

    boolean: List[int] = [(pad_nums + 1) * i for i in range(grad.shape[3])]
    init2[:, :, :, boolean] = init1

    return init2
19,488
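import numpy as np

# Small check of the zero interleaving above: a 1x1x2x2 gradient dilated with
# pad_nums=1 becomes 1x1x3x3 with zeros between the original entries.
g = np.arange(1, 5, dtype=np.float64).reshape(1, 1, 2, 2)
print(__pad_assertwith_0_array4D(g, 1)[0, 0])
# [[1. 0. 2.]
#  [0. 0. 0.]
#  [3. 0. 4.]]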
def islist(data): """Check if input data is a list.""" return isinstance(data, list)
19,489
def timeoutHandler(signum, frame): """ Function called when a Jodis connection hits the timeout seconds. """ raise TimeoutError('Jodis Connection Timeout')
19,490
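import signal

# Hedged sketch of how timeoutHandler could be wired up via SIGALRM
# (Unix-only); the 5-second budget is an arbitrary assumption.
signal.signal(signal.SIGALRM, timeoutHandler)
signal.alarm(5)      # raises TimeoutError if the block below takes > 5 s
try:
    pass             # ... attempt the connection here ...
finally:
    signal.alarm(0)  # always cancel the pending alarm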
def spatial_mean(xr_da, lon_name="longitude", lat_name="latitude"): """ Perform averaging on an `xarray.DataArray` with latitude weighting. Parameters ---------- xr_da: xarray.DataArray Data to average lon_name: str, optional Name of x-coordinate lat_name: str, optional Name of y-coordinate Returns ------- xarray.DataArray Spatially averaged xarray.DataArray. """ weights = da.cos(da.deg2rad(xr_da[lat_name])) res = xr_da.weighted(weights).mean(dim=[lon_name, lat_name]) return res
19,491
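import numpy as np
import xarray as xr

# Minimal sketch of spatial_mean on a synthetic constant field; assumes the
# module-level alias `da` used above provides cos/deg2rad (e.g. dask.array).
field = xr.DataArray(
    np.ones((3, 4)),
    coords={"latitude": [-30.0, 0.0, 30.0],
            "longitude": [0.0, 90.0, 180.0, 270.0]},
    dims=("latitude", "longitude"),
)
print(float(spatial_mean(field)))  # 1.0 -- latitude weights cannot change a constant field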
def rank(value_to_be_ranked, value_providing_rank):
    """
    Returns the rank of ``value_to_be_ranked`` in the collection
    ``value_providing_rank``, i.e. the number of values strictly smaller than it.

    Works even if ``value_providing_rank`` is a non-orderable collection (e.g., a set).
    A binary search would be an optimized way of doing this if we can constrain
    ``value_providing_rank`` to be an ordered collection.
    """
    num_lesser = [v for v in value_providing_rank if v < value_to_be_ranked]
    return len(num_lesser)
19,492
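# rank() simply counts strictly smaller values, so it also works on sets:
print(rank(5, {1, 3, 5, 7, 9}))  # 2 (only 1 and 3 are smaller than 5)
print(rank(0, [4, 4, 4]))        # 0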
def test_more_images_than_labels():
    """Check compute_images2labels when several images map to each label."""
    # Get images and labels
    images = [f"/{i}/test_{j}.png" for i in range(10) for j in range(10)]
    labels = [f"{i}" for i in range(10)]

    # Get images2labels
    images2labels = compute_images2labels(images, labels)

    # Get true images2labels
    real_images2labels = {images[i * 10 + j]: labels[i]
                          for i in range(10) for j in range(10)}
    print(real_images2labels)

    # Assert
    assert images2labels == real_images2labels
19,493
def Characteristics(aVector): """ Purpose: Compute certain characteristic of data in a vector Inputs: aVector an array of data Initialize: iMean mean iMed median iMin minimum iMax maximum iKurt kurtosis iSkew skewness iStd standard deviation Return value: aResults an array with calculated characteristics """ iMin = aVector.min().values[0] iMax = aVector.max().values[0] iMean = np.mean(aVector).values[0] iMed = np.median(aVector) iKurt = st.kurtosis(aVector)[0] iSkew = st.skew(aVector)[0] iStd = aVector.std().values[0] aResults = np.array([iMin,iMax, iMean,iMed,iKurt,iSkew,iStd]) return aResults
19,494
def loglik(alpha, gamma_list, M, k):
    r"""
    Calculate $L_{[\alpha]}$ defined in A.4.2, i.e.
    $M(\ln\Gamma(\sum_j \alpha_j) - \sum_j \ln\Gamma(\alpha_j))
     + \sum_d \sum_j (\alpha_j - 1)(\Psi(\gamma_{dj}) - \Psi(\sum_l \gamma_{dl}))$
    """
    psi_sum_gamma = np.array(list(map(lambda x: psi(np.sum(x)), gamma_list))).reshape((M, 1))  # M*1
    psi_gamma = psi(np.array(gamma_list))  # M*k matrix
    L = M * (gammaln(np.sum(alpha)) - np.sum(gammaln(alpha))) \
        + np.sum((psi_gamma - psi_sum_gamma) * (alpha.reshape((1, k)) - 1))
    return L
19,495
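import numpy as np

# Worked call of loglik for M=2 pseudo-documents and k=3 topics; alpha and the
# variational gamma values are made-up numbers, and psi/gammaln are assumed to
# be imported from scipy.special at module level, as the function requires.
alpha = np.array([0.5, 0.5, 0.5])
gamma_list = [np.array([1.0, 2.0, 3.0]),
              np.array([2.0, 2.0, 2.0])]
print(loglik(alpha, gamma_list, M=2, k=3))  # a single float (the bound term)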
def handle_delete(sender_content_type_pk, instance_pk): """Async task to delete a model from the index. :param instance_pk: :param sender_content_type_pk: """ from gum.indexer import indexer, NotRegistered try: sender_content_type = ContentType.objects.get(pk=sender_content_type_pk) sender = sender_content_type.model_class() instance = sender.objects.get(pk=instance_pk) except ObjectDoesNotExist: logger.warning("Object ({}, {}) not found".format(sender_content_type_pk, instance_pk)) return None try: mapping_type = indexer.get_mapping_type(sender) mapping_type.delete_document(instance) except NotRegistered: return None return sender_content_type_pk, instance_pk
19,496
def recode_media(src, dst, start=0, length=0, width=0, height=0, fps=0, bitrate_v=0, bitrate_a=0, no_video=False, no_audio=False, copy_v=False, copy_a=False, overwrite=False, verbose=False): """Recodes media. Args: src : Path to source media file. dst : Path to target media file. start : Start position in seconds. Ignored if <= 0 length : Length of recoded media in seconds. Ignored if <= 0. width : Frame width of recoded video. Ignored if <= 0. height : Frame height of recoded video. Ignored if <= 0. fps : Frame rate of recoded video. Ignored if <= 0. bitrate_v: Bit rate of recoded video stream in KHz. Ignored if <= 0. bitrate_a: Bit rate of recoded audio stream in KHz. Ignored if <= 0. no_video : Drop video stream. Defaults to False. no_audio : Drop audio stream. Defaults to False. copy_v : Copy video stream directly from source. Defaults to False. copy_a : Copy audio stream directly from source. Defaults to False. overwrite: Overwrite target media file. Defaults to False. verbose : Execute in verbose mode. Defaults to False. """ options = ['ffmpeg'] options.append('-i {}'.format(src)) if start >= 0: options.append('-ss {}'.format(start)) if length > 0: options.append('-t {}'.format(length)) if width > 0 and height > 0: options.append('-s {}x{}'.format(width, height)) if fps > 0: options.append('-r {}'.format(fps)) if bitrate_v > 0: options.append('-b:v {}k'.format(bitrate_v)) if bitrate_a > 0: options.append('-b:a {}k'.format(bitrate_a)) if copy_v: options.append('-c:v copy') if copy_a: options.append('-c:a copy') if no_video: options.append('-vn') if no_audio: options.append('-an') if overwrite: options.append('-y') else: options.append('-n') if verbose: options.append('-v info') else: options.append('-v quiet') options.append(dst) command = ' '.join(options) subprocess.call(command, shell=True) return
19,497
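# Hedged example for recode_media: trim the first 10 seconds of an assumed
# input.mp4 to 640x360 at 30 fps (requires the ffmpeg binary on PATH).
# The command it builds and runs looks like:
#   ffmpeg -i input.mp4 -ss 0 -t 10 -s 640x360 -r 30 -n -v quiet output.mp4
recode_media("input.mp4", "output.mp4", start=0, length=10,
             width=640, height=360, fps=30)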
def get_file_hashes(directory_path): """Returns hashes of all files in directory tree, excluding files with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built. Args: directory_path: str. Root directory of the tree. Returns: dict(str, str). Dictionary with keys specifying file paths and values specifying file hashes. """ file_hashes = dict() python_utils.PRINT( 'Computing hashes for files in %s' % os.path.join(os.getcwd(), directory_path)) for root, _, filenames in os.walk( os.path.join(os.getcwd(), directory_path)): for filename in filenames: filepath = os.path.join(root, filename) if should_file_be_built(filepath) and not any( filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE): # The path in hashes.json file is in posix style, # see the comment above HASHES_JSON_FILENAME for details. complete_filepath = common.convert_to_posixpath( os.path.join(root, filename)) relative_filepath = common.convert_to_posixpath(os.path.relpath( complete_filepath, directory_path)) file_hashes[relative_filepath] = generate_md5_hash( complete_filepath) return file_hashes
19,498
def fixedcase_word(w, truelist=None): """Returns True if w should be fixed-case, None if unsure.""" if truelist is not None and w in truelist: return True if any(c.isupper() for c in w[1:]): # tokenized word with noninitial uppercase return True if len(w) == 1 and w.isupper() and w not in {'A', 'K', 'N'}: # single uppercase letter return True if len(w) == 2 and w[1] == '.' and w[0].isupper(): # initial with period return True
19,499
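# A few illustrative calls of fixedcase_word; per its docstring it returns
# True or None (unsure), never False.
print(fixedcase_word("NLP"))        # True  (non-initial uppercase)
print(fixedcase_word("B"))          # True  (single uppercase letter, not A/K/N)
print(fixedcase_word("A"))          # None  (explicitly excluded single letter)
print(fixedcase_word("J."))         # True  (initial with period)
print(fixedcase_word("parsing"))    # None  (unsure)
print(fixedcase_word("the", truelist={"the"}))  # True (whitelisted)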