content: string, lengths 22 to 815k
id: int64, values 0 to 4.91M
def relative_url_prefixer(filePath: str, newText: str) -> None:
    """Prepends some text to a relative URL."""
    replace(
        filePath,
        r'(?<!(\/|<|\w|:))((\/)(\w{0,}))(?<!(\/))',
        f'{newText}\\2',
        False
    )
5,335,500
def gen_geo(num_nodes, theta, lambd, source, target, cutoff, seed=None):
    """Generates a random graph with threshold theta consisting of 'num_nodes'
    and paths with maximum length 'cutoff' between 'source' and 'target'.

    Parameters
    ----------
    num_nodes : int
        Number of nodes.
    theta : float
        Threshold of graph.
    lambd : float
        Weights of graph are generated randomly from exp(lambd) distribution.
    source : int
        Origin of path. Must be in range(0, num_nodes).
    target : int
        Destination of path. Must be in range(0, num_nodes).
    cutoff : int
        Maximum path length.
    seed : int
        Set random seed if not None.

    Returns
    -------
    object of type graph
        Generated graph.
    """
    file_name = './saved_items/graph_N' + str(num_nodes) + '_cutoff' + str(cutoff)
    if seed is not None:
        np.random.seed(seed)
        rand.seed(seed)
        torch.manual_seed(seed)
    weights = {node: rand.expovariate(lambd) for node in range(num_nodes)}
    graph = geo_thresh(num_nodes, theta, weight=weights)
    for (ni, nj) in graph.edges():
        graph.edges[ni, nj]['weight'] = weights[ni] + weights[nj]
    plt.figure(figsize=(10, 5))
    nx.draw(graph, with_labels=True, font_weight='bold')
    plt.savefig('./figures/graph_N' + str(num_nodes) + '.png', dpi=500)
    plt.show()
    save_obj(graph, file_name)
    paths = nx.all_simple_paths(graph, source=source, target=target, cutoff=cutoff)
    paths = list(paths)
    save_obj(paths, file_name + '_paths')
    print('Paths length: ', len(paths))
    return graph
5,335,501
def FilterExceptions(image_name, errors):
    """Filter out the Application Verifier errors that have exceptions."""
    exceptions = _EXCEPTIONS.get(image_name, [])

    def _HasNoException(error):
        # Iterate over all the exceptions.
        for (severity, layer, stopcode, regexp) in exceptions:
            # And see if they match, first by type.
            if (error.severity == severity and
                    error.layer == layer and
                    error.stopcode == stopcode):
                # And then by regexp match to the trace symbols.
                for trace in error.trace:
                    if trace.symbol and re.match(regexp, trace.symbol):
                        return False
        return True

    # Materialize the filter so the result can be measured and appended to.
    filtered_errors = list(filter(_HasNoException, errors))
    error_count = len(filtered_errors)
    filtered_count = len(errors) - error_count
    if error_count:
        suffix = '' if error_count == 1 else 's'
        filtered_errors.append(
            'Error: Encountered %d AppVerifier exception%s for %s.' %
            (error_count, suffix, image_name))
    if filtered_count:
        suffix1 = '' if filtered_count == 1 else 's'
        suffix2 = '' if len(exceptions) == 1 else 's'
        filtered_errors.append(
            'Warning: Filtered %d AppVerifier exception%s for %s using %d rule%s.' %
            (filtered_count, suffix1, image_name, len(exceptions), suffix2))
    return (error_count, filtered_errors)
5,335,502
def dim(text: str, reset_style: Optional[bool] = True) -> str:
    """Return text in dim"""
    return set_mode("dim", False) + text + (reset() if reset_style else "")
5,335,503
def write_methods(path, thing, method):
    """
    write_methods has all of the necessary commands to write objects in a
    number of formats.
    """
    global SUPPORTED_SAVE_METHODS
    if method.lower() in SUPPORTED_SAVE_METHODS:
        if method.lower() == 'pickle':
            if not path.endswith(".spec"):
                path = ".".join([path, "spec"])
            with open(path, 'wb') as f:
                pickle.dump(thing, f)
    else:
        raise TypeError("{} method is not currently supported".format(method.lower()))
5,335,504
def test_validate_user():
    """Test the validate instrument user function."""
    u = InstrUser()
    res, msg = validate_user(u)
    assert not res and 'id' in msg
    u.id = 'test'
    res, msg = validate_user(u)
    assert not res and 'policy' in msg
    u.policy = 'unreleasable'
    res, msg = validate_user(u)
    assert res
    # Other positive cases are tested in test_plugin_lifecycle.
5,335,505
def want_color_output():
    """Return ``True`` if colored output is possible/requested and not running in GUI.

    Colored output can be explicitly requested by setting
    :envvar:`COCOTB_ANSI_OUTPUT` to ``1``.
    """
    want_color = sys.stdout.isatty()  # default to color for TTYs
    if os.getenv("NO_COLOR") is not None:
        want_color = False
    if os.getenv("COCOTB_ANSI_OUTPUT", default="0") == "1":
        want_color = True
    if os.getenv("GUI", default="0") == "1":
        want_color = False
    return want_color
5,335,506
def jaccard(list1, list2):
    """calculates Jaccard distance from two networks\n
    | Arguments:
    | :-
    | list1 (list or networkx graph): list containing objects to compare
    | list2 (list or networkx graph): list containing objects to compare\n
    | Returns:
    | :-
    | Returns Jaccard distance between list1 and list2
    """
    intersection = len(list(set(list1).intersection(list2)))
    union = (len(list1) + len(list2)) - intersection
    return 1 - float(intersection) / union
5,335,507
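A worked example for the jaccard entry above: two shared items out of four unique items gives a similarity of 0.5, hence a distance of 0.5. The input lists are illustrative only.

print(jaccard(['a', 'b', 'c'], ['b', 'c', 'd']))  # 0.5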
def default_argument_preprocessor(args):
    """Return unmodified args and an empty dict for extras"""
    extras = {}
    return args, extras
5,335,508
def expand_site_packages(site_packages: List[str]) -> Tuple[List[str], List[str]]:
    """Expands .pth imports in site-packages directories"""
    egg_dirs: List[str] = []
    for dir in site_packages:
        if not os.path.isdir(dir):
            continue
        pth_filenames = sorted(name for name in os.listdir(dir) if name.endswith(".pth"))
        for pth_filename in pth_filenames:
            egg_dirs.extend(_parse_pth_file(dir, pth_filename))
    return egg_dirs, site_packages
5,335,509
def early_anomaly(case: pd.DataFrame) -> pd.DataFrame: """ A sequence of 2 or fewer events executed too early, which is then skipped later in the case Parameters ----------------------- case: pd.DataFrame, Case to apply anomaly Returns ----------------------- Case with the applied early anomaly """ case = case.reset_index(drop=True) timestamps = case['timestamp'] sequence_size = random.choice([1, 2]) if sequence_size == 1: original_position = random.choice(range(1, len(case))) activities = case.iloc[[original_position]] case = case.drop(original_position) if original_position == 1: anomaly_position = 0 else: anomaly_position = random.choice(range(0, original_position-1)) description = activities['activity'].values[0] + ' was originally executed at position ' + str(original_position+1) + ' and changed to position ' + str(anomaly_position+1) else: original_position = random.choice(range(1, len(case)-1)) activities = case.iloc[original_position:original_position+2] case = case.drop([original_position, original_position+1]) if original_position == 1: anomaly_position = 0 else: anomaly_position = random.choice(range(0, original_position-1)) description = activities['activity'].values[0] + ' and ' + activities['activity'].values[1] + ' were originally executed at positions ' + str(original_position+1) + ' and ' + str(original_position+2) + ' and changed to positions ' + str(anomaly_position+1) + ' and ' + str(anomaly_position+2) case = pd.concat([case.iloc[:anomaly_position], activities, case.iloc[anomaly_position:]], sort=False).reset_index(drop=True) case['timestamp'] = timestamps case['label'] = 'early' case['description'] = description return case
5,335,510
def get_package_version():
    """
    :returns: package version without importing it.
    """
    base = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(base, "gotalk/__init__.py")) as initf:
        for line in initf:
            m = version.match(line.strip())
            if not m:
                continue
            return ".".join(m.groups()[0].split(", "))
5,335,511
def parse_cmd(script, *args):
    """Returns a one line version of a bat script"""
    if args:
        raise Exception('Args for cmd not implemented')
    # http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/cmd.mspx?mfr=true
    oneline_cmd = '&&'.join(script.split('\n'))
    oneline_cmd = 'cmd.exe /c "%s"' % oneline_cmd
    return oneline_cmd
5,335,512
def test_first_subfield():
    """Test sub-field accessor ^* in field with several subfields"""
    record = MasterRecord()
    record.update(test_data)
    first_subfield = format("v26^*", record)
    assert first_subfield == 'Paris', 'Failed to extract first subfield'
5,335,513
def expected_inheritance(variant_obj):
    """Gather information from common gene information."""
    manual_models = set()
    for gene in variant_obj.get('genes', []):
        manual_models.update(gene.get('manual_inheritance', []))
    return list(manual_models)
5,335,514
def player_stats_game(data) -> defaultdict:
    """Individual Game stat parser.

    Directs parsing to the proper player parser (goalie or skater).
    Receives the player_id branch. Url.GAME

    Args:
        data (dict): dict representing JSON object.

    Returns:
        defaultdict: Parsed Data.
    """
    # if the stats dict is empty it means they're scratched
    if not data['stats']:
        return None
    if data['position']['abbreviation'] == 'G':
        return goalie_stats_game(data['stats']['goalieStats'])
    else:
        return skater_stats_game(data['stats']['skaterStats'])
5,335,515
def about(request):
    """View function for about page"""
    return render(
        request,
        'about.html',
    )
5,335,516
def getTimeDeltaFromDbStr(timeStr: str) -> dt.timedelta:
    """Convert db time string in reporting software to time delta object

    Args:
        timeStr (str): The string that represents time, like 14:25 or 15:23:45

    Returns:
        dt.timedelta: time delta that has hours and minutes components
    """
    if pd.isnull(timeStr):
        return dt.timedelta(seconds=0)
    elif not (':' in timeStr):
        print('could not parse time string {0}'.format(timeStr))
        return dt.timedelta(seconds=0)
    else:
        try:
            timeSegs = timeStr.split(':')
            timeSegs = timeSegs[0:2]
            return dt.timedelta(hours=int(timeSegs[0]), minutes=int(timeSegs[1]))
        except Exception:
            print('could not parse time string {0}'.format(timeStr))
            return dt.timedelta(seconds=0)
5,335,517
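A brief usage sketch of the time-string parser above; pandas and datetime are assumed to be imported as pd and dt, matching the entry, and the inputs are illustrative.

import datetime as dt

assert getTimeDeltaFromDbStr("14:25") == dt.timedelta(hours=14, minutes=25)
assert getTimeDeltaFromDbStr("15:23:45") == dt.timedelta(hours=15, minutes=23)  # seconds are dropped
assert getTimeDeltaFromDbStr(float("nan")) == dt.timedelta(seconds=0)           # null-like input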
def colorBool(v) -> str:
    """Convert True to 'True' in green and False to 'False' in red"""
    if v:
        return colored(str(v), "green")
    else:
        return colored(str(v), "red")
5,335,518
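Usage sketch for colorBool: it relies on a colored(text, color) helper such as termcolor.colored; that choice of provider is an assumption, not stated in the entry above.

from termcolor import colored  # assumed provider of colored()

print(colorBool(True))   # prints 'True' wrapped in green ANSI escape codes
print(colorBool(False))  # prints 'False' wrapped in red ANSI escape codes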
def debug_fix():
    """
    I have trouble hitting breakpoints in Flask-RESTful class methods.
    This method helps me.
    """
    app.config['DEBUG'] = False
    app.config['PROPAGATE_EXCEPTIONS'] = True
    app.run(debug=False)
5,335,519
def bootstrap( tokens: List[str], measure: str = "type_token_ratio", window_size: int = 3, ci: bool = False, raw=False, ): """calculate bootstrap for lex diversity measures as explained in Evert et al. 2017. if measure='type_token_ratio' it calculates standardized type-token ratio :param ci: additionally calculate and return the confidence interval returns a tuple :param raw: return the raw results """ results = [] measures = dict( type_token_ratio=type_token_ratio, guiraud_r=guiraud_r, herdan_c=herdan_c, dugast_k=dugast_k, maas_a2=maas_a2, dugast_u=dugast_u, tuldava_ln=tuldava_ln, brunet_w=brunet_w, cttr=cttr, summer_s=summer_s, sichel_s=sichel_s, michea_m=michea_m, honore_h=honore_h, entropy=entropy, yule_k=yule_k, simpson_d=simpson_d, herdan_vm=herdan_vm, hdd=hdd, orlov_z=orlov_z, mtld=mtld, ) # tl_vs: txt_len, vocab_size # vs_fs: vocab_size, freq_spectrum # tl_vs_fs: txt_len, vocab_size, freq_spectrum # tl_fs: txt_len, freq_spectrum # t: tokens classes = dict( tl_vs=( "type_token_ratio", "guiraud_r", "herdan_c", "dugast_k", "maas_a2", "dugast_u", "tuldava_ln", "brunet_w", "cttr", "summer_s", ), vs_fs=("sichel_s", "michea_m"), tl_vs_fs=("honore_h", "herdan_vm", "orlov_z"), tl_fs=("entropy", "yule_k", "simpson_d", "hdd"), t=("mtld",), ) measure_to_class = {m: c for c, v in classes.items() for m in v} func = measures[measure] cls = measure_to_class[measure] for i in range(int(len(tokens) / window_size)): chunk = tokens[i * window_size : (i * window_size) + window_size] txt_len, vocab_size, freq_spectrum = preprocess(chunk, fs=True) if cls == "tl_vs": result = func(txt_len, vocab_size) elif cls == "vs_fs": result = func(vocab_size, freq_spectrum) elif cls == "tl_vs_fs": result = func(txt_len, vocab_size, freq_spectrum) elif cls == "tl_fs": result = func(txt_len, freq_spectrum) elif cls == "t": result = func(chunk) results.append(result) if raw: return results if ci: return (np.mean(results), _sttr_ci(results)) return np.mean(results)
5,335,520
def by_label(move_data, value, label_name, filter_out=False, inplace=False):
    """
    Filters trajectory points according to a specified value and column label.

    Parameters
    ----------
    move_data : dataframe
       The input trajectory data
    value : the type of the feature values used to filter the trajectories
        Specifies the value used to filter the trajectory points
    label_name : String
        Specifies the label of the column used in the filtering
    filter_out : boolean, optional(false by default)
        If set to True, it will return trajectory points with a feature value
        different from the value specified in the parameters, otherwise it
        returns the trajectory points with the same feature value as the one
        specified in the parameters.
    inplace : boolean, optional(false by default)
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned.

    Returns
    -------
    move_data : dataframe or None
        Returns dataframe with trajectory points filtered by label.
    """
    try:
        filter_ = move_data[label_name] == value
        if filter_out:
            filter_ = ~filter_
        return move_data.drop(index=move_data[~filter_].index, inplace=inplace)
    except Exception as e:
        raise e
5,335,521
def make_pyrimidine(residue, height = 0.4, scale = 1.2): """Creates vertices and normals for pyrimidines:Thymine Uracil Cytosine""" atoms = residue.atoms names = [name.split("@")[0] for name in atoms.name] idx=names.index('N1'); N1 = numpy.array(atoms[idx].coords) idx=names.index('C2'); C2 = numpy.array(atoms[idx].coords) idx=names.index('N3'); N3 = numpy.array(atoms[idx].coords) idx=names.index('C4'); C4 = numpy.array(atoms[idx].coords) idx=names.index('C5'); C5 = numpy.array(atoms[idx].coords) idx=names.index('C6'); C6 = numpy.array(atoms[idx].coords) N1_C2 = C2-N1 N1_C6 = C6-N1 C2_C6 = height*norm(C6-C2) normal = height*numpy.array(crossProduct(N1_C2, N1_C6, normal=True)) center = (N1+C2+N3+C4+C5+C6)/6.0 vertices = numpy.zeros((14,3), float) vertices[0] = scale*(C2 - normal - center) + center vertices[1] = scale*(N3 - normal - center) + center vertices[2] = scale*(C4 - normal - center) + center vertices[3] = scale*(C5 - normal - center) + center vertices[4] = scale*(C6 - normal - center) + center vertices[5] = scale*(C2 + normal - center) + center vertices[6] = scale*(N3 + normal - center) + center vertices[7] = scale*(C4 + normal - center) + center vertices[8] = scale*(C5 + normal - center) + center vertices[9] = scale*(C6 + normal - center) + center vertices[10] = scale*(N1 - C2_C6 - normal - center) + center vertices[11] = scale*(N1 - C2_C6 + normal - center) + center vertices[12] = scale*(N1 + C2_C6 + normal - center) + center vertices[13] = scale*(N1 + C2_C6 - normal - center) + center faces = numpy.array([[13,4,3,2,1,0,10], [11,5,6,7,8,9,12], [0,5,11,10,10,10,10], [1,6,5,0,0,0,0,], [2,7,6,1,1,1,1], [3,8,7,2,2,2,2], [4,9,8,3,3,3,3], [13,12,9,4,4,4,4]]) return vertices, faces
5,335,522
def get_props(filepath, m_co2=22, m_poly=2700/123, N_A=6.022E23, sigma_co2=2.79E-8, sort=False): """ Computes important physical properties from the dft.input file, such as density of CO2 in the CO2-rich phase, solubility of CO2 in the polyol-rich phase, and specific volume of the polyol-rich phase. The dft.input file is structured as: p \t gsrho1b \t gsrho1a \t 10^-gsrho2b \t gsrho2a. PARAMETERS ---------- filepath : string Filepath to file containing densities and pressures (usually dft.input) m_co2 : float mass of one bead of CO2 in PC-SAFT model [amu/bead] (= Mw / N) m_poly : float mass of one bead of polyol in PC-SAFT model [amu/bead] (= Mw / N) N_A : float Avogadro's number (molecules per mol) sigma_co2 : float sigma parameter for co2 [cm] sort : bool If True, sorts solubility data in terms of increasing pressure RETURNS ------- p : list of floats pressures corresponding to the solubilities [MPa] props : tuple of lists of floats Tuple of physical properties calculated (lists of floats): rho_co2 : density of CO2 in CO2-rich phase [g/mL] solub : solubility of CO2 in polyol-rich phase [w/w] spec_vol : specific volume of polyol-rich phase [mL/g] """ # loads data data = np.genfromtxt(filepath, delimiter='\t') # extracts pressure [MPa] from first column p = data[:,0] # extracts the density of CO2 in the co2-rich phase [beads/sigma^3] rho_co2_v = data[:,1] # extracts the density of CO2 in the polyol-rich phase [beads/sigma^3] rho_co2_l = data[:,2] # extracts the density of polyol in the polyol-rich phase [beads/sigma^3] rho_poly_l = data[:,4] # conversions from beads/sigma^3 to g/mL conv_co2 = m_co2/N_A/sigma_co2**3 conv_poly = m_poly/N_A/sigma_co2**3 # computes density of CO2 in the CO2-rich phase [g/mL] rho_co2 = rho_co2_v*conv_co2 # computes solubility of CO2 in the polyol-rich phase [w/w] solub = rho_co2_l*conv_co2 / (rho_co2_l*conv_co2 + rho_poly_l*conv_poly) # computes specific volume of the polyol-rich phase [mL/g] spec_vol = 1 / (rho_co2_l*conv_co2 + rho_poly_l*conv_poly) # sorts data if requested if sort: inds_sort = np.argsort(p) p = p[inds_sort] rho_co2 = rho_co2[inds_sort] solub = solub[inds_sort] spec_vol = spec_vol[inds_sort] props = (rho_co2, solub, spec_vol) return p, props
5,335,523
def get_genotype(chrom, rsid):
    """Scan the per-chromosome dosage file in chunks and return the chunk
    containing the given rsid."""
    geno_path = ('/home/hsuj/lustre/geno/'
                 'CCF_1000G_Aug2013_Chr{0}.dose.double.ATB.RNASeq_MEQTL.txt')
    geno_gen = pd.read_csv(geno_path.format(str(chrom)), sep=" ",
                           chunksize=10000)
    for i in geno_gen:
        if rsid in i.index:
            break
        else:
            pass
    return(i)
5,335,524
def task_dosomething(storage):
    """Task that gets launched to handle something in the background until it
    is completed and then terminates. Note that this task doesn't return until
    it is finished, so it won't be listening for Threadify pause or kill
    requests.
    """
    # An important task that we want to run in the background.
    for i in range(10):
        print(i, end="")
        time.sleep(1)
    return False
5,335,525
def highway(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):
    """Highway Network (cf. http://arxiv.org/abs/1505.00387).

    t = sigmoid(Wy + b)
    z = t * g(Wy + b) + (1 - t) * y
    where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
    """
    with tf.variable_scope(scope):
        for idx in xrange(num_layers):
            g = f(linear(input_, size, scope='highway_lin_%d' % idx))
            t = tf.sigmoid(linear(input_, size, scope='highway_gate_%d' % idx) + bias)
            output = t * g + (1. - t) * input_
            input_ = output
    return output
5,335,526
def convert_to_entry(func):
    """Wrapper function for converting dicts of entries to HarEntry objects"""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        # Changed to list because tuple does not support item assignment
        changed_args = list(args)
        # Convert the dict (first argument) to HarEntry
        if isinstance(changed_args[0], dict):
            changed_args[0] = HarEntry(changed_args[0])
        return func(*tuple(changed_args), **kwargs)
    return inner
5,335,527
def transform(record: dict, key_ref: dict, country_ref: pd.DataFrame, who_coding: pd.DataFrame, no_update_phrase: pd.DataFrame): """ Apply transformations to OXCGRT records. Parameters ---------- record : dict Input record. key_ref : dict Reference for key mapping. country_ref : pd.DataFrame Reference for WHO accepted country names. who_coding : pd.DataFrame Reference for WHO coding. no_update_phrase : pd.DataFrame Reference for "no update" phrases. Returns ------- dict Record with transformations applied. """ # 1. generator function of new record with correct keys (shared) new_record = utils.generate_blank_record() # 2. replace data in new record with data from old record using column # reference (shared) record = utils.apply_key_map(new_record, record, key_ref) # 3. Assign unique ID (shared) # record = utils.assign_id(record) if record["prov_measure"] == "H8_Protection of elderly people": return None # 4. Handle date formatting record = utils.parse_date(record) # 8. replace sensitive country names record = utils.replace_sensitive_regions(record) # shift areas that should be countries. record = utils.replace_country(record, 'United States', 'Virgin Islands') # 7. Make manual country name changes record = utils.replace_conditional(record, 'country_territory_area', 'Virgin Islands', 'US Virgin Islands') record = utils.replace_conditional(record, 'country_territory_area', 'United States Virgin Islands', 'US Virgin Islands') record = utils.replace_conditional(record, 'country_territory_area', 'Eswatini', 'Swaziland') record = utils.replace_conditional(record, 'country_territory_area', 'South Korea', 'Korea') # 9. assign ISO code record['iso'] = countrycode(codes=record['country_territory_area'], origin='country_name', target='iso3c') # 10. check missing ISO check.check_missing_iso(record) # Remove records where there is no data in prov_subcategory if record['prov_subcategory'] == 0: return(None) # Removes information in flag variables for now record['prov_subcategory'] = int(record['prov_subcategory']) # 11. Join WHO accepted country names (shared) record = utils.assign_who_country_name(record, country_ref) record = financial_measures(record) # 12. Join who coding from lookup (shared) record = utils.assign_who_coding(record, who_coding) # 13. check for missing WHO codes (shared) check.check_missing_who_code(record) # 16. Add WHO PHSM admin_level values record = utils.add_admin_level(record) record = utils.remove_tags(record) # 17. Remove update records record = assign_comment_links(record) # Filter out records with "no update" phrases record = label_update_phrase(record, list(no_update_phrase['phrase'])) return(record)
5,335,528
def validate_client_parameters(cmd, namespace): """Retrieves Batch connection parameters from environment variables""" from azure.mgmt.batch import BatchManagementClient from azure.cli.core.commands.client_factory import get_mgmt_service_client # simply try to retrieve the remaining variables from environment variables if not namespace.account_name: namespace.account_name = cmd.cli_ctx.config.get('batch', 'account', None) if not namespace.account_key: namespace.account_key = cmd.cli_ctx.config.get('batch', 'access_key', None) if not namespace.account_endpoint: namespace.account_endpoint = cmd.cli_ctx.config.get('batch', 'endpoint', None) # Simple validation for account_endpoint if not (namespace.account_endpoint.startswith('https://') or namespace.account_endpoint.startswith('http://')): namespace.account_endpoint = 'https://' + namespace.account_endpoint namespace.account_endpoint = namespace.account_endpoint.rstrip('/') # if account name is specified but no key, attempt to query if we use shared key auth if namespace.account_name and namespace.account_endpoint and not namespace.account_key: if cmd.cli_ctx.config.get('batch', 'auth_mode', 'shared_key') == 'shared_key': endpoint = urlsplit(namespace.account_endpoint) host = endpoint.netloc client = get_mgmt_service_client(cmd.cli_ctx, BatchManagementClient) acc = next((x for x in client.batch_account.list() if x.name == namespace.account_name and x.account_endpoint == host), None) if acc: from msrestazure.tools import parse_resource_id rg = parse_resource_id(acc.id)['resource_group'] namespace.account_key = \ client.batch_account.get_keys(rg, # pylint: disable=no-member namespace.account_name).primary else: raise ValueError("Batch account '{}' not found.".format(namespace.account_name)) else: if not namespace.account_name: raise ValueError("Specify batch account in command line or environment variable.") if not namespace.account_endpoint: raise ValueError("Specify batch endpoint in command line or environment variable.") if cmd.cli_ctx.config.get('batch', 'auth_mode', 'shared_key') == 'aad': namespace.account_key = None
5,335,529
def hc_genes( input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples", clustering_type: "single or consensus -- Only single is suported at the moment", distance_metric: "the function to be used when comparing the distance/similarity of the rows in the " "input_gene_expression dataset", file_basename: "the name to use when naming output files" = 'HC_out', clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None): """ Perform hierarchical clustering to group genes with similar expression profile. :param input_gene_expression: str; gene expression data filename (.gct file) where rows are genes and columns are samples :param clustering_type: str; single or consensus :param distance_metric: str; the function to be used when comparing the distance/similarity of the rows in the input_gene_expression dataset :param file_basename: str; the name to use when naming output files :param clusters_to_highlight: int; how many clusters to highlight in the dendrogram :return: object; Sklearn's AgglomerativeClustering fitted model """ print("Currenty clustering_type is being ignored, only 'single' is supported.") pwd = '.' gct_name = input_gene_expression col_distance_metric = 'No_column_clustering' output_distances = False row_distance_metric = distance_metric clustering_method = 'average' output_base_name = file_basename row_normalization = False col_normalization = False row_centering = 'Mean' col_centering = 'Mean' custom_plot = 'Genes' show = True # print("This are the parameters to be used (for debugging purposes)") # print(""" # pwd = '.' # gct_name = {gct_name} # col_distance_metric = {col_distance_metric} # output_distances = {output_distances} # row_distance_metric = {row_distance_metric} # clustering_method = {clustering_method} # output_base_name = {output_base_name} # row_normalization = {row_normalization} # col_normalization = {col_normalization} # row_centering = {row_centering} # col_centering = {col_centering} # """.format( # gct_name=gct_name, col_distance_metric=col_distance_metric, # output_distances=str(output_distances), # row_distance_metric=row_distance_metric, clustering_method=clustering_method, # output_base_name=output_base_name, # row_normalization=str(row_normalization), col_normalization=str(col_normalization), # row_centering=row_centering, col_centering=col_centering # ) # ) print("Now we will start performing hierarchical clustering, this may take a little while.") col_model, row_model = HierarchicalClustering(pwd, gct_name, col_distance_metric, row_distance_metric, clustering_method, output_base_name, row_normalization, col_normalization, row_centering, col_centering, output_distances, custom_plot, clusters_to_highlight, show) print("Done with Hierarchical Clustering!") return row_model
5,335,530
def detect_text(path):
    """Detects text in the file."""
    from google.cloud import vision
    import io
    client = vision.ImageAnnotatorClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision.Image(content=content)

    response = client.text_detection(image=image)
    texts = response.text_annotations

    for text in texts:
        return text.description

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
5,335,531
def file_util_is_ext(path, ext):
    """Check whether the file has the given extension; ext does not include the dot."""
    if file_util_get_ext(path) == ext:
        return True
    else:
        return False
5,335,532
def load_tract(repo, tract, patches=None, **kwargs):
    """Merge catalogs from forced-photometry coadds across available filters.

    Parameters
    --
    tract: int
        Tract of sky region to load
    repo: str
        File location of Butler repository+rerun to load.
    patches: list of str
        List of patches.  If not specified, will default to '0,0'--'7,7'.

    Returns
    --
    Pandas DataFrame of merged catalog
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    merged_tract_cat = pd.DataFrame()
    for patch in patches:
        this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        # DataFrame.append returns a new frame, so keep the result
        # (the original discarded it and always returned an empty frame).
        merged_tract_cat = merged_tract_cat.append(this_patch_merged_cat)

    return merged_tract_cat
5,335,533
def calculate_delta(arg1, arg2):
    """
    Calculates and returns a `datetime.timedelta` object representing the
    difference between arg1 and arg2.

    Arguments must be either both `datetime.date`, both `datetime.time`, or
    both `datetime.datetime`. The difference is absolute, so the order of the
    arguments doesn't matter.
    """
    if arg1 > arg2:
        arg1, arg2 = arg2, arg1

    # Check datetime before date: datetime is a subclass of date, so the date
    # branch would otherwise swallow datetime inputs and drop the time part.
    if isinstance(arg1, datetime.datetime) and isinstance(arg2, datetime.datetime):
        return arg2 - arg1

    if isinstance(arg1, datetime.date) and isinstance(arg2, datetime.date):
        return (
            datetime.datetime(arg2.year, arg2.month, arg2.day)
            - datetime.datetime(arg1.year, arg1.month, arg1.day)
        )

    if isinstance(arg1, datetime.time) and isinstance(arg2, datetime.time):
        return (
            datetime.datetime(1, 1, 1, arg2.hour, arg2.minute, arg2.second)
            - datetime.datetime(1, 1, 1, arg1.hour, arg1.minute, arg1.second)
        )

    raise TypeError(
        f'Cannot calculate delta between values of types '
        f'{type(arg1)} and {type(arg2)} because they are not equivalent'
    )
5,335,534
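A quick check of the delta helper above using only the standard library; the expected values follow directly from the definition and the inputs are illustrative.

import datetime

d1 = datetime.date(2021, 1, 1)
d2 = datetime.date(2021, 1, 3)
assert calculate_delta(d2, d1) == datetime.timedelta(days=2)  # order doesn't matter

t1 = datetime.time(10, 0, 0)
t2 = datetime.time(10, 30, 15)
assert calculate_delta(t1, t2) == datetime.timedelta(minutes=30, seconds=15)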
def main():
    """Execute action from playbook"""
    command = NetAppONTAPWFC()
    command.apply()
5,335,535
def create_model_config(model_dir: str, config_path: str = None):
    """Creates a new configuration file in the model directory and returns the config."""
    # read the config file
    config_content = file_io.read_file_to_string(root_dir(config_path))
    # save the config file to the model directory
    write_model_config(model_dir, config_content)
    # load config
    config = yaml.safe_load(config_content)
    return config
5,335,536
def get_deployment_physnet_mtu():
    """Retrieves global physical network MTU setting.

    Plugins should use this function to retrieve the MTU set by the operator
    that is equal to or less than the MTU of their nodes' physical interfaces.
    Note that it is the responsibility of the plugin to deduct the value of
    any encapsulation overhead required before advertising it to VMs.

    Note that this function depends on the global_physnet_mtu config option
    being registered in the global CONF.

    :returns: The global_physnet_mtu from the global CONF.
    """
    return cfg.CONF.global_physnet_mtu
5,335,537
def _path(path):
    """Helper to build an OWFS path from a list"""
    path = "/" + "/".join(str(x) for x in path)
    return path.encode("utf-8") + b"\0"
5,335,538
def current_milli_time():
    """Return the current time in milliseconds"""
    return int(time.time() * 1000)
5,335,539
def function_exists(function_name, *args, **kwargs):
    """Checks if a function exists in the catalog"""
    # TODO (dmeister): This creates an SQL injection, but it should not
    # be a problem for this purpose.
    function_exists_text_count = PSQL.run_sql_command(
        "SELECT 'function exists' FROM pg_proc WHERE proname='%s'" %
        (function_name),
        *args, **kwargs).count("function exists")
    return function_exists_text_count == 2
5,335,540
def _loo_jackknife( func: Callable[..., NDArray], nobs: int, args: Sequence[ArrayLike], kwargs: Dict[str, ArrayLike], extra_kwargs: Optional[Dict[str, ArrayLike]] = None, ) -> NDArray: """ Leave one out jackknife estimation Parameters ---------- func : callable Function that computes parameters. Called using func(*args, **kwargs) nobs : int Number of observation in the data args : list List of positional inputs (arrays, Series or DataFrames) kwargs : dict List of keyword inputs (arrays, Series or DataFrames) Returns ------- ndarray Array containing the jackknife results where row i corresponds to leaving observation i out of the sample """ results = [] for i in range(nobs): items = np.r_[0:i, i + 1 : nobs] args_copy = [] for arg in args: if isinstance(arg, (pd.Series, pd.DataFrame)): args_copy.append(arg.iloc[items]) else: args_copy.append(arg[items]) kwargs_copy = {} for k, v in kwargs.items(): if isinstance(v, (pd.Series, pd.DataFrame)): kwargs_copy[k] = v.iloc[items] else: kwargs_copy[k] = v[items] if extra_kwargs is not None: kwargs_copy.update(extra_kwargs) results.append(func(*args_copy, **kwargs_copy)) return np.array(results)
5,335,541
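A small illustrative call of the leave-one-out jackknife helper above, using the sample mean as the statistic; numpy is assumed to be imported as np (as in the entry itself) and the data values are made up.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
loo = _loo_jackknife(lambda data: data.mean(), nobs=5, args=[x], kwargs={})
print(loo)  # [3.5  3.25 3.   2.75 2.5 ] -- mean of each 4-element subsample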
def VentanaFourierPrincipal(ventana):
    """Window in which the user chooses how to apply heat to the plate.

    Function parameters:
    --------------------
    ventana: variable holding an already-open window, which is closed here.

    Function output:
    ----------------
    Graphical interface with which the user chooses a simulation method.
    """
    ventana.destroy()

    ventanaFourierPrincipal = Tk()
    ventanaFourierPrincipal.minsize(800, 600)
    ventanaFourierPrincipal.config(bg='White')

    etiquetaInfo = Label(ventanaFourierPrincipal,
                         text=('Cáculo de difusión de Calor por Series de Fourier'))
    etiquetaInfo.config(bg='white', font=('Verdana', 17))
    etiquetaInfo.place(relx=0.5, rely=0.25, anchor=CENTER)

    etiquetaCalor = Label(ventanaFourierPrincipal, text=('Cómo aplicar el calor: '))
    etiquetaCalor.config(bg='white', font=('Verdana', 12))
    etiquetaCalor.place(relx=0.15, rely=0.5, anchor=CENTER)

    botonUnPunto = Button(ventanaFourierPrincipal, text='Un punto',
                          command=lambda: VentanaFouierUnPunto(ventanaFourierPrincipal))
    botonUnPunto.place(relx=0.4, rely=0.6, anchor=CENTER)

    botonLineal = Button(ventanaFourierPrincipal, text='Linealmente',
                         command=lambda: VentanaFourierLineal(ventanaFourierPrincipal))
    botonLineal.place(relx=0.6, rely=0.6, anchor=CENTER)

    ventanaFourierPrincipal.title("Cálculo de la ecuación de difusión de calor por Fourier")
    ventanaFourierPrincipal.mainloop()
5,335,542
def _fastq_illumina_convert_fastq_solexa(in_handle, out_handle, alphabet=None):
    """Fast Illumina 1.3+ FASTQ to Solexa FASTQ conversion (PRIVATE).

    Avoids creating SeqRecord and Seq objects in order to speed up this
    conversion.
    """
    # Map unexpected chars to null
    from Bio.SeqIO.QualityIO import solexa_quality_from_phred
    mapping = "".join([chr(0) for ascii in range(0, 64)]
                      + [chr(64 + int(round(solexa_quality_from_phred(q))))
                         for q in range(0, 62 + 1)]
                      + [chr(0) for ascii in range(127, 256)])
    assert len(mapping) == 256
    return _fastq_generic(in_handle, out_handle, mapping)
5,335,543
def pull_branch(c: InvokeContext, repo: Repo, directory: str, branch_name: str) -> CommandResult:
    """
    Change to the repo directory, check out the given branch and pull it.

    :argument c: InvokeContext
    :argument repo: Repo the repo to pull
    :argument directory: str the directory to change to
    :argument branch_name: str the branch to pull
    """
    project_path = _generate_path(directory, repo.folder_name)
    cmd = f"cd {project_path} && git checkout {branch_name} && git pull"
    return _run_command(c, cmd)
5,335,544
def trace_app(app, tracer, service="aiohttp-web"): """ Tracing function that patches the ``aiohttp`` application so that it will be traced using the given ``tracer``. :param app: aiohttp application to trace :param tracer: tracer instance to use :param service: service name of tracer """ # safe-guard: don't trace an application twice if getattr(app, "__datadog_trace", False): return setattr(app, "__datadog_trace", True) # configure datadog settings app[CONFIG_KEY] = { "tracer": tracer, "service": config._get_service(default=service), "distributed_tracing_enabled": None, "analytics_enabled": None, "analytics_sample_rate": 1.0, } # the tracer must work with asynchronous Context propagation tracer.configure(context_provider=context_provider) # add the async tracer middleware as a first middleware # and be sure that the on_prepare signal is the last one app.middlewares.insert(0, trace_middleware) app.on_response_prepare.append(on_prepare)
5,335,545
def amin(*args, **kwargs):
    """Async equivalent of min()."""
    key_fn = kwargs.pop('key', None)
    if kwargs:
        raise TypeError('amin() got an unexpected keyword argument')
    if len(args) == 0:
        raise TypeError('amin() expected at least 1 argument, got 0')
    elif len(args) == 1:
        iterable = args[0]
    else:
        iterable = args
    if key_fn is None:
        result(min(iterable))
        return
    # support generators
    if not isinstance(iterable, (list, tuple)):
        iterable = list(iterable)
    keys = yield amap.asynq(key_fn, iterable)
    min_pair = min(enumerate(iterable), key=lambda pair: keys[pair[0]])
    result(min_pair[1])
    return
5,335,546
def scheduler(request):
    """
    This is the host fixture for testinfra. To read more, please see
    the testinfra documentation:
    https://testinfra.readthedocs.io/en/latest/examples.html#test-docker-images
    """
    namespace = os.environ.get('NAMESPACE')
    pod = os.environ.get('SCHEDULER_POD')
    yield testinfra.get_host(f'kubectl://{pod}?container=scheduler&namespace={namespace}')
5,335,547
def sizeof_fmt(num, suffix='B'):
    """Return human readable version of in-memory size.

    Code from Fred Cirera on Stack Overflow:
    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Yi', suffix)
5,335,548
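Sample outputs of the formatter above; the values follow from the 1024-based loop.

print(sizeof_fmt(0))            # 0.0B
print(sizeof_fmt(2048))         # 2.0KiB
print(sizeof_fmt(5 * 1024**3))  # 5.0GiB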
def readFPs(filepath):
    """Reads a list of fingerprints from a file"""
    try:
        myfile = open(filepath, "r")
    except:
        raise IOError("file does not exist:", filepath)
    else:
        fps = []
        for line in myfile:
            if line[0] != "#":  # ignore comments
                line = line.rstrip().split()
                fps.append(line[0])
    return fps
5,335,549
def print_selected_expression_type(IniStep, FinStep, simulation_dir, output_dir, gene_indexes, celltype):
    """Similar to the function above, but prints the counts to a file."""
    Noutput = len(gene_indexes)
    filename = output_dir + 'BNstates_type' + str(celltype) + '.txt'
    Scores1 = output_id_cell(simulation_dir, IniStep, FinStep, 2, celltype)
    Scores2 = output_id_cell(simulation_dir, IniStep, FinStep, 3, celltype)
    NCells = len(Scores1)
    if (NCells <= 0):
        with open(filename, "a") as text_file:
            text_file.close()
        return
    NTimiSteps = FinStep - IniStep + 1
    count_gene = [0 for x in gene_indexes]
    for step in range(IniStep, FinStep + 1):
        for key in sorted(Scores1):
            binarray1 = list(reversed(num_to_bin(Scores1[key][step], 21)))
            binarray2 = list(reversed(num_to_bin(Scores2[key][step], 20)))
            binstates = binarray1 + binarray2
            for i in range(Noutput):
                count_gene[i] += binstates[gene_indexes[i]]
    for i in range(Noutput):
        count_gene[i] = float(count_gene[i]) / float(NCells * NTimiSteps)
    with open(filename, "a") as text_file:
        s = ' '
        text_file.write(s.join(str(x) for x in count_gene) + '\n')
        text_file.close()
5,335,550
def validate_take_with_convert(convert, args, kwargs):
    """
    If this function is called via the 'numpy' library, the third parameter in
    its signature is 'axis', which takes either an ndarray or 'None', so check
    if the 'convert' parameter is either an instance of ndarray or is None
    """
    if isinstance(convert, ndarray) or convert is None:
        args = (convert,) + args
        convert = True

    validate_take(args, kwargs, max_fname_arg_count=3, method="both")
    return convert
5,335,551
def load_operators_expr() -> List[str]:
    """Returns clip loads operators for std.Expr as a list of string."""
    abcd = list(ascii_lowercase)
    return abcd[-3:] + abcd[:-3]
5,335,552
def findScanNumberString(s):
    """If s contains 'NNNN', where N stands for any digit, return the string
    beginning with 'NNNN' and extending to the end of s. If 'NNNN' is not
    found, return ''."""
    n = 0
    for i in range(len(s)):
        if s[i].isdigit():
            n += 1
        else:
            n = 0
        if n == 4:
            return s[i-3:]
    return ''
5,335,553
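Quick checks of the digit-scanning helper above; the filenames are illustrative only.

assert findScanNumberString("scan_0042_raw.fits") == "0042_raw.fits"
assert findScanNumberString("ab12cd3456xy") == "3456xy"
assert findScanNumberString("no digits here") == ""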
def parseWsUrl(url):
    """
    Parses a WebSocket URL into its components and returns a tuple (isSecure, host, port, resource, path, params).

    isSecure is a flag which is True for wss URLs.
    host is the hostname or IP from the URL.
    port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
    resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
    path is the /path/ component properly unescaped.
    params is the /query/ component properly unescaped and returned as dictionary.

    :param url: A valid WebSocket URL, i.e. ws://localhost:9000/myresource?param1=23&param2=666
    :type url: str

    :returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
    """
    parsed = urlparse.urlparse(url)
    if parsed.scheme not in ["ws", "wss"]:
        raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)

    if parsed.port is None or parsed.port == "":
        if parsed.scheme == "ws":
            port = 80
        else:
            port = 443
    else:
        port = int(parsed.port)

    if parsed.fragment is not None and parsed.fragment != "":
        raise Exception("invalid WebSocket URL: non-empty fragment '%s'" % parsed.fragment)

    if parsed.path is not None and parsed.path != "":
        ppath = parsed.path
        path = urllib.unquote(ppath)
    else:
        ppath = "/"
        path = ppath

    if parsed.query is not None and parsed.query != "":
        resource = ppath + "?" + parsed.query
        params = urlparse.parse_qs(parsed.query)
    else:
        resource = ppath
        params = {}

    return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
5,335,554
def describe_events(SourceIdentifier=None, SourceType=None, StartTime=None, EndTime=None, Duration=None, MaxRecords=None, Marker=None): """ Returns events related to clusters, cache security groups, and cache parameter groups. You can obtain events specific to a particular cluster, cache security group, or cache parameter group by providing the name as a parameter. By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days\' worth of events if necessary. See also: AWS API Documentation Exceptions Examples Describes all the replication-group events from 3:00P to 5:00P on November 11, 2016. Expected Output: :example: response = client.describe_events( SourceIdentifier='string', SourceType='cache-cluster'|'cache-parameter-group'|'cache-security-group'|'cache-subnet-group'|'replication-group', StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), Duration=123, MaxRecords=123, Marker='string' ) :type SourceIdentifier: string :param SourceIdentifier: The identifier of the event source for which events are returned. If not specified, all sources are included in the response. :type SourceType: string :param SourceType: The event source to retrieve events for. If no value is specified, all events are returned. :type StartTime: datetime :param StartTime: The beginning of the time interval to retrieve events for, specified in ISO 8601 format.\n\nExample: 2017-03-30T07:03:49.555Z\n :type EndTime: datetime :param EndTime: The end of the time interval for which to retrieve events, specified in ISO 8601 format.\n\nExample: 2017-03-30T07:03:49.555Z\n :type Duration: integer :param Duration: The number of minutes worth of events to retrieve. :type MaxRecords: integer :param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: minimum 20; maximum 100.\n :type Marker: string :param Marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords . :rtype: dict ReturnsResponse Syntax { 'Marker': 'string', 'Events': [ { 'SourceIdentifier': 'string', 'SourceType': 'cache-cluster'|'cache-parameter-group'|'cache-security-group'|'cache-subnet-group'|'replication-group', 'Message': 'string', 'Date': datetime(2015, 1, 1) }, ] } Response Structure (dict) -- Represents the output of a DescribeEvents operation. Marker (string) -- Provides an identifier to allow retrieval of paginated results. Events (list) -- A list of events. Each element in the list contains detailed information about one event. (dict) -- Represents a single occurrence of something interesting within the system. Some examples of events are creating a cluster, adding or removing a cache node, or rebooting a node. SourceIdentifier (string) -- The identifier for the source of the event. For example, if the event occurred at the cluster level, the identifier would be the name of the cluster. SourceType (string) -- Specifies the origin of this event - a cluster, a parameter group, a security group, etc. Message (string) -- The text of the event. Date (datetime) -- The date and time when the event occurred. 
Exceptions ElastiCache.Client.exceptions.InvalidParameterValueException ElastiCache.Client.exceptions.InvalidParameterCombinationException Examples Describes all the replication-group events from 3:00P to 5:00P on November 11, 2016. response = client.describe_events( StartTime=datetime(2016, 12, 22, 15, 0, 0, 3, 357, 0), ) print(response) Expected Output: { 'Events': [ { 'Date': datetime(2016, 12, 22, 21, 35, 46, 3, 357, 0), 'Message': 'Snapshot succeeded for snapshot with ID 'cr-bkup' of replication group with ID 'clustered-redis'', 'SourceIdentifier': 'clustered-redis-0001-001', 'SourceType': 'cache-cluster', }, { 'Date': datetime(2016, 12, 22, 16, 27, 56, 3, 357, 0), 'Message': 'Added cache node 0001 in availability zone us-east-1e', 'SourceIdentifier': 'redis-cluster', 'SourceType': 'cache-cluster', }, { 'Date': datetime(2016, 12, 22, 16, 27, 56, 3, 357, 0), 'Message': 'Cache cluster created', 'SourceIdentifier': 'redis-cluster', 'SourceType': 'cache-cluster', }, { 'Date': datetime(2016, 12, 22, 16, 5, 17, 3, 357, 0), 'Message': 'Added cache node 0002 in availability zone us-east-1c', 'SourceIdentifier': 'my-memcached2', 'SourceType': 'cache-cluster', }, { 'Date': datetime(2016, 12, 22, 16, 5, 17, 3, 357, 0), 'Message': 'Added cache node 0001 in availability zone us-east-1e', 'SourceIdentifier': 'my-memcached2', 'SourceType': 'cache-cluster', }, { 'Date': datetime(2016, 12, 22, 16, 5, 17, 3, 357, 0), 'Message': 'Cache cluster created', 'SourceIdentifier': 'my-memcached2', 'SourceType': 'cache-cluster', }, ], 'Marker': '', 'ResponseMetadata': { '...': '...', }, } :return: { 'Marker': 'string', 'Events': [ { 'SourceIdentifier': 'string', 'SourceType': 'cache-cluster'|'cache-parameter-group'|'cache-security-group'|'cache-subnet-group'|'replication-group', 'Message': 'string', 'Date': datetime(2015, 1, 1) }, ] } :returns: ElastiCache.Client.exceptions.InvalidParameterValueException ElastiCache.Client.exceptions.InvalidParameterCombinationException """ pass
5,335,555
def parse_settings(settings: str):
    """Settings generator from get-config.

    Get-config returns something that looks like this:

        Label: ISO Speed
        Type: RADIO
        Current: 100
        Choice: 0 100
        Choice: 1 125
        Choice: 2 160
        Choice: 3 200

    Args:
        settings: Returned string from gphoto2 --get-config <setting>

    Yields:
        value
    """
    for line in settings.split('\n'):
        if line.startswith('Choice'):
            tokens = line.split()
            value = tokens[-1]
            yield value
5,335,556
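The generator above, fed the sample gphoto2 output shown in its own docstring:

sample = """Label: ISO Speed
Type: RADIO
Current: 100
Choice: 0 100
Choice: 1 125
Choice: 2 160
Choice: 3 200"""
print(list(parse_settings(sample)))  # ['100', '125', '160', '200']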
def load_image(filename):
    """Loads an image, reads it and returns image size, dimension and a numpy
    array of this image.

    filename: the name of the image
    """
    try:
        img = cv2.imread(filename)
        print("(H, W, D) = (height, width, depth)")
        print("shape: ", img.shape)
        h, w, d = img.shape
        print('this is the width', w)
        print('this is the height', h)
        # size = h * w
    except Exception as e:
        print(e)
        print("Unable to load image")
    return img.shape, img
5,335,557
def add_manipulable(key, manipulable):
    """
    add a ArchipackActiveManip into the stack
    if not already present
    setup reference to manipulable
    return manipulators stack
    """
    global manips
    if key not in manips.keys():
        # print("add_manipulable() key:%s not found create new" % (key))
        manips[key] = ArchipackActiveManip(key)

    manips[key].manipulable = manipulable
    return manips[key].stack
5,335,558
def importAllPlugins():
    """
    Bring all plugins that KiKit offers into the global namespace. This
    function is impure as it modifies the global variable scope. The purpose
    of this function is to allow the PCM proxy to operate.
    """
    import importlib
    for plugin in availablePlugins:
        module = importlib.import_module(f"kikit.actionPlugins.{plugin.package}")
        module.plugin().register()
5,335,559
def get_wave_data_type(sample_type_id): """Creates an SDS type definition for WaveData""" if sample_type_id is None or not isinstance(sample_type_id, str): raise TypeError('sample_type_id is not an instantiated string') int_type = SdsType('intType', SdsTypeCode.Int32) double_type = SdsType('doubleType', SdsTypeCode.Double) # WaveData uses Order as the key, or primary index order_property = SdsTypeProperty('Order', True, int_type) tau_property = SdsTypeProperty('Tau', False, double_type) radians_property = SdsTypeProperty('Radians', False, double_type) sin_property = SdsTypeProperty('Sin', False, double_type) cos_property = SdsTypeProperty('Cos', False, double_type) tan_property = SdsTypeProperty('Tan', False, double_type) sinh_property = SdsTypeProperty('Sinh', False, double_type) cosh_property = SdsTypeProperty('Cosh', False, double_type) tanh_property = SdsTypeProperty('Tanh', False, double_type) # Create an SdsType for WaveData Class wave = SdsType(sample_type_id, SdsTypeCode.Object, [order_property, tau_property, radians_property, sin_property, cos_property, tan_property, sinh_property, cosh_property, tanh_property], 'WaveDataSample', 'This is a sample SDS type for storing WaveData type events') return wave
5,335,560
def _spec_augmentation(x, warp_for_time=False, num_t_mask=2, num_f_mask=2, max_t=50, max_f=10, max_w=80): """ Deep copy x and do spec augmentation then return it Args: x: input feature, T * F 2D num_t_mask: number of time mask to apply num_f_mask: number of freq mask to apply max_t: max width of time mask max_f: max width of freq mask max_w: max width of time warp Returns: augmented feature """ y = np.copy(x) max_frames = y.shape[0] max_freq = y.shape[1] # time warp if warp_for_time and max_frames > max_w * 2: center = random.randrange(max_w, max_frames - max_w) warped = random.randrange(center - max_w, center + max_w) + 1 left = Image.fromarray(x[:center]).resize((max_freq, warped), BICUBIC) right = Image.fromarray(x[center:]).resize((max_freq, max_frames - warped), BICUBIC) y = np.concatenate((left, right), 0) # time mask for i in range(num_t_mask): start = random.randint(0, max_frames - 1) length = random.randint(1, max_t) end = min(max_frames, start + length) y[start:end, :] = 0 # freq mask for i in range(num_f_mask): start = random.randint(0, max_freq - 1) length = random.randint(1, max_f) end = min(max_freq, start + length) y[:, start:end] = 0 return y
5,335,561
def deg2rad(x, dtype=None): """ Converts angles from degrees to radians. Args: x (Tensor): Angles in degrees. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the output Tensor. Returns: Tensor, the corresponding angle in radians. This is a tensor scalar if `x` is a tensor scalar. Raises: TypeError: if `x` is not a tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore.numpy as np >>> x = np.asarray([1, 2, 3, -4, -5]) >>> output = np.deg2rad(x) >>> print(output) [ 0.01745329 0.03490658 0.05235988 -0.06981317 -0.08726647] """ _check_input_tensor(x) def convert(a): return a * pi / 180.0 return _apply_tensor_op(convert, x, dtype=dtype)
5,335,562
def point_in_ellipse(origin, point, a, b, pa_rad, verbose=False):
    """
    Identify if the point is inside the ellipse.

    :param origin A SkyCoord defining the centre of the ellipse.
    :param point A SkyCoord defining the point to be checked.
    :param a The semi-major axis in arcsec of the ellipse
    :param b The semi-minor axis in arcsec of the ellipse
    :param pa_rad The position angle of the ellipse. This is the angle of the
        major axis measured in radians East of North (or CCW from the y axis).
    """
    # Convert point to be in plane of the ellipse, accounting for distortions at high declinations
    p_ra_dist = (point.icrs.ra.degree - origin.icrs.ra.degree) * math.cos(origin.icrs.dec.rad)
    p_dec_dist = point.icrs.dec.degree - origin.icrs.dec.degree

    # Calculate the angle and radius of the test point relative to the centre of the ellipse
    # Note that we reverse the ra direction to reflect the CCW direction
    radius = math.sqrt(p_ra_dist**2 + p_dec_dist**2)
    diff_angle = (math.pi/2 + pa_rad) if p_dec_dist == 0 else math.atan(p_ra_dist / p_dec_dist) - pa_rad

    # Obtain the point position in terms of the ellipse major and minor axes
    minor = radius * math.sin(diff_angle)
    major = radius * math.cos(diff_angle)
    if verbose:
        print('point relative to ellipse centre angle:{} deg radius:{:.4f}" maj:{:.2f}" min:{:.2f}"'.format(
            math.degrees(diff_angle), radius*3600, major*3600, minor*3600))

    a_deg = a / 3600.0
    b_deg = b / 3600.0

    # Calc distance from origin relative to a and b
    dist = math.sqrt((major / a_deg) ** 2 + (minor / b_deg) ** 2)
    if verbose:
        print("Point %s is %f from ellipse %f, %f, %f at %s." % (point, dist, a, b, math.degrees(pa_rad), origin))
    return round(dist, 3) <= 1.0
5,335,563
def isNumberString(value):
    """
    Checks if value is a string that has only digits - possibly with leading '+' or '-'
    """
    if not value:
        return False
    sign = value[0]
    if (sign == '+') or (sign == '-'):
        if len(value) <= 1:
            return False
        absValue = value[1:]
        return absValue.isdigit()
    else:
        if len(value) <= 0:
            return False
        else:
            return value.isdigit()
5,335,564
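Behaviour of the digit-string check above on a few representative inputs:

assert isNumberString("123") is True
assert isNumberString("-42") is True
assert isNumberString("+") is False      # a sign alone is rejected
assert isNumberString("12.5") is False   # '.' is not a digit
assert isNumberString("") is False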
def initialise_tweet_database():
    """
    Initialise Twitter table for storing inbound and outbound Tweet information.
    """
    twitter_sql = """
    CREATE TABLE Twitter (
        procrystal_id integer PRIMARY KEY,
        tweet_id text not null,
        username text not null,
        reply_sent bool not null,
        UNIQUE(tweet_id)
    )
    """
    initialise_database("./procrystaldb.db", "Twitter", twitter_sql)
5,335,565
def vaseline(tensor, shape, alpha=1.0, time=0.0, speed=1.0):
    """Blend the tensor with a center-masked, bloomed copy of itself."""
    return value.blend(tensor, center_mask(tensor, bloom(tensor, shape, 1.0), shape), alpha)
5,335,566
def _func_length(target_attr: Union[Dict[str, Any], List[Any]], *_: Any) -> int:
    """Function for returning the length of a dictionary or list."""
    return len(target_attr)
5,335,567
def assert_option_strategy(strategy, init_opts, exp_opts, **kwargs): """Test for any strategy for options handler strategy : strategy to use init_opts : dict with keys :code:`{'c1', 'c2', 'w'}` or :code:`{'c1', 'c2', 'w', 'k', 'p'}` exp_opts: dict with expected values after strategy with given parameters kwargs: arguments to use for given strategy """ assert len(init_opts) == len( exp_opts ), "Size of initial options and expected options must be same" oh = OptionsHandler(strategy) return_opts = oh(init_opts, **kwargs) assert np.allclose( list(return_opts.values()), list(exp_opts.values()), atol=0.001, rtol=0 ), "Expected options don't match with the given strategy"
5,335,568
def import_places_from_swissnames3d( projection: str = "LV95", file: Optional[TextIOWrapper] = None ) -> str: """ import places from SwissNAMES3D :param projection: "LV03" or "LV95" see http://mapref.org/CoordinateReferenceFrameChangeLV03.LV95.html#Zweig1098 :param file: path to local unzipped file. if provided, the `projection` parameter will be ignored. """ try: file = file or get_swissnames3d_remote_file(projection=projection) except HTTPError as error: return f"Error downloading {PLACE_DATA_URL}: {error}. " except ConnectionError: return f"Error connecting to {PLACE_DATA_URL}. " with file: count = get_csv_line_count(file, header=True) data = parse_places_from_csv(file, projection=projection) source_info = f"SwissNAMES3D {projection}" return save_places_from_generator(data, count, source_info)
5,335,569
def create_file_handler(log_file, handler_level, formatter=logging.Formatter(LOG_FORMAT_STRING)):
    """Creates file handler which logs even debug messages."""
    if handler_level == 'debug':
        level = logging.DEBUG
    elif handler_level == 'info':
        level = logging.INFO
    elif handler_level == 'warning':
        level = logging.WARNING
    elif handler_level == 'error':
        level = logging.ERROR
    elif handler_level == 'critical':
        level = logging.CRITICAL
    else:
        raise Exception('logger level has to be defined')
    fh = MakeFileHandler(log_file)
    fh.setLevel(level)
    fh.setFormatter(formatter)
    return fh
5,335,570
def clear():
    """Clear the model."""
    itasca.command('model new')
5,335,571
def utility_for_osf_spam_or_ham(folder_name): """ commandline utility for determining whether osf info is spam or ham and then moving to correct file. """ files_folders = os.listdir(folder_name) for f in files_folders: try: if f[0:3] == "dir": pass else: cur_file_path = folder_name.rstrip("/")+"/"+f cur_file_contents = open(cur_file_path,'r').read() print cur_file_contents try: decision = int(input("spam(1) or ham(2) or skip(3) or quit(4):")) except: decision= 0 print decision #import pdb;pdb.set_trace() #print decision #print decision=='s' #print decision=='h' if decision == 1: from_folder = "./osf_data/"+f to_folder = "./osf_data/osf_spam/"+f command = "mv "+from_folder + " "+ to_folder args = shlex.split(command) subprocess.call(args) elif decision == 2 or decision==0: from_folder = "./osf_data/"+f to_folder = "./osf_data/osf_ham/"+f command = "mv "+from_folder + " "+ to_folder args = shlex.split(command) subprocess.call(args) elif decision==4: break else: pass except: print f
5,335,572
def _generate_data(size):
    """For testing reasons only"""
    # return FeatureSpec('dummy', name=None, data='x' * size)
    return PlotSpec(data='x' * size, mapping=None, scales=[], layers=[])
5,335,573
def configuration_filename(feature_dir, proposed_splits, split, generalized):
    """Calculates configuration specific filenames.

    Args:
        feature_dir (`str`): directory of features wrt to dataset directory.
        proposed_splits (`bool`): whether using proposed splits.
        split (`str`): train split.
        generalized (`bool`): whether GZSL setting.

    Returns:
        `str` containing arguments in appropriate form.
    """
    return '{}{}_{}{}.pt'.format(
        feature_dir,
        ('_proposed_splits' if proposed_splits else ''),
        split,
        '_generalized' if generalized else '',
    )
5,335,574
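For illustration, two hypothetical calls and the strings they produce (the directory and split names are made up):

assert configuration_filename('features/resnet', True, 'train', False) == \
    'features/resnet_proposed_splits_train.pt'
assert configuration_filename('features/resnet', False, 'val', True) == \
    'features/resnet_val_generalized.pt'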
def gammaBGRbuf( buf: array, gamma: float) -> array: """Apply a gamma adjustment to a BGR buffer Args: buf: unsigned byte array holding BGR data gamma: float gamma adjust Returns: unsigned byte array holding gamma adjusted BGR data """ applygammaBGRbuf(buf, gamma) return buf
5,335,575
def sentence_avg_word_length(df, new_col_name, col_with_lyrics):
    """
        Compute the average word length of each entry in a dataframe lyrics column
        and store the result in a new column.

        Parameters
        ----------
        df : dataframe
        new_col_name : name of the new column
        col_with_lyrics : column containing the lyrics

        Returns
        -------
        dataframe with the new column added
    """
    df[new_col_name] = df[col_with_lyrics].apply(_sentence_avg_word_length)
    return df
5,335,576
def to_sigmas(t, p, w_1, w_2, w_3):
    """Given t = sin(theta), p = sin(phi), and the stds this computes the
    covariance matrix and its inverse"""
    p2 = p * p
    t2 = t * t
    tc2 = 1 - t2
    pc2 = 1 - p2
    tc = np.sqrt(tc2)
    pc = np.sqrt(pc2)
    s1, s2, s3 = 1. / (w_1 * w_1), 1. / (w_2 * w_2), 1. / (w_3 * w_3)
    a = pc2 * tc2 * s1 + t2 * s2 + p2 * tc2 * s3
    b = pc2 * t2 * s1 + tc2 * s2 + p2 * t2 * s3
    c = p2 * s1 + pc2 * s3
    d = tc * t * (pc2 * s1 - s2 + p2 * s3)
    e = p * pc * tc * (s3 - s1)
    f = p * pc * t * (s3 - s1)
    sigma_inv = np.array([[a, d, e],
                          [d, b, f],
                          [e, f, c]])
    # Invert the symmetric 3x3 matrix explicitly via its adjugate and determinant.
    det = a * b * c - c * d ** 2 - b * e ** 2 + 2 * d * e * f - a * f ** 2
    sigma = np.array([[b * c - f ** 2, e * f - c * d, d * f - b * e],
                      [e * f - c * d, a * c - e ** 2, d * e - a * f],
                      [d * f - b * e, d * e - a * f, a * b - d ** 2]]) / det
    return sigma, sigma_inv
5,335,577
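A quick sanity check of to_sigmas: the two returned matrices should be mutual inverses for any angles and positive widths (a sketch, assuming numpy is imported as np; the input values are arbitrary).

import numpy as np

sigma, sigma_inv = to_sigmas(t=0.3, p=0.6, w_1=1.0, w_2=2.0, w_3=0.5)
# The explicit adjugate-based inverse should agree with numpy's inverse.
assert np.allclose(sigma @ sigma_inv, np.eye(3), atol=1e-10)
assert np.allclose(sigma, np.linalg.inv(sigma_inv))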
def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1):
    """Download the Pauli et al. (2017) atlas, comprising 12 subcortical
    nodes in total.

    Parameters
    ----------
    version: str, optional (default='prob')
        Which version of the atlas should be downloaded. This can be
        'prob' for the probabilistic atlas or 'det' for the
        deterministic atlas.

    data_dir : str, optional (default=None)
        Path of the data directory. Used to force data storage in a
        specified location.

    verbose : int
        verbosity level (0 means no message).

    Returns
    -------
    sklearn.datasets.base.Bunch
        Dictionary-like object, contains:

        - maps: 3D Nifti image, values are indices in the list of labels.
        - labels: list of strings. Starts with 'Background'.
        - description: a short description of the atlas and some references.

    References
    ----------
    https://osf.io/r2hvk/

    Pauli, W. M., Nili, A. N., & Tyszka, J. M. (2018). A high-resolution
    probabilistic in vivo atlas of human subcortical brain nuclei.
    Scientific Data, 5, 180063-13. http://doi.org/10.1038/sdata.2018.63
    """
    if version == 'prob':
        url_maps = 'https://osf.io/w8zq2/download'
        filename = 'pauli_2017_prob.nii.gz'
    elif version == 'det':
        url_maps = 'https://osf.io/5mqfx/download'
        filename = 'pauli_2017_det.nii.gz'
    else:
        raise NotImplementedError("{} is not a valid version for the Pauli "
                                  "atlas; use 'prob' or 'det'".format(version))

    url_labels = 'https://osf.io/6qrcb/download'
    dataset_name = 'pauli_2017'

    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)

    files = [(filename,
              url_maps,
              {'move': filename}),
             ('labels.txt',
              url_labels,
              {'move': 'labels.txt'})]

    atlas_file, labels = _fetch_files(data_dir, files)

    labels = np.loadtxt(labels, dtype=str)[:, 1].tolist()

    fdescr = _get_dataset_descr(dataset_name)

    return Bunch(maps=atlas_file,
                 labels=labels,
                 description=fdescr)
5,335,578
def GeometricError(ref_point_1, ref_point_2): """Deprecation notice function. Please use indicated correct function""" print(GeometricError.__name__ + ' is deprecated, use ' + geometricError.__name__ + ' instead') traceback.print_stack(limit=2) return geometricError(ref_point_1, ref_point_2)
5,335,579
def svn_fs_open2(*args): """svn_fs_open2(char const * path, apr_hash_t fs_config, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t""" return _fs.svn_fs_open2(*args)
5,335,580
def config(workspace): """Return a config object.""" return Config(workspace.root_uri, {})
5,335,581
def longest_sequence_index(sequences: List[List[XmonQubit]]) -> Optional[int]: """Gives the position of a longest sequence. Args: sequences: List of node sequences. Returns: Index of the longest sequence from the sequences list. If more than one longest sequence exist, the first one is returned. None is returned for empty list. """ if sequences: return max(range(len(sequences)), key=lambda i: len(sequences[i])) return None
5,335,582
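A small illustration of the tie-breaking behaviour, using plain lists in place of XmonQubit sequences (the function never inspects the elements themselves):

sequences = [[1, 2], [3, 4, 5], [6, 7, 8]]
assert longest_sequence_index(sequences) == 1   # first of the two longest
assert longest_sequence_index([]) is None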
def subset_raster(rast, band=1, bbox=None, logger=None):
    """
    :param rast: The rasterio raster object
    :param band: The band number you want to contour. Default: 1
    :param bbox: The bounding box in which to generate contours.
    :param logger: The logger object to use for this tool (optional)
    :return: A dict with the keys 'crs', 'array', 'affine', 'min', and 'max'. 'crs' is the coordinate reference
        system of the original raster, 'array' is the numpy array read from the window, 'affine' is the
        transformation for the bbox, and 'min'/'max' are the min/max values within the bbox.
    """

    # Affine transformations between raster and world coordinates.
    # See https://github.com/sgillies/affine
    # See https://github.com/mapbox/rasterio/blob/master/docs/windowed-rw.rst
    a = rast.affine  # Convert from pixel coordinates to world coordinates
    reverse_affine = ~a  # Convert from world coordinates to pixel coordinates

    # Copy the metadata
    kwargs = rast.meta.copy()

    # Read the band
    if bbox is not None:
        bbox = list(bbox)
        if len(bbox) != 4:
            if logger is not None:
                logger.error('BBOX is not of length 4. Should be (xmin, ymin, xmax, ymax)')
            raise ValueError('BBOX is not of length 4. Should be (xmin, ymin, xmax, ymax)')

        # Restrict to the extent of the original raster if our requested
        # bbox is larger than the raster extent
        min_x = bbox[0]
        min_y = bbox[1]
        max_x = bbox[2]
        max_y = bbox[3]
        if min_x < rast.bounds[0]:
            min_x = rast.bounds[0]
        if min_y < rast.bounds[1]:
            min_y = rast.bounds[1]
        if max_x > rast.bounds[2]:
            max_x = rast.bounds[2]
        if max_y > rast.bounds[3]:
            max_y = rast.bounds[3]

        bbox = (min_x, min_y, max_x, max_y)

        # Convert the bounding box (world coordinates) to pixel coordinates
        # window = ((row_start, row_stop), (col_start, col_stop))
        window_bl = world_to_pixel_coords(rast.affine, [(bbox[0], bbox[1]), ])
        window_tr = world_to_pixel_coords(rast.affine, [(bbox[2], bbox[3]), ])
        window_rows = [int(window_bl[0, 1]), int(window_tr[0, 1])]
        window_cols = [int(window_bl[0, 0]), int(window_tr[0, 0])]
        window = (
            (min(window_rows), max(window_rows)),
            (min(window_cols), max(window_cols)))

        kwargs.update({
            'height': abs(window[0][1] - window[0][0]),
            'width': abs(window[1][1] - window[1][0]),
            'affine': rast.window_transform(window)
        })
    else:
        window = None

    # Read the data but only the window we set
    rast_band = rast.read(band, window=window, masked=True)
    rast_a = kwargs['affine']

    return {
        'crs': rast.crs,
        'array': rast_band,
        'affine': rast_a,
        'min': rast_band.min(),
        'max': rast_band.max()
    }
5,335,583
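A usage sketch for subset_raster. It assumes the older rasterio API that exposes dataset.affine (which the function relies on), and both the file path and the bounding-box coordinates below are made up.

import logging
import rasterio

log = logging.getLogger(__name__)
with rasterio.open("dem.tif") as rast:  # hypothetical raster covering the bbox
    subset = subset_raster(
        rast,
        band=1,
        bbox=(500000, 4100000, 510000, 4110000),  # (xmin, ymin, xmax, ymax)
        logger=log,
    )
print(subset["min"], subset["max"], subset["array"].shape)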
def extract_date_features(df): """Expand datetime values into individual features.""" for col in df.select_dtypes(include=['datetime64[ns]']): print(f"Now extracting features from column: '{col}'.") df[col + '_month'] = pd.DatetimeIndex(df[col]).month df[col + '_day'] = pd.DatetimeIndex(df[col]).day df[col + '_weekday'] = pd.DatetimeIndex(df[col]).weekday df.drop(columns=[col], inplace=True) print("Done!") return df
5,335,584
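For example, a single datetime column is replaced by three integer features (a small sketch using pandas):

import pandas as pd

df = pd.DataFrame({"created": pd.to_datetime(["2021-03-01", "2021-12-24"])})
df = extract_date_features(df)
print(df.columns.tolist())
# ['created_month', 'created_day', 'created_weekday']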
def record_setitem(data, attr, value): """Implement `record_setitem`.""" data2 = copy(data) py_setattr(data2, attr, value) return data2
5,335,585
async def test_import_from_yaml(hass, canary) -> None: """Test import from YAML.""" with patch( "homeassistant.components.canary.async_setup_entry", return_value=True, ): assert await async_setup_component(hass, DOMAIN, {DOMAIN: YAML_CONFIG}) await hass.async_block_till_done() entries = hass.config_entries.async_entries(DOMAIN) assert len(entries) == 1 assert entries[0].data[CONF_USERNAME] == "test-username" assert entries[0].data[CONF_PASSWORD] == "test-password" assert entries[0].data[CONF_TIMEOUT] == 5
5,335,586
def get_git_branch() -> Optional[str]: """Get the git branch.""" return _run("git", "branch", "--show-current")
5,335,587
def open_image(path, verbose=True, squeeze=False):
    """
    Open a NIfTI-1 image at the given path.

    The image might have an arbitrary number of dimensions; however, its first three axes are assumed to hold its
    spatial dimensions.

    Parameters
    ----------
    path : str
        The path of the file to be loaded.
    verbose : bool, optional
        If `True` (default), print some meta data of the loaded file to standard output.
    squeeze : bool, optional
        If `True`, remove trailing dimensions of the image volume if they contain a single entry only (default is
        `False`). Note that in this case it has not been tested whether the coordinate transformations from the
        NIfTI-1 header still apply.

    Returns
    -------
    Volume
        The resulting 3D image volume, with the ``src_object`` attribute set to the respective
        ``nibabel.nifti1.Nifti1Image`` instance and the desired anatomical world coordinate system ``system`` set to
        "RAS". Relies on the NIfTI header's `get_best_affine()` method to determine which transformation matrix to
        use (qform or sform).

    Raises
    ------
    IOError
        If something goes wrong.
    """
    # According to the NIfTI-1 specification [1]_, the world coordinate system of NIfTI-1 files is always RAS.
    src_system = "RAS"

    try:
        src_object = nibabel.nifti1.load(path)
    except Exception as e:
        raise IOError(e)
    voxel_data = np.asanyarray(src_object.dataobj)
    if isinstance(voxel_data, np.memmap):
        voxel_data.mode = "c"  # Make sure that no changes happen to data on disk: copy on write
    hdr = src_object.header
    ndim = hdr["dim"][0]
    if ndim < 3:
        raise IOError("Currently only 3D images can be handled. The given image has {} dimension(s).".format(ndim))

    if verbose:
        print("Loading image:", path)
        print("Meta data:")
        print(hdr)
        print("Image dimensions:", voxel_data.ndim)

    # Squeeze superfluous dimensions (according to the NIfTI-1 specification [1]_, the spatial dimensions are always
    # in front)
    if squeeze:
        voxel_data = __squeeze_dim(voxel_data, verbose)

    mat = hdr.get_best_affine()
    volume = Volume(src_voxel_data=voxel_data, src_transformation=mat, src_system=src_system,
                    src_spatial_dimensions=(0, 1, 2), system="RAS", src_object=src_object)

    return volume
5,335,588
def add_one_for_ordered_traversal(graph, node_idx, current_path=None):
    """
    This recursive function returns an ordered traversal of a molecular graph.
    The traversal obeys the following rules:
        1. Locations may only be visited once.
        2. All locations must be visited.
        3. Locations are visited in the order in which the shortest path is
           followed:
            - If potential paths are identical in length, the one that
              provides the lightest total weight is followed.
            - If the total weight of each path is identical (as is the case
              for a molecule that contains any cycle), the path that provides
              the lightest first atom is chosen.
            - If the lightest first atom is identical, then...

    The recursive algorithm works as follows:
        1. Go from node to node until reaching a node that has no neighbors.
        2. Once this node is reached, it returns itself back up the stack.
        3. If a node only has a single path, this is also immediately
           returned up the stack.
        4. Once a node is reached that has two possible paths, a choice is
           made between the competing paths: each path leading down is fully
           traversed, and the path that provides the lightest direction is
           descended first. If both paths are equal in weight (as should be
           the case for a cycle), the path that provides the most direct
           route to the heaviest group is preferred. If the paths are
           completely identical, it does not matter which one is chosen
           first from the perspective of the graph.
    """
    if current_path is None:
        current_path = []

    ### Make copy of input current_path
    current_path = [x for x in current_path]

    path = [node_idx]
    current_path += [node_idx]
    neighbors = graph.adj[node_idx]

    ### Build entire traversal list
    neigh_path_list = []
    for entry in neighbors:
        if entry in current_path:
            continue
        neigh_path = add_one_for_ordered_traversal(graph, entry, current_path)
        if len(neigh_path) > 0:
            neigh_path_list.append(neigh_path)

    ### Only a single option
    if len(neigh_path_list) == 1:
        if len(neigh_path_list[0]) == 1:
            path += neigh_path_list[0]
            return path
    elif len(neigh_path_list) == 0:
        return [node_idx]

    ### If there's more than a single option, an algorithm that seeks to
    ### stitch together the neighbor paths in a reasonable and unique way
    ### should be used
    neigh_list_sorted = _sort_neighbor_path_list(graph, neigh_path_list)
    path += neigh_list_sorted

    return path
5,335,589
def get_kubeseal_version() -> str: """Retrieve the kubeseal binary version.""" LOGGER.debug("Retrieving kubeseal binary version.") binary = current_app.config.get("KUBESEAL_BINARY") kubeseal_subprocess = subprocess.Popen( [binary, "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) output, error = kubeseal_subprocess.communicate() if error: error_message = f"Error in run_kubeseal: {error}" LOGGER.error(error_message) raise RuntimeError(error_message) version = "".join(output.decode("utf-8").split("\n")) return str(version).split(":")[1].replace('"', "").lstrip()
5,335,590
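The string handling at the end of get_kubeseal_version assumes output shaped like the sample line below (the exact format of kubeseal's --version output is an assumption here). A stand-alone check of just that parsing step:

# Sample output assumed to look like: kubeseal version: "0.19.1"
sample = 'kubeseal version: "0.19.1"\n'
version = "".join(sample.split("\n"))
parsed = str(version).split(":")[1].replace('"', "").lstrip()
assert parsed == "0.19.1"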
def test_add_interpolated_tensor(example_grid): """ test the `add_interpolated` method """ f = Tensor2Field(example_grid) a = np.random.random(f.data_shape) c = tuple(example_grid.point_to_cell(example_grid.get_random_point())) c_data = (Ellipsis,) + c p = example_grid.cell_to_point(c, cartesian=False) f.add_interpolated(p, a) np.testing.assert_almost_equal(f.data[c_data], a / example_grid.cell_volumes[c]) f.add_interpolated(example_grid.get_random_point(cartesian=False), a) np.testing.assert_almost_equal(f.integral, 2 * a) f.data = 0 # reset add_interpolated = example_grid.make_add_interpolated_compiled() c = tuple(example_grid.point_to_cell(example_grid.get_random_point())) c_data = (Ellipsis,) + c p = example_grid.cell_to_point(c, cartesian=False) add_interpolated(f.data, p, a) np.testing.assert_almost_equal(f.data[c_data], a / example_grid.cell_volumes[c]) add_interpolated(f.data, example_grid.get_random_point(cartesian=False), a) np.testing.assert_almost_equal(f.integral, 2 * a)
5,335,591
def get_subject_guide_for_section_params( year, quarter, curriculum_abbr, course_number, section_id=None): """ Returns a SubjectGuide model for the passed section params: year: year for the section term (4-digits) quarter: quarter (AUT, WIN, SPR, or SUM) curriculum_abbr: curriculum abbreviation course_number: course number section_id: course section identifier (optional) """ quarter = quarter.upper()[:3] url = "{}/{}/{}/{}/{}/{}/{}".format( subject_guide_url_prefix, 'course', year, quarter, quote(curriculum_abbr.upper()), course_number, section_id.upper()) headers = {'Accept': 'application/json'} response = SubjectGuideDao.getURL(url, headers) response_data = str(response.data) if response.status != 200: raise DataFailureException(url, response.status, response_data) return _subject_guide_from_json(json.loads(response.data))
5,335,592
def test_gas_limit_config(BrownieTester, accounts, config): """gas limit is set correctly from the config""" config["active_network"]["gas_limit"] = 5000000 tx = accounts[0].deploy(BrownieTester, True).tx assert tx.gas_limit == 5000000 config["active_network"]["gas_limit"] = False
5,335,593
def handle_hubbubdelmarkup(bot, ievent): """ arguments: <feedname> <item> - delete markup item from a feed's markuplist. """ try: (name, item) = ievent.args except ValueError: ievent.missing('<feedname> <item>') ; return target = ievent.channel feed = watcher.byname(name) if not feed: ievent.reply("we don't have a %s feed" % name) ; return try: del feed.markup[jsonstring([name, target])][item] except (KeyError, TypeError): ievent.reply("can't remove %s from %s feed's markup" % (item, name)) ; return feed.markup.save() ievent.reply('%s removed from (%s,%s) markuplist' % (item, name, target))
5,335,594
def dense_reach_bonus(task_rew, b_pos, arm_pos, max_reach_bonus=1.5, reach_thresh=.02, reach_multiplier=all_rew_reach_multiplier): """ Convenience function for adding a conditional dense reach bonus to an aux task. If the task_rew is > 1, this indicates that the actual task is complete, and instead of giving a reach bonus, the max amount of reward given for a reach should be given (regardless of whether reach is satisfied). If it is < 1, a dense reach reward is given, and the actual task reward is given ONLY if the reach condition is satisfied. """ if task_rew > 1: total_rew = task_rew + reach_multiplier * max_reach_bonus else: reach_rew = close(reach_thresh, b_pos, arm_pos, close_rew=max_reach_bonus) new_task_rew = task_rew * int(reach_rew > 1) total_rew = reach_multiplier * reach_rew + new_task_rew return total_rew
5,335,595
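A worked illustration of the two branches of dense_reach_bonus, using made-up numbers; reach_multiplier stands in for all_rew_reach_multiplier, and close() is assumed to return a value greater than 1 only once the arm is within reach_thresh.

# All numbers below are illustrative only.
reach_multiplier, max_reach_bonus = 0.5, 1.5

# Task already complete (task_rew > 1): the full reach bonus is granted
# regardless of the current arm position.
task_rew = 2.0
total_done = task_rew + reach_multiplier * max_reach_bonus              # 2.75

# Task incomplete and arm not yet within reach_thresh (dense reach_rew <= 1):
# the task reward is gated out, only the shaped reach term remains.
task_rew, reach_rew = 0.6, 0.8
total_reaching = reach_multiplier * reach_rew + task_rew * int(reach_rew > 1)  # 0.4

print(total_done, total_reaching)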
def handler_fan_out(event, context): """ Publishes an SNS message for each region from which the assets are to be collected. """ elasticsearch_regions = AWS_REGIONS_SET - {'ap-northeast-3'} for region in elasticsearch_regions: sns.publish( TopicArn=os.environ['SNSTopicCollectAWSElasticsearchARN'], Message=region, )
5,335,596
def getAllNumbers(text):
    """
        This function is a copy of systemtools.basics.getAllNumbers
    """
    if text is None:
        return None
    allNumbers = []
    if len(text) > 0:
        # Remove spaces between digits:
        spaceNumberExists = True
        while spaceNumberExists:
            text = re.sub('(([^.,0-9]|^)[0-9]+) ([0-9])', '\\1\\3', text, flags=re.UNICODE)
            if re.search('([^.,0-9]|^)[0-9]+ [0-9]', text) is None:
                spaceNumberExists = False
        numberRegex = '[-+]?[0-9]+[.,][0-9]+|[0-9]+'
        allMatchIter = re.finditer(numberRegex, text)
        if allMatchIter is not None:
            for current in allMatchIter:
                currentFloat = current.group()
                currentFloat = re.sub(r"\s", "", currentFloat)
                currentFloat = re.sub(",", ".", currentFloat)
                currentFloat = float(currentFloat)
                if currentFloat.is_integer():
                    allNumbers.append(int(currentFloat))
                else:
                    allNumbers.append(currentFloat)
    return allNumbers
5,335,597
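A few illustrative inputs and the lists getAllNumbers returns (integers come back as int, other values as float, and commas are treated as decimal separators):

assert getAllNumbers("Price: 1 200,50 euros, qty 3") == [1200.5, 3]
assert getAllNumbers("values -1.5 and 42") == [-1.5, 42]
assert getAllNumbers("no digits here") == []
assert getAllNumbers(None) is None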
def optimal_string_alignment_distance(s1, s2): """ This is a variation of the Damerau-Levenshtein distance that returns the strings' edit distance taking into account deletion, insertion, substitution, and transposition, under the condition that no substring is edited more than once. Args: s1 (str): Sequence 1. s2 (str): Sequence 2. Returns: float: Optimal String Alignment Distance. Examples: >>> rltk.optimal_string_alignment_distance('abcd', 'acbd') 1 >>> rltk.optimal_string_alignment_distance('ca', 'abc') 3 """ utils.check_for_none(s1, s2) utils.check_for_type(str, s1, s2) # s1 = utils.unicode_normalize(s1) # s2 = utils.unicode_normalize(s2) n1, n2 = len(s1), len(s2) dp = [[0] * (n2 + 1) for _ in range(n1 + 1)] for i in range(0, n1 + 1): dp[i][0] = i for j in range(0, n2 + 1): dp[0][j] = j for i in range(1, n1 + 1): for j in range(1, n2 + 1): cost = 0 if s1[i - 1] == s2[j - 1] else 1 dp[i][j] = min(dp[i][j - 1] + 1, dp[i - 1][j] + 1, dp[i - 1][j - 1] + cost) if (i > 1 and j > 1 and s1[i - 1] == s2[j - 2] and s1[i - 2] == s2[j - 1]): dp[i][j] = min(dp[i][j], dp[i - 2][j - 2] + cost) return dp[n1][n2]
5,335,598
def handle_stop(event): """ Handler for mycroft.stop, i.e. button press """ loop.force_unmute()
5,335,599