content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def strip_string_to_integer(string):
    """Drop every non-digit character from *string* and parse the rest as int.

    :param string: text possibly containing digits
    :return: the integer formed by the digits in order of appearance
             (raises ValueError when the string holds no digits)
    """
    digits = [ch for ch in string if ch.isdigit()]
    return int("".join(digits))
0cdb3b3e4411186f7992ec6ce22961cc1cb0a178
27,073
def totalMemory():
    """The total amount of memory installed in this computer, in bytes.

    NOTE(review): this returns the builtin ``int`` *type itself*, not an
    integer value — it looks like a stub or a truncated implementation;
    confirm the intended body before relying on it.
    """
    return int
5d1a6731ede23c58c854df5b90a5cde7a1a81da1
27,074
import math def _linspace_args(start, stop, step_size=None, length=None): """ Parameters ---------- start : stop : step_size : (Default value = None) length : (Default value = None) Returns ------- type where step_size OR length is defined. """ if step_size is None: if length is None: length = 10 step_size = (stop - start) / (length + 1) else: if length is not None: raise ValueError('step_size and length cannot be both different from None') length = math.floor((stop - start) / step_size) + 1 return step_size, length
fa427f4def5dc30ae7d51fce70ad8c630a4cbd4b
27,075
def config_handler(cfg):
    """Separate model names from their explainability values in the config.

    :param cfg: loaded config mapping with a "models to use" list of
        one-entry dicts {model_name: explainability_value}.
    :return: (model_names, explainability_measures) as two parallel lists.
    """
    model_names = []
    measures = []
    for entry in cfg["models to use"]:
        model_names.append(next(iter(entry)))
        values = list(entry.values())
        if None in values:
            # keep the raw list when any value is missing
            measures.append(values)
        else:
            measures.append(values[0])
    return model_names, measures
bf663fec6b44045f0cd0ee92df80770e8896889c
27,076
def unique_proportion(_metrics):
    """Proportion of unique non-null values among all non-null values.

    Returns 0 when the table is empty or every value is null, so we never
    divide by zero.
    """
    total = _metrics.get("table.row_count")
    distinct = _metrics.get("column.distinct_values.count")
    nulls = _metrics.get("column_values.nonnull.unexpected_count")
    if total <= 0 or total == nulls:
        return 0
    return distinct / (total - nulls)
df44cc537623404d4de949ffc8bb5acf4fb62d48
27,077
def parsed_variant():
    """Return variant information for a parsed variant with minimal information.

    A static test fixture: a dict shaped like a minimally-populated parsed
    SNV variant (clinical type), with three samples and most optional
    fields set to None/empty.
    """
    variant = {'alternative': 'C',
               # per-caller results; none called this variant
               'callers': {'freebayes': None, 'gatk': None, 'samtools': None},
               'case_id': 'cust000-643594',
               'category': 'snv',
               'chromosome': '2',
               'clnsig': [],
               'compounds': [],
               'conservation': {'gerp': [], 'phast': [], 'phylop': []},
               'dbsnp_id': None,
               'end': 176968945,
               'filters': ['PASS'],
               'frequencies': {'exac': None, 'exac_max': None,
                               'thousand_g': None, 'thousand_g_left': None,
                               'thousand_g_max': None, 'thousand_g_right': None},
               'genes': [],
               'genetic_models': [],
               'hgnc_ids': [],
               'ids': {'display_name': '1_10_A_C_clinical',
                       'document_id': 'a1f1d2ac588dae7883f474d41cfb34b8',
                       'simple_id': '1_10_A_C',
                       'variant_id': 'e8e33544a4745f8f5a09c5dea3b0dbe4'},
               'length': 1,
               'local_obs_hom_old': None,
               'local_obs_old': None,
               'mate_id': None,
               'position': 176968944,
               'quality': 10.0,
               'rank_score': 0.0,
               'reference': 'A',
               # three family samples; -1 depths mark "not measured"
               'samples': [{'alt_depth': -1, 'display_name': 'NA12882',
                            'genotype_call': None, 'genotype_quality': None,
                            'individual_id': 'ADM1059A2', 'read_depth': None,
                            'ref_depth': -1},
                           {'alt_depth': -1, 'display_name': 'NA12877',
                            'genotype_call': None, 'genotype_quality': None,
                            'individual_id': 'ADM1059A1', 'read_depth': None,
                            'ref_depth': -1},
                           {'alt_depth': -1, 'display_name': 'NA12878',
                            'genotype_call': None, 'genotype_quality': None,
                            'individual_id': 'ADM1059A3', 'read_depth': None,
                            'ref_depth': -1}],
               'sub_category': 'snv',
               'variant_type': 'clinical'}
    return variant
17af597e88f3be6fa8772a82d0ac8a9243fcb5d4
27,078
def binval(b):
    """Return the value of the big-endian binary number *b* (a sequence of
    bits, most significant first). Intended for values in [0, 15].
    """
    value = 0
    for bit in b:
        value = value * 2 + bit
    return value
781e3861a3794de5a571eddb69e5f94b30daf747
27,079
import argparse


def get_config_from_args():
    """Reads the arguments and returns a config used by the rest of the script.

    Returns the argparse namespace with ``formats`` normalized into a
    lowercase list that always includes the input file's extension.
    """
    parser = argparse.ArgumentParser(description='Parse converter cli args')
    parser.add_argument('infile', help='The file containing the input json')
    parser.add_argument('-o', '--outfile', help='The name of the output file. If non is given, the input filen name will be used')
    parser.add_argument('-f', '--formats', help='Comma seperated list of formats to support (json, xml, bigquery). The input file format is always added')
    parser.add_argument('-e', '--embed', help='Embed sub structs in the parent struct. This means only one type will be created', action="store_true")
    parser.add_argument('-l', '--list', help='Allow a list to be the root of the data', action="store_true")
    parser.add_argument('-t', '--type_name', help='The name to give the type created', default=None)
    config = parser.parse_args()
    # Breaking up the format into separate strings
    if config.formats:
        # Converting everything to lowercase
        config.formats = config.formats.lower()
        config.formats = config.formats.split(',')
    else:
        config.formats = []
    # Adding extension as format (last dot-separated piece of the filename)
    ext = config.infile.split('.')[-1]
    config.formats.append(ext)
    return config
5f23f0c7b281ac72eebe82a6157c6d39e2791252
27,080
import math


def signed_distance_point_to_line(a, b, c, p):
    """Signed distance between a point ``p`` and the line ``ax + by + c = 0``.

    a, b, c: coefficients of the line $ax + by + c = 0$
        (the original docstring omitted the ``a`` term)
    p: the point, as an (x, y) pair

    The sign indicates which side of the line the point lies on.
    """
    d1 = (a*p[0] + b*p[1] + c)
    d2 = math.sqrt(math.pow(a, 2.) + math.pow(b, 2.))
    # abs(d1)/d2 would be the unsigned distance; the signed value is kept
    # on purpose (see the commented alternative below).
    #d = abs(d1)/d2
    d = d1/d2
    return d
f06b67e62b940e259963b1a092650936dc8e9fb0
27,081
import yaml


def validate_spackyaml(filename):
    """Ensure that a spack.yaml file has a spack or env directive.

    Returns True when the file parses as YAML and holds a top-level
    "spack" or "env" key, False on parse errors or when both are absent.
    """
    try:
        with open(filename, "r") as handle:
            content = yaml.load(handle, Loader=yaml.FullLoader)
    except yaml.YAMLError:
        return False
    return "env" in content or "spack" in content
c9bdf6de87d9945d106c2312087441f84584daef
27,082
def ResetStNum(N=0):
    """Reset the module-level state-numbering counter to *N*.

    Returns the counter's new value.
    """
    global NXTSTATENUM
    NXTSTATENUM = N  # side effect: rebinds the module-level counter
    return NXTSTATENUM
7b3c49221da9110c498bc0b1dc88d230e5d65018
27,083
import binascii


def bytes_str(val):
    """Convert bytes into a lowercase hex string.

    Returns:
        hex string (two hex digits per input byte)
    """
    assert isinstance(val, (bytes, bytearray))
    # bytes.hex() is equivalent to binascii.hexlify(val).decode('ascii')
    return val.hex()
dfe6d5192b639f656a56918f5ab570f8edd96dd7
27,084
def distinct(l):
    """Return a new list with duplicates removed, preserving first-seen order.

    Works for unhashable elements too (membership test is linear).
    """
    unique = []
    for item in l:
        if item in unique:
            continue
        unique.append(item)
    return unique
daa3735224c29ad6cf45ec39c6914949fc8f6125
27,085
import os
import subprocess
import re


def run_camb(params_fname, camb_exec_dir):
    """Run CAMB, using a given (pre-written) params file (see camb_params).

    Waits for CAMB to finish before returning. Returns a dictionary of
    derived values output by CAMB to stdout.
    """
    # Change directory and call CAMB
    cwd = os.getcwd()
    os.chdir(camb_exec_dir)
    params_path = cwd + "/paramfiles/" + params_fname
    print("Running CAMB on", params_path)
    output = subprocess.check_output(["./camb", params_path])
    # Capture on-screen output of derived parameters
    vals = {}
    print(output)
    # NOTE(review): check_output returns *bytes*; str(output) yields
    # "b'...\\n...'" with literal backslash-n escapes, so this split likely
    # produces a single element and most lines are never parsed — confirm
    # whether output.decode() was intended.
    for line in str(output).split("\n"):
        # Special cases: sigma8 and tau_recomb
        if "sigma8" in line:
            s8line = line[line.find('sigma8'):]  # Only get sigma8 part of string
            vals['sigma8'] = float( re.findall(r'\b\d+.\d+\b', s8line)[0] )
        elif "tau_recomb" in line:
            tau = re.findall(r'\b\d+.\d+\b', line)
            vals['tau_recomb/Mpc'] = float(tau[0])
            vals['tau_now/Mpc'] = float(tau[1])
        elif "z_EQ" in line:
            vals['z_EQ'] = float( re.findall(r'\b\d+.\d+\b', line)[0] )
        else:
            # All other params can just be stuffed into a dictionary
            try:
                key, val = line.split("=")
                vals[key.strip()] = float(val)
            except:
                pass
    # Change back to the original directory
    os.chdir(cwd)
    return vals
712d877f12071cb2ff5a8844bdea3b565b20e896
27,088
import argparse


def parseArgs():
    """Parse arguments from the command line.

    Uses the `argparse` package to establish all positional and optional
    arguments. Returns the parsed namespace.
    """
    parser = argparse.ArgumentParser(description='Find coincidence lattices within combinations of 2D crystals.', epilog= "If you find this script useful, please cite J. Phys. Chem. C, 2016, 120 (20), pp 10895-10908.")
    parser.add_argument('input_files', nargs='+', help="2D crystals description files")
    parser.add_argument('-o', '--output_file', default='CoincidenceLattices.dat', help="output file for combinations table (default: CoincidenceLattices.dat file)")
    parser.add_argument('-a', '--angles', type=float, nargs=2, default=[0.0, 30.0], help="interval of angles (in degrees) to be investigated (default: 0 to 30 deg)", metavar=('ANGLE_MIN', 'ANGLE_MAX'))
    parser.add_argument('-s', '--angles_step', type=float, default=0.1, help="step for the investigation of angles (default: 0.1)")
    parser.add_argument('-f', '--self_combinations', action='store_true', help="display combinations of the 2D crystals with themselves (default: False)")
    parser.add_argument('-N', type=int, default=7,metavar="Nmax", help="integer cutoff for the stopping criterion (default: 7)")
    parser.add_argument('-t', '--tolerance', type=float, default=0.02, help="maximum strain to be applied to one crystal to make the system commensurate (default: 0.02)")
    parser.add_argument('--angle_tolerance', type=float, default=0.05, help="tolerance for approximating angles when finding coincidence lattices (default: 0.05)")
    parser.add_argument('-n', '--n_atoms', type=int, default=100, help="maximum number of atoms inside the supercell (default: 100 atoms)")
    parser.add_argument('-l', '--label_size', type=int, default=20, help="spacing of the label in the first column of the output file (default: 20 chars)")
    parser.add_argument('-q', '--quiet', action='store_true', help="do not display text on the output window (default: False)")
    parser.add_argument('-r', '--first', action='store_true', help="investigate only combinations with the first crystal (default: False)")
    return parser.parse_args()
6c466a1bfdf7f6fef264ea8ce15a10196a3f854c
27,089
def get_gap_segments(segments):
    """Return the gaps between consecutive segments.

    {(start1, end1), (start2, end2), (start3, end3)}
        -> {(end1, start2), (end2, start3)}
    """
    ordered = sorted((list(seg) for seg in segments), key=lambda seg: seg[0])
    return {(left[1], right[0]) for left, right in zip(ordered, ordered[1:])}
75391dcc2ba5fa8b65129f358273d31bd543902f
27,090
def pull_key(key_fun):
    """Return a function that indexes a sequence of objects by key_fun.

    pull_key(key_fun)(objs) :: Hashable K => (X -> K) -> Seq[X] -> {K : X}
    Equivalent to: ``{key_fun(v): v for v in objs}``.

    >>> from operator import itemgetter
    >>> objs = [{'id': 1, 'name': 'Fred'}, {'id': 3, 'name': 'Wilma'}]
    >>> sorted(pull_key(itemgetter('id'))(objs).keys())
    [1, 3]
    """
    def pull_key_fun(objs):
        return {key_fun(obj): obj for obj in objs}
    return pull_key_fun
3b2288ec0eee6f164896d7b97a31aaec9cb52422
27,091
import re


def word_tokenizer(sentence):
    """Split a sentence into a list of lowercase words, discarding
    punctuation and collapsing whitespace along the way.

    Parameters
    ----------
    sentence : str

    Returns
    -------
    words : list of str
    """
    normalized = re.sub(r'\W+|\s+', ' ', sentence)
    return normalized.lower().strip(' ').split(' ')
835e78d4a4a35beb54e232a1d44a67332b3c267a
27,093
def _hexify(num): """ Converts and formats to hexadecimal """ num = "%x" % num if len(num) % 2: num = '0'+num return num.decode('hex')
71fabff1191f670ec503c76a3be916636e8045ce
27,094
def flatten(list_of_lists: list) -> list:
    """Given a list of lists, flatten all the items into one list.

    >>> flatten([ [1, 2, 3], [4, 5, 6]])
    [1, 2, 3, 4, 5, 6]
    """
    flat: list = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
093d3bfbb90c043414fdfb18da79334e8910b150
27,095
def timeseries_train_test_split(df, test_size=0.2, gap=0, ascending=True):
    """Split a DataFrame index into ordered train/test subsets for timeseries.

    Parameters
    ----------
    df : pd.DataFrame, shape (n_samples, n_features)
        Training data.
    test_size : float, optional (default=0.2)
        Proportion of the dataset to include in the test split (0.0-1.0).
    gap : int, default=0
        Absolute number of samples dropped between training and test sets.
    ascending : boolean, default=True, optional
        Whether the timeseries index is ascending.

    Returns
    -------
    t : list, length=2
        [train_index_labels, test_index_labels].

    Raises
    ------
    ValueError
        If gap is negative. (Fix: was an ``assert``, which is stripped
        under ``python -O`` and so unsafe for input validation.)
    """
    if gap < 0:
        raise ValueError("`gap` must be greater than or equal to 0.")
    t = df.index.tolist()
    if not ascending:
        t.reverse()
    train_len = round(len(t) * (1 - test_size)) - gap
    return [t[0:train_len], t[train_len + gap:]]
8a11f2b0ca1c60dc7c9a92419a6f5a2ab8994815
27,097
import random


def pad_password(password: str):
    """Pad a 6-character password to the 10-character wire format.

    The protocol sends 10 characters where the last 4 are insignificant:
    the password is followed by "!\\x03" and two random alphanumerics.
    """
    # NOTE: 'w' and 'W' are absent from this alphabet in the original —
    # reproduced faithfully.
    possible_chars = "abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVXYZ0123456789"
    filler = "".join(random.choices(possible_chars, k=2))
    return password + "!\x03" + filler
09e62c59cc227198df5d9006d8f8478afbe16f28
27,099
import yaml


def _get_definitions(definition_file):
    """Parse the algorithm-definitions YAML file (e.g. "algos.yaml").

    Returns the deserialized document.
    """
    with open(definition_file, "r") as handle:
        return yaml.load(handle, yaml.SafeLoader)
5f91ffb115848ac96c4f568e8a87189fb4a1d095
27,100
def list_to_string(l):
    """Converts a string list in to a string.

    @param l is the string list
    @returns a concatenation of each element in the list

    Fix: repeated ``+=`` on a string is quadratic; ``str.join`` does the
    same concatenation in linear time.
    """
    return "".join(l)
ef30e847703f56453c79a2c51ba61497f26b4963
27,101
def is_aware( time_point):
    """Return whether ``time_point`` is timezone-aware.

    Aware means tzinfo is set *and* utcoffset() yields a value, per the
    datetime documentation's definition.
    """
    if time_point.tzinfo is None:
        return False
    return time_point.utcoffset() is not None
9169af11d30bb1fca2b4dbc7708dfee7e44e858f
27,104
def string(params):
    """Define a string parameter to be used within this YAML file.

    Builds the one-entry mapping {params.name: params.value}.
    """
    key = params.name
    return {key: params.value}
85a6de38c8bbe847fe0776a3ac868843b286ec7e
27,105
def mrr_roundtrip_phase_to_tr_fused(rt_phi, a: float = 0.8, r: float = 0.9, intensity: bool = False): """ description: round trip phase shift to field transmission rt_phi {torch.Tensor or np.ndarray} abs of roundtrip phase shift (abs(phase lag)). range from abs([-pi, 0])=[0, pi]\\ a {scalar} attenuation coefficient\\ r {scalar} self-coupling coefficient\\ intensity {bool scalar} whether output intensity tranmission or field transmission\\ return t {torch.Tensor or np.ndarray} mrr through port field/intensity transmission """ # use slow but accurate mode from theoretical equation # create e^(-j phi) first # angle = -rt_phi # ephi = torch.view_as_complex(torch.stack([angle.cos(), angle.sin()], dim=-1)) ## this sign is from the negativity of phase lag # a_ephi = -a * ephi # t = torch.view_as_real((r + a_ephi).div(1 + r * a_ephi)) # if(intensity): # t = get_complex_energy(t) # else: # t = get_complex_magnitude(t) ra_cosphi_by_n2 = -2 * r * a * rt_phi.cos() t = (a * a + r * r + ra_cosphi_by_n2) / (1 + r * r * a * a + ra_cosphi_by_n2) if not intensity: # as long as a is not equal to r, t cannot be 0. t = t.sqrt() return t
9c97c1598dab9ad4fe4eb307a2c0a277bc261b2e
27,106
def interpolate2d(x, y, X, Y, Z):
    """Placeholder for a 2-D interpolation routine that is no longer needed.

    Kept for future implementation; always returns the empty string.
    """
    return ""
c077685e17c7baeb7826e963d65d926707eef8f4
27,107
import logging
from pathlib import Path


def get_filelist(paths: list[str], recursive: bool = False) -> list[str]:
    """Get a list of files (but not directories) from a path.

    Args:
        paths (list[str]): The path(s) to search for files.
        recursive (bool, optional): Whether to recursively search the path.
            Defaults to False.

    Returns:
        list[str]: A list of all filenames, given as absolute paths.
            NOTE(review): the elements are actually ``pathlib.Path``
            objects (and relative when *paths* is relative), not str as
            annotated — confirm which contract callers rely on before
            changing either.
    """
    logger = logging.getLogger("checkr")
    filelist = []
    for path in paths:
        dir = Path(path)
        if not dir.exists():
            logger.error(f"The directory '{dir}' does not exist.")
        elif not dir.is_dir():
            logger.error(f"'{dir}' is not a directory.")
        else:
            # rglob walks subdirectories; glob matches one level only
            results = dir.rglob("*") if recursive else dir.glob("*")
            filelist.extend([x for x in results if x.is_file()])
    return filelist
9b73c1d0845385279a90e860a7666aea2342912c
27,109
import re


def size2integer(value):
    """Try to convert a string representing a size to an integer value
    in bytes.

    Supported formats:

    * K|k for KB
    * M|m for MB
    * G|g for GB

    :param value: the string to convert
    :return: the corresponding integer value (0 when unparseable)

    Fixes: regexes are now raw strings (the unescaped ``"\\d"`` forms emit
    DeprecationWarning on modern Python); the repeated unit branches are
    folded into one lookup table.
    """
    multipliers = {"K": 2 ** 10, "M": 2 ** 20, "G": 2 ** 30}
    m = re.match(r"(\d+)\s*(\w+)", value)
    if m is None:
        # bare number with no unit suffix
        if re.match(r"\d+", value):
            return int(value)
        return 0
    unit = m.group(2)[0].upper()
    return int(m.group(1)) * multipliers.get(unit, 0)
f7771bd9fd8904d03a7ab75da5939e01d4a126d7
27,110
def find_domain_zone_matches(domains, zones):
    """Find matches between Amazon SES verified domains and Route 53 hosted
    zones. Subdomain matches are taken when found, otherwise root domain
    matches are taken.

    :param domains: The list of domains to match.
    :param zones: The list of hosted zones to match.
    :return: dict mapping each domain to its matched zone, or None when no
             zone matches.
    """
    matches = {domain: None for domain in domains}
    for domain in domains:
        labels = domain.split('.')
        # Most specific sub-domain first, walking up toward the root domain.
        for start in range(len(labels) - 1):
            candidate = '.'.join(labels[start:])
            # Route 53 zone names carry a trailing '.'; strip it to compare.
            zone = next((z for z in zones if z['Name'][:-1] == candidate), None)
            if zone is not None:
                matches[domain] = zone
                break
    return matches
d6a0ed7bd974c411aeb4d4571a23565b7c21ca59
27,111
def format_seconds(s):
    """Format a seconds value into a human-readable form 'Yy, Dd, Hh, Mm, Ss'.

    Fix: the local ``min`` shadowed the builtin of the same name; renamed
    to ``minutes``. Output is unchanged.
    """
    years, s = divmod(s, 31556952)  # 31556952 s = mean Gregorian year
    minutes, s = divmod(s, 60)
    h, minutes = divmod(minutes, 60)
    d, h = divmod(h, 24)
    return '%sy, %sd, %sh, %sm, %ss' % (years, d, h, minutes, s)
f004d9f2cef8b3b9eee967ebd9d4811cbc80ae6c
27,113
import os


def get_filename(file_dir):
    """Return the path of the single file inside *file_dir*.

    :param file_dir: directory expected to contain exactly one entry
    :return: joined path to that entry; exits the process (after printing
             diagnostics) when the directory does not hold exactly one.

    Fix: the count check used ``is not 1`` — an identity comparison on an
    int, which is implementation-dependent and raises SyntaxWarning on
    modern Python; replaced with ``!= 1``.
    """
    entries = os.listdir(file_dir)
    if len(entries) != 1:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('There are {} files in [{}]'.format(len(entries), file_dir))
        print(entries)
        exit()
    return os.path.join(file_dir, entries[-1])
6cb164ebba19bf56d39b2ac010d9c2b5dde71eda
27,114
def question1(records):
    """quantité d'ammoniac (en t/an) a été émise en Alsace en 2004

    Scans *records* for the Alsace/ammonia annual-emission row and returns
    its 2004 measure; raises ValueError when absent.
    """
    COL_2004 = 4  # hard-coded column index for year 2004
    for record in records:
        matches = (record.region == "ALSACE"
                   and record.substance == "Ammoniac"
                   and record.metric == "Quantités annuelles émises"
                   and record.unit == "t/an")
        if matches:
            return record.measures[COL_2004]
    # Reaching here means nothing matched — fail loudly.
    raise ValueError("Not found")
c6d83a1e12a17e3490af32dad5c4480fb58029e2
27,115
def sanitize_string(input_string):
    """Remove every double-quote character from *input_string* and return
    the result.
    """
    return input_string.translate(str.maketrans('', '', '"'))
596bd7bf4810ce9ef2b96c6d6fead947bd4e22ec
27,116
def z_vprod(a, b):
    """Z component of the cross product of two 2-D vector fields.

    Both inputs are arrays of shape (m, n, 2), last axis = (x, y).
    """
    ax, ay = a[:, :, 0], a[:, :, 1]
    bx, by = b[:, :, 0], b[:, :, 1]
    return ax * by - ay * bx
682aea5d364bf2deb8424ae241f916c257f10bf3
27,118
import glob
import os


def glob_directory(base):
    """Return a list of files matching *base*.

    Parameters
    ----------
    base: str
        string you wish to match e.g. "./", "./*.py"; a bare directory
        (no '*') is expanded to match everything inside it.
    """
    pattern = base if "*" in base else os.path.join(base, "*")
    return glob.glob(pattern)
1f70b7e5baed434f4a64d3b2fd7c96667455eb73
27,119
def checkDict(d):
    """Check a dict recursively for non serializable types.

    Returns 1 when every (nested) value is of an allowed serializable
    type, 0 otherwise.

    Fix: the recursive call's result was discarded, so a non-serializable
    value buried inside a nested dict was never reported.
    """
    allowed = [str, int, float, list, tuple, bool]
    for v in d.values():
        if isinstance(v, dict):
            if not checkDict(v):
                return 0
        elif type(v) not in allowed:
            return 0
    return 1
0bf3bdaf79d939f79d0b967aa6e26098b7fbd50f
27,121
def parse_distro_info_path(path):
    """Break a distro_info path into its file name, repo part, and a flag
    telling whether the repo is remote (http/https URL).
    """
    pieces = path.strip().rsplit("/", 1)
    info_repo, info_file = pieces[0], pieces[1]
    return info_file, info_repo, info_repo.startswith("http")
44f221485e04b6e3203ac3863a10d06fcbbe1555
27,123
import math


def volatilization_rate(k, mass, dz, dt):
    """Volatilization mass-transfer rate (kg/s).

    Source: "CHEMMAP technical User's manual 6.10", 2014.

    params
    ------
    k   : volatilization coefficient [m/s]
    mass: mass of the pollutant [kg]
    dz  : vertical diffusivity [m²/s]
    dt  : length of a timestep [s]
    """
    denominator = math.sqrt(2 * dz * dt)
    return k * mass / denominator
69ebb196ecb9a860adcb46b5775e90a85c2ca06c
27,125
def extractFeatures(featureExtractors, pacmanData):
    """Run all of the feature extractors on the game state training data.

    Returns a dict mapping feature name -> list of values, one value per
    game state (in input order).
    """
    features = {}
    for game_state in pacmanData:
        for extractor in featureExtractors:
            for name, value in extractor.extract(game_state).items():
                # first sighting of a feature creates its value list
                features.setdefault(name, []).append(value)
    return features
252074b750833b8ba71783fc813cf4aed72c42e7
27,127
def on_demand_resource_renderer(request, registry, settings):
    """Active instance of
    :py:class:`websauna.system.core.render.OnDemandResourceRenderer`
    managing dynamic CSS and JS. May be None.
    """
    return getattr(request, "on_demand_resource_renderer", None)
f74701a325458ddbdb0dc4927ebf5bc186c41bb7
27,128
def _FlattenToScoresList(config_param_score_dict): """Extracts a list of scores from input data structure. Args: config_param_score_dict: of the form {'capture_name': {'param_name' : score_value,.. } ..} Returns: Plain list of all score value present in input data structure """ result = [] for capture_name in config_param_score_dict: result += list(config_param_score_dict[capture_name].values()) return result
188e784f59711bbe46f89c4c8171c3a0551a0cf3
27,129
def absoluteDeg(deg: float = 0):
    """Normalize an angle in degrees into the range [0, 360).

    Returns: float degrees
    """
    normalized = deg % 360
    return normalized
f5519977f1e7f6e5e07d79677e29d4c29d669abf
27,130
def filter_dataset(dataset, filter_indices):
    """Filter a dataset (dict of 2-D arrays, one per variable) down to the
    rows selected by *filter_indices* (boolean mask or index vector).

    A new dict with filtered copies is returned; the original dataset is
    left untouched.
    """
    return {key: values[filter_indices, :] for key, values in dataset.items()}
256878262c879f4b914693a7333236539f062bc6
27,131
from typing import Mapping


def check_dict_nested_attrs(item: Mapping, dict_data: Mapping) -> bool:
    """Check that every value in `dict_data` is contained in `item`,
    descending recursively into nested mappings.

    >>> d = {'a': 1, 'b': {'c': 2}}
    >>> check_dict_nested_attrs(d, {'a': 1})
    True
    >>> check_dict_nested_attrs(d, {'b': {'c': 2}})
    True
    >>> check_dict_nested_attrs(d, {'d': []})
    False
    """
    for key, expected in dict_data.items():
        if key not in item:
            return False
        current = item[key]
        if isinstance(current, Mapping):
            # descend into the nested mapping
            if not check_dict_nested_attrs(current, expected):
                return False
        elif current != expected:
            return False
    return True
08ed8dbc405e236b95e33e10e9c342e15b6363c9
27,132
def calculateEncryptionKey(subjectNumber: int, numLoops: int) -> int:
    """Calculates the encryption key: subjectNumber**numLoops mod 20201227.

    Fix: replaced the O(numLoops) multiply loop with the builtin 3-argument
    ``pow``, which performs modular exponentiation in O(log numLoops).
    """
    if numLoops <= 0:
        # the original loop body never ran, leaving the accumulator at 1
        return 1
    return pow(subjectNumber, numLoops, 20201227)
81e5259bb7c69c32e9dba62197f54d2f93a412a8
27,133
import itertools


def is_almost_simplicial(G, n):
    """Determines whether a node n in G is almost simplicial.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to check whether node n is almost simplicial.
    n : node
        A node in graph G.

    Returns
    -------
    is_almost_simplicial : bool
        True if all but one of its neighbors induce a clique.
    """
    neighbors = G[n]
    for excluded in neighbors:
        # does dropping `excluded` leave the remaining neighbors a clique?
        pairs = itertools.combinations(neighbors, 2)
        if all(u in G[v]
               for u, v in pairs
               if u != excluded and v != excluded):
            return True
    return False
a1ffdb2b23e6f0d49cb6e65220523eb0553b1a74
27,135
def correct_name(row):
    """Correct known misspelled names from the data.

    Mutates *row* in place (when a known (last, first) pair is found) and
    returns it.
    """
    # (last_name, first_name) -> field corrections to apply
    corrections = {
        ("Digiorgio", "Kevin"): {"last_name": "DiGiorgio"},
        ("Huynk", "Nho"): {"last_name": "Huynh"},
        ("Lacouture", "Deidre"): {"last_name": "LaCouture"},
        ("Nguyen", "My"): {"first_name": "My Thanh"},
        ("Winters", "Luz"): {"first_name": "Luz Maria"},
        ("Small", "Phil"): {"first_name": "Philip"},
        ("Beliveau", "Michael J"): {"first_name": "Michael", "middle_initial": "J"},
    }
    key = (row["last_name"], row["first_name"])
    row.update(corrections.get(key, {}))
    return row
0de867c8877f66024a13b5d14447835c37a0802c
27,136
def wang_ind(x, a, b):
    """Wang method: indicator function of the closed interval [a, b].

    Works elementwise on array inputs as well as on scalars; the product
    of the two comparisons yields 1 inside the interval and 0 outside.
    """
    return (a <= x) * (x <= b)
ca5ce95485ae2ab3082c696efe3c052137fa47a4
27,137
def showBoard(players):
    """Yield each player's name.

    NOTE(review): despite the original docstring ("Returns a string
    representing the current game"), this returns a *generator* over
    ``player.name`` values, not a string — confirm whether callers join
    it themselves or whether a ``', '.join(...)`` was intended here.
    """
    return (player.name for player in players)
ba15705126e845f9e7c324da7e910d3d7c51225b
27,138
def compute_abundances(sam_transcripts, dataset):
    """Iterates over sam transcripts and records the transcript assignment
    of each in a dictionary. This dictionary is then converted to a list of
    tuples (transcript_id, dataset, count) that can later be added to the
    TALON database.

    Fix: the count was accumulated via a bare ``except:`` around a
    KeyError, which also swallows unrelated errors; replaced with
    ``dict.get``.
    """
    counts = {}
    for sam_transcript in sam_transcripts:
        transcript_id = sam_transcript.transcript_ID
        counts[transcript_id] = counts.get(transcript_id, 0) + 1
    return [(tid, dataset, count) for tid, count in counts.items()]
5b4948471daad3a61bbd8662ef87143699cfcdc4
27,140
import csv


def create_csv_table(table_path, rows_list):
    """Create csv file from list of lists.

    Args:
        table_path (str): file path to table.
        rows_list (list): nested list of elements to write to table.

    Returns:
        table_path (str): filepath to table.

    Fix: the file is now opened with ``newline=''`` as the csv module
    requires — without it every row is followed by a blank line on
    Windows.
    """
    with open(table_path, 'w', newline='') as f:
        csv.writer(f).writerows(rows_list)
    return table_path
f46f1295c408925adac0b2e97e63200036ffd50f
27,141
def processReducedResult(key_value_tuple):
    """Split a reduced (key, value) pair into a flat field list.

    input:  (TW-100917-125717-1537514430, 4.03)
    output: [TW, 100917, 125717, 1537514430, 4.03]
    """
    key, value = key_value_tuple
    fields = key.split("-")
    fields.append(str(value))
    return fields
22ad7e6f5381ccf983ad159f190a7e1686ab2558
27,142
def projective(nodes):
    """Return True when the dependency tree is projective (no crossing
    arcs), False otherwise.

    Each node must expose ``id`` and ``parent`` attributes convertible to
    int; an arc spans the interval between a node and its parent.
    """
    for outer in nodes:
        lo1, hi1 = sorted([int(outer.id), int(outer.parent)])
        for inner in nodes:
            if outer.id == inner.id:
                continue
            lo2, hi2 = sorted([int(inner.id), int(inner.parent)])
            # inner arc starts strictly inside outer's span but ends
            # strictly outside it -> the two arcs cross
            if lo1 < lo2 < hi1 and hi2 > hi1:
                return False
    return True
04369eaad4564ef8eaee5b083b8c685b40ed8be1
27,143
import json


def load_srl(input_file):
    """Loads cached SRL predictions for an input file.

    Dataset (ACE/ERE) and split are inferred from the input path; the
    cached verb- and nominal-SRL jsonl files are read from
    ``data/SRL_output/<dataset>/``.

    Returns:
        (verb_srl_dict, nom_srl_dict): each maps sent_id to a dict with
        the sentence "words" plus "verbs" or "nominals" respectively.

    Raises:
        ValueError: when the path names neither ACE nor ERE.
    """
    verb_srl_dict, nom_srl_dict = {}, {}
    if "ACE" in input_file:
        dataset = "ACE"
    elif "ERE" in input_file:
        dataset = "ERE"
    else:
        raise ValueError("Unknown dataset")
    # split name = file basename without extension, e.g. ".../train.json" -> "train"
    split = input_file.split('/')[-1].split('.')[0]
    for type in ['verb', 'nom']:
        path = f"data/SRL_output/{dataset}/{type}SRL_{split}.jsonl"
        with open(path, 'r') as fr:
            for line in fr:
                srl_res = json.loads(line)
                sent_id = srl_res["sent_id"]
                if type == 'nom':
                    nom_srl_dict[sent_id] = {"nominals": srl_res["nominals"], "words": srl_res["words"] }
                if type == 'verb':
                    verb_srl_dict[sent_id] = {"verbs": srl_res["verbs"], "words": srl_res["words"] }
    return verb_srl_dict, nom_srl_dict
e391c9178fe10defc9f0de58861fba9ab3f2ffb7
27,144
def DBC_to_SBC(text):
    """Escape half-width pipe / half-width space to their full-width
    counterparts (full-width pipe / full-width space).

    Runs of the source character collapse into a single replacement,
    because empty split pieces are dropped before joining.
    """
    replacements = (
        (" ", "\u3000"),   # full-width space
        ("|", "\uFF5C"),   # full-width vertical bar
    )
    for half, full in replacements:
        pieces = [piece for piece in text.split(half) if piece != '']
        text = full.join(pieces)
    return text
3cb1b9abc0e9d805a9c6bf5002622f13174f5460
27,147
def respond_alert(**kwargs):
    """Returns a response to `alert`.

    'list' shows the currently configured alerts for this channel; a
    number registers a new alert that many hours before a deadline.
    """
    arguments = kwargs['arguments']
    bot = kwargs['bot']
    message = kwargs['message']
    if arguments[0] == 'list':
        active = [f'T-{h}h' for h, c in bot.alarms if c == message.channel.id]
        return "I'm alerting at: " + ', '.join(active)
    hours = int(arguments[0])
    if bot.add_alert(hours, message.channel.id):
        return f'OK, I will alert {hours} hours before a deadline.'
    return f"I'm already alerting {hours} hours before a deadline!"
7c1fddbe24459a19cc7929d49c8feca1613348ca
27,149
import os


def reader(path, encoding="utf-8"):
    """Read a text file and return its lines with surrounding whitespace
    (including line breaks) stripped; None when the path does not exist.
    """
    if not os.path.exists(path):
        print("path does not exists!")
        return None
    with open(path, "r", encoding=encoding) as handle:
        return [line.strip() for line in handle]
1b251b4aadd6a915dd2fb307363110b721e12605
27,150
def _split_csv(string): """Split string into a list, excluding empty strings""" if string is None: return [] return [n.strip() for n in string.split(',') if n]
de6b21c340ec4c24462f3120f3486e974feafb9d
27,151
import random


def selectNRandom(nodes, N):
    """Select N random nodes from a list of nodes and return them.

    Note: shuffles *nodes* in place, so the caller's list order changes —
    same side effect as the original implementation.
    """
    random.shuffle(nodes)
    selection = nodes[:N]
    return selection
4d4a330136a8b56b4ee1943db318df13a3be3c3f
27,152
def socketio_handler(event, namespace=None):
    """Register a socketio handler via decorator."""
    def wrapper(func):
        """Attach ws event metadata to the handler and return it."""
        # pylint: disable=protected-access
        func._ws_event = event
        func._ws_namespace = namespace
        return func
    return wrapper
bd268140c1d09da0f5ff8e2e1d648909fdeef577
27,154
import sys


def is_macos():
    """Return True if the environment is macOS (sys.platform == "darwin").

    (The original docstring incorrectly said "Microsoft Windows".)
    """
    return sys.platform == "darwin"
fa843df33351b2feae111064ede5b713760dd400
27,155
def padding(msg: str, num_bytes: int) -> bytes:
    """UTF-8 encode *msg* and fit it to exactly *num_bytes*.

    Longer messages are truncated (at the byte level); shorter ones are
    right-padded with NUL bytes.

    :param msg: the message to encode
    :param num_bytes: target byte count
    :return: the encoded bytes object of length num_bytes
    """
    encoded = msg.encode('utf-8')[:num_bytes]
    return encoded.ljust(num_bytes, b'\x00')
b89319e794a2c65db57f3e56f1afea72cd34a45b
27,156
def globify_time_descriptors(path):
    """Convert strftime time descriptors to a glob-compatible expression.

    For example, '%Y' becomes '[12][0-9][0-9][0-9]' so the resulting glob
    matches path names containing a four digit year.
    """
    substitutions = (
        ('%Y', '[12][0-9][0-9][0-9]'),
        ('%y', '[0-9][0-9]'),
        ('%m', '[01][0-9]'),
        ('%d', '[0-3][0-9]'),
        ('%H', '[0-2][0-9]'),
        ('%M', '[0-5][0-9]'),
        ('%S', '[0-5][0-9]'),
    )
    for descriptor, glob_expr in substitutions:
        path = path.replace(descriptor, glob_expr)
    return path
b28d47c903742def7bb9517102f7df3b989fb1ae
27,157
def MakeTuple(object_):
    """Return *object_* as a tuple: unchanged when it already is one,
    otherwise wrapped in a 1-tuple.

    @param: Any object or tuple The object to tupleIZE
    """
    return object_ if isinstance(object_, tuple) else (object_,)
119e17785bcf5c7b8dae48c422d12dc501c32d57
27,159
def reorder_exons(exon_ids):
    """Reorder exons if they were out of order.

    Parameters:
        exon_ids (list of str): exon IDs shaped
            'chrom_coord1_coord2_strand_exon'

    Returns:
        exons (list of str): same IDs ordered by genomic start coordinate,
        reversed for the minus strand.
    """
    strand = exon_ids[0].split('_')[-2]
    ordered = sorted(exon_ids, key=lambda eid: int(eid.split('_')[-4]))
    if strand == '-':
        ordered.reverse()
    return ordered
d3f52a24d4da1a05a1deceaf38927622c141a9ad
27,161
def parseAddress(address):
    """Resolve the IP address of the device.

    :param address: bytes object whose first four bytes are the IPv4
        address octets
    :return: dotted-quad string, e.g. '192.168.0.1'
    """
    octets = [str(address[i]) for i in range(4)]
    return ".".join(octets)
ec7d2008e5083e495e73175704036c0259fae3f7
27,163
import re


def clean_stmt(stmt):
    """Strip Superset-generated schema qualifiers from a SQL statement.

    Apache Superset tests a specified schema, table & field definition with
    a SQL query of the form "SQL_<SCHEMA>"."<TABLE>", which Apache Ignite
    cannot parse.  Remove the '"SQL_<SCHEMA>".' token and the quotation
    marks Superset places around identifiers.

    Bug fix: when '"SQL_' was present but the pattern did not match, the
    original `else: stmt` branch fell through without a return statement
    and yielded None; the statement is now returned unchanged.

    :param stmt: user/Superset supplied SQL statement
    :return: statement safe for Apache Ignite
    """
    if "\"SQL_" in stmt:
        m = re.search(r"(.*)\"SQL_([A-Z0-9_]+)\"(.*)", stmt)
        if m:
            # group(2) is the schema name; drop the whole qualifier token.
            schema_token = "\"SQL_" + m.group(2) + "\"."
            stmt = stmt.replace(schema_token, "")
            # Quoted identifiers must also be unquoted for Ignite.
            stmt = stmt.replace("\"", "")
    return stmt
b04378b5d7f16aea7c11ba8023593ac2337bd397
27,164
def get_reaction_from_name(reactions, name):
    """Find the unique reaction whose ``name`` attribute equals *name*.

    Args:
        reactions (list(reactions)): the reactions.
        name (str): name of the desired reaction.

    Returns:
        reaction: the corresponding reaction.

    Raises:
        UserWarning: when no reaction, or more than one, matches.
    """
    hits = [rxn for rxn in reactions if rxn.name == name]
    if not hits:
        raise UserWarning('WARNING: could not find reaction: ' + name)
    if len(hits) > 1:
        raise UserWarning('ERROR: duplicate reactions in model.')
    return hits[0]
d27da8428d7a9f8e07ede7cf9e4574d2c012e480
27,166
import os


def get_latest_trained_model_path(results_dir="./Results/roses"):
    """Locate the latest run directory and return its standard output paths.

    :param results_dir: directory containing one sub-directory per run;
        defaults to the historical hard-coded location so existing callers
        are unaffected.
    :return: (tensorboard_path, generated_images_path, saved_models_path)
        of the latest run.
    """
    # Run directory names sort lexicographically, so max() picks the
    # latest without sorting the whole listing.
    latest_run_dir = max(os.listdir(results_dir))
    run_path = os.path.join(results_dir, latest_run_dir)
    saved_models_path = os.path.join(run_path, "Saved_Models")
    generated_images_path = os.path.join(run_path, "Generated_Images")
    tensorboard_path = os.path.join(run_path, "Tensorboard")
    return tensorboard_path, generated_images_path, saved_models_path
639a12e169f74bac305f9cdf00655b457cafbced
27,167
def extract_sub_ids(file, naive=False, label_of_interest=0, threshold=70.0):
    """
    Extract sub_ids from dataset_x.txt files of the following format:

    SubjectID,Network,Station,Latitude,Longitude,PercentageEarthquakes,
    PercentageTremor,PercentageNoise,PercentageUnclearEvent

    Args:
        file (path): Path to file containing the dataset info
        naive (bool): If True, apply naive selection based on threshold
            (no reliability score)
        label_of_interest (int): Only used when *naive* is True
        threshold (float): Minimum percentage for naive selection
            (default 70.0, matching the previously hard-coded value)

    Returns (list): List containing extracted subject ids
    """
    # Map labels to field indices according to the headers above.
    offset_dict = {0: 5, 1: 7, 2: 6, 3: 7}
    label_index = offset_dict[label_of_interest]
    sub_ids = []
    with open(file, 'r') as f:
        for index, line in enumerate(f.readlines()):
            if index <= 2:
                # The first three lines are headers.
                continue
            info = line.split(",")
            # Non-naive mode keeps every subject; naive mode filters on the
            # percentage column for the requested label.
            if not naive or float(info[label_index]) > threshold:
                sub_ids.append(int(info[0]))
    return sub_ids
a51daa18ec2c1799dc9e07e59008088290977bb3
27,168
def format_api_output(response):
    """Flatten the first item of an API JSON response into rep/badge counts."""
    item = response["items"][0]
    badges = item["badge_counts"]
    result = {"rep": item["reputation"]}
    for metal in ("gold", "silver", "bronze"):
        result[metal] = badges[metal]
    return result
1f914f8f16ac7c623ae554ac567399037752575b
27,169
def _call_string(*args, **kwargs): """Return format of args and kwargs as viewed when typed out""" arg_strings = [] for arg in args: arg_strings.append(repr(arg)) for key, arg in kwargs.items(): arg_strings.append(key+'='+repr(arg)) return "(%s)" % ', '.join(arg_strings)
ab1dbfd03dc860742eb5555505a942c3665e2de6
27,171
def get_partial_link_text_from_selector(selector):
    """
    A basic method to get the partial link text from a partial link selector.

    Strips the first recognized prefix; unknown selectors are returned
    unchanged.  The six-way elif chain is collapsed into a data-driven
    loop (no prefix here is a prefix of another, so order is irrelevant,
    but the original order is kept).
    """
    prefixes = (
        "partial_link=",
        "partial_link_text=",
        "partial_text=",
        "p_link=",
        "p_link_text=",
        "p_text=",
    )
    for prefix in prefixes:
        if selector.startswith(prefix):
            return selector[len(prefix):]
    return selector
8dbd842af13ce11c47d467c929da93f5f86e178c
27,172
def longest_positive_sequence(the_list):
    """Return the length of the longest run of consecutive positive numbers.

    If it returns 3, then there exist 3 consecutive positive numbers.

    @param the_list: a list of numbers (may be empty)
    @complexity: O(N) time and O(1) auxiliary space, where N is the
        length of the list (the original kept an O(N) memo table)
    @return: longest number of consecutive positive numbers (0 if none)

    Fixes: removes leftover debug print statements, avoids IndexError on
    an empty list, and replaces the memo list with a running counter.
    """
    longest_length = 0
    current_run = 0
    for value in the_list:
        # Extend the current run on a positive value, otherwise reset it.
        current_run = current_run + 1 if value > 0 else 0
        if current_run > longest_length:
            longest_length = current_run
    return longest_length
734c55f111fe21491e45710f32c873e749dba74b
27,173
def add_flist(
        flist,
        progress_json,
        scenario,
        stage='train',
        file_type='wav',
        channel_type='observed',
        channel='CH1'
):
    """
    Adds a file list to the current progress_json object (in place).

    Structure created::

    ....<flists>
    ......<file_type> (e.g. wav)
    ........<scenario> (e.g. tr05_simu, tr05_real, ...)
    ..........<utterance_id>
    ............<observed>
    ..............<A>

    :param flist: A dictionary acting as a file list (utt_id -> entry)
    :param progress_json: The current json object; ``progress_json[stage]``
        must already exist
    :param scenario: Name for the file list
    :param stage: [train, dev, test]
    :param file_type: Type of the referenced files, e.g. wav, mfcc, ...
    :param channel_type: Channel grouping key, e.g. 'observed'
    :param channel: Channel key, e.g. 'CH1'
    :return: None (progress_json is mutated)
    """
    # dict.setdefault replaces the original try/except _get_next_dict helper.
    scenario_dict = (
        progress_json[stage]
        .setdefault('flists', {})
        .setdefault(file_type, {})
        .setdefault(scenario, {})
    )
    for utt_id, entry in flist.items():
        channel_type_dict = scenario_dict.setdefault(utt_id, {}) \
                                         .setdefault(channel_type, {})
        channel_type_dict[channel] = entry
70261d15a63c5aec46d9aceff949102c41f61e5a
27,175
from sys import _getframe
from pathlib import Path


def relpath(file):
    """Resolve *file* relative to the caller's source file.

    :param file: The wanted-to-get file location
    :return: An absolute path to the file requested
    """
    # Frame depth 1 is the direct caller; its code object carries the
    # filename this path should be resolved against.
    caller_file = Path(_getframe(1).f_code.co_filename)
    return str(caller_file.parent.joinpath(file).resolve())
a819a79a0533a3b0ef30103b10801b43f1686812
27,178
def ticket_bucketizer(row):
    """Bucket a record by the first character of its first field.

    Digit leading characters: 1 and 2 map to themselves, any other digit
    maps to 3.  Non-digit leading characters: 'P' (ord 80) maps to 80,
    everything else to -1.  The second field is passed through unchanged.
    """
    head = row[0][0]
    try:
        # Keep int()'s exact semantics (including its ValueError cases).
        bucket = int(head)
    except ValueError:
        code = ord(head)
        return [code, row[1]] if code == 80 else [-1, row[1]]
    return [bucket, row[1]] if bucket in (1, 2) else [3, row[1]]
48164c9aafa93002179493433d688c88f8977577
27,180
import os


def get_subid_sesid(in_file, caps_directory):
    """Derive the DataSink base directory and substitutions for CAPS output.

    The participant and session ids are parsed from the input filename,
    which is expected to look like '<participant>_<session>_ficvf...'.

    :param in_file: path to a '*_ficvf*' output file
    :param caps_directory: root of the CAPS directory
    :return: (base_directory, subst_tuple_list) for the DataSink
    """
    filename = in_file.split('/')[-1]
    identifier = filename.split('_ficvf')[0]
    participant_id = identifier.split('_')[0]
    session_id = identifier.split('_')[1]
    base_directory = os.path.join(
        caps_directory, 'subjects', participant_id, session_id,
        'dwi', 'noddi_based_processing', 'native_space')
    # Rename the fitted maps (ficvf/fiso/odi -> NDI/FWF/ODI) and strip
    # intermediate workflow path components.
    subst_tuple_list = [
        (identifier + '_ficvf.nii.gz', identifier + '_space-b0_NDI.nii.gz'),
        (identifier + '_fiso.nii.gz', identifier + '_space-b0_FWF.nii.gz'),
        (identifier + '_odi.nii.gz', identifier + '_space-b0_ODI.nii.gz'),
        (r'/trait_added/', r''),
        (r'/processing_datasinker\d{1,4}/', r''),
        (r'/fit_icvf\d{1,4}/', r''),
        (r'/fit_isovf\d{1,4}/', r''),
        (r'/fit_od\d{1,4}/', r''),
    ]
    return base_directory, subst_tuple_list
96854789dd7e15aba41ba0c10e96c4bece593419
27,181
def compute_mass(struct_mass, n_golf_balls=13, n_ping_pong_balls=13):
    """
    Estimate the empty mass of the plane (kg).

    Total = fixed non-structural mass + ball payload + structural mass.
    Should scale up with surface area of body and wings in some
    principled manner.
    """
    NON_STRUCTURAL_MASS_KG = 0.326
    GOLF_BALL_MASS_KG = 0.04593
    PING_PONG_BALL_MASS_KG = 0.0027
    ball_mass = (n_golf_balls * GOLF_BALL_MASS_KG
                 + n_ping_pong_balls * PING_PONG_BALL_MASS_KG)
    return NON_STRUCTURAL_MASS_KG + ball_mass + struct_mass
5acd12d9609124d8aed1147456deb968247dc50f
27,182
def relativize_paths(root, paths):
    """
    Take a list of fully-qualified paths and remove the root, thus making
    them relative to something.

    :param root: root prefix (without a trailing slash)
    :param paths: iterable of path strings
    :return: new list with every occurrence of '<root>/' removed
    """
    # Hoist the prefix out of the loop and use a comprehension instead of
    # the manual append loop.
    prefix = "%s/" % root
    return [path.replace(prefix, '') for path in paths]
12487e109b281fd886c37f4673e22ffad4d6fe21
27,183
def runner(app):
    """Using test app, create and return a CLI runner object."""
    cli_runner = app.test_cli_runner()
    return cli_runner
67284844e1e2be40dd3dbbed58b04fdbf295a83c
27,184
def set_force_update_on_page_load(forceUpdateOnPageLoad: bool) -> dict:
    """Build the ServiceWorker.setForceUpdateOnPageLoad command message.

    Parameters
    ----------
    forceUpdateOnPageLoad: bool
    """
    params = {"forceUpdateOnPageLoad": forceUpdateOnPageLoad}
    return {
        "method": "ServiceWorker.setForceUpdateOnPageLoad",
        "params": params,
    }
256f5a46eaa46321118903e99c7b7c6bea153d5b
27,186
def _is_descendant(index, target): """ Return true if index is a parent of target. """ while target.isValid(): if target == index: return True target = target.parent() return False
f97dd888fa9506a57bc81730f2e449b834aafa96
27,187
import time
import functools


def benchmark(f):
    """A simple decorator for timing output for publish_body.

    NOTE(review): assumes the wrapped callable receives, as its second
    positional argument, a mapping containing a 'messages' sequence --
    confirm against publish_body's signature.
    """
    @functools.wraps(f)  # preserve the wrapped function's name/docstring
    def inner(*args, **kwargs):
        before = time.time()
        ret = f(*args, **kwargs)
        print('Took %f secs for sending %d messages.' %
              (time.time() - before, len(args[1]['messages'])))
        return ret
    return inner
ebeb9a23678c4d3000bab47ab0b82ac45aaea74e
27,188
def cost_deriv(y, t):
    """Gradient of the squared-error cost with respect to y: dcost / dy."""
    error = y - t
    return 2 * error
aa34c05e4330a79641029441daad3fba5ddd3e97
27,190
def pybtex_unescape(string: str) -> str:
    """
    Reverts the escaping applied by Pybtex.

    :param string: Input string with pybtex-escaped characters.
    :return: Output string where this has been reverted.
    """
    for escaped, plain in (('\\_', '_'), ('\\textasciitilde ', '~')):
        string = string.replace(escaped, plain)
    return string
0c8312ac82c360282369a4e4a05ea9b5899550d8
27,194
def isPointInsideRect(x, y, rect):
    """
    Return True when the point (x, y) lies strictly inside *rect*
    (boundary points are excluded), False otherwise.
    """
    return rect.left < x < rect.right and rect.top < y < rect.bottom
984855f6fa85912b323806ebba3b98c7c6aae441
27,195
import inspect
import sys


def get_model_cls(model):
    """Return the class in this module whose lowercased name equals *model*.

    Raises NotImplementedError when no such class is defined here.
    """
    this_module = sys.modules[__name__]
    for _, candidate in inspect.getmembers(this_module, inspect.isclass):
        if candidate.__name__.lower() == model:
            return candidate
    raise NotImplementedError('Have not implemented that galaxy model')
16e5e57a2633f46a20bcc7634049c0a3335b5f21
27,196
import torch


def zero_loss(*args, **kwargs):
    """Dummy loss that always returns zero.

    Accepts and ignores any positional and keyword arguments so it can be
    used wherever a loss callable is expected.

    Returns
    -------
    loss : torch.tensor
        torch.tensor(0)
    """
    del args, kwargs  # intentionally unused
    return torch.tensor(0)
2c4821e6fb1e443258f7fc482f2630c16dedb3a2
27,197
import os


def get_timeout(servername, default=None):
    """Get timeout for server communication from environment variables.

    Looks up ``SABACAN_<SERVERNAME>_TIMEOUT`` first, then the generic
    ``SABACAN_TIMEOUT``, then *default*.

    Args:
        servername (str): The name of the application (e.g. plantuml)
        default: Fallback when neither environment variable is set.

    Returns:
        int or None: The timeout (sec), or None when unset/unparseable.
    """
    specific_key = 'SABACAN_%s_TIMEOUT' % servername.upper()
    value = os.getenv(specific_key)
    if value is None:
        value = os.getenv('SABACAN_TIMEOUT', default)
    if value is None:
        return None
    try:
        return int(value)
    except ValueError:
        return None
c2dd4c502922851312e45ee20b8d85c00b2654c7
27,199
def subcmd(f):
    """subcmd Decorator - used in combination with a subclassed Command
    class to enable its methods that act as sub-commands.

    These subcommand methods get auto-registered with an Application when
    the main command is registered; the default execute method then
    dispatches to the method named by the provided command.
    """
    # Tag the method so the Command metaclass can discover it.
    setattr(f, 'is_subcmd', True)
    return f
cf9000112a8f0291151d06c207434c79987e88ed
27,200
def get_attribute_from_tag(tag, attribute):
    """Return the value of an XML attribute on *tag*, or None if absent.

    :param tag: element exposing an ``attrib`` mapping
        (e.g. an xml.etree Element)
    :param attribute: attribute name to look up
    :return: the attribute value, or None when the tag does not define it
    """
    # dict.get expresses the original try/except KeyError (returning
    # None on a miss) directly.
    return tag.attrib.get(attribute)
914e04f2e6441ffb5f42d71de49bd99fe5e3092b
27,207
def showPage(request):
    """Show the page when the URL is called.

    Restores the previously stored gene set from the session if present;
    otherwise returns an empty gene list with no name.  The
    'TranscriptLengths' column is never returned.
    """
    genes = []
    name = None
    if 'geneset' in request.session:
        gs = request.session['geneset']
        records = gs.dataframe().reset_index().to_dict(orient="records")
        genes = [
            {key: val for key, val in record.items()
             if key != "TranscriptLengths"}
            for record in records
        ]
        name = gs.name
    return {'genes': genes, 'name': name}
97dc89c3729cb35a52bdac9e15a4ccc788bb6208
27,208
def _to_blob(b): """Convert a bytestring into a type SQLite will accept for a blob.""" return b
bbaa47e6f4d6e975e5f84910d8fcb2d07cc4d4b2
27,209
def calc_frag_lengths(x, y, orientation, merged_frags):
    """
    Given a merged fragments table, calculate the fragment lengths
    assuming the fragments span the provided x and y breakpoints.

    :param x: breakpoint position on the x side
    :param y: breakpoint position on the y side
    :param orientation: two-character strand indicator, e.g. '+-'
    :param merged_frags: table/mapping exposing start_pos_x, end_pos_x,
        start_pos_y and end_pos_y columns
    :return: summed partial lengths across both breakpoints
    """
    if orientation[0] == "+":
        span_x = x - merged_frags["start_pos_x"]
    else:
        span_x = merged_frags["end_pos_x"] - x
    if orientation[1] == "+":
        span_y = y - merged_frags["start_pos_y"]
    else:
        span_y = merged_frags["end_pos_y"] - y
    return span_x + span_y
d7f09c4501caca1323351cf07ddb3bdb030a8664
27,210
def _func_any(_current, _checks): """Implementation of Any.""" return True
4d829a662df23b72a504c66141e4eaabcb24f466
27,211
import argparse
import sys


def add_arguments_get_values():
    """
    Add arguments to the command line and get the values passed.

    Bug fix: ``--channels`` and ``--sampling_rate`` previously declared
    ``type=str`` while defaulting to ints, so CLI-supplied values arrived
    as strings but defaults were ints; both are now parsed as ints.

    Returns:
        argparse.Namespace: Argument values passed.
    """
    # instanciate command line argument parser
    parser = argparse.ArgumentParser(prog=f"python {sys.argv[0]}")
    # documentation: https://docs.python.org/3.8/howto/argparse.html
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        default="EEGstream",
        help="LSL outlet stream name: EEGstream (default)",
    )
    parser.add_argument(
        "-c",
        "--channels",
        type=int,  # was str
        default=32,
        help="number of the EEG channels to simulate: 32 (default)",
    )
    parser.add_argument(
        "-sr",
        "--sampling_rate",
        type=int,  # was str
        default=1024,
        help="sampling rate (Hz) of the stream: 1024 (default)",
    )
    return parser.parse_args()
7deaee11be5633a945df69999deaca76acfd0596
27,212
def fib(n: int) -> int:
    """Return the n'th Fibonacci number.

    Uses zero-style indexing, so the first Fibonacci number corresponds
    to n == 0 (fib(0) == 1, fib(1) == 1, fib(2) == 2, ...).
    """
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return curr
431c0ba58b2dfb25eca03c14890d7b6ecab88f0d
27,213