content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def file_and_extension(path: str):
    """Split *path* into its file name (without extension) and extension.

    Args:
        path: A '/'-separated path, e.g. '/tmp/photo.jpeg'.

    Returns:
        tuple[str, str]: (name without extension, extension without the dot).
        For a dot-less basename the extension is '' instead of raising.
    """
    basename = path.split('/')[-1]
    # Bug fixes vs. original: the extension kept only its first character
    # ([-1][0]), and the name kept only the first dotted component
    # (crashing with IndexError when the basename had no dot).
    filename, _, extension = basename.rpartition('.')
    if not filename:
        # No dot at all: rpartition put everything in `extension`.
        return extension, ''
    return filename, extension
0b2ccae4ac054330e9b99b80ef6a657dd08d97ed
699,399
import os
import inspect
import pickle
import dill


def pickle_special_metrics(metrics, filename):
    """Serialize special metric callables with dill and dump them to *filename*.

    Args:
        metrics (list): list of special metric callables, or None.
        filename (str): path for the pickled payload; nothing is written if
            the file already exists.

    Returns:
        bool: True when *metrics* is not None, False otherwise.
    """
    if metrics is not None and not os.path.exists(filename):
        payload = {'function_string': [], 'path': []}
        for fn in metrics:
            payload['function_string'].append(dill.dumps(fn, recurse=True))
            # Record each metric's source directory once.
            fn_dir = os.path.dirname(inspect.getabsfile(fn))
            if fn_dir not in payload['path']:
                payload['path'].append(fn_dir)
        with open(filename, 'wb') as fid:
            pickle.dump(payload, fid)
    return metrics is not None
d739bf159afd33743790c535624720a2ed356104
699,400
def input_details(interpreter, key):
    """Return the value of *key* from the interpreter's first input detail."""
    details = interpreter.get_input_details()
    return details[0][key]
b06bd5278fcc5d95da387cb506ac9b9f443d9f27
699,401
import os


def get_path(b):
    """Join the build context's key path components into a filesystem path.

    Args:
        b: build context exposing a ``_keypath`` sequence of path parts.

    Returns:
        str: joined path.
    """
    parts = list(b._keypath)
    return os.path.join(*parts)
8603a6e51f3edd2697ace38410b9d8d1d7a05028
699,402
import os


def listdir(d):
    """Lazily yield the full path of every entry directly inside *d*.

    Args:
        d (str): directory to scan.

    Returns:
        generator of str: ``d`` joined with each entry name.
    """
    return (os.path.join(d, entry) for entry in os.listdir(d))
79cd75ab402cf7fca4210a2476276e9ac244b054
699,403
def format_date(date_obj):  # date_obj is a datetime.date object
    """Format a date as ISO, dropping the day when it is the 1st of the month.

    Examples:
        format_date(datetime.date(2013, 4, 12)) returns '2013-04-12'
        format_date(datetime.date(2013, 4, 1))  returns '2013-04'
    """
    iso = date_obj.isoformat()
    # 'YYYY-MM' is the first 7 characters of the ISO form.
    return iso[:7] if date_obj.day == 1 else iso
bfcddc9cbd4228084a0a4c5e6736be28a90d520e
699,404
def comac(Psi_1, Psi_2, dof):
    """Co-modal assurance criterion.

    NOTE(review): placeholder implementation — ignores all arguments and
    always returns 1; the actual COMAC computation is not implemented yet.
    """
    return 1
07b2de7e5034c5d0bf3db3a2b59fabd5a26129da
699,405
from typing import Dict from typing import Any def _make_pod_podannotations() -> Dict[str, Any]: """Generate pod annotation. Returns: Dict[str, Any]: pod annotations. """ annot = { "annotations": { "k8s.v1.cni.cncf.io/networks": '[\n{\n"name" : "internet-network",' '\n"interface": "eth1",\n"ips": []\n}\n]' } } return annot
94ab8b66c5cf63546f15cb8fef6e1fbc4aa3cb49
699,406
def getPPSA3(ChargeSA):
    """Atom-charge weighted positive surface area (PPSA3).

    Sum of (partial charge * solvent-accessible surface area) over all
    positively charged atoms. Each entry is indexed as (_, charge, area).
    """
    total = 0.0
    for entry in ChargeSA:
        charge = float(entry[1])
        if charge > 0:
            total += charge * entry[2]
    return total
e536124fa137969fa8171c56a1c25fcfdebfdfe6
699,407
def merge_result(docker_list: dict, result_dict: dict = None, max_entries_per_docker: int = 5) -> dict:
    """Merge docker-image usage info into an accumulated result dict.

    Args:
        docker_list (dict): mapping of integration/script name -> docker image.
        result_dict (dict): previously accumulated result to merge into; a new
            dict is created when omitted. (Fixed: the original used a mutable
            ``{}`` default argument.)
        max_entries_per_docker (int): cap on integrations/scripts listed per
            docker image.

    Returns:
        dict: {'docker_image': ['integration/script', ...]}
    """
    result = result_dict or {}
    # These helper scripts are intentionally excluded from the report.
    skipped = ('CommonServerUserPowerShell', 'CommonServerUserPython')
    for integration_script, docker_image in docker_list.items():
        if integration_script in skipped:
            continue
        if docker_image in result:
            if len(result[docker_image]) < max_entries_per_docker:
                result[docker_image].append(integration_script)
        else:
            result[docker_image] = [integration_script]
    return result
7cfd2708e15eb65c09e32cf0c08a360f780aacb8
699,408
def check(sudoku_part):
    """Check that a sudoku line/square contains no digit 1-9 more than once.

    (Translated docstring; original was Polish.)

    Fixes vs. original: it summed the matching *values* instead of counting
    them (so a single '2' produced sum 2 > 1 and was wrongly flagged), and
    ``range(1, 9)`` never checked the digit 9.

    Args:
        sudoku_part: iterable of cell values.

    Returns:
        bool: True when every digit 1-9 occurs at most once.
    """
    for digit in range(1, 10):
        count = sum(1 for element in sudoku_part if element == digit)
        if count > 1:
            return False
    return True
e93bd53127ea2f280ed123d2de0a18d2a18a1f0a
699,409
from bs4 import BeautifulSoup
import requests


def _search_anything(query):
    """Search SNPedia for *query* and return related rsid genotype names."""
    url = "https://www.snpedia.com/index.php/" + query
    page = BeautifulSoup(requests.get(url).content, "lxml")
    anchors = page.find_all("div", attrs={"class": "mw-content-ltr"})[0].find_all("a")
    # Prefer rsid links, dropping parenthesised variants.
    rsids = [a.text for a in anchors if a.text.startswith("rs")]
    rsids = [txt for txt in rsids if not txt.endswith(")")]
    if rsids == []:
        # Fall back to any non-empty link text.
        rsids = [a.text for a in anchors if a.text]
    if 'search for this page title' in rsids:
        # SNPedia returned its "no such page" search prompt.
        rsids = ["another phenotype"]
    return rsids
384a15c28676c12d98b150851eb0abd5188908bd
699,410
def _major_minor(version): """Given a string of the form ``X.Y.Z``, returns ``X.Y``.""" return ".".join(version.split(".")[:2])
8ca52bd0324bb0445ab3c5b9b44124c0f627f635
699,412
def is_lock_valid(transfer, block_number):
    """Return True while the lock has not expired (block <= expiration)."""
    return not (block_number > transfer.expiration)
186c48c8a81458a9ff25891f5afa380f07dcbede
699,413
def get_genes_without_assigned_pathways(enrichment_results, genes_query):
    """Return the queried genes that belong to no known pathway.

    :param dict enrichment_results: resource -> pathway -> info dict with a
        'pathway_gene_set' entry.
    :param set[str] genes_query: gene set queried.
    :return: set of genes absent from every pathway gene set.
    """
    # Collect every gene that appears in any pathway of any resource.
    assigned = set()
    for resource_pathways in enrichment_results.values():
        for pathway_dict in resource_pathways.values():
            assigned.update(pathway_dict['pathway_gene_set'])
    return {gene for gene in genes_query if gene not in assigned}
ab40c79ca078a36f723bde553b8183079ea4313c
699,414
def set_overlay(background, overlay, pos=(0,0)):
    """
    Function to overlay a transparent image on background.
    :param background: input color background image (mutated in place)
    :param overlay: input transparent image (BGRA)
    :param pos: position where the image to be set.
    :return: result image (the same `background` array)
    """
    # Overlay and background dimensions (height, width, channels).
    h,w,_ = overlay.shape
    rows,cols,_ = background.shape
    # NOTE(review): pos[0] is added to the *column* index and pos[1] to the
    # *row* index below, so `pos` effectively reads as (x, y) in image
    # coordinates despite the y,x naming — confirm against callers.
    y,x = pos[0],pos[1]
    for i in range(h):
        for j in range(w):
            # Skip overlay pixels that fall outside the background bounds.
            if x+i >= rows or y+j >= cols:
                continue
            # Alpha channel (BGRA index 3) normalized to [0, 1].
            alpha = float(overlay[i][j][3]/255.0)
            # Per-pixel alpha blend of overlay BGR onto background, in place.
            background[x+i][y+j] = alpha*overlay[i][j][:3]+(1-alpha)*background[x+i][y+j]
    return background
ae9b8be9193e04ede7649ff9cab2ad4a94c4876a
699,415
def format_variables_info_string(variables):
    """Return textual information about the given variables.

    Useful for CLI / REPL applications.

    Fix vs. original: the parameter was annotated ``dict`` but the code
    iterates the elements and calls ``.get`` on each, i.e. it expects an
    iterable of per-variable dicts (a plain dict would yield string keys
    and crash) — the misleading annotation is removed.

    :param variables: iterable of dicts with optional keys
        'name', 'units', 'long_name', 'standard_name'.
    :return: formatted multi-line string.
    """
    if not variables:
        return 'No variables information available.'

    info_lines = []
    for variable in variables:
        info_lines.append('%s (%s):' % (variable.get('name', '?'),
                                        variable.get('units', '-')))
        info_lines.append('  Long name: %s' % variable.get('long_name', '?'))
        info_lines.append('  CF standard name: %s' % variable.get('standard_name', '?'))
        info_lines.append('')

    return '\n'.join(info_lines)
8f6f13c13040208e137efdef54175a2b68942b24
699,416
def format_line(entries, sep=' | ', pads=None, center=False):
    """Format one row of a simple text table.

    args:
        entries: List of data to put in table, left to right.
        sep: String to separate data with.
        pads: List of numbers, pad each entry with this width.
        center: Center the entry, otherwise left aligned.
    """
    align = '^' if center else ''
    if pads:
        specs = [align + str(pad) for pad in pads]
    else:
        specs = [align] * len(entries)
    cells = [('{:%s}' % specs[idx]).format(str(entry))
             for idx, entry in enumerate(entries)]
    return sep.join(cells).rstrip()
c5dfd312f8f13a54943fe18e254a6d8836d4460a
699,417
from typing import List
import re


def find_economic_exprs(headline: str) -> List[str]:
    """Extract '<index> 高/安' (rise/fall) expressions from a headline.

    >>> find_economic_exprs('ユーロ高が原因で米株安')
    ['米株 安', 'ユーロ 高']
    """
    indices = ['円', '米株', '外株', '欧州株', '米国株', '中国株', '上海株',
               'アジア株', 'ドル', 'ユーロ', 'ダウ', '先物']
    results = []
    for name in indices:
        # Index name immediately followed by 高 (up) or 安 (down).
        match = re.search('{}(高|安)'.format(name), headline)
        if match is not None:
            results.append(' '.join([name, match[1]]))
    return results
bd9541774400139fbcf5b6d0c807ca6e3eb1ef39
699,418
def split_tokens(s,keywordDict):
    """
    Returns list of syntax elements with quotes and spaces stripped.

    Tokens are delimited by spaces and '$'; parentheses become their own
    tokens; text between single quotes is kept as one token (quotes removed).
    NOTE(review): `keywordDict` is unused by this implementation.
    """
    result = []
    # Bind the method locally; this scanner calls it in a tight loop.
    result_append = result.append
    s_len = len(s)
    i = 0
    while i<s_len:
        start = i
        # Scan unquoted text until a single quote is reached.
        while i<s_len and s[i]!="'":
            if s[i]=="(" or s[i]==")":
                # Flush the pending token, then emit the paren as its own token.
                if i>start:
                    result_append(s[start:i])
                result_append(s[i])
                i +=1 # Consume parentheses
                start = i
            elif s[i]==" " or s[i]=="$":
                # Space or '$' ends the current token.
                if i>start:
                    result_append(s[start:i])
                i +=1
                # Consume more space chars
                while i<s_len and s[i]==" ":
                    i +=1
                start = i
            else:
                i +=1
        # Flush whatever precedes the quote (or the end of input).
        if i>start:
            result_append(s[start:i])
        i +=1
        if i>=s_len:
            break
        # Quoted section: everything up to the closing quote is one token,
        # emitted even when empty (i >= start always holds here).
        start = i
        while i<s_len and s[i]!="'":
            i +=1
        if i>=start:
            result_append(s[start:i])
        i +=1
    return result
9476337ea8db742e2a71f3caa04571db498d0064
699,419
def props_intersection(var_list, new_props_var, carry_vars=None):
    """Perform the intersection of the properties of a list of nodes/edges.

    Builds (does not execute) a Cypher query fragment that computes, into
    `new_props_var`, only the key/value pairs present on *every* variable in
    `var_list`, using APOC map helpers.

    :param var_list: Cypher variable names whose properties are intersected.
    :param new_props_var: name of the Cypher variable receiving the result.
    :param carry_vars: set of variable names to keep in scope through the
        WITH clauses; mutated — `new_props_var` is added before returning.
    :return: the Cypher query string.
    """
    if carry_vars is None:
        carry_vars = set(var_list)
    else:
        carry_vars.update(var_list)
    query = "\n//Perform the intersection of the properties of "
    query += ", ".join(var_list) + "\n"
    # Start with an empty pair list bound to new_props_var.
    query += "WITH [] as {}, ".format(new_props_var) +\
        ", ".join(carry_vars) + "\n"
    var_first = var_list[0]
    # Keep a {key, value} pair only when the key exists on all other
    # variables AND the value appears under that key on all of them.
    query +=\
        "WITH {} + REDUCE(pairs = [], k in keys({}) | \n".format(
            new_props_var, var_first) +\
        "\tCASE WHEN ALL(others in [{}] WHERE k in keys(others))\n".format(
            ", ".join(var_list[1:])) +\
        "\tTHEN\n" +\
        "\t\tpairs + REDUCE(inner_pairs = [], v in {}[k] | \n".format(
            var_first) +\
        "\t\t\tCASE WHEN ALL(others in [{}] WHERE v in others[k])\n".format(
            ", ".join(var_list[1:])) +\
        "\t\t\tTHEN\n" +\
        "\t\t\t\tinner_pairs + {key: k, value: v}\n" +\
        "\t\t\tELSE\n" +\
        "\t\t\t\tinner_pairs\n" +\
        "\t\t\tEND)\n" +\
        "\tELSE\n" +\
        "\t\tpairs\n" +\
        "\tEND) as {}, ".format(new_props_var) +\
        ", ".join(carry_vars) + "\n"
    # Group the surviving pairs by key, then rebuild a map whose values are
    # deduplicated lists.
    query +=\
        "WITH apoc.map.groupByMulti({}, 'key') as {}, ".format(
            new_props_var, new_props_var) +\
        ", ".join(carry_vars) + "\n" +\
        "WITH apoc.map.fromValues(REDUCE(pairs=[], k in keys({}) | \n".format(
            new_props_var) +\
        "\tpairs + [k, REDUCE(values=[], v in {}[k] | \n".format(
            new_props_var) +\
        "\t\tvalues + CASE WHEN v.value IN values THEN [] ELSE v.value END)])) as {}, ".format(
            new_props_var) +\
        ", ".join(carry_vars) + "\n"
    carry_vars.add(new_props_var)
    return query
0262cc395f58361d245b5e52778f57e8dc312c69
699,420
def lines_to_config(ctx_keys, line, delete):
    """
    Return the command as it would appear in frr.conf

    :param ctx_keys: list of nested context headers, outermost first
        (e.g. ['router ospf']); each is emitted indented one level deeper.
    :param line: the command inside the innermost context, or None when the
        whole context itself is being added/removed.
    :param delete: True to produce the removal ("no ...") form.
    :return: list of config lines.
    """
    cmd = []
    if line:
        # Emit every context header with increasing indentation.
        for (i, ctx_key) in enumerate(ctx_keys):
            cmd.append(" " * i + ctx_key)
        line = line.lstrip()
        indent = len(ctx_keys) * " "
        # There are some commands that are on by default so their "no" form will be
        # displayed in the config. "no bgp default ipv4-unicast" is one of these.
        # If we need to remove this line we do so by adding "bgp default ipv4-unicast",
        # not by doing a "no no bgp default ipv4-unicast"
        if delete:
            if line.startswith("no "):
                cmd.append("%s%s" % (indent, line[3:]))
            else:
                cmd.append("%sno %s" % (indent, line))
        else:
            cmd.append(indent + line)
    # If line is None then we are typically deleting an entire
    # context ('no router ospf' for example)
    else:
        # Emit all parent contexts unchanged; only the innermost one is negated.
        for i, ctx_key in enumerate(ctx_keys[:-1]):
            cmd.append("%s%s" % (" " * i, ctx_key))
        # Only put the 'no' on the last sub-context
        if delete:
            # Same default-on double-negation rule as above.
            if ctx_keys[-1].startswith("no "):
                cmd.append("%s%s" % (" " * (len(ctx_keys) - 1), ctx_keys[-1][3:]))
            else:
                cmd.append("%sno %s" % (" " * (len(ctx_keys) - 1), ctx_keys[-1]))
        else:
            cmd.append("%s%s" % (" " * (len(ctx_keys) - 1), ctx_keys[-1]))
    return cmd
ae9fcff2272a167790f2ea9554b3649f91c497af
699,421
import torch


def masked_loss(lossfunc, logits, y, lens):
    """Compute *lossfunc* over only the first ``lens[i]`` steps of each row.

    Builds a boolean mask selecting the valid prefix of every batch element,
    then applies the loss to the masked-out flat tensors.
    """
    mask = torch.zeros_like(logits, dtype=torch.bool)
    for row, valid_len in enumerate(lens):
        mask[row, :valid_len, :] = 1
    selected_logits = torch.masked_select(logits, mask)
    selected_y = torch.masked_select(y, mask)
    return lossfunc(selected_logits, selected_y)
235f5e51a643eac803c28c2ab62b74e730c3c14d
699,422
def int_or_none(x):
    """Parse *x* as an int, mapping 'null'/'none'/'' (any case) to None."""
    return None if x.lower() in ("null", "none", "") else int(x)
f7a99fd637b5f5e2f4519034b36d03128279726c
699,423
def collatz(n):
    """Count the Collatz iterations needed to reach 1 (starting count at 1).

    Pre: n is a positive integer.
    Post: returns the number of values visited, including n and 1.
    """
    steps = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
    return steps
4e1b5d227dfe18eddad1832352c3280995c672e5
699,424
import re


def format_error(log: str, pkg_version):
    """
    Execute some replacement transformations on the log, to make it
    more compressible.

    Fix vs. original: ``pkg_version`` was substituted as a raw regex pattern,
    so versions containing regex metacharacters ('1+2') or '.' matched the
    wrong text — it is now escaped with ``re.escape``. Patterns are raw
    strings to avoid invalid-escape warnings; behavior is unchanged.
    """
    # remove store hashes
    log = re.subn(r"(.*/nix/store/)[\d\w]+(-.*)", r"\1#hash#\2", log)[0]
    # reduce multiple white spaces to a single space
    log = re.subn(r"(\S*)( +)(\S*)", r"\1 \3", log)[0]
    # remove python versions
    log = re.sub(r"python[\d\.\-ab]+", "python#VER#", log)
    # remove line numbers
    log = re.sub(r"line (\d*)", "line #NUM#", log)
    # equalize tmp directories
    log = re.sub(r"tmp[\d\w_]*", "#TMP#", log)
    # remove package versions (escaped: version is literal text, not a regex)
    log = re.sub(re.escape(pkg_version), "#PKG_VER#", log)
    # detect some common errors and shorten them
    common = (
        'unpacker produced multiple directories',
    )
    for err in common:
        if err in log:
            log = err
            break
    # for Exceptions keep only short text
    match = re.match(r"(?s:.*)[\s\n]([\w\._]*Error:.*)", log)
    if match:
        log = match.groups()[0]
    # remove common warnings and trim line number and length
    lines = log.splitlines(keepends=True)
    lines = map(lambda line: line[:400], lines)
    remove_lines_marker = (
        '/homeless-shelter/.cache/pip/http',
        '/homeless-shelter/.cache/pip',
        'DEPRECATION: Python 2.7'
    )
    filtered = filter(
        lambda l: not any(marker in l for marker in remove_lines_marker),
        lines)
    return ''.join(list(filtered)[:90])
7bae545c480eeeabf57aac9ca5603e34e9368aee
699,425
import platform


def supports_posix() -> bool:
    """Check if the current machine supports basic posix.

    Returns:
        bool: True if the current machine is MacOSX or Linux.
    """
    system = platform.system().lower()
    return system in {"darwin", "linux"}
527bb8570e0493deec01aadafdf929e7315b27c0
699,426
import torch


def compute_reconstruction_error(vae, data_loader, **kwargs):
    """
    Computes log p(x/z), which is the reconstruction error. Differs from
    the marginal log likelihood, but still gives good insights on the
    modeling of the data, and is fast to compute.

    Fix vs. original: the first batch for each loss key was discarded
    (the accumulator was initialized to 0.0 instead of the batch's sum).

    Returns:
        dict: per-key negated mean reconstruction loss over the dataset.
    """
    # Iterate once over the data and compute the reconstruction error.
    log_lkl = {}
    for tensors in data_loader:
        loss_kwargs = dict(kl_weight=1)
        _, _, losses = vae(tensors, loss_kwargs=loss_kwargs)
        for key, value in losses._reconstruction_loss.items():
            batch_total = torch.sum(value).item()
            if key in log_lkl:
                log_lkl[key] += batch_total
            else:
                # Bug fix: was `log_lkl[key] = 0.0`, dropping this batch.
                log_lkl[key] = batch_total

    n_samples = len(data_loader.indices)
    for key in log_lkl:
        # Mean over samples, negated (loss -> log-likelihood sign).
        log_lkl[key] = -log_lkl[key] / n_samples
    return log_lkl
ee4fa35002e0f43d6482ae7f9ccfc7b7b1282db7
699,427
def _whbtms(anchor): """Return width, height, x bottom, and y bottom for an anchor (window).""" w = anchor[2] - anchor[0] + 1 h = anchor[3] - anchor[1] + 1 x_btm = anchor[0] + 0.5 * (w - 1) y_btm = anchor[1] + 1.0 * (h - 1) return w, h, x_btm, y_btm
0b4d65ad8a9dca2753ee703ad47764a1cb88f50e
699,428
def hi():
    """Return the IRIS prediction model welcome message."""
    greeting = "Welcome to the IRIS Prediction Model!"
    return greeting
b2d5ff70eedebe5c628ff6676b03e30badf31666
699,429
def substract(x, y):
    """Return the difference ``y - x`` (note the argument order)."""
    return y - x
9be83227021ae8c37d4e2c2c9497346f96e878d0
699,430
def _compute_land_only_mask(ds, lsm_min=0.8):
    """Compute a boolean mask for cells considered to be over land.

    A cell is "land" when its land-sea fraction ``lsm`` (0 = all water,
    1 = all land) exceeds *lsm_min*. The mask's ``long_name`` attribute
    documents the threshold and units.
    """
    mask = ds.lsm > lsm_min
    mask.attrs["long_name"] = (
        f"Land only (land-sea mask > {lsm_min} [{ds.lsm.units}])"
    )
    return mask
23b682a216031abac3e90989d130f7b3de9abf7c
699,432
def check_datasets(datasets):
    """Check that datasets is a list of (name, (X, y)) pairs with unique
    string names.

    Fix vs. original: a bare ``except`` swallowed the uniqueness
    ``ValueError`` raised inside the ``try`` and re-raised it with the
    unrelated "should be a list of pairs" message. The structural check and
    the name check are now separated.

    Returns:
        the validated *datasets* argument, unchanged.

    Raises:
        ValueError: when the structure is wrong or names are not unique strings.
    """
    try:
        datasets_names = [dataset_name for dataset_name, _ in datasets]
    except (TypeError, ValueError):
        # Not iterable, or elements are not 2-item pairs.
        raise ValueError("The datasets should be a list of (dataset name:(X,y)) pairs.")
    are_all_strings = all(isinstance(name, str) for name in datasets_names)
    are_unique = len(datasets_names) == len(set(datasets_names))
    if not (are_all_strings and are_unique):
        raise ValueError("The datasets' names should be unique strings.")
    return datasets
e02771dd7d2df3d9e716141ba2ada92408a2b14e
699,433
import requests
import json


def get_card_updated_data(scryfall_id) -> dict:
    """Gets updated data from scryfall.

    Args:
        scryfall_id: the Scryfall card id,
            e.g. "e9d5aee0-5963-41db-a22b-cfea40a967a3".

    Returns:
        dict: result from Scryfall's card api
        ref: https://scryfall.com/docs/api/cards/id

    Raises:
        Exception: when the request does not return HTTP 200.

    Fixes vs. original: the return annotation said ``str`` although a parsed
    JSON dict is returned, and the error message read "os save" instead of
    "or save".
    """
    r = requests.get(
        f"https://api.scryfall.com/cards/{scryfall_id}",
    )
    if r.status_code == 200:
        return json.loads(r.text)
    raise Exception("Could not download or save card image")
2a0f1a9c65cbe7e173543e8a3d4b03fd364e2ae6
699,435
import torch


def basis(A):
    """Return an orthogonal basis of A's columns.

    Uses geqrf/orgqr on CPU; falls back to torch.qr on CUDA because
    torch.orgqr is not available there.
    """
    if not A.is_cuda:
        Q = torch.orgqr(*torch.geqrf(A))
    else:
        # torch.orgqr is not available in CUDA
        Q, _ = torch.qr(A, some=True)
    return Q
ad5de52748b4bfca35c2607af623457b4e0362c3
699,436
import torch


def get_thresholded_graph(meta_data_tensor, threshold_list):
    """Build per-feature adjacency from pairwise meta-data distances.

    For each feature column k, two nodes are adjacent when the absolute
    difference of their k-th meta value is within ``threshold_list[k]``.

    Args:
        meta_data_tensor: (n_nodes, n_features) tensor.
        threshold_list: one threshold per feature column.

    Returns:
        Long tensor stacking the per-feature 0/1 adjacency along the last dim.
    """
    per_feature_adj = []
    for col, threshold in enumerate(threshold_list):
        feature = meta_data_tensor[:, col:col + 1]
        # Pairwise differences via broadcasting: (n, n, 1).
        pairwise = feature[:, None, :] - feature[None, :, :]
        adj = (torch.abs(pairwise) <= threshold).long()
        per_feature_adj.append(adj.squeeze())
    return torch.stack(per_feature_adj, -1)
a09b27dfea06519b06ef5ed25793da67b01f4bfe
699,437
def get_candidate_file_name(
    monotonous: bool,
    agg_point: bool,
    both: bool = False,
) -> str:
    """Get canonical filename for candidate set sizes."""
    prefix = "candidate_sizes" + ("_mono" if monotonous else "")
    if both:
        return prefix + "_k_point"
    # NOTE(review): this yields a double underscore ("__point" / "__k"),
    # which looks unintended but is preserved because existing files may
    # already use these names — confirm before changing.
    return prefix + "_" + ("_point" if agg_point else "_k")
85b27f7767fd495a46459553b96160691a3dbeda
699,438
def square_of_sum(limit):
    """
    Returns the square of the sum of all integers in the range 1 up to
    and including limit.

    Uses the closed-form triangular-number formula limit*(limit+1)//2
    instead of materializing and summing a list (O(1) vs O(n)).
    """
    return (limit * (limit + 1) // 2) ** 2
503048c7b175d10dce20ff9d2f9435be4fbe1c35
699,439
def has_duplicate(sampled_clumper):
    """Return True when any two entries compare equal.

    Improvement vs. original: the original compared every ordered pair
    (i, j) with i != j, doing each symmetric comparison twice; this scans
    only forward pairs. Equality-based (not hashing), so entries need not
    be hashable.
    """
    for i, source_clump in enumerate(sampled_clumper):
        # Compare only against entries after i — equality is symmetric.
        if any(source_clump == other for other in sampled_clumper[i + 1:]):
            return True
    return False
d34dc7de8d84b1dc9e1b65e0a83b9cefcbed8055
699,440
import re


def is_valid_regex(regex):
    """Helper function to determine whether the provided regex is valid.

    Args:
        regex (str): The user provided regex.

    Returns:
        bool: Indicates whether the provided regex was valid or not.
    """
    try:
        re.compile(regex)
    except (re.error, TypeError):
        # Malformed pattern, or not a string/bytes at all.
        return False
    return True
4c544168f2b1894e7cc7c7804342f923287022b5
699,441
def spatial_overlap_conv_3x3_stride_1_dilate_2(p):
    """Spatial overlap of a dilated 3x3 conv (stride 1, dilation 2).

    Expressed in terms of the input feature map's spatial overlap value *p*.
    """
    overflow = max(p - 0.5, 0)
    denominator = 9 - 24 * overflow
    if p < 0.5:
        return 15 * p / denominator
    return (6 + 3 * (1 - p) - (14 + 4 * (1 - p)) * overflow) / denominator
91a6bd178e7dcad6e21ad7fa58df28aaf1ad279b
699,442
def normalize_medical_kind(shape, properties, fid, zoom):
    """
    Many medical practices, such as doctors and dentists, have a
    speciality indicated through the `healthcare:speciality` tag — a
    semi-colon delimited list, which is expanded into an actual list on
    the 'speciality' property.
    """
    medical_kinds = ('clinic', 'doctors', 'dentist')
    if properties.get('kind') in medical_kinds:
        speciality = (properties.get('tags') or {}).get('healthcare:speciality')
        if speciality:
            properties['speciality'] = speciality.split(';')
    return (shape, properties, fid)
dbf75799624cbe119c4c3a74c8814d0a979d828e
699,443
def _get_tws(sel_cim_data): """Return terrestrial water storage.""" return sel_cim_data.TWS_tavg
2b8405a245d39466ef2b47463d88518cc3b5bedf
699,444
import os
import sys


def add_to_path(path):
    """Add the specified path to the front of the system path.

    @param path: Path to add.
    @return True if path was added. Return false if path does not exist
            or path was already in sys.path.
    """
    if not os.path.exists(path) or path in sys.path:
        return False
    sys.path.insert(0, path)
    return True
8e57968a2384e80959a30f97267d2ea8a5650ce1
699,445
import torch


def load_graph(pt_file_path="./sample_graph.pt"):
    """Load a serialized graph object from a ``.pt`` file.

    Parameters
    ----------
    pt_file_path : file path

    Returns
    -------
    graph : torch_geometric.data.Data
        - data.n_node: number of nodes
        - data.n_node_type: number of node types == (1 or 2)
        - data.n_edge: number of edges
        - data.n_edge_type: number of edge types
        - data.node_type: (source_node_type, target_node_type)
        - data.edge_index: [list of] torch.Tensor, int, shape (2, n_edge),
          [indexed by edge type]; [0, :] source node index, [1, :] target
          node index
        - data.edge_type: None or list of torch.Tensor, int, shape (n_edge,),
          indexed by edge type
        - data.edge_weight: None or list of torch.Tensor, float, shape (n_edge,)
        - data.source_node_idx_to_id: dict {idx : id}
        - data.target_node_idx_to_id: dict {idx : id}
    """
    return torch.load(pt_file_path)
a45304199c92527e4b3e48430c8ac0f9f59a74d5
699,447
def duffing(x, u, dt=0.05, delta=0.2):
    """One Euler step of Duffing's oscillator.

    State x = (position, velocity); u is the forcing input.
    """
    pos, vel = x[0], x[1]
    d_pos = vel
    d_vel = pos - delta * vel - pos ** 3 + u
    return pos + dt * d_pos, vel + dt * d_vel
ef193499ce213a1848a2482fdb13c53227e464d0
699,448
def merge_arrays(*lsts: list):
    """Merges all arrays into one flat list.

    Idiom fix: the original used a list comprehension purely for its
    ``extend`` side effect; a plain loop expresses the intent.

    Examples:
        >>> merge_arrays(['a','b','c'], [1,2,3], ['John','Alice','Bob'])
        ['a', 'b', 'c', 1, 2, 3, 'John', 'Alice', 'Bob']
    """
    merged_list = []
    for lst in lsts:
        merged_list.extend(lst)
    return merged_list
f2dd78a9e2d31347d72e1e904c0f2c6e30b37431
699,449
import argparse
import sys


def parse_args(args):
    """Build the argument parser and parse *args*.

    Prints help and exits when the process was invoked with no arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", dest="user", help="Username", default="user")
    parser.add_argument("-p", dest="passwd", help="Password", default="user")
    parser.add_argument("-k", dest="key_path", help="Public ssh key path")
    parser.add_argument(dest="host", help="Target host")
    # Bail out with usage when the actual command line was empty.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args(args)
479d5127503c367e78d81935d987f3149fd48ce1
699,450
def is_within_range(num, lower_bound: int, upper_bound: int) -> bool:
    """Check whether *num* lies strictly between the two bounds.

    :param num: value to test (coerced with int()).
    :param lower_bound: exclusive lower bound.
    :param upper_bound: exclusive upper bound.
    :return: True if lower_bound < num < upper_bound.
    :raises ValueError: when upper_bound < lower_bound.
    """
    value = int(num)  # coerce first, matching original evaluation order
    if upper_bound < lower_bound:
        raise ValueError("bounds are incorrect")
    return lower_bound < value < upper_bound
325f4084a88254ec2f29dd28749b8709de394078
699,452
from typing import Optional from typing import Tuple def _shape_param_to_id_str(shape_param: Optional[Tuple[Optional[int], ...]]) -> str: """Convert kernel input shape parameter used in `test_call.py` and `conftest.py` into a human readable representation which is used in the pytest parameter id.""" if shape_param is None: return "None" shape_strs = tuple("indim" if dim is None else str(dim) for dim in shape_param) if len(shape_strs) == 1: return f"({shape_strs[0]},)" return f"({', '.join(shape_strs)})"
ba73efa0693217a32ba42754af36204631c4200d
699,453
import pandas


def estimate_test_percentages_for_regions(df: pandas.DataFrame) -> pandas.Series:
    """
    Calculates the fraction of tests per region.
    Uses the 7 days up to the last day for which daily new_test data is
    available for all regions.

    WARNING: If any region has a gap _before_ the last day for which all of
    them have data, this function will fail to return the correct result.

    Parameters
    ----------
    df: pandas.DataFrame
        The dataframe containing the new_test column with a [region, date]
        index as genereated by get_testcounts_DE. An `all` region has to be
        included.

    Returns
    -------
    region_test_percentages: pandas.Series
        Region-indexed series of fractions of all tests.
    """
    # Drop NaN rows so the per-region tail(1) below is the last *reported* day.
    rows_with_testcounts = df.new_tests[~df.new_tests.isna()]
    # Last date every region has data for: the minimum over each region's
    # latest reported date.
    last_date_with_testcounts_for_all_regions = rows_with_testcounts.groupby('region').tail(1).reset_index()['date'].min()
    # select the last 7 days up to the latest testcount data point
    last_week_of_testcounts = slice(last_date_with_testcounts_for_all_regions - pandas.Timedelta('6D'), last_date_with_testcounts_for_all_regions)
    # Then calculate the sum of tests one week up to that date
    # NOTE(review): passing a slice to .xs(level='date') relies on pandas
    # accepting slices for level selection here — confirm on upgrade.
    testcounts_in_last_daily_data = df.new_tests.xs(last_week_of_testcounts, level='date').groupby('region').sum()
    # Finally convert absolutes to fractions (relative to the 'all' region).
    return testcounts_in_last_daily_data / testcounts_in_last_daily_data['all']
a422f4621cc76d8bb0819ab6a1f12996da962a46
699,454
import json


def ratelimit():
    """Build the 413 rate-limit JSON response triple (body, status, headers)."""
    body = json.dumps({
        'success': False,
        'error': 'signal api rate limit reached'
    })
    return body, 413, {'ContentType': 'application/json'}
b8a2e917acdbe26850e950e62dadf419d64ecc02
699,455
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group.

    Used for convenience when printing; None when there are no groups.
    """
    return next((group['lr'] for group in optimizer.param_groups), None)
c73d08f3cbac372b02c3ccce7e40526d7e686e63
699,456
import os


def find_executable(filename, environ=None):
    """Find an executable by searching the current $PATH.

    :param filename: bare name of the file to locate.
    :param environ: mapping to read PATH from; defaults to os.environ.
    :return: normalized full path of the first match, or None.
    """
    if environ is None:
        environ = os.environ
    search_dirs = environ.get("PATH", "/usr/local/bin:/usr/bin:/bin").split(":")
    for entry in search_dirs:
        candidate = os.path.normpath(
            os.path.join(os.path.abspath(entry.strip()), filename))
        if os.path.isfile(candidate):
            return candidate
    return None
be3cabb7298d4a22cb3e81c1878352f2bb405430
699,457
def cmp_by_level_2_tag_code(left, right):
    """Comparator that sorts tags by their second-level tag code,
    pushing the 'other' code to the end. (Translated docstring.)

    :param left: tag dict with a 'tag_code' key.
    :param right: tag dict with a 'tag_code' key.
    :return: -1, 1, or 0.
    """
    left_code = left["tag_code"]
    right_code = right["tag_code"]
    if left_code == "other":
        return 1
    if right_code == "other":
        return -1
    return 0
6ee210c54967dcad7673d4c130ecc0e5bc907bc5
699,458
def lcs(string_a, string_b):
    """Length of the longest common subsequence of two strings.

    Classic dynamic programming over a (len(b)+1) x (len(a)+1) table;
    time and space are O(n*m) for input lengths n and m.
    """
    cols = len(string_a) + 1
    rows = len(string_b) + 1
    table = [[0] * cols for _ in range(rows)]
    for r in range(1, rows):
        for c in range(1, cols):
            if string_a[c - 1] == string_b[r - 1]:
                # Characters match: extend the diagonal subsequence.
                table[r][c] = table[r - 1][c - 1] + 1
            else:
                table[r][c] = max(table[r - 1][c], table[r][c - 1])
    return table[-1][-1]
dc1183092928e1cb9bce5926207fbceb85b79e2e
699,459
def getbox(xcenter, ycenter, size=500):
    """
    Compute the coordinates of a square bounding box.

    Arguments:
        - xcenter: int, x position of the center of the box.
        - ycenter: int, y position of the center of the box.
        - size: int, size of the side of the box.

    Returns:
        - box: list of (x, y) int tuples along the four sides, ordered
          bottom, top, right, then left edge.
    """
    half = int(size / 2)
    left, top = xcenter - half, ycenter - half
    right, bottom = xcenter + half, ycenter + half
    top_edge = [(x, top) for x in range(left, right)]
    bottom_edge = [(x, bottom) for x in range(left, right)]
    left_edge = [(left, y) for y in range(top, bottom)]
    right_edge = [(right, y) for y in range(top, bottom)]
    return bottom_edge + top_edge + right_edge + left_edge
6e8030f113a13775365fcd715be46690849154f3
699,460
from typing import List


def get_entity_attributes(entity: dict) -> List[str]:
    """Get a list with the attribute names of the entity.

    Args:
        entity(:obj:`dict`): entity.

    Returns:
        list: keys of the entity, excluding 'id' and 'type'.
    """
    return [key for key in entity.keys() if key not in ("id", "type")]
a5482a679ff5060d2deb26d7af6f4dc7fe394bf5
699,461
def fahrenheitToRankie(fahrenheit: float, ndigits=2) -> float:
    """
    Convert a given value from Fahrenheit to Rankine, rounded to *ndigits*
    decimal places (2 by default).

    Fix vs. original: the conversion offset was 459.7; the correct
    Fahrenheit-to-Rankine offset is 459.67 (R = F + 459.67).

    Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
    Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
    """
    return round(float(fahrenheit) + 459.67, ndigits)
feea06edfbc800fbb4da6a3a128b2a89f07966ed
699,462
def connect(H, G, Gout=None, Hin=None):
    """
    Connect outputs Gout of G to inputs Hin of H.

    The outputs and inputs of the connected system are arranged as follows:
        - remaining outputs of G get lower, the outputs of H the higher indices
        - inputs of G get the lower, remaining inputs of H the higher indices

    connect(H, G) is equivalent to H * G.
    """
    # Double dispatch: the more derived type's hook is tried first, and an
    # AttributeError (hook missing) falls back to the other side's hook.
    if issubclass(type(H), type(G)):
        # H is the more specific type (or same type): try H's hook first.
        try:
            connection = H.__connect__(G, Gout, Hin)
        except AttributeError:
            connection = G.__rconnect__(H, Gout, Hin)
    else:
        # G is more specific: prefer its reflected hook.
        try:
            connection = G.__rconnect__(H, Gout, Hin)
        except AttributeError:
            connection = H.__connect__(G, Gout, Hin)
    # NOTE(review): an AttributeError raised *inside* a hook also triggers
    # the fallback here, not only a missing hook — confirm this is intended.
    return(connection)
c5a6bbf86ec9da5daf006a5e1dc3de1f8140e45f
699,463
import uuid


def generate_event_uuid():
    """
    Generate the identifier of the event (FaaS).

    Returns:
        str: a 32-character hex event identifier (uuid4 without dashes).
    """
    return uuid.uuid4().hex
e982f3699b9a0ee78676a74ed31095534a4ca24b
699,464
def factorial(x):
    """Return the factorial of a given non-negative integer.

    Fixes vs. original: the recursive version never terminated for x <= 0
    (factorial(0) recursed forever); this iterative version returns 1 for
    0 and 1, avoids recursion-depth limits for large x, and rejects
    negative input explicitly.

    Raises:
        ValueError: if x is negative.
    """
    if x < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, x + 1):
        result *= k
    return result
a1c3adacf122c1bec57bbbfd246f07bf627febcd
699,465
def roi_size(roi):
    """Return the number of columns and rows of an inclusive ROI.

    *roi* is ((x0, y0), (x1, y1)) with both corners inside the region.
    """
    (x0, y0), (x1, y1) = roi[0], roi[1]
    return x1 - x0 + 1, y1 - y0 + 1
caa9213399d9672aa9f23380c7f79db1b42cfa12
699,466
from pathlib import Path


def is_file_suffix(filename, suffixes, check_exist=True):
    """
    is_file + check for suffix.

    :param filename: pathlike object
    :param suffixes: tuple of possible suffixes
    :param check_exist: whether to check the file's existence
    :return: bool
    """
    exists_ok = not check_exist or Path(filename).is_file()
    return exists_ok and str(filename).endswith(suffixes)
e1e81eb0c39c991586097882c728535829acf415
699,467
import shutil


def cmd_exists(cmd):
    """Returns True if a binary exists on the system path."""
    located = shutil.which(cmd)
    return located is not None
0d43181532b9b71cb06b6a5d1f207bbb19899129
699,468
import requests


def get_swapi_resource(url, params=None):
    """Issue an HTTP GET to the SWAPI service and decode the JSON response.

    Parameters:
        url (str): resource url.
        params (dict): optional querystring arguments, e.g.
            {'search': 'yoda'}; search results arrive under a 'results'
            list in the returned JSON object. Defaults to None.

    Returns:
        dict: dictionary representation of the decoded JSON.
    """
    if params:
        return requests.get(url, params=params).json()
    return requests.get(url).json()
3a5de57fc498da1e25de4f2b3e61cae8508ad7c7
699,469
def assertify(text):
    """Wrap *text* in an s-expression ``(assert ...)``."""
    return ''.join(['(assert ', text, ')'])
da7af0fe5d27759d8cef4589843796bee8e74383
699,470
def about_time(all_df):
    """Return the last elapsed-time value and a placeholder remaining time.

    Note: remaining time is not collected anymore, so the second list
    entry is always None.

    Args:
        all_df: DataFrame-like object with an 'elapsed' column supporting
            ``.iloc`` indexing.

    Returns:
        tuple: ([last_elapsed, None], 2)
    """
    time_list = [all_df['elapsed'].iloc[-1], None]
    # The original guarded against len(time_list) != 2, but the list is a
    # two-element literal, so that branch could never fire; removed as
    # dead code.
    return time_list, len(time_list)
780a426c05ed8484796d718ca1aabaec1b648d03
699,471
import os


def get_file_name(file):
    """Return the base name of a path without its extension.

    Keyword arguments:
    file -- The string of file path
    """
    base = os.path.basename(file)
    stem, _extension = os.path.splitext(base)
    return stem
2590e6c90061ce37f8325776a038ce3770be1fe2
699,472
def bubble_sort(items):
    """Sort *items* in place with bubble sort and return the list.

    Adjacent out-of-order elements are swapped on repeated passes until a
    full pass produces no swaps.

    Running time: O(n**2) — up to n-1 passes over shrinking prefixes.
    Memory usage: O(1) extra — only a flag and two indices.
    """
    n = len(items)
    for completed in range(n - 1):
        any_swap = False
        # The tail of length `completed` is already in final position.
        for idx in range(n - 1 - completed):
            if items[idx + 1] < items[idx]:
                items[idx], items[idx + 1] = items[idx + 1], items[idx]
                any_swap = True
        # A clean pass means the list is sorted; stop early.
        if not any_swap:
            break
    return items
88748e0d552f0d08e228049f12268e34aed4907b
699,473
def human_list(_list) -> str:
    """Convert a list of items into 'a, b or c'.

    Args:
        _list: list of strings. The list is NOT modified (the original
            implementation popped the last element off the caller's list).

    Returns:
        str: the items joined as "a, b or c". An empty list yields "",
        and a single-element list yields that element (the original
        crashed on empty input and returned " or x" for one element).
    """
    if not _list:
        return ""
    if len(_list) == 1:
        return _list[0]
    return "%s or %s" % (", ".join(_list[:-1]), _list[-1])
04ee703d1878ac7602ba33aaf1831ccc188ee9e2
699,474
def readOpenMMReporterFile(reporter_file):
    """Parse an OpenMM reporter output file into a dict of columns.

    The first line is a comma-separated header; '#' and '"' characters
    are stripped from the column names. Every remaining line is parsed
    as comma-separated floats.

    Parameters
    ----------
    reporter_file : str
        Path to the reporter output file

    Returns
    -------
    dict
        Maps each column name to the list of its float values.
    """
    with open(reporter_file, 'r') as handle:
        header, *rows = handle.readlines()
    names = [col.replace('#', '').replace('"', '').strip()
             for col in header.split(',')]
    data = {name: [] for name in names}
    for row in rows:
        fields = row.strip().split(',')
        for position, name in enumerate(data):
            data[name].append(float(fields[position]))
    return data
9f7d3c958ef82994764f276bdd854868099c7960
699,475
def choose_word(file_path, index):
    """Pick a word from a space-separated word file.

    :param file_path: path to a file whose contents are space-separated words
    :param index: 1-based word index (wraps around the word list)
    :return: tuple (num of words without duplicates, the word at the given
        index in the file)
    """
    # Context manager guarantees the handle is closed; the original opened
    # the file and never closed it.
    with open(file_path, "r") as file_input:
        content = file_input.read()
    words = content.split(" ")
    count_of_words = len(set(words))
    # Operator precedence makes this (index % len(words)) - 1, i.e. a
    # 1-based lookup with wrap-around; behavior preserved as-is.
    return count_of_words, words[index % len(words) - 1]
9c038574509918a4e91c316cde749dda322ef0e1
699,476
import os


def get_env_token():
    """
    Get GitHub OAuth token from "GITHUB_TOKEN" environment variable.

    :return: GitHub OAuth token, or None when the variable is unset.
    """
    return os.environ.get("GITHUB_TOKEN")
dcee0fcef76b5ccdef094ae7fc40212c633d37c5
699,478
def ccall_except_check(x):
    """
    >>> ccall_except_check(41)
    42
    >>> ccall_except_check(-2)
    -1
    >>> ccall_except_check(0)
    Traceback (most recent call last):
    ValueError
    """
    if x != 0:
        return x + 1
    raise ValueError
f4a877f63a189b39fea477af218438de2e6c2a78
699,479
def num_fragmentations(df, obj_frequencies):
    """Total number of switches from tracked to not tracked.

    df: per-frame event DataFrame with columns ``OId`` (ground-truth object
    id) and ``Type`` (event label; 'MISS' marks an untracked frame).
    obj_frequencies: object whose ``.index`` lists the object ids to score.
    Returns the summed count of tracked -> MISS transitions inside each
    object's track span.
    """
    fra = 0
    for o in obj_frequencies.index:
        # Find first and last time object was not missed (track span). Then count
        # the number switches from NOT MISS to MISS state.
        dfo = df[df.OId == o]
        notmiss = dfo[dfo.Type != 'MISS']
        if len(notmiss) == 0:
            # Never tracked at all: no span, so it cannot fragment.
            continue
        first = notmiss.index[0]
        last = notmiss.index[-1]
        # 0/1 indicator of MISS over the span (label-based .loc slice is
        # inclusive of `last`); a diff of +1 marks a tracked->missed flip.
        diffs = dfo.loc[first:last].Type.apply(lambda x: 1 if x == 'MISS' else 0).diff()
        fra += diffs[diffs == 1].count()
    return fra
5d7bcebfeb486499eb8c102789db6cf9141f9247
699,480
def input_prompt(prompt):
    """
    Get user input from stdin.

    :param prompt: the prompt to display to the user
    :return: the line entered by the user (str, without trailing newline)
    """
    return input(prompt)
b780278c37f048d1ea61f36af0947b683c5f9b9c
699,481
def safeintorbignumber(value):
    """Safely convert a value to an integer, falling back to 10M.

    Args:
        value: anything int() accepts (int, numeric string, float, ...).

    Returns:
        int: int(value), or 10000000 when conversion fails.
    """
    try:
        return int(value)
    except (ValueError, TypeError):
        # TypeError covers non-numeric inputs such as None or lists, which
        # the original let propagate despite the "safely" contract.
        return 10000000
de665dc11ba27c6846412b706cf833e46dc26758
699,482
import requests
import re
import json


def get_figshare_article(article_id):
    """
    Given a figshare article id, return a JSON object containing the
    article metadata.

    :param article_id: figshare numeric article id
    :return: JSON object containing Figshare metadata
    """
    version_pattern = re.compile('v[0-9]*')
    url = 'https://api.figshare.com/v2/articles/{}'.format(article_id)
    article = json.loads(requests.get(url).content)
    # Figshare uses versioned DOI. VIVO is only interested in the most
    # recent version, so when a version suffix is present, strip it.
    doi = article.get('doi', '')
    if doi:
        match = re.search(version_pattern, doi)
        if match is not None:
            article['doi'] = doi.replace('.' + match.group(), '')
    return article
375bbb36226f002e8a7c85b8e2b1a28f13f52443
699,483
def isNumber(s):
    """
    Tests whether an input is a number

    Parameters
    ----------
    s : string or a number
        Any string or number which needs to be type-checked as a number

    Returns
    -------
    isNum : Bool
        True if 's' can be converted to a float, False if the conversion
        raises a ValueError.

    Examples
    --------
    >>> isNumber('5')
    True
    >>> isNumber('s')
    False
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
bd1bae27814ddca22c39d6411df3581e8116c324
699,484
import os


def _validate_ml_id(app_id, run_id):
    """Validates if there was an experiment run previously from the same
    app id but from a different experiment (e.g. hops-util-py vs. maggy)
    module.

    When the recorded run number for the same app is >= run_id, returns
    the recorded number + 1; otherwise returns the inputs unchanged.
    """
    prev_ml_id = os.environ.get("ML_ID")
    if prev_ml_id is None:
        return app_id, run_id
    prev_app_id, sep, prev_run_id = prev_ml_id.rpartition("_")
    if not sep:
        # rpartition found no underscore -> malformed ML_ID value
        raise ValueError(
            "Found a previous ML_ID with wrong format: {}".format(prev_ml_id)
        )
    if prev_app_id == app_id and int(prev_run_id) >= run_id:
        return app_id, (int(prev_run_id) + 1)
    return app_id, run_id
f415c0f03f16a0db108a649682182c8b81219f0e
699,485
def read_epi_params(item_basename):
    """Read all the necessary EPI parameters from text files with the given basename.

    Args:
        item_basename (str): The basename for all the files we need to read.
            In particular the following files should exist:

            - basename + '.read_out_times.txt' (with the read out time)
            - basename + '.phase_enc_dir.txt' (with the phase encode
              direction, like AP, PA, LR, RL, SI, IS, ...)

    Returns:
        dict: A dictionary for use in the nipype workflow 'all_peb_pipeline'.
            It contains the keys:

            - read_out_time (the read out time of the scan in seconds)
            - enc_dir (the phase encode direction, converted to nipype
              standards (x, -x, y, -y, ...))

    Raises:
        KeyError: if the phase-encode label in the file is not one of the
            keys in the translation table below.
    """
    # NOTE: the original docstring said '.read_out_time.txt' (singular),
    # but the code reads the plural filename — documented to match code.
    with open(item_basename + '.read_out_times.txt', 'r') as f:
        read_out_time = float(f.read())
    with open(item_basename + '.phase_enc_dir.txt', 'r') as f:
        phase_encoding = f.read()
    # Map scanner phase-encode labels onto nipype axis codes.
    phase_enc_dirs_translate = {'AP': 'y-', 'PA': 'y', 'LR': 'x-', 'RL': 'x',
                                'SD': 'x-', 'DS': 'x', 'SI': 'z-', 'IS': 'z',
                                'HF': 'z-', 'FH': 'z'}
    return {'read_out_time': read_out_time,
            'enc_dir': phase_enc_dirs_translate[phase_encoding]}
3af788073607df214c447f2b87606b2d3e1638fd
699,487
def filled_int(val, length):
    """
    Zero-pad the integer component of a value.

    :param val: The original value.
    :param length: Minimum length of the returned string
    :return: Zero-padded integer representation (if possible) of the
        value. The original value is used if integer conversion fails.
    """
    try:
        return str(int(val)).zfill(length)
    except ValueError:
        return str(val).zfill(length)
55da47570ab03c5964f3af167bb8a4f27656a660
699,488
import torch


def sph2cart_unit(u):
    """Convert spherical angles to unit Cartesian vectors.

    :param u: tensor of shape (..., 2) holding [azimuth, inclination],
        where azimuth is the angle between the x-axis and the xy
        projection of the vector, and inclination is the angle between
        the vector and the z-axis.
    :return: tensor of shape (..., 3) with the [x, y, z] unit vectors.
    """
    azimuth = u[..., 0]
    inclination = u[..., 1]
    sin_incl = torch.sin(inclination)
    return torch.stack(
        (sin_incl * torch.cos(azimuth),
         sin_incl * torch.sin(azimuth),
         torch.cos(inclination)),
        dim=-1,
    )
59b119dbb629a98057d051a1370385f450d3fae3
699,489
def coroutine(func):
    """
    Decorator that primes a coroutine, so callers can forget about the
    initial ``.next()`` / ``.send(None)`` call — it happens here.

    :param func: the coroutine (generator function) to decorate
    """
    def primed(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # advance to the first yield before handing it out
        return gen
    return primed
1ab7a10c369f4882541ee81614e94d31f7acc528
699,490
import pickle


def _load_batch(fpath, label_key='labels'):
    """Internal utility for parsing CIFAR data.

    Arguments:
        fpath: path the file to parse.
        label_key: key for label data in the retrieved dictionary.

    Returns:
        A tuple `(data, labels)` where data is reshaped to
        (N, 3, 32, 32).
    """
    with open(fpath, 'rb') as f:
        raw = pickle.load(f, encoding='bytes')
    # Batch files store bytes keys; decode each one to a utf8 string.
    batch = {key.decode('utf8'): value for key, value in raw.items()}
    data = batch['data']
    return data.reshape(data.shape[0], 3, 32, 32), batch[label_key]
4311566594ac6f0f6c9573bdb3ccf162205991ae
699,491
def _time_ago(t: int) -> str:
    """Render a duration of *t* seconds as a human-readable "ago" string."""
    total_minutes = int(t / 60)
    if t < 3600:
        return f'{total_minutes} minutes ago'
    hours, minutes = divmod(total_minutes, 60)
    return f'{hours} hours and {minutes} minutes ago'
2451197f458df9b7c2ac6f5e9b1487b7e6f8211d
699,493
import os
import fnmatch


def isfile_wo_ext(path):
    """Check that a file exists, either exactly or with any extension added.

    Args:
        path: path to test; "foo" also matches siblings like "foo.txt".

    Returns:
        bool: True when *path* is a file, or some "path.*" sibling is.
    """
    if os.path.isfile(path):
        return True
    # An empty dirname means the current directory; the original passed
    # '' straight to os.listdir, which raises FileNotFoundError for bare
    # filenames.
    path_dir = os.path.dirname(path) or '.'
    path_fn = os.path.basename(path)
    return len(fnmatch.filter(os.listdir(path_dir), path_fn + '.*')) > 0
e48aead6545faf36224114ce88229d8a39b0c756
699,494
def categories_to_json(categories):
    """
    Convert a collection of category SQLAlchemy objects into a plain dict.

    Each category contributes its id, name and nested items (id, title,
    description), yielding a JSON-serializable structure of the form
    {'categories': [...]}.
    """
    return {
        'categories': [
            {
                'id': cat.id,
                'name': cat.name,
                'items': [
                    {
                        'id': item.id,
                        'title': item.title,
                        'description': item.description,
                    }
                    for item in cat.items
                ],
            }
            for cat in categories
        ]
    }
34702fb12d1398395826bae71d39edd558199498
699,495
def velocity(estimate, actual, times=60):
    """Calculate velocity as (estimate * times) / actual.

    >>> velocity(2, 160, times=60)
    0.75
    >>> velocity(3, 160, times=60)
    1.125
    >>> velocity(3, 160, times=80)
    1.5
    """
    scaled = estimate * times
    return scaled / (actual * 1.)
ffdb12c7aea05f9fc8aa9e354e8db2320cbf431a
699,496
from pathlib import Path


def _node_does_not_exist(node_path):
    """
    Determine whether module already exists for node

    :param node_path: full path to node module
    :return: bool; True if node does not exist; False if it does exist
    """
    # Path.resolve() stopped raising FileNotFoundError for missing paths
    # (non-strict resolution since Python 3.6), so the original
    # try/except always returned False. Query the filesystem directly.
    return not Path(node_path).exists()
242f9895cb6e7ef4441b6d76b013cbdc88f8de45
699,499
import os


def _get_home():
    """Find user's home directory if possible. Otherwise raise error.

    Tries os.path.expanduser first, then falls back on the HOME,
    USERPROFILE and TMP environment variables. Only an existing
    directory is ever returned (the original could hand back a
    non-directory path).

    :see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
    """
    path = os.path.expanduser("~")
    if not os.path.isdir(path):
        # Fall back on common environment variables; the bare excepts of
        # the original are replaced with an explicit unset-safe lookup.
        for evar in ('HOME', 'USERPROFILE', 'TMP'):
            candidate = os.environ.get(evar, '')
            if candidate and os.path.isdir(candidate):
                return candidate
        raise RuntimeError('please define environment variable $HOME')
    return path
2b82e51a8c06ab34b546b117fa1cbc2eec88639f
699,500
def load_description():
    """Return the contents of README.md in the current directory."""
    # Context manager guarantees the handle is closed (the original
    # leaked it), and an explicit encoding avoids locale-dependent
    # decoding of the README.
    with open("README.md", encoding="utf-8") as buffer:
        return buffer.read()
b167638602c4d5f662ccc72e34dc042b7d9e63bb
699,501
def indent(s, depth):
    # type: (str, int) -> str
    """
    Indent a string of text with *depth* additional spaces on each line.
    """
    pad = depth * " "
    return "\n".join(pad + line for line in s.split("\n"))
69d509761cc6ab2f861f8b8c7fe7097ce8deff13
699,502
def prior_transform(u):
    """Flat prior between -10. and 10.

    Maps a unit-cube coordinate u in [0, 1] linearly onto [-10, 10].
    """
    centered = 2. * u - 1.
    return 10. * centered
4dd488a76a08361f1c7d833eaef96a7dfdf51ca6
699,503
def check_value_types(vars, argv):
    """
    This checks that all the user variables loaded in use that same or
    comparable datatypes as the defaults in vars. This prevents type issues
    later in the simulation.

    Given the many uservars and the possibility for intentional differences,
    especially as the program is developed, this function tries to be NOT
    OPINIONATED, only correcting for several obvious and easy to correct
    issues of type discrepancies occur between argv[key] and vars[key] ie
    1) argv[key] = "true" and vars[key] = False this script will not change
    argv[key] to False... it will convert "true" to True ---> argv[key]=True
    2) argv[key] = "1.01" and vars[key] = 2.1 this script will change
    argv[key] from "1.01" to float(1.01)

    Inputs:
    :param dict vars: Dictionary of program defaults, which will later be
        overwritten by argv values
    :param dict argv: Dictionary of User specified variables
    Returns:
    :returns: dict vars: Dictionary of program defaults, which will later be
        overwritten by argv values
    :returns: dict argv: Dictionary of User specified variables
    """
    for key in list(argv.keys()):
        if key not in list(vars.keys()):
            # Examples may be things like root_output_folder
            # Just skip these
            continue
        if type(argv[key]) != type(vars[key]):
            # Several variable default is None which means checks are
            # processed elsewhere...
            if vars[key] is None:
                # check argv[key] is "none" or "None"
                if type(argv[key]) == str:
                    if argv[key].lower() == "none":
                        argv[key] = None
                else:
                    continue
            # Handle number types
            elif type(vars[key]) == int or type(vars[key]) == float:
                if type(argv[key]) == int or type(argv[key]) == float:
                    # this is fine
                    continue
                elif type(argv[key]) == str:
                    try:
                        temp_item = float(argv[key])
                        if type(temp_item) == float:
                            argv[key] = temp_item
                        else:
                            # NOTE(review): unreachable in practice —
                            # float() always yields a float; and this
                            # IOError is caught by the bare except below,
                            # which re-raises its own message.
                            printout = "This parameter is the wrong type.\n \t Check : "
                            printout = printout + "{} type={}\n".format(
                                key, type(argv[key])
                            )
                            printout = printout + "\t Should be type={}\n\t".format(
                                type(vars[key])
                            )
                            printout = (
                                printout
                                + "Please check SMILESClickChem documentation using -h"
                            )
                            raise IOError(printout)
                    except:
                        # NOTE(review): bare except — also swallows the
                        # IOError raised just above, not only float()
                        # conversion failures.
                        printout = "This parameter is the wrong type. \n \t Check :"
                        printout = printout + " {} type={}\n".format(
                            key, type(argv[key])
                        )
                        printout = printout + "\t Should be type={}\n\t".format(
                            type(vars[key])
                        )
                        printout = (
                            printout
                            + "Please check SMILESClickChem documentation using -h"
                        )
                        raise IOError(printout)
                else:
                    printout = "This parameter is the wrong type. \n \t Check :"
                    printout = printout + " {} type={}\n".format(key, type(argv[key]))
                    printout = printout + "\t Should be type={}\n\t".format(
                        type(vars[key])
                    )
                    printout = printout + "Please check SMILESClickChem documentation using -h"
                    raise IOError(printout)
            elif type(vars[key]) == bool:
                if argv[key] is None:
                    # Do not try to handle this. May make sense.
                    continue
                if type(argv[key]) == str:
                    # Accept common textual spellings of booleans/None.
                    if argv[key].lower() in ["true", "1"]:
                        argv[key] = True
                    elif argv[key].lower() in ["false", "0"]:
                        argv[key] = False
                    elif argv[key].lower() in ["none"]:
                        argv[key] = None
                    else:
                        printout = "This parameter is the wrong type. \n \t Check :"
                        printout = printout + " {} type={}\n".format(
                            key, type(argv[key])
                        )
                        printout = printout + "\t Should be type={}\n\t".format(
                            type(vars[key])
                        )
                        printout = (
                            printout
                            + "Please check SMILESClickChem documentation using -h"
                        )
                        raise IOError(printout)
                else:
                    printout = "This parameter is the wrong type. \n \t Check :"
                    printout = printout + " {} type={}\n".format(key, type(argv[key]))
                    printout = printout + "\t Should be type={}\n\t".format(
                        type(vars[key])
                    )
                    printout = printout + "Please check SMILESClickChem documentation using -h"
                    raise IOError(printout)
    return vars, argv
395c4927f3b8b8ab169ad488b21a07b3db26ffbb
699,505
def ensure_components(model_tracer_list):
    """If a CO2 flavor is detected for a model, ensure all flavors are present."""
    co2_components = {'CO2', 'CO2_OCN', 'CO2_LND', 'CO2_FFF'}
    expanded = []
    for model in {m for m, _ in model_tracer_list}:
        tracers = {t for m, t in model_tracer_list if m == model}
        if 'CO2_LND+CO2_FFF' in tracers:
            # Combined-flavor tracer seen: include every CO2 component.
            tracers = tracers | co2_components
        expanded.extend((model, t) for t in tracers)
    return expanded
8cb7f4e14f51718c8db75779ea02f6a73917323d
699,506
def split(Text):
    """Split *Text* into a list of its individual characters.

    Note: despite the original description ("split individual words"),
    ``list(Text)`` yields one element per character, not per word.
    """
    return list(Text)
4b69f7eeb7e4b1856275b74f81e20015ba2ae172
699,507
import io


def annual():
    """
    Returns the html with the annual Catalonia forecast until 2024
    """
    # Context manager ensures the handle is closed (the original leaked
    # it); the built-in open() replaces the redundant io.open alias.
    with open("Annual_Forecast.html", "r") as f:
        return f.read()
5321febc4c09de97893c30b6a27a30a089b18079
699,509
def subtraction(a, b):
    """subtraction: subtracts b from a, return result c

    :param a: minuend (coerced to float)
    :param b: subtrahend (coerced to float)
    :return: float, a - b
    """
    a = float(a)
    b = float(b)
    # The docstring promises "subtracts b from a" (a - b); the original
    # computed b - a, contradicting both its name and documentation.
    c = a - b
    return c
0115222efc08588a12a6fbc1965441b83a7eaff0
699,510