content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def convert_member(member_str):
    """Convert a comma-separated member string from the Class database
    into a list of integer member ids.

    :param member_str: Data from Class database
    :type member_str: str
    :return: a list of member id as integer
    :rtype: list

    >>> print(convert_member("1,2,50,69"))
    [1, 2, 50, 69]
    """
    # "0" and the empty string both encode "no members".
    if member_str in ("0", ""):
        return []
    return [int(part) for part in member_str.split(",")]
fec4081104c3cb4574e255c8408164062a287963
25,256
import json


def is_json_serializable(x):
    """Return True if ``x`` can be serialized to JSON.

    The check is performed by actually serializing the object and
    catching the exceptions ``json.dumps`` raises for unsupported input.

    :param x: object to test
    :return: whether ``x`` is JSON serializable
    :rtype: bool
    """
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        return False
    return True
204c0439416a778f6d017765c6c16d99c19ead91
25,257
def is_palindrome(s):
    """Return True if ``s`` reads the same backwards as forwards.

    ``s`` is assumed (in this simple version of the problem) to contain
    only lower-case letters — no spaces, punctuation or upper-case.

    Examples: "abba" IS a palindrome; "abbz" is NOT (reversed it is
    "zbba").  "abcdexxedcba" IS one; "abcdexyedcba" is NOT.

    Side effects: none.

    :type s: str
    """
    # Compare each position in the first half against its mirror; the
    # middle character of an odd-length string needs no check.
    n = len(s)
    for i in range(n // 2):
        if s[i] != s[n - 1 - i]:
            return False
    return True
41409a3681c6f5c343f19fc53823415852243d45
25,258
import io def _load_text_file(path: str): """Load .vec file""" fin = io.open(path, 'rb') first_line = fin.readline().decode('utf8') vocab_size, embedding_size = map(int, first_line.split()) # init vocab list vocab_list = ['0'] * vocab_size # record start position of each line in file byte_pos = [0] * (vocab_size + 1) byte_pos[0] = fin.tell() for idx in range(vocab_size): line = fin.readline() tokens = line.rstrip().split(b' ') vocab_list[idx] = tokens[0].decode('utf8') byte_pos[idx + 1] = fin.tell() fin.close() return embedding_size, vocab_size, vocab_list, byte_pos
139e39cf155a5c757dc8c6034c298b84881d54e7
25,259
def number_of_pluses_before_an_equal(reaction):
    """Count the '+' separators that appear before the arrow ``=>``.

    Args:
        reaction (str) - reaction with correctly formatted spacing
    Returns (int): the number of pluses before the arrow ``=>``

    Example:
    >>>number_of_pluses_before_an_equal("C6H12O6 + 6O2=> 6CO2 + 6H2O")
    1
    """
    count = 0
    # Start at 1 so reaction[i - 1] is always valid (removes the unused
    # `reac` temp and the redundant `i > 0` re-check of the original).
    for i in range(1, len(reaction)):
        if reaction[i] == "=":
            return count
        # The formatting guarantees separator pluses look like " + ", so
        # checking the preceding space distinguishes them from charges.
        if reaction[i] == "+" and reaction[i - 1] == " ":
            count += 1
    return count
f532ee41dee797d7c2ae1639fae9f1cb61c3f037
25,260
import random


def generate_chromosome(min_length_chromosome, max_length_chromosome, possible_genes, repeated_genes_allowed):
    """Create a new individual's chromosome.

    A length is drawn uniformly between min_length_chromosome and
    max_length_chromosome (inclusive), then genes are drawn from
    possible_genes — with replacement when repeated_genes_allowed is
    truthy, without replacement otherwise.

    :param min_length_chromosome: (int) Minimum allowed length of the chromosome.
    :param max_length_chromosome: (int) Maximum allowed length of the chromosome.
    :param possible_genes: (list of ...) All values the genes can take.
    :param repeated_genes_allowed: (bool) Whether genes may repeat in the
        chromosome (1 = repeats allowed, 0 = no repeats).
    :return: (list of genes) List that represents the chromosome.
    """
    number_of_genes = random.randrange(min_length_chromosome, max_length_chromosome + 1)
    if repeated_genes_allowed:
        # Sampling with replacement.
        return random.choices(possible_genes, weights=None, k=number_of_genes)
    # Sampling without replacement: shuffle a copy, take a prefix.
    pool = possible_genes.copy()
    random.shuffle(pool)
    return pool[:number_of_genes]
aae0356538958bfe180b3f0abade7afd5dc2e7f7
25,261
def binary(x: int, pre: str = '0b', length: int = 8):
    """Return the binary representation of integer x.

    Input:
        x: a non-negative integer of any size
        pre: the prefix for the output string, default '0b'
        length: minimum number of binary digits; shorter values are
            padded with *leading* zeros (the original docstring said
            "trailing", but the fill has always been on the left).
            Default is 8, i.e. 2**8 = 256, one byte.
    Return:
        ``pre`` followed by the zero-padded binary digits of x.
    """
    # Equivalent to the original nested-format trick
    # '{0}{{:{1}>{2}}}'.format(pre, 0, length), but readable.
    return pre + bin(x)[2:].rjust(length, '0')
287e5bb87f31b71ad7ccd1cf65fab729794eeef4
25,262
def calculate_median_stat(stats):
    """Return the stat key at the median of data from get_stat_data.

    The half-way point of the summed "numerators"/"this" values is
    computed, then keys are walked in order until the running total
    reaches it.  Note: this function assumes the objects are sorted;
    the "metadata" key is skipped.
    """
    keys = [k for k in stats.keys() if k != "metadata"]
    half = sum(stats[k]["numerators"]["this"] for k in keys) / 2.0
    running = 0
    for key in keys:
        running += stats[key]["numerators"]["this"]
        if running >= half:
            return key
09e3f1b3059c36fe479a597de51243bb59b261d0
25,263
import torch


def clip(tensor, min_tensor, max_tensor):
    """Imitate numpy's clip: bound ``tensor`` elementwise.

    Applies the upper bound first and the lower bound second, matching
    the original call order (relevant only if min > max somewhere).
    """
    upper_bounded = torch.min(tensor, max_tensor)
    return torch.max(upper_bounded, min_tensor)
1b868f4a5518bc74cfed8835fa54f0d55195c3a9
25,264
import inspect


def library(scope=None, version=None, converters=None, doc_format=None, listener=None, auto_keywords=False):
    """Class decorator to control keyword discovery and other library settings.

    By default sets ``ROBOT_AUTO_KEYWORDS = False`` on the decorated
    library so that only methods explicitly decorated with the
    :func:`keyword` decorator become keywords; pass
    ``auto_keywords=True`` to keep automatic discovery.

    ``scope``, ``version``, ``converters``, ``doc_format`` and
    ``listener`` set the class attributes ``ROBOT_LIBRARY_SCOPE``,
    ``ROBOT_LIBRARY_VERSION``, ``ROBOT_LIBRARY_CONVERTERS``,
    ``ROBOT_LIBRARY_DOC_FORMAT`` and ``ROBOT_LIBRARY_LISTENER``
    respectively; each is only set when the argument is given, and
    overrides any existing attribute on the class.

    Usable both as bare ``@library`` and as
    ``@library(scope='GLOBAL', version='3.2')``.

    The ``@library`` decorator is new in Robot Framework 3.2.
    The ``converters`` argument is new in Robot Framework 5.0.
    """
    if inspect.isclass(scope):
        # Bare usage: ``@library`` directly on the class.
        return library()(scope)

    def decorator(cls):
        settings = (
            ('ROBOT_LIBRARY_SCOPE', scope),
            ('ROBOT_LIBRARY_VERSION', version),
            ('ROBOT_LIBRARY_CONVERTERS', converters),
            ('ROBOT_LIBRARY_DOC_FORMAT', doc_format),
            ('ROBOT_LIBRARY_LISTENER', listener),
        )
        for attr, value in settings:
            if value is not None:
                setattr(cls, attr, value)
        cls.ROBOT_AUTO_KEYWORDS = auto_keywords
        return cls

    return decorator
78b3a7c2423d0b594d5d8f4e564b929f2ef7148a
25,265
def false(*args):
    """Always return False, regardless of the arguments given.

    >>> false(1)
    False
    >>> false(None)
    False
    """
    return False
cb960acdc5ddb7a2a54d1a69165fc684674b34fe
25,266
def get_pct_higher(df):
    """Return the percentage difference between ex1/ex2 closing prices.

    The 'higher_closing_price' field selects which exchange closed
    higher (1 or 2); 0 percent is returned when prices are equivalent.
    """
    flag = df['higher_closing_price']
    if flag == 1:
        # exchange 1 closed higher: % by which it exceeds exchange 2
        return ((df['close_exchange_1'] / df['close_exchange_2']) - 1) * 100
    if flag == 2:
        # exchange 2 closed higher: % by which it exceeds exchange 1
        return ((df['close_exchange_2'] / df['close_exchange_1']) - 1) * 100
    # closing prices are equivalent
    return 0
34eb491a666c5b83a70374f065f789a0559e8b4e
25,268
def reflection_normal(n1, n2):
    """Fresnel reflection losses for normal incidence.

    At normal incidence there is no difference between s and p
    polarisation.

    Inputs:
        n1 : Refractive index of medium 1 (input)
        n2 : Refractive index of medium 2 (output)
    Returns:
        R : The Fresnel reflectance

    Doctests:
    >>> '%.2f' % reflection_normal(1.5,1)
    '0.04'
    """
    amplitude_ratio = (n1 - n2) / (n1 + n2)
    return amplitude_ratio ** 2.
db3e1779628116ce2d82a91dada64aa2d8ff4463
25,269
import numpy


def mut(
    chrom: int,
    all_max: list,
    all_min: list,
    prob_m: float,
    point_m: int,
    chi: list,
) -> list:
    """Apply random mutation to a member (in place) and return it.

    Parameters
    ----------
    chrom : int
        The number of chromosomes a member has
    all_max : list
        The list of the maximum allele values
    all_min : list
        The list of the minimum allele values
    prob_m : float
        The probability of random mutation occurring
    point_m: int
        The number of potential random mutations
    chi : list
        The member (mutated in place; the same list object is returned)

    Returns
    -------
    list
        The mutated member
    """
    # Loop for the number of potential random mutations
    for _ in range(0, point_m):
        # Generate a random value between 0 and 1 (1-element array; the
        # `x < prob_m` comparison below broadcasts to a 1-element bool)
        x = numpy.random.uniform(size = 1)
        # Check if the value is less than the probability for random mutations
        if x < prob_m:
            # Select the point of random mutation.
            # NOTE(review): numpy.random.randint's upper bound is
            # exclusive, so index chrom - 1 (the last gene) can never be
            # selected here — confirm this off-by-one is intentional.
            m_point = numpy.random.randint(0, chrom - 1)
            # Apply the mutation.
            # NOTE(review): likewise all_max[m_point] itself is excluded
            # from the drawn values — confirm against the allele bounds.
            chi[m_point] = numpy.random.randint(all_min[m_point], all_max[m_point])
    return chi
d45a9598d8baef59014230d35586e2cd5afce583
25,270
def _get_oauth_url(url): """ Returns the complete url for the oauth2 endpoint. Args: url (str): base url of the LMS oauth endpoint, which can optionally include some or all of the path ``/oauth2/access_token``. Common example settings that would work for ``url`` would include: LMS_BASE_URL = 'http://edx.devstack.lms:18000' BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL = 'http://edx.devstack.lms:18000/oauth2' """ stripped_url = url.rstrip('/') if stripped_url.endswith('/access_token'): return url if stripped_url.endswith('/oauth2'): return stripped_url + '/access_token' return stripped_url + '/oauth2/access_token'
e2f81f8fa0aab74c41eb253e1a7e2291ff96e334
25,271
def descr_bit_length(space, w_int):
    """int.bit_length() -> int

    Number of bits necessary to represent self in binary.
    >>> bin(37)
    '0b100101'
    >>> (37).bit_length()
    6
    """
    # Unwrap to a host int; the sign does not affect bit length.
    value = abs(space.int_w(w_int))
    count = 0
    while value:
        count += 1
        value >>= 1
    return space.wrap(count)
4a9ba13e91a2e398dc54ce2c157ac52faab93da3
25,273
def matchInIndex(node, index):
    """Find and consume the index entry matching ``node``.

    The first candidate under the node's short name whose full name is a
    suffix of the node's name (or vice versa) is removed from the index
    and returned; ``None`` when there is no match.

    :type node: mutils.Node
    :type index: dict[list[mutils.Node]]
    :rtype: Node
    """
    key = node.shortname()
    match = None
    for candidate in index.get(key) or []:
        if node.name().endswith(candidate.name()) or candidate.name().endswith(node.name()):
            match = candidate
            break
    if match is not None:
        # Consume the entry so it cannot be matched twice.
        index[key].remove(match)
    return match
f010105cd02cd2dc699233431c2383b770ecea67
25,274
def ifiltermap(predicate, function, iterable):
    """Lazily filter ``iterable`` with ``predicate`` and map ``function``
    over the surviving items (a generator combining filter and map)."""
    for item in iterable:
        if predicate(item):
            yield function(item)
26a89bc7df2bec825a372e0f1745ffba4bf63683
25,275
import sys


def get_params():
    """Format command-line arguments as command and args params.

    The first argument is treated as the command when it contains
    ``--``; otherwise the command defaults to ``--run`` and every
    argument is passed through as args.  An empty argument list now
    yields the default command instead of raising IndexError.

    :return: dict with 'cmd' (str) and 'args' (list of str)
    """
    argv = sys.argv[1:]
    if not argv:
        # No arguments at all: default command, nothing to pass on.
        return {'cmd': '--run', 'args': []}
    # Note: substring test, matching the original behaviour ('--' may
    # appear anywhere in the first argument).
    if '--' in argv[0]:
        cmd, args = argv[0], argv[1:]
    else:
        cmd, args = '--run', argv
    return {'cmd': cmd, 'args': args}
960e0e9e44d1b0591a310a7a1730877b85f8227a
25,278
import os


def pip_url_kwargs(parentdir, git_remote):
    """Return kwargs for :func:`create_repo_from_pip_url`."""
    # The clone always lands in a fixed 'repo_clone' subdirectory.
    return {
        'pip_url': 'git+file://' + git_remote,
        'repo_dir': os.path.join(str(parentdir), 'repo_clone'),
    }
302b352737c306f61306d55df987104329b7668a
25,279
from pathlib import Path
from typing import List


def read_feastignore(repo_root: Path) -> List[str]:
    """Read .feastignore in the repo root directory (if exists) and return
    the list of user-defined ignore paths."""
    feast_ignore = repo_root / ".feastignore"
    if not feast_ignore.is_file():
        return []
    ignore_paths = []
    for raw_line in feast_ignore.read_text().strip().split("\n"):
        # Drop everything from the first '#' onward (inline comments).
        comment_at = raw_line.find("#")
        if comment_at >= 0:
            raw_line = raw_line[:comment_at]
        # Strip surrounding whitespace; skip lines left empty.
        cleaned = raw_line.strip()
        if cleaned:
            ignore_paths.append(cleaned)
    return ignore_paths
57fa48fa61edfe9856d98171d54855a854c33743
25,281
from pathlib import Path
import math


def process(file: Path) -> int:
    """
    Process input file yielding the submission value

    Interprets each line as a ship-navigation instruction (a letter and
    an integer): N/S/E/W translate, R/L rotate the heading clockwise /
    counter-clockwise in degrees, F moves along the current heading.
    Looks like an Advent of Code day-12-style puzzle — confirm.

    :param file: file containing the input values
    :return: value to submit (Manhattan distance from the start)
    """
    # Heading in degrees, 90 == east (so sin gives the E/W component).
    heading = 90
    east_west_pos = 0
    north_south_pos = 0
    # NOTE(review): file handle from open() is never closed; relies on
    # GC. Fine for a one-shot script, but a `with` would be safer.
    instructions = [l.strip() for l in open(file)]
    for i in instructions:
        action = i[0]
        value = int(i[1:])
        if action == 'R':
            heading = (heading + value) % 360
        elif action == 'L':
            heading = (heading - value) % 360
        if action == 'E':
            east_west_pos += value
        if action == 'W':
            east_west_pos -= value
        if action == 'N':
            north_south_pos += value
        if action == 'S':
            north_south_pos -= value
        if action == 'F':
            # Forward motion decomposed by heading. Uses float sin/cos,
            # so positions drift to near-integers for 90-degree
            # headings; the final int() truncation relies on the error
            # staying small — assumed acceptable for puzzle inputs.
            east_west_pos += value * math.sin(float(heading) / 360.0 * 2.0 * math.pi)
            north_south_pos += value * math.cos(heading / 360 * 2 * math.pi)
    manhattan_distance = int(abs(east_west_pos) + abs(north_south_pos))
    return manhattan_distance
3ba2c0a9fd4457ea2b49d6aca983123d8af45e04
25,282
import os


def get_openpype_icon_path() -> str:
    """Path to OpenPype icon png file (next to this module)."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, "openpype_icon.png")
027eab80e01269adf5c2ed6febfed68fb9850630
25,283
def calc_wildtype_point(wt, pwms, alleles, ev_couplings, start_pos=0): """Calculate immunogenicity and energy of the wildtype. Parameters ---------- ev_couplings : `EVcouplings` Read in e_ij binaries used for preparing input files. config : `ConfigParser` Configurations. Returns ------- immuno : float Immunogenicity of the wildtype. hamiltonian : float Hamiltonian of the wildtype. """ # find lower-case positions and delete idx_map = {start_pos+i:i for i in range(len(wt))} idx = set(ev_couplings.index_list).intersection(set(range(start_pos, start_pos+len(wt)))) cov_seq = "".join(wt[idx_map[i]] for i in sorted(idx)) hamiltonians = ev_couplings.hamiltonians([cov_seq])[0] hamiltonian = -1.0 * hamiltonians[0] # calculate immunogenicity immuno = 0 for i in range(len(wt)-9+1): ep = wt[i:i+9] for al,pwm in pwms.items(): immuno += alleles.loc[al,"p"]*max(sum(pwm[j,a] for j,a in enumerate(ep))-alleles.loc[al,"pssm_thresh"] ,0) return immuno, hamiltonian
03c6b33ab0a5418322af3d25f6cd3efc65747ec7
25,284
def ensembl_gene_response():
    """Return a canned response from the Ensembl gene API (AAGAB, GRCh37)."""
    return [
        {
            "description": (
                "alpha- and gamma-adaptin binding protein "
                "[Source:HGNC Symbol;Acc:25662]"
            ),
            "logic_name": "ensembl_havana_gene",
            "version": 8,
            "assembly_name": "GRCh37",
            "gene_id": "ENSG00000103591",
            "external_name": "AAGAB",
            "start": 67493371,
            "seq_region_name": "15",
            "feature_type": "gene",
            "end": 67547533,
            "strand": -1,
            "id": "ENSG00000103591",
            "biotype": "protein_coding",
            "source": "ensembl_havana",
        }
    ]
f896fcd516d2e08a0219cf7db5f8492524328560
25,285
import six


def makestr(s):
    """Converts 's' to a non-Unicode string (bytes on Python 3)."""
    # Already bytes: nothing to do.
    if isinstance(s, six.binary_type):
        return s
    # Anything that is not text gets its repr() first.
    if not isinstance(s, six.text_type):
        s = repr(s)
    # Text (including the repr above on Python 3) is UTF-8 encoded.
    if isinstance(s, six.text_type):
        s = s.encode('utf-8')
    return s
99c5a3100effd4c75e34bfb6c021ff2fd5ebfdbf
25,286
def _nearest(pivot, items): """Returns nearest point """ return min(items, key=lambda x: abs(x - pivot))
b64eb1bb83895e56badb387067c9839e811d90b5
25,287
def give_same(value):
    """Identity function: return the argument unchanged."""
    return value
92e0b8b3e6d40120fbe1d860ff74c220fbdfaec5
25,288
def process_mutect_vcf(job, mutect_vcf, work_dir, univ_options):
    """
    Process the MuTect vcf for accepted calls.

    :param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
    :param str work_dir: Working directory (NOTE(review): unused here)
    :param dict univ_options: Dict of universal options used by almost all
        tools (NOTE(review): also unused here)
    :return: Path to the processed vcf
    :rtype: str
    """
    # Localise the vcf from the Toil file store onto this worker.
    mutect_vcf = job.fileStore.readGlobalFile(mutect_vcf)
    # NOTE(review): the output name is the input path with
    # 'mutect_parsed.tmp' appended without a separator — confirm intended.
    with open(mutect_vcf, 'r') as infile, open(mutect_vcf + 'mutect_parsed.tmp', 'w') as outfile:
        for line in infile:
            line = line.strip()
            if line.startswith('#'):
                # Header lines pass through unchanged.
                print(line, file=outfile)
                continue
            line = line.split('\t')
            # Column 7 is the VCF FILTER field: keep every call MuTect
            # did not mark REJECT.
            if line[6] != 'REJECT':
                print('\t'.join(line), file=outfile)
    return outfile.name
083859b8ef25f8b0f1c89f0542286a692aef98c0
25,289
import itertools


def get_feature_iter_func(flat):
    """
    Helper function to determine what type of iteration to perform across
    two features: unordered pairs when ``flat`` is truthy, the full
    Cartesian product otherwise.  Takes `bool` as input and returns a
    callable over a sequence of feature names.
    """
    if flat:
        def pairer(feature_names):
            return itertools.combinations(feature_names, 2)
    else:
        def pairer(feature_names):
            return itertools.product(feature_names, feature_names)
    return pairer
36d329b2a62f0374bfa0894eedd8f72e69d94239
25,290
def process_task(testcase: dict, values: dict):
    """Process testcase with values.

    Fills in the "value" of each entry in ``testcase["params"]`` from
    ``values["values"]``, recursing into nested parameter lists.
    Mutates ``testcase`` in place and also returns it.
    """
    for tcparam in testcase["params"]:
        if not tcparam.get("values"):
            # Simple parameter: copy the value whose id matches.
            for vvalue in values["values"]:
                if tcparam["id"] == vvalue["id"]:
                    tcparam["value"] = vvalue["value"]
            # else no change is required
        else:
            # Parameter with its own candidate values.
            for pvalue in tcparam["values"]:
                if pvalue.get("params"):
                    # Nested params: recurse with a wrapper dict and
                    # splice the processed list back in.
                    newpars = {"params": pvalue["params"]}
                    pvalue["params"] = process_task(newpars, values)["params"]
                for vvalue in values["values"]:
                    # NOTE(review): this compares pvalue["id"] against
                    # vvalue["value"] (not vvalue["id"]) — asymmetric
                    # with the simple branch above; confirm intended.
                    if pvalue["id"] == vvalue["value"]:
                        tcparam["value"] = pvalue["title"]
    return testcase
d4b1255820fe8aec8a770c9a38f2b4dd0c75d1d5
25,291
import platform
import os
import logging


def default_ccache_dir() -> str:
    """:return: ccache directory for the current platform"""
    # Share ccache across containers when CCACHE_DIR is set.
    if 'CCACHE_DIR' in os.environ:
        ccache_dir = os.path.realpath(os.environ['CCACHE_DIR'])
        try:
            os.makedirs(ccache_dir, exist_ok=True)
            return ccache_dir
        except PermissionError:
            logging.info('Unable to make dirs at %s, falling back to local temp dir', ccache_dir)
    # On OSX the default tmpdir is not mountable, so use a fixed path.
    if platform.system() == 'Darwin':
        osx_dir = "/tmp/_mxnet_ccache"
        os.makedirs(osx_dir, exist_ok=True)
        return osx_dir
    return os.path.join(os.path.expanduser("~"), ".ccache")
218340855bd7172aabcf7cf1eb461b4dfd637638
25,292
def insert(src, pos, s):
    """Insert string ``s`` into ``src`` at index ``pos`` and return the result."""
    return src[:pos] + s + src[pos:]
82b3bd2c46858623ccb51d5d6bbec1ef537a5c1e
25,293
def broadcast_proc(*procs, combine=None):
    """Combine two or more updaters into one.

    Input data is passed on to all of them; output is combined with
    ``combine(...)`` or assembled into a tuple when ``combine`` is
    omitted.

    combine -- should accept as many arguments as there are procs
    out_proc(x) == proc1(x), proc2(x),...
    out_proc() == combine(proc1(), proc2(), ...)
    """
    if combine is None:
        # Default combiner: gather the outputs into a tuple.
        def combine(*parts):
            return tuple(parts)

    def fanout(*x):
        if not x:
            # Query mode: collect and combine each proc's output.
            return combine(*(proc() for proc in procs))
        # Update mode: forward the data to every proc.
        for proc in procs:
            proc(*x)

    return fanout
102259473ea7ce98cc7514ab0052d5adf293909e
25,295
def answer(question):
    """ Evaluate expression

    Parses a "What is ...?" word problem (plus/minus/multiplied by/
    divided by), evaluates it strictly left-to-right, and returns the
    result.  Raises ValueError("unknown operation") for unsupported
    verbs and ValueError("syntax error") for malformed expressions.
    """
    if not question.startswith("What is"):
        raise ValueError("unknown operation")
    # Strip "What is " and the trailing '?', then rewrite the word
    # operators to symbols.  NOTE: the local name shadows the stdlib
    # `math` module — harmless here since it is never imported.
    math = question[7:-1].replace("minus", "-").replace("plus", "+")
    math = math.replace("multiplied by", "*").replace("divided by", "/")
    math = math.split()
    # The expression must start with a (possibly negative) number.
    if not math or not math[0].lstrip("-").isnumeric():
        raise ValueError("syntax error")
    # `current` holds the running value, and transiently the pending
    # operator ([value, op] between operator and operand tokens).
    current = [int(math[0])]
    for index in range(1, len(math)):
        if not math[index].lstrip("-").isnumeric():
            # Operator token: must be one of / * + - and must not be
            # immediately followed by another operator.
            if math[index] not in "/*+-":
                raise ValueError("unknown operation")
            current.append(math[index])
            if index < len(math) - 1 and math[index + 1] in "/*+-":
                raise ValueError("syntax error")
        else:
            # Operand token: apply the pending operator.  Two numbers in
            # a row leave an int in current[-1], caught via TypeError
            # below or the else branch.
            try:
                if current[-1] == "+":
                    current = [current[0] + int(math[index])]
                elif current[-1] == "-":
                    current = [current[0] - int(math[index])]
                elif current[-1] == "*":
                    current = [current[0] * int(math[index])]
                elif current[-1] == "/":
                    current = [current[0] / int(math[index])]
                else:
                    raise ValueError("syntax error")
            except TypeError:
                raise ValueError("syntax error")
    # A trailing operator leaves len(current) == 2: malformed.
    if len(current) == 1:
        return current[0]
    raise ValueError("syntax error")
ae5a0552549a59bcf879a7bb429903f8ac33084c
25,296
def retrieve_longest_smiles_from_optimal_model(task):
    """
    From the optimal models that were trained on the full data set using
    `full_working_optimal.py`, we retrieve the longest SMILES that was
    generated.

    Parameters
    ----------
    task : str
        The task to consider.

    Returns
    -------
    int :
        The longest SMILES that was generated when training the best model
        strategy for `task` data, or None for an unknown task.
    """
    longest_by_task = {
        "FreeSolv": 76,
        "ESOL": 109,
        "lipo": 268,
        "lipophilicity": 268,
        "chembl28": 246,
        "affinity": 246,
    }
    return longest_by_task.get(task)
5692b6e5bf322b0a6df67f9ad5ac699429ba9711
25,297
def partition_sequences(k):
    r"""Generates a set of partition_sequences of size :math:`k`, i.e.
    sequences :math:`(n_i)` such that :math:`\sum_{i=1}^k i n_i = k`.

    (Raw docstring so the LaTeX ``\s`` is not treated as an invalid
    escape sequence.)

    :param k: non-negative integer
    :return: generator of lists of length k (each list is one sequence)
    """
    assert k >= 0, 'Negative integer.'

    def f(k, c, i):
        # Yield length-c tails of partition sequences for remainder k,
        # starting at part size i.
        if k == 0:
            yield [0] * c
        elif i == k:
            yield [1] + [0] * (c - 1)
        elif i < k:
            for n in range(0, k // i + 1):
                for ps in f(k - (i * n), c - 1, i + 1):
                    yield [n] + ps

    return f(k, k, 1)
fac5e4e061767bc539c9d5aebb670de8c69f1843
25,299
import re


def all_collections(db):
    """
    Yield all non-system collections in db.
    """
    # Negative lookahead rejects names starting with 'system.'.
    non_system = re.compile(r'(?!system\.)')
    for name in db.list_collection_names():
        if non_system.match(name):
            yield db[name]
1b8220ac493036995695fc9ccf9ac74882677b4d
25,300
def single_line_paragraph(s):
    """Return True if s is a single-line paragraph
    (a directive line starting with '@', or a lone docstring fence)."""
    if s.startswith('@'):
        return True
    return s.strip() in ('"""', "'''")
1e1febf21479b65920423268d93e5571de72b4ef
25,301
import os


def clean_path(path, root=''):
    """
    Returns given path in absolute format expanding variables and user flags.

    Parameters
    ----------
    path : string or None
        Path to clean.  ``None`` is returned unchanged (previously the
        ``path is not None`` guard only skipped expansion and the call
        then crashed inside ``os.path.isabs(None)``).
    root : string
        Alternative root for converting relative path.

    Returns
    -------
    out : string or None
        Converted path.
    """
    if path is None:
        return None
    path = os.path.expandvars(os.path.expanduser(path))
    root = os.path.expandvars(os.path.expanduser(root))
    # Anchor at root only when path is relative AND root is absolute
    # (same selector logic as the original index-into-a-pair trick).
    if os.path.isabs(path) or not os.path.isabs(root):
        return os.path.abspath(path)
    return os.path.abspath(root + '/' + path)
030d35ced383353e2a2542b1d6c4c11f097fc7e2
25,302
def join_name_with_id(stations, data):
    """
    Performs a join on stations and pollution data.

    Rows whose (case- and whitespace-normalised) name has no matching
    station are dropped; matched rows gain 'id_station' and lose 'name'.

    :param stations: list of (id, name) station tuples for a specific date
    :param data: pollution data (list of dicts with a 'name' key)
    :return: list of joined data
    """
    def normalise(name):
        return name.lower().strip()

    id_by_name = {normalise(name): station_id for station_id, name in stations}
    joined = []
    for row in data:
        key = normalise(row['name'])
        if key not in id_by_name:
            continue
        merged = {'id_station': id_by_name[key], **row}
        del merged['name']
        joined.append(merged)
    return joined
1dc8768d64ffa53a152052d6ab15bd36fb84dddb
25,303
def flatten_probs(probs, labels, ignore_index=None):
    """Flattens predictions in the batch.

    :param probs: (B, C, H, W) class probabilities, or (B, H, W) from a
        sigmoid layer (treated as a single channel)
    :param labels: (B, H, W) integer labels
    :param ignore_index: label value to drop, or None to keep everything
    :return: (probs of shape (N, C), labels of shape (N,))
    """
    if probs.dim() == 3:
        # assumes output of a sigmoid layer: add a singleton channel dim
        B, H, W = probs.size()
        probs = probs.view(B, 1, H, W)
    B, C, H, W = probs.size()
    probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B*H*W, C=P,C
    labels = labels.view(-1)
    if ignore_index is None:
        return probs, labels
    valid = (labels != ignore_index)
    # Boolean-mask indexing keeps the (N, C) shape even when exactly one
    # element is valid; the previous nonzero().squeeze() collapsed that
    # case to a 1-D tensor.
    vprobs = probs[valid]
    vlabels = labels[valid]
    return vprobs, vlabels
515d0ffda912c60d5106881318410ba8f796662b
25,305
def makeStanDict(dobj, N_comp=2):
    """Convert an MCMC data object to a dict for input to STAN."""
    return {
        'N_comp': N_comp,
        'N_band': dobj.n,
        'nu_obs': dobj.freq_obs,
        'flux': dobj.d,
        'sigma': dobj.sig,
        'z': dobj.z,
    }
0e7c754937bb6bf7bd5be8dbbdea5015e9a13540
25,306
def num_words_tags(tags, data):
    """Count how often each requested tag appears in the dataframe.

    :param tags: tags to count
    :param data: dataframe with a 'Tag' column
    :return: dict mapping tag -> frequency of that tag
    """
    return {tag: len(data[data['Tag'] == tag]) for tag in tags}
9c6fddfcbdd1958e1c43b9b9bf8750bacacc3a31
25,307
def change_possible(part):
    """Check if there is anything to permute: at least one ':' that is
    not just the scheme separator of a lone http/https URL."""
    scheme_only = part.count(':') == 1 and ('http:' in part or 'https:' in part)
    return ':' in part and not scheme_only
d74359a518766eae0581de696049119848529ba3
25,308
def generate_pyproject(tmp_path):
    """Return function which generates pyproject.toml with a given ignore_fail value."""
    def generator(ignore_fail):
        # Base template: three tasks (two failing, one succeeding) run
        # as a sequence.
        project_tmpl = """
[tool.poe.tasks]
task_1 = { shell = "echo 'task 1 error'; exit 1;" }
task_2 = { shell = "echo 'task 2 error'; exit 1;" }
task_3 = { shell = "echo 'task 3 success'; exit 0;" }

[tool.poe.tasks.all_tasks]
sequence = ["task_1", "task_2", "task_3"]
"""
        # ignore_fail=True becomes a bare boolean; any non-bool value is
        # written as a quoted string; False adds nothing.
        if isinstance(ignore_fail, bool) and ignore_fail:
            project_tmpl += "\nignore_fail = true"
        elif not isinstance(ignore_fail, bool):
            project_tmpl += f'\nignore_fail = "{ignore_fail}"'
        with open(tmp_path / "pyproject.toml", "w") as fp:
            fp.write(project_tmpl)
        return tmp_path

    return generator
a79d0f24a3edc9fa8689cd0c33cef9e8f8121252
25,309
def parseConfig(s):
    """Parses a simple config file.

    The expected format encodes a simple key-value store: keys are
    strings, one per line, and values are arrays.  Everything before the
    first colon on each line is the key; everything after is a
    space-separated list of value entries.  Leading/trailing whitespace
    is stripped from keys and values.  Lines without a colon are
    silently ignored; no comment handling is implemented.

    Arguments:
    s: A string containing the full contents of a config file.

    Returns a dictionary mapping strings to lists (or to a single int,
    float, bool, or string when the value list has exactly one entry).
    """
    def coerce(token):
        # true/yes and false/no (case-insensitive) become booleans,
        # then int, then float, falling back to the raw string.
        lowered = token.lower()
        if lowered in ['true', 'yes']:
            return True
        if lowered in ['false', 'no']:
            return False
        try:
            return int(token)
        except ValueError:
            pass
        try:
            return float(token)
        except ValueError:
            return token

    parsed = {}
    for line in s.splitlines():
        key, sep, rest = line.partition(':')
        if not sep:
            # Non-conforming line (no colon): silently ignored.
            continue
        values = [coerce(tok) for tok in rest.split()]
        if len(values) == 1:
            parsed[key.strip()] = values[0]
        else:
            parsed[key.strip()] = values
    return parsed
259fdcef0eabeb410b2c2f79c50bb5985b5ce5f7
25,311
def parse_args(args):
    """
    Parses the command line arguments. For now, the only arg is `-d`,
    which allows the user to select which database file that they would
    like to use. More options might be added in the future or this
    option might be changed.

    :param args: list of argument strings
    :return: the joined, stripped database name following '-d', or None
        when the flag is absent or the list is empty (previously an
        empty list raised IndexError).
    """
    if args and args[0] == "-d":
        return ' '.join(args[1:]).strip()
    return None
b251103d2d73f63ff795ddad82de8040d8e81ec4
25,312
import json


def make_ably_error_event(code, status):
    """Make a control event."""
    payload = {
        'message': 'Invalid accessToken in request: sarasa',
        'code': code,
        'statusCode': status,
        'href': "https://help.ably.io/error/%d" % code,
    }
    return {'event': 'error', 'data': json.dumps(payload)}
c88cb225cc0b6d2c5c9b14da818e65d19dbdb890
25,314
def msgs_features(msgs):
    """Feature extractor for a list of messages.

    Returns politeness averaged per message, and positive sentiment and
    future-tense lexicon hits averaged per sentence.
    """
    total_sentences = sum(m['n_sentences'] for m in msgs) * 1.0
    future_hits = sum(
        len(m['lexicon_words'].get("disc_temporal_future", [])) for m in msgs
    )
    return dict(
        polite=sum(m['politeness'] for m in msgs) / len(msgs),
        sent=sum(m['sentiment']['positive'] for m in msgs) / total_sentences,
        future=future_hits / total_sentences,
    )
263bca98df4ab4e3e167b5a565c15966069e42b4
25,315
import time
import pandas


def make_func(file_name):
    """Create function for testing.

    The closure pins ``file_name`` so the returned zero-argument
    function can be jitted / benchmarked with a constant path.
    """
    def _function():
        started = time.time()
        frame = pandas.read_csv(file_name)
        return time.time() - started, frame

    return _function
1cf11058af2d1b00ac8cd640ef8a7e3efd7ece44
25,316
import torch


def new_ones(tensor, size, dtype=None):
    """Return a new all-ones tensor of the given shape.

    ``dtype`` may be the string 'long' or 'uint8'; any other value
    (including None) inherits the source tensor's dtype.
    """
    torch_dtype = {'long': torch.long, 'uint8': torch.uint8}.get(dtype)
    if torch_dtype is None:
        return tensor.new_ones(size)
    return tensor.new_ones(size, dtype=torch_dtype)
2f8843a0a6bcb3493a1efce881c9e88d8a33cfa4
25,317
def get_binary2source_entity_mapping(source2binary_mapping_full):
    """For each binary function, aggregate all source functions mapping to it.

    Each ``mapping_line`` appears to be a sequence where [0..3] describe
    the source entity (with [1] a line number and [3] an entity-kind
    string), [-2] is the binary function key and [-1] the binary
    address — assumed from the indexing below; confirm with the
    producer of ``source2binary_mapping_full``.

    Returns a 4-tuple:
      - line dict:   binary key -> unique [e0, e1, e2, e3] entries
      - simple dict: binary key -> unique [e0, e2, e3] entries
      - function dict: as simple, restricted to "Function" entities
      - list of binary addresses that had no source mapping

    NOTE(review): the `not in` membership tests on lists make this
    O(n^2); switching to sets would change element types, so it is only
    flagged here.
    """
    binary2source_entity_mapping_simple_dict = {}
    binary2source_entity_mapping_line_dict = {}
    unresolved_binary_address = []
    binary2source_function_mapping_simple_dict = {}
    for mapping_line in source2binary_mapping_full:
        # No binary function resolved: remember the address once.
        if mapping_line[-2] is None:
            if mapping_line[-1] not in unresolved_binary_address:
                unresolved_binary_address.append(mapping_line[-1])
            continue
        # Ensure every resolved binary key exists in all three dicts,
        # even when the entity below is skipped.
        if mapping_line[-2] not in binary2source_entity_mapping_line_dict:
            binary2source_entity_mapping_line_dict[mapping_line[-2]] = []
        if mapping_line[-2] not in binary2source_entity_mapping_simple_dict:
            binary2source_entity_mapping_simple_dict[mapping_line[-2]] = []
        if mapping_line[-2] not in binary2source_function_mapping_simple_dict:
            binary2source_function_mapping_simple_dict[mapping_line[-2]] = []
        # Skip placeholder entities (line "0" with no entity name).
        if mapping_line[1] == "0" and mapping_line[2] == None:
            continue
        # Deduplicated full (with line number) entry.
        if [mapping_line[0], mapping_line[1], mapping_line[2], mapping_line[3]] not in \
                binary2source_entity_mapping_line_dict[mapping_line[-2]]:
            binary2source_entity_mapping_line_dict[mapping_line[-2]].append(
                [mapping_line[0], mapping_line[1], mapping_line[2], mapping_line[3]])
        # Deduplicated simple (without line number) entry.
        if [mapping_line[0], mapping_line[2], mapping_line[3]] not in binary2source_entity_mapping_simple_dict[mapping_line[-2]]:
            binary2source_entity_mapping_simple_dict[mapping_line[-2]].append([mapping_line[0], mapping_line[2], mapping_line[3]])
        # Function-only view: entity kind missing or containing "Function".
        if [mapping_line[0], mapping_line[2], mapping_line[3]] not in binary2source_function_mapping_simple_dict[mapping_line[-2]]:
            if not mapping_line[3] or "Function" in mapping_line[3]:
                binary2source_function_mapping_simple_dict[mapping_line[-2]].append([mapping_line[0], mapping_line[2], mapping_line[3]])
    return binary2source_entity_mapping_line_dict, binary2source_entity_mapping_simple_dict, binary2source_function_mapping_simple_dict, unresolved_binary_address
8f75c8848b4d0c87b64cf1c0909f24e0a6866c91
25,318
def path_filter(path):
    # type: (str) -> str
    """
    Removes the trailing '/' of a path, if any

    :param path: A parsed path
    :return: The parsed path without its trailing /
    """
    if path and path.endswith("/"):
        return path[:-1]
    return path
4b449dfe2f840a25bec605464e6a8dbaeaf9afed
25,319
def generate_partial_word(word, correct_guess_list):
    """Generate the display word with all correctly chosen letters shown.

    Each letter of ``word`` is shown if it is in
    ``correct_guess_list``, otherwise replaced with '_'.  Membership is
    tested once per position, so a letter duplicated in the guess list
    is no longer emitted multiple times (the original appended the
    letter once per matching guess).

    :param word: the word being guessed
    :param correct_guess_list: letters guessed correctly so far
    :return: string of the same length as ``word``
    """
    return "".join(
        letter if letter in correct_guess_list else "_" for letter in word
    )
35a1d1afe0262f0fd816c1e7978847b692136e05
25,320
import os


def file_walker(filepath, extension=(".tiff", ".tif")):
    """Return the files in ``filepath`` whose names match ``extension``.

    :param filepath: path of the folder containing TIFF tiles
    :type filepath: str
    :param extension: tuple of (lower-case) extensions to search for
    :type extension: Union[str, Tuple[str]]
    :return: list of full paths with a matching file extension
    :rtype: List[str]
    """
    # Case-insensitive match: the name is lowered before the endswith
    # test, so extensions should be given in lower case.
    return [
        os.path.join(filepath, entry)
        for entry in os.listdir(filepath)
        if entry.lower().endswith(extension)
    ]
929564e9a9a425b99a3989b3e64408baab674d9b
25,321
def reverse_enumerate(iterable: Sequence[Any]):
    """Enumerate *iterable* from its last element to its first.

    Yields (index, item) pairs where the index is the item's true position,
    so indexes count down while items stream in reverse.

    Arguments:
        iterable: Sequence to enumerate over.
    """
    last_index = len(iterable) - 1
    return zip(range(last_index, -1, -1), reversed(iterable))
697c7e4ae2261b57c3f6d28ad4d0bc4bc9e1bcc9
25,323
def to_javascript(obj):
    """Serialize *obj* to JSON that is safe to inline in a <script> tag."""
    serialized = json.dumps(obj)
    # A literal '</' inside the payload could close the surrounding
    # <script> element early; escape the slash to prevent that.
    return serialized.replace('</', '<\\/')
2fba6a30eb19fd0b8fcc4295c3994ad6fa82b02f
25,324
def parse_years(years):
    """Parse an input string into the list of years it covers.

    Args:
        years (str): a single year "XXXX" or a range "XXXX-YYYY"
            (either order of the bounds is accepted).

    Returns:
        list: every year from the lower to the upper bound inclusive,
        each as a string.

    Note:
        The previous version returned ``[int]`` for a single year but
        ``[str, ...]`` for a range; the return type is now consistently
        a list of strings.
    """
    bounds = [int(part) for part in years.split('-')]
    return [str(y) for y in range(min(bounds), max(bounds) + 1)]
5296bd2f9e49a4a1689c813dd0e8641ea9a5c16f
25,326
def dict_from_string(s):
    """Inverse of dict_to_string: rebuild the original dictionary.

    :param s: string of the form "[key:val_key:val_...]" with float values
    :return: [dict] the reconstructed dictionary
    """
    stripped = s.replace("[", "").replace("]", "")
    result = {}
    for pair in stripped.split("_"):
        parts = pair.split(":")
        result[parts[0]] = float(parts[1])
    return result
b16677d1d39cfe74b53a327e2bcd85b6f16ee8de
25,329
def get(columns, table):
    """Build a SELECT statement for *columns* of *table*.

    Args:
        columns (tuple): column names (a bare string selects one column)
        table (str): table name to fetch from

    Returns:
        str: the SQL string
    """
    if isinstance(columns, str):
        columns = (columns,)
    column_list = ', '.join(columns)
    return "SELECT {c} FROM {t}".format(c=column_list, t=table)
7c4bacad2121f66782e551d440d602e381d77b89
25,330
def get_text(data):
    """Return the fullest available text of a status dict.

    :param data: status as json dict
    :return: the full text of the status, or '' if none is present

    The lookup tries, in the same priority order as the original nested
    try/except chain: extended text of a retweet (streamer, then REST),
    extended text of the tweet itself (streamer, then REST), then the
    basic retweet text, then the basic text. The bare ``except:`` clauses
    have been narrowed to the exceptions a dict lookup can raise.
    """
    key_paths = (
        ('retweeted_status', 'extended_tweet', 'full_text'),
        ('retweeted_status', 'full_text'),
        ('extended_tweet', 'full_text'),
        ('full_text',),
        ('retweeted_status', 'text'),
        ('text',),
    )
    for path in key_paths:
        value = data
        try:
            for key in path:
                value = value[key]
        except (KeyError, TypeError):
            # Missing key, or an intermediate value that is not a dict.
            continue
        return value
    return ''
f2a30578cff007ddc82f362d1b047b48a38ba71c
25,331
def onetimemethod(method):
    """Decorator for methods which need to be executable only once per instance.

    :param method: the function object to wrap (must be a plain function)
    :raises TypeError: if *method* is not a function
    :raises RuntimeError: (from the wrapper) when the method is called a
        second time on the same instance

    The previous implementation tracked calls in a module-level
    ``{id(self): bool}`` dict, which (a) kept growing forever and
    (b) could wrongly block a *new* instance whose id() happened to be
    recycled from a dead one. The flag now lives on the instance itself.
    NOTE(review): instances using ``__slots__`` without a ``__dict__``
    cannot take the flag attribute — confirm no such class uses this.
    """
    if not inspect.isfunction(method):
        raise TypeError('Not a function.')
    flag_name = '_onetime_has_run_%s' % method.__name__

    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        """Wrapped method being run once and only once."""
        if getattr(self, flag_name, False):
            raise RuntimeError(
                "One-time method '%s' cannot be re-run for this instance."
                % method.__name__
            )
        setattr(self, flag_name, True)
        return method(self, *args, **kwargs)
    return wrapped
8133ee826ea57bbf05e01bac81757e22f0e4c072
25,333
def lcs_to_add(lcs, ref, Nb, Ne, dat, Mb, Me, add):
    """Merge new "added lines" tolerances into the add tolerance structure.

    Takes the snake list (LCS result), the reference file, the data file,
    and the add section from the tolerance file, then records — for each
    gap in the snakes — how many data lines were added after each
    reference line, keeping the largest count seen so far.

    :param lcs: list of snakes ``(xi2, yj2, xi3, yj3, itype)``
    :param ref: reference file object; ``ref.token[j]`` is a 4-tuple whose
        last two members (lnt, tnt) are carried into the add entries
    :param Nb, Ne: reference line range (Ne unused here)
    :param dat: data file object (unused here)
    :param Mb, Me: data line range
    :param add: dict ``{ref_line: (ntok, lnt, tnt)}`` to merge into
    :return: the updated ``add`` dict (mutated in place)

    Fix: ``dict.has_key`` (Python 2 only) replaced by the ``in`` operator.
    """
    def _merge(yj1, Nadd):
        # Record Nadd added lines after reference line yj1, keeping the max.
        if yj1 == 0:
            lnt = 0
            tnt = 0
        else:
            (tr, dr, lnt, tnt) = ref.token[yj1]
        if Nadd > 0:
            if yj1 in add:
                (ntok, lnt, tnt) = add[yj1]
                if ntok < Nadd:
                    add[yj1] = (Nadd, lnt, tnt)
            else:
                add[yj1] = (Nadd, lnt, tnt)

    nsnake = len(lcs)
    isnake = 0
    xi1 = Mb-1
    yj1 = Nb-1
    if nsnake == 0:
        # No snakes at all: everything in [Mb, Me] counts as added.
        _merge(yj1, Me-Mb+1)
    else:
        while (isnake < nsnake):
            (xi2, yj2, xi3, yj3, itype) = lcs[isnake]
            # Lines between the previous snake end and this snake start.
            _merge(yj1, xi2-(xi1+1))
            isnake = isnake + 1
            xi1 = xi3
            yj1 = yj3
        # Lines trailing after the last snake.
        _merge(yj1, Me-xi1)
    return add
983aa6a1cab71aa977c126805936f626edee603b
25,334
def nevergrad_get_setting(self):
    """Get a setting to trial from one of the nevergrad optimizers.

    Asks the method chooser for a method token, then asks that method's
    optimizer for a parameter token, returning both tokens along with
    their unwrapped values.
    """
    method_token = self._method_chooser.ask()
    method_name = method_token.args[0]
    params_token = self._optimizers[method_name].ask()
    return {
        'method_token': method_token,
        'method': method_name,
        'params_token': params_token,
        'params': params_token.args[0],
    }
578a41bce432981b24cd84c70e9c2b2544e7bde7
25,335
def load_stage1_intfs(path):
    """Load interfaces of stage1 generated code.

    Parameters:
        path: str
            Filesystem path to read data from. Relative or absolute.
            No final pathsep. Example: "." for the current directory.
            Will be scanned for filenames of the form "mgs_*_impl.h",
            where the wildcard gives the model label.

    Returns:
        [(label, filename, content), ...] where
        label: str
            Label from the model. Deduced from the filename.
        filename: str
            Basename of the file (no path).
        content: str
            File content as one string (containing linefeeds).

    Raises:
        ValueError: if no matching stage1 file exists under *path*.
    """
    # os.path.sep must be regex-escaped: on Windows it is a backslash,
    # which unescaped corrupted the pattern (re.error at import of the
    # pattern). On POSIX re.escape('/') is a no-op, so behavior there
    # is unchanged.
    p_maybepath = r"(?:.*{pathsep})?".format(pathsep=re.escape(os.path.sep))
    p_basename = r"mgs_(.*)_impl"
    p_interface = r"\.h"
    pattern = "{maybepath}{basename}{interface}".format(maybepath=p_maybepath,
                                                        basename=p_basename,
                                                        interface=p_interface)

    def relevant(filename):
        # True when the filename matches the stage1 interface pattern.
        return len(re.findall(pattern, filename))

    def getlabel(filename):
        # Extract the model label (the wildcard part) from a matching name.
        matches = re.findall(pattern, filename)
        assert len(matches) == 1
        return matches[0]

    files_and_dirs = [os.path.join(path, x) for x in os.listdir(path)]
    files = [x for x in files_and_dirs if os.path.isfile(x)]
    matching_files = [x for x in files if relevant(x)]
    if not len(matching_files):
        raise(ValueError("No stage1 files found; please generate them first by running stage1.py."))

    def read(filename):
        # Read a whole text file as UTF-8.
        with open(filename, "rt", encoding="utf-8") as f:
            content = f.read()
        return content

    return [(getlabel(f), os.path.basename(f), read(f)) for f in matching_files]
e8b5b17eb0e95fbd85f01a87efb9869c875c5c20
25,336
def rotation_y_to_alpha(
    rotation_y: float, center: Tuple[float, float, float]
) -> float:
    """Convert rotation around the y-axis to the viewpoint angle (alpha).

    Subtracts the viewing direction toward *center* (x, z components)
    from the global yaw, then wraps the result into (-pi, pi].
    """
    viewing_angle = math.atan2(center[0], center[2])
    alpha = rotation_y - viewing_angle
    # Wrap into (-pi, pi]; at most one branch can fire.
    if alpha > math.pi:
        alpha -= 2 * math.pi
    elif alpha <= -math.pi:
        alpha += 2 * math.pi
    return alpha
785ee46456e373b28e5fcb4edd3a81a3e344abda
25,337
def sigma2Phi(sigma, i, overall_solution_space, reference_seq):
    """Return the binary indicator vector Phi for amino acid *sigma* at position *i*.

    The vector has one entry per possible substitution at position *i*
    (i.e. every amino acid in the solution space except the reference one),
    with a 1 marking the entry equal to *sigma*.
    """
    ref_aa = reference_seq[i]
    # All candidate substitutions: solution-space AAs minus the reference AA.
    substitutions = [aa for aa in overall_solution_space[i] if aa != ref_aa]
    return [1 if aa == sigma else 0 for aa in substitutions]
8a18168e1931ba6b49a367957bd5f4a07a424315
25,340
def _normalize(vec): """Normalizes a list so that the total sum is 1.""" total = float(sum(vec)) return [val / total for val in vec]
31ca018d688a5c28b89071e04049578737cd027d
25,344
def to_float(string):
    """Converts a string to a float if possible otherwise returns None

    :param string: a string to convert to a float
    :type string: str
    :return: the float or None if conversion failed and a success flag
    :rtype: Union[Tuple[float, bool], Tuple[None, bool]]
    """
    try:
        value = float(string)
    except ValueError:
        return None, False
    return value, True
00f3e16765aad9dc79e73cb687676893a743cc7f
25,345
def get_args():
    """
    Grab CLI arguments.

    :return: CLI arguments dict.
    """
    parser = argparse.ArgumentParser(
        description="Checks the replica lag of a MySQL Slave or returns OK if it's a Master"
    )
    # Connection options (all optional, with sensible defaults).
    parser.add_argument('-H', '--hostname', action='store', required=False,
                        default='localhost',
                        help='Hostname of the MySQL DB, default: localhost')
    parser.add_argument('-P', '--port', action='store', required=False,
                        default=3306, type=int,
                        help='Port of the MySQL DB, default: 3306')
    parser.add_argument('-U', '--username', action='store', required=False)
    parser.add_argument('-p', '--password', action='store', required=False)
    # Alerting thresholds (both mandatory).
    parser.add_argument('-w', '--warning', action='store', required=True,
                        type=int,
                        help='WARNING threshold for lag value in seconds')
    parser.add_argument('-c', '--critical', action='store', required=True,
                        type=int,
                        help='CRITICAL threshold for lag value in seconds')
    return parser.parse_args()
11c7a2462c9fd27c706b2084435c53ca9284f770
25,346
def get_project_directory() -> list:
    """
    Returns project directory

    Returns:
        project_dir (list): path back to top level directory
    """
    here = os.path.dirname(os.path.realpath(__file__))
    # Split on either separator so Windows and POSIX paths both work.
    parts = re.split(r"\\|/", here)
    return parts + ['..', '..', '..']
14af81c1c7f79ca45bf688313eb2702ccf498a2d
25,347
def body_abs(df):
    """
    Absolute candle body size: |open - close|.

    :param df: mapping/frame providing 'open' and 'close'
    :return: absolute difference between open and close
    """
    body = df['open'] - df['close']
    return abs(body)
210a70c7c74b0a5001d92f67943c95e52c13af09
25,348
def month_add(date, months):
    """Add a number of months to *date*, keeping the day-of-month.

    :param date: a datetime.date (or datetime) to shift
    :param months: number of months to add (may be negative)
    :return: the shifted date (same type as input, via ``replace``)
    :raises ValueError: if the resulting month has no such day
        (e.g. Jan 31 + 1 month)

    Fix: months are 1-based, so the divmod must run on ``month - 1``;
    the previous version produced month 0 (a ValueError) whenever the
    result landed in December.
    """
    year, month0 = divmod(date.year * 12 + (date.month - 1) + months, 12)
    return date.replace(year=year, month=month0 + 1)
8252e82f8b7dae41a6d3dee8e6bc58b00c376aa7
25,349
def get_args():
    """
    Allows users to input arguments

    Returns:
        argparse.ArgumentParser.parse_args
            Object containing options input by user
    """
    def isFile(string: str):
        # argparse 'type' callable: validate the path exists as a file.
        if os.path.isfile(string):
            return string
        # A bare ``raise`` with no active exception was a RuntimeError in
        # itself; report a proper argparse validation error instead, so the
        # user gets a usage message rather than a traceback.
        raise argparse.ArgumentTypeError(
            "{!r} is not an existing file".format(string))

    parser = argparse.ArgumentParser()
    parser.description = "Counts the amount of MAG, and MeSH terms in a JSON newline delmited file."
    parser.add_argument("input_file", type=isFile,
                        help="Path to JSON newline delimited file")
    return parser.parse_args()
7d5dcd4d493931d5ab63666cfce8735d28db222c
25,350
def obtain_Pleth(tensec_data):
    """ obtain Pulse Pleth values of ten second data

    :param tensec_data: 10 seconds worth of heart rate data points
    :return PlethData: Pulse Pleth unmultiplexed data
    """
    # Pleth samples occupy the even indices of the multiplexed stream.
    return tensec_data[::2]
45159ae768aa8dfa0f67add0951fecb346a8557b
25,351
def R_to_r(R):
    """Scale an autocorrelation function down to a correlation function.

    Input:
        :param R: Autocorrelation function of the signal
    Output:
        :return r: correlation function of the signal (R normalized by R[0])
        :rtype: ndarray
    """
    # Dividing by the zero-lag value makes r[0] == 1.
    return R / R[0]
951832bc9bc3e4817250c6f0473413b7a65b701d
25,352
def get_policy(crm_service, project_id, version=3):
    """Gets IAM policy for a project."""
    request_body = {"options": {"requestedPolicyVersion": version}}
    request = crm_service.projects().getIamPolicy(
        resource=project_id,
        body=request_body,
    )
    return request.execute()
984f40daa2a5e5334d7aa1adb3920c56f7a13a9b
25,353
def get_log_data(lcm_log, lcm_channels, end_time,
                 data_processing_callback, *args, **kwargs):
    """
    Parses an LCM log and returns data as specified by a callback function
    :param lcm_log: an lcm.EventLog object
    :param lcm_channels: dictionary with entries {channel : lcmtype} of channels
    to be read from the log
    :param end_time: stop reading once this many seconds of log have been
    processed (values <= 0 read the whole log)
    :param data_processing_callback: function pointer which takes as arguments
     (data, args, kwargs) where data is a dictionary with
     entries {CHANNEL : [ msg for lcm msg in log with msg.channel == CHANNEL ] }
    :param args: positional arguments for data_processing_callback
    :param kwargs: keyword arguments for data_processing_callback
    :return: return args of data_processing_callback
    """
    data_to_process = {}
    print('Processing LCM log (this may take a while)...')
    # Timestamp of the first event, used as the zero reference.
    t = lcm_log.read_next_event().timestamp
    lcm_log.seek(0)
    for event in lcm_log:
        if event.channel in lcm_channels:
            data_to_process.setdefault(event.channel, []).append(
                lcm_channels[event.channel].decode(event.data))
        if event.eventnum % 50000 == 0:
            print(f'processed {(event.timestamp - t)*1e-6:.1f}'
                  f' seconds of log data')
        # Timestamps are in microseconds; stop once end_time seconds elapsed.
        if 0 < end_time <= (event.timestamp - t)*1e-6:
            break
    # Fix: the original passed *kwargs, which unpacked only the dict KEYS as
    # extra positional arguments; keyword arguments now reach the callback.
    return data_processing_callback(data_to_process, *args, **kwargs)
6e2a57f04af2e8a6dc98b756ff99fab50d161ffe
25,354
def stats(arr):
    """
    Return the statistics for an input array of values

    Args:
        arr (np.ndarray)

    Returns:
        OrderedDict with keys 'min', 'max', 'mean', 'std' (all None for an
        empty array)
    """
    try:
        # Fix: 'min' previously reported arr.mean() instead of arr.min().
        return OrderedDict([('min', arr.min()),
                            ('max', arr.max()),
                            ('mean', arr.mean()),
                            ('std', arr.std())])
    except ValueError:
        # Can happen if the input array is empty (min/max raise ValueError)
        return OrderedDict([('min', None),
                            ('max', None),
                            ('mean', None),
                            ('std', None)])
2243b48e129096c76461ea6a877a6b2a511d21d0
25,356
def generate_blank_record():
    """
    Generate a blank record with the correct WHO PHSM keys.

    Other objects requiring the same selection of keys descend from here.

    Returns
    -------
    A blank record with keys in WHO PHSM column format. type dict.
    """
    # Every field defaults to None; only 'uuid' gets a generated value.
    keys = (
        "processed", "uuid", "who_id", "who_id_original", "dataset",
        "prop_id", "keep", "duplicate_record_id", "who_region",
        "country_territory_area", "iso", "iso_3166_1_numeric",
        "admin_level", "area_covered", "prov_category", "prov_subcategory",
        "prov_measure", "who_code", "original_who_code", "who_category",
        "who_subcategory", "who_measure", "comments", "date_start",
        "measure_stage", "prev_measure_number", "following_measure_number",
        "date_end", "reason_ended", "targeted", "enforcement",
        "non_compliance_penalty", "value_usd", "percent_interest",
        "date_entry", "link", "link_live", "link_eng", "source",
        "source_type", "alt_link", "alt_link_live", "alt_link_eng",
        "source_alt", "queries_comments", "date_processed", "flag",
        "old_targeted",
    )
    record = dict.fromkeys(keys)
    record["uuid"] = str(uuid.uuid4())
    return record
a4c2cc723d2b5d4e7ddcf173e0bf3a4b4587173e
25,358
def find(predicate: Callable, sequence: Iterable) -> Optional[Any]:
    """
    Find the first element in a sequence that matches the predicate.

    ??? Hint "Example Usage:"
        ```python
        member = find(lambda m: m.name == "UserName", guild.members)
        ```
    Args:
        predicate: A callable that returns a boolean value
        sequence: A sequence to be searched

    Returns:
        A match if found, otherwise None
    """
    return next((item for item in sequence if predicate(item)), None)
32060a3bd3b578bb357e68dad626f71d0c8ea234
25,360
def get_final_values(iterable):
    """Returns every unique final value (non-list/tuple/dict/set) in an
    iterable. For dicts, returns values, not keys.

    Fix: the scalar branch previously did ``ret.extend(iterable)`` — adding
    the *entire* current iterable (containers included) instead of the one
    scalar entry, which crashed with "unhashable type" on any mixed
    scalar/container input. It now appends just the entry.
    """
    if isinstance(iterable, dict):
        return get_final_values(list(iterable.values()))
    collected = []
    for entry in iterable:
        if isinstance(entry, (tuple, list, set)):
            collected.extend(get_final_values(entry))
        elif isinstance(entry, dict):
            collected.extend(get_final_values(entry.values()))
        else:
            collected.append(entry)
    return set(collected)
466dd8542c87f8c03970ca424c608f83d326c2cb
25,361
def ko_record_splitter(lines):
    """Splits KO lines into dict of groups keyed by type.

    A line that does not start with a space opens a new group; its first
    whitespace-separated word is the group label. Continuation lines
    (starting with a space) are appended to the current group. Each group's
    value is the list of raw lines, label line included.

    NOTE(review): if the same label occurs twice, the later group overwrites
    the earlier one — confirm that is acceptable for KO records.
    """
    result = {}
    curr_label = None
    curr = []
    # Line counter; incremented but otherwise unused (kept as-is).
    i = 0
    for line in lines:
        i+= 1
        if line[0] != ' ':
            # New group starts: flush the previous one first.
            if curr_label is not None:
                result[curr_label] = curr
            fields = line.split(None, 1)
            # Annoyingly, can have blank REFERENCE lines
            # Lacking PMID, these still have auth/title info, however...
            if len(fields) == 1:
                curr_label = fields[0]
                # curr_line is assigned but never read afterwards (kept as-is).
                curr_line = ''
            else:
                curr_label, curr_line = fields
            curr = [line]
        else:
            # Continuation line: belongs to the current group.
            curr.append(line)
    # Flush the final group, if any lines were collected.
    if curr:
        result[curr_label] = curr
    return result
c3a378dc01f0f2d2559fbe9a6eda4f5fb6183e68
25,362
def node_to_ctl_transform(graph, node):
    """
    Return the *ACES* *CTL* transform attached to the given node.

    Parameters
    ----------
    graph : DiGraph
        *aces-dev* conversion graph.
    node : unicode
        Node name to return the *ACES* *CTL* transform from.

    Returns
    -------
    CTLTransform
        *ACES* *CTL* transform stored under the node's "data" attribute.
    """
    node_attributes = graph.nodes[node]
    return node_attributes["data"]
bae6941fdea34b8481981269db7be4c6513553f8
25,363
def contains_str(cadena1, cadena2):
    """Check (case-insensitively) whether the first string occurs in the second.

    Arguments:
        cadena1 {[str]} -- string to find
        cadena2 {[str]} -- string to search in

    Returns:
        bool -- True when *cadena1* is a substring of *cadena2*, ignoring case.

    Fixes over the previous version: characters were compared with ``is``
    (identity, not equality — only accidentally correct for interned
    one-char strings), and it returned True after matching only the first
    ``len(cadena1) - 1`` characters (e.g. "abc" was "found" in "ab").
    """
    return cadena1.lower() in cadena2.lower()
e81bf557c3893e0f47ee4579a67b5e4c5125b19f
25,364
def yiq_to_rgb(yiq):
    """
    Convert a YIQ color representation to an RGB color representation.

    (y, i, q) ::
        y -> [0, 1]
        i -> [-0.5957, 0.5957]
        q -> [-0.5226, 0.5226]

    :param yiq: A tuple of three numeric values corresponding to the
        luma and chrominance.
    :return: RGB representation of the input YIQ value.
    :rtype: tuple
    """
    y, i, q = yiq
    channels = (
        y + (0.956 * i) + (0.621 * q),
        y - (0.272 * i) - (0.647 * q),
        y - (1.108 * i) + (1.705 * q),
    )
    # Clamp each channel into [0, 1], then scale to the 0-255 range.
    clamped = [1 if c > 1 else max(0, c) for c in channels]
    return tuple(round(c * 255, 3) for c in clamped)
0ede6cfacc368a3d225fe40b0c3fe505f066233b
25,365
def read_y_n(inp):
    """ Takes user's input as an argument and translates it to bool """
    answer = input(inp).lower()
    return answer in ('y', 'yep', 'yeah', 'yes')
cf1baee8d4b3e533ff0216c3d94e1bf6ed17a202
25,369
def delim() -> str:
    """80 char - delimiter."""
    return "{}\n".format("-" * 80)
d74e847836632d3a7f7e2705d5b1dee0210d0161
25,371
def _synda_search_cmd(variable): """Create a synda command for searching for variable.""" project = variable.get('project', '') if project == 'CMIP5': query = { 'project': 'CMIP5', 'cmor_table': variable.get('mip'), 'variable': variable.get('short_name'), 'model': variable.get('dataset'), 'experiment': variable.get('exp'), 'ensemble': variable.get('ensemble'), } elif project == 'CMIP6': query = { 'project': 'CMIP6', 'activity_id': variable.get('activity'), 'table_id': variable.get('mip'), 'variable_id': variable.get('short_name'), 'source_id': variable.get('dataset'), 'experiment_id': variable.get('exp'), 'variant_label': variable.get('ensemble'), 'grid_label': variable.get('grid'), } else: raise NotImplementedError( f"Unknown project {project}, unable to download data.") query = {facet: value for facet, value in query.items() if value} query = ("{}='{}'".format(facet, value) for facet, value in query.items()) cmd = ['synda', 'search', '--file'] cmd.extend(query) cmd = ' '.join(cmd) return cmd
439eee8b4e71ada16f586e992f194b434a5b1551
25,372
def django_db_fields(model):
    """ Return the fields actually stored for each table """
    # Start from all declared fields, then subtract those inherited
    # from any direct base class that carries its own _meta.
    own_fields = set(model._meta.fields)
    for base in model.__bases__:
        base_meta = getattr(base, '_meta', None)
        if base_meta:
            own_fields -= set(base_meta.fields)
    return own_fields
f4a538875b7dc527a12f090323749d2fa1e0501e
25,373
def from_aws_tags(tags):
    """
    Convert tags from AWS format [{'Key': key, 'Value': value}] to dictionary

    :param tags: list of {'Key': ..., 'Value': ...} mappings
    :return: dict mapping each Key to its Value
    """
    result = {}
    for tag in tags:
        result[tag['Key']] = tag['Value']
    return result
a58931a29302154cc01656ece403d1468db1a6ab
25,374
def geo_query(ds, ulx, uly, lrx, lry, querysize=0):
    """
    For given dataset and query in cartographic coordinates
    returns parameters for ReadRaster() in raster coordinates and
    x/y shifts (for border tiles). If the querysize is not given, the
    extent is returned in the native resolution of dataset ds.

    raises Gdal2TilesError if the dataset does not contain anything
    inside this geo_query

    :param ds: GDAL dataset (must provide GetGeoTransform, RasterXSize,
        RasterYSize)
    :param ulx, uly: upper-left corner of the query, map coordinates
    :param lrx, lry: lower-right corner of the query, map coordinates
    :param querysize: output window size in pixels (0 = native resolution)
    :return: ((rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)) —
        read window in raster pixels, and the placement window in the
        output buffer
    """
    # GDAL geotransform: [0]=origin X, [1]=pixel width, [3]=origin Y,
    # [5]=pixel height (negative for north-up rasters).
    geotran = ds.GetGeoTransform()
    # +0.001 guards against floating-point jitter when snapping to pixels.
    rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
    ry = int((uly - geotran[3]) / geotran[5] + 0.001)
    # +0.5 rounds the pixel extents to the nearest integer.
    rxsize = int((lrx - ulx) / geotran[1] + 0.5)
    rysize = int((lry - uly) / geotran[5] + 0.5)

    if not querysize:
        wxsize, wysize = rxsize, rysize
    else:
        wxsize, wysize = querysize, querysize

    # Coordinates should not go out of the bounds of the raster
    wx = 0
    if rx < 0:
        # Query starts left of the raster: shift the output window right
        # by the proportional amount and shrink both windows.
        rxshift = abs(rx)
        wx = int(wxsize * (float(rxshift) / rxsize))
        wxsize = wxsize - wx
        rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
        rx = 0
    if rx+rxsize > ds.RasterXSize:
        # Query extends past the right edge: clip both windows
        # proportionally.
        wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
        rxsize = ds.RasterXSize - rx

    wy = 0
    if ry < 0:
        # Same clipping as above, for the top edge.
        ryshift = abs(ry)
        wy = int(wysize * (float(ryshift) / rysize))
        wysize = wysize - wy
        rysize = rysize - int(rysize * (float(ryshift) / rysize))
        ry = 0
    if ry+rysize > ds.RasterYSize:
        # And for the bottom edge.
        wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
        rysize = ds.RasterYSize - ry

    return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
23f74093377889f6fd91f379d5292d5d61b9957f
25,377
def index_nearest_shape(point, r_tree, shape_index_dict):
    """Returns the index of the nearest Shapely shape to a Shapely point.

    Uses a Shapely STRtree (R-tree) for a fast nearest lookup; returns
    None when the point is invalid (e.g. Point(nan, nan) in Shapely 1.8).
    """
    if not point.is_valid:
        return None
    nearest_geom = r_tree.nearest(point)
    # STRtree returns geometry objects; map back to the index via id().
    return shape_index_dict[id(nearest_geom)]
08153395ec03d9f0496f3bc16a0a58c2e1d09d60
25,380
def timestamp_suffix():
    """Generate a suffix based on the current time."""
    # Whole-second epoch timestamp, prefixed with a dash.
    return "-{}".format(int(time.time()))
10dde6e7c5215f9859eb39ead7b7f88503973060
25,382
def example_globus(request):
    """Globus example data.

    Returns a 3-tuple: (user info, token response, identities payload).
    """
    user_info = {
        'identity_provider_display_name': 'Globus ID',
        'sub': '1142af3a-fea4-4df9-afe2-865ccd68bfdb',
        'preferred_username': 'carberry@inveniosoftware.org',
        'identity_provider': '41143743-f3c8-4d60-bbdb-eeecaba85bd9',
        'organization': 'Globus',
        'email': 'carberry@inveniosoftware.org',
        'name': 'Josiah Carberry'
    }
    token_response = {
        'expires_in': 3599,
        'resource_server': 'auth.globus.org',
        'state': 'test_state',
        'access_token': 'test_access_token',
        'id_token': 'header.test-oidc-token.pub-key',
        'other_tokens': [],
        'scope': 'profile openid email',
        'token_type': 'Bearer',
    }
    identities = {
        'identities': [
            {
                'username': 'carberry@inveniosoftware.org',
                'status': 'used',
                'name': 'Josiah Carberry',
                'email': 'carberry@inveniosoftware.org',
                'identity_provider': '927d7238-f917-4eb2-9ace-c523fa9ba34e',
                'organization': 'Globus',
                'id': '3b843349-4d4d-4ef3-916d-2a465f9740a9'
            }
        ]
    }
    return user_info, token_response, identities
61c489bf3bdd66330c634326af895d0454a64406
25,383
def _verify_data_inputs(tensor_list):
    """Verify that batched data inputs are well-formed.

    Checks that every tensor has at least rank 1 and that all batch
    dimensions (dim 0) are compatible with the first tensor's.

    :param tensor_list: list of tensors to validate
        (NOTE(review): uses the TF1-style ``get_shape`` /
        ``assert_is_compatible_with`` shape API — confirm the TF version.)
    :return: the (unmodified) tensor_list
    """
    for tensor in tensor_list:
        # Data tensor should have a batch dimension.
        tensor_shape = tensor.get_shape().with_rank_at_least(1)
        # Data batch dimensions must be compatible.
        tensor_shape[0].assert_is_compatible_with(tensor_list[0].get_shape()[0])
    return tensor_list
43ef710835370f1c348cf27a67df13e0c96bed18
25,384
def join_col(col):
    """Converts an array of arrays into an array of strings, using ';' as the sep."""
    return [";".join(str(element) for element in item) for item in col]
f6386d99e69e3a8c04da2d7f97aa7fb34ac9044c
25,386
def cint2lev(arr, cint):
    """Determines appropriate contour levels for a plt.

    arr -- The array that is about to be contoured (needs .min()/.max())
    cint -- The requested contour interval; non-positive yields no levels

    Levels step up from zero in multiples of cint, starting at the first
    value strictly above the data minimum, and include one value past the
    data maximum (same bracketing as the original implementation).
    """
    if cint <= 0.:
        return []
    lower = arr.min()
    upper = arr.max()
    # Step up from 0 until strictly above the data minimum.
    level = 0.
    while level <= lower:
        level += cint
    levels = [level]
    # Keep appending until one level past the data maximum.
    while level <= upper:
        level += cint
        levels.append(level)
    return levels
baebdc0efee45685b616704d281eae0d9cfa3b57
25,387
def read_words_from_file(f):
    """
    Read a text file of words in the format '"word1","word2","word3"'.

    :param f: path of the file to read
    :return: list of words with their surrounding double quotes stripped

    Fix: the file handle is now closed via a context manager; the original
    bare ``open(f).read()`` leaked the handle.
    """
    with open(f) as handle:
        txt = handle.read()
    return [word.strip('"') for word in txt.split(",")]
669aeebd2cbfeb67cdc0cd65da2e58fdefa3bfe6
25,389