content: string (length 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def calculateRelativeAveragePerturbation(data):
    """
    Calculate the relative average perturbation (Koike, 1973), also termed
    frequency perturbation quotient (Takahashi & Koike, 1975)
    """
    n = len(data)
    if n < 3:
        raise ValueError("need at least three data points")
    sum1 = 0
    sum2 = 0
    for i in range(n):
        if i > 0 and i < (n - 1):
            sum1 += abs((data[i - 1] + data[i] + data[i + 1]) / 3 - data[i])
        sum2 += data[i]
    sum1 /= float(n - 2)
    sum2 /= float(n)
    return sum1 / sum2
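A quick usage sketch (illustrative only; the pitch-period values below are made up):

periods = [100.0, 102.0, 101.0, 103.0, 102.0]  # hypothetical pitch periods
print(calculateRelativeAveragePerturbation(periods))  # ~0.00656 for this series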
29158c1ff927820a844f90e4ffa0c5f0e2eb919c
17,581
import random


def randit(it, rand=None):
    """
    Random object from iterable.

    Return an occurrence at random from the given iterable, using `rand`
    if given or else a new `Random` object.
    """
    return it[(rand or random.Random()).randrange(0, len(it))]
fa8c4bb78ae90923cd3149af4714a7e89f85afde
17,582
def calc_focal_length(distance, width, pixels):
    """
    Calculates the focal length based off the input

    Parameters:
        distance (int): distance from camera to the object
        width (int): actual width of the object
        pixels (int): width in pixels of the object

    return: focal length of the camera based off the target object
    rtype: int
    """
    return (distance * pixels) / width
5a8ba34ad7d1c408552ec50015aa3df7e18f555c
17,583
def create_greeting(moment):
    """Returns customized welcome string based on time

    Args:
        moment (timestamp): current time

    Returns:
        greeting (string): the final welcome string
    """
    if moment.hour < 12:
        greeting = 'Good morning'
    elif moment.hour < 20:
        greeting = 'Good evening'
    else:
        greeting = 'Good night'
    return greeting
6dfc2d113b27a95c631186ee3688f7054e6cc923
17,584
def _read_data(dataset, varname, step_var, sel_slice, avg_dim, tstep, dim):
    """
    Read the data for a given step variable.

    Parameters
    ----------
    dataset : collection
        datasets that contain the data that needs to be displayed
    varname : collection
        variable names that are displayed
    step_var : collection
        variable names of the stepping dimension (dimension that is changed)
    sel_slice : dict
        if not None, a dict object defining the variable name and the index
        along which the dataset is sliced
    avg_dim : collection
        if not None, a collection containing the variable names across which
        an average is taken
    tstep : int
        index of the stepping variable
    dim : int
        the target dimension of the data set (2D or 1D)

    Returns
    -------
    xarray.Dataset
        xarray dataset of dimension dim
    """
    if len(dataset[varname].shape) == dim:
        # There is not much more to do here:
        return dataset[varname].values
    if step_var is None:
        dset = dataset[varname]
    else:
        if isinstance(step_var, (int, float)):
            try:
                step_var = dataset.dims[int(step_var)]
            except IndexError:
                raise ValueError('Could not find step_variable in dataset')
        else:
            try:
                _ = dataset.variables[step_var]
            except KeyError:
                raise ValueError('Could not find step_variable in dataset')
        dset = dataset[varname][{step_var: tstep}]
    if len(dset.shape) == dim:
        # There is not much more to do here:
        return dset.values
    if sel_slice is None:
        out_dims = []
        if isinstance(avg_dim, str):
            avg_dim = (avg_dim, )
        for d in avg_dim:
            if d in dset.dims and d not in out_dims:
                out_dims.append(d)
        # This indicates that we have to apply an avg along an axis
        return dset.mean(dim=tuple(out_dims)).values
    else:
        # Try to select a slice
        out_slice = {}
        try:
            for key, value in sel_slice.items():
                out_slice[key] = value
        except AttributeError:
            raise ValueError('Slices should be defined with dictionaries, '
                             'of the form of dim:slice/num')
        return dset[out_slice].values
35ba3f82a140e1f9c7455c93c63d9260ae01e070
17,585
import re


def preprocess_summary(text):
    """Pre-process an episode summary string by removing repeated
    whitespaces, bracketed text, and citations."""
    text = re.sub(r'[\(\[].*?[\)\]]', '', text)  # remove brackets
    text = re.sub(r' +', ' ', text)  # remove multiple whitespaces
    text = re.sub(r'\s+\.\s+', '. ', text)  # remove whitespaces from before dots
    # We want to get rid of the part after the first '\n' for summaries with
    # multiple paragraphs
    text = text.split('\n')[0]
    # Make sure the last sentence ends with '.', '!', or '?'; a half-finished
    # sentence is usually a citation or reference on Wikipedia
    if not (text.endswith('.') or text.endswith('?') or text.endswith('!')):
        last_closing = max([text.rfind('.'), text.rfind('?'), text.rfind('!')])
        if last_closing > 0:
            text = text[:last_closing + 1]
    if text.endswith(' .'):
        text = text[:-2] + '.'
    return text
ef950787a28487a29106a6a5ef959adbf52c3703
17,588
def create_segmented_sequence(length, seq_initializer):
    """ Create a segmented test_sequence

    A segment is a list of lists.  `seq_initializer` is used to create
    `length` individual segments, which allows for using any of the
    pre-supplied initializers for a regular genomic test_sequence, or for
    making your own.

    `length` denotes how many segments to generate.  If it's an integer,
    then we will create `length` segments.  However, if it's a function that
    draws from a random distribution that returns an int, we will, instead,
    use that to calculate the number of segments to generate.

    >>> from leap_ec.binary_rep.initializers import create_binary_sequence
    >>> segments = create_segmented_sequence(3, create_binary_sequence(3))
    >>> assert len(segments) == 3

    :param length: How many segments?
    :type length: int or Callable
    :param seq_initializer: initializer for creating individual sequences
    :type seq_initializer: Callable
    :return: test_sequence of segments
    :rtype: list
    """
    if callable(length):
        num_segments = length()
    else:
        num_segments = length

    segments = [seq_initializer() for _ in range(num_segments)]
    return segments
bfe4296a91b0ea122c20501347cfd68fbc8ff16c
17,589
import re


def convert_keys(input_value):
    """
    Convert all of the keys in a dict recursively from CamelCase to
    snake_case.  Also strips leading and trailing whitespace from string
    values.

    :param input_value:
    :return:
    """
    retn = None

    if isinstance(input_value, list):
        retn = []
        for list_item in input_value:
            if isinstance(list_item, (dict, list)):
                retn.append(convert_keys(list_item))
            else:
                if isinstance(list_item, str):
                    retn.append(list_item.strip())
                else:
                    retn.append(list_item)
    elif isinstance(input_value, dict):
        retn = dict()
        for k, v in input_value.items():
            new_key_s = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", k)
            new_key = re.sub("([a-z0-9])([A-Z])", r"\1_\2", new_key_s).lower()
            if isinstance(v, (dict, list)):
                retn[new_key] = convert_keys(v)
            else:
                if isinstance(v, str):
                    retn[new_key] = v.strip()
                else:
                    retn[new_key] = v

    return retn
c0727ac011362f2449d2e5ced694752df529dfeb
17,590
def search_colorspaces(config, aces_id):
    """
    Search the config for the supplied ACES ID, return the color space name.
    """
    for cs in config.getColorSpaces():
        desc = cs.getDescription()
        if aces_id in desc:
            return cs.getName()
    return None
b47f7b9105db178904ce272ad8b0002412d96f82
17,591
def _get_gradient_op_type(node_def):
    """Returns the custom gradient op type."""
    if ("_gradient_op_type" in node_def.attr
            and node_def.op not in ["StatefulPartitionedCall",
                                    "PartitionedCall"]):
        return node_def.attr["_gradient_op_type"].s
    return None
17a7b14c5de9f3306fe148379e26a3c1d08a87d7
17,593
def getgraph(graph):
    """Convert a graph object into a Python dictionary.

    This function takes a graph as a parameter and converts it into a
    dictionary following the syntax of the graphviz library's JSON files.

    Args:
        graph (Graph): A graph object of the Graph class.

    Returns:
        dict: A dictionary representing the graph.
    """
    initial_state = graph.getInitialState()
    final_states = graph.getFinalStates()
    alphabet = graph.getAlphabet()
    states = graph.getStates()
    # Collect the transitions:
    # [ ["from", "value", "to"], .. ["from", "value", "to"] ]
    nodes = graph.gettransitions()
    nodes = [[str(node.mFrom), str(node.mValue), str(node.mGoto)]
             for node in nodes]
    gr = {
        'alphabet': alphabet,
        'states': states,
        'initial_state': initial_state,
        'accepting_states': final_states,
        'transitions': nodes
    }
    return gr
e2cfe987332049a9c2ad9a69fd75e30cb20b4440
17,594
def get_errors(node):
    """Get cooking errors.

    If the node already has errors, check whether it needs to recook.
    If so, recook first to see if that solves it.
    """
    if node.errors() and node.needsToCook():
        node.cook()

    return node.errors()
969fde376e4028daf91e34ab4eafe7184344cddc
17,595
import re


def has_valid_characters(glyph_name):
    """Tests for presence of valid characters in a glyph name as specified
    by the Adobe OpenType Feature File specification.  The test here
    includes characters specified for 'development' names, a broader set
    than the production name definition.
    https://github.com/adobe-type-tools/afdko/blob/develop/docs/OpenTypeFeatureFileSpecification.html"""
    valid_characters = r"^[A-Za-z0-9\._\*\+\-\:\^\|\~]{1,63}$"
    regex = re.compile(valid_characters)
    return re.match(regex, glyph_name)
f0ca267ebb06792adebef039e8de9f01ecfb7651
17,597
import base64


def base64url_encode(input):
    """Helper method to base64url-encode a value.

    Args:
        input (bytes): The bytes to base64url-encode.
    """
    return base64.urlsafe_b64encode(input).replace(b'=', b'')
882976603c43c98a07dc4eede3d4de4fd0aa5981
17,598
def config():
    """Integration test config.  Override the dictionary to run the tests."""
    return {
        "host": "some_test_host_you_have_access_to",
        "user_name": "some_remote_user_with_your_key_in_authorized_keys",
    }
1283f502ef94ee300c40477231182a54ecb9ebf5
17,599
def fib(n):
    """
    Return the n-th Fibonacci number, computed recursively.

    :param n: int
    :return: int
    """
    if n <= 2:
        return 1
    return fib(n - 1) + fib(n - 2)
2abe45e823d1639d5cbbecad9845da14e98a2232
17,600
def get_upload_folder_structure(file_name):
    """
    Return the structure in the upload folder used for storing and
    retrieving uploaded files.

    Two folder levels are created based on the filename (UUID). The first
    level consists of the first two characters, the second level consists
    of the third character of the uuid.

    Example: ``9cd7b281-2c70-42eb-86ec-e29fc755cc1e.jpg`` is stored to
    ``9c/d/9cd7b281-2c70-42eb-86ec-e29fc755cc1e.jpg``.

    Args:
        file_name (str): The name of the file.

    Returns:
        list. A list of the folders or an empty list if there was a problem
        with the file name.
    """
    try:
        return [file_name[0:2], file_name[2:3]]
    except TypeError:
        return []
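For illustration, calling it with the UUID-style name from the docstring, plus None to show the fallback:

print(get_upload_folder_structure("9cd7b281-2c70-42eb-86ec-e29fc755cc1e.jpg"))
# ['9c', 'd']  -> file is stored under 9c/d/
print(get_upload_folder_structure(None))
# []  -> the TypeError is caught and an empty list returned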
3a231d820de6a67d8d9a0af77055092cc24c3a48
17,601
def find_matching_edge(m, i, j):
    """Return corresponding edge for a given arc."""
    if (i, j) in m.edge:
        return (i, j)
    else:
        return (j, i)
4bbc182116c210dd9cdb3ca4719a6bc1be8e882e
17,602
def sanitize_result(data):
    """Sanitize data object for return to Ansible.

    When the data object contains types such as
    docker.types.containers.HostConfig, Ansible will fail when these are
    returned via exit_json or fail_json. HostConfig is derived from dict,
    but its constructor requires additional arguments. This function
    sanitizes data structures by recursively converting everything derived
    from dict to dict and everything derived from list (and tuple) to a
    list.
    """
    if isinstance(data, dict):
        return dict((k, sanitize_result(v)) for k, v in data.items())
    elif isinstance(data, (list, tuple)):
        return [sanitize_result(v) for v in data]
    else:
        return data
6058f5ec32fadd2de6869f91747d7968c4437ce9
17,603
def _partition(seq, vector, m=None):
    """
    Return the partition of seq as specified by the partition vector.

    Examples
    ========

    >>> from sympy.utilities.iterables import _partition
    >>> _partition('abcde', [1, 0, 1, 2, 0])
    [['b', 'e'], ['a', 'c'], ['d']]

    Specifying the number of bins in the partition is optional:

    >>> _partition('abcde', [1, 0, 1, 2, 0], 3)
    [['b', 'e'], ['a', 'c'], ['d']]

    The output of _set_partitions can be passed as follows:

    >>> output = (3, [1, 0, 1, 2, 0])
    >>> _partition('abcde', *output)
    [['b', 'e'], ['a', 'c'], ['d']]

    See Also
    ========

    combinatorics.partitions.Partition.from_rgs
    """
    if m is None:
        m = max(vector) + 1
    elif isinstance(vector, int):  # entered as m, vector
        vector, m = m, vector
    p = [[] for i in range(m)]
    for i, v in enumerate(vector):
        p[v].append(seq[i])
    return p
cd547ee04d3b4a559d66351d32b3c790a753bc3d
17,604
import random


def calculate_multiply(solution_string):
    """
    Some miners are punished for being bad at finding blocks, while others
    are boosted for being good at that.
    """
    # this code is no longer required, so we skip the logic here
    return 1

    miner_id = str(solution_string.get_miner_id())
    key = 'miner_id__percent_ratio__' + miner_id
    percent_ratio = cache.get(key, None)
    multiply_solution = 1  # default

    if percent_ratio is None:
        miner = Miner.objects.get(pk=miner_id)
        percent_ratio = miner.percent_ratio
        cache.set(key, percent_ratio)

    if percent_ratio > 120:
        # We only allow the solution if the random value is under 101.
        # The higher the percent_ratio, the more unlikely it is that this
        # holds, so more solutions are dropped.
        rand_val = random.randrange(0, int(percent_ratio))
        if rand_val > 100:
            multiply_solution = 0

    if percent_ratio < 30:
        multiply_solution = 4
    elif percent_ratio < 40:
        multiply_solution = 3
    elif percent_ratio < 60:
        multiply_solution = 2

    return multiply_solution
0121f5a3ec1249c577fe9d97d45e5a4784176e70
17,606
def abbrev_key(
    key: str,
) -> str:
    """
    Abbreviate the IRI, if any.

    key:
        string content to abbreviate

    returns:
        abbreviated IRI content
    """
    if key.startswith("@"):
        return key[1:]

    key = key.split(":")[-1]
    key = key.split("/")[-1]
    key = key.split("#")[-1]

    return key
6cb2058f32d6320f1be118d8831cc074cd0cc56f
17,608
from unittest.mock import patch


def _patch_broadlink_gendevice(return_value):
    """Patch the broadlink gendevice method."""
    return patch(
        "homeassistant.components.broadlink.device.blk.gendevice",
        return_value=return_value,
    )
041dc6a4e526ff2401264900bf9887c770713fa5
17,609
def markup(text, annotations, positive_class=True):
    """Given a text and a list of AnnotatedSpan objects, inserts HTML <span>
    tags around the annotated areas."""
    last_char = 0
    doc_markedup = []
    for start, end, word, weight, level in annotations:
        doc_markedup.append(text[last_char:start])
        doc_markedup.append(
            '<span class="%s-%d" title="%s (%0.3f)">%s</span>'
            % ('pos' if (weight > 0) == positive_class else 'neg',
               level, word, weight, text[start:end]))
        last_char = end
    doc_markedup.append(text[last_char:])
    return ''.join(doc_markedup)
94bce15672e2d2109ddc5f0ed5f0c48ec8c1684b
17,610
def fix_image_page_for_root(content, file):
    """
    Looks for images and fixes their paths as if the extract were copied to
    the root.

    @param      content     extracted content
    @param      file        file it comes from (unused)
    @return                 content
    """
    rows = content.split("\n")
    for i in range(len(rows)):
        row = rows[i]
        if ".. image::" in row:
            spl = row.split(".. image::")
            img = spl[-1]
            if "../images" in img:
                img = img.lstrip("./ ")
                if len(spl) == 1:
                    row = ".. image:: " + img
                else:
                    row = spl[0] + ".. image:: " + img
                rows[i] = row
    return "\n".join(rows)
db5c9061c62c46fe5fd3324b7c5f5244a880013d
17,611
from typing import Tuple


def split_platform(platform: str) -> Tuple[str, str, str]:
    """
    Split a platform string into its (os, architecture, variant) form.
    """
    parts = platform.split("/", maxsplit=2)
    return (
        parts[0],
        parts[1] if len(parts) > 1 else "",
        parts[2] if len(parts) > 2 else "",
    )
75b4594a874c03cc5977472b396ecc94d41206e3
17,612
def dp(m, n, x):
    """
    The bottom-up dynamic programming approach
    """
    dp = [[0 for _ in range(x + 1)] for _ in range(n + 1)]
    # Only when the no. of throws is zero and the sum is 0 is the no. of
    # ways 1; otherwise the number of ways is 0.
    dp[0][0] = 1
    for i in range(1, n + 1):
        for j in range(1, x + 1):
            for k in range(1, m + 1):
                if j - k >= 0:
                    dp[i][j] += dp[i - 1][j - k]
    return dp[n][x]
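A small sanity check, assuming the usual dice interpretation (m faces, n throws, target sum x):

# Ways to roll a total of 7 with two six-sided dice: 6 combinations
print(dp(6, 2, 7))  # 6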
09f771a6d67129304977e4e0f29ec96d7bc963b8
17,613
def dict_from_map(isl_map, p_key, p_val):
    """
    Create a dictionary from an ISL map.
    """
    ret = {}

    def add_dep(p):
        p_var_dict = p.get_var_dict()
        if p_key not in p_var_dict:
            raise ValueError(
                "p_key='%s' not in %s" % (p_key, ",".join(p_var_dict.keys()))
            )
        if p_val not in p_var_dict:
            # note: the original interpolated p_key here by mistake
            raise ValueError(
                "p_val='%s' not in %s" % (p_val, ",".join(p_var_dict.keys()))
            )
        k = p.get_coordinate_val(*p_var_dict[p_key]).to_python()
        v = p.get_coordinate_val(*p_var_dict[p_val]).to_python()
        if k not in ret:
            ret[k] = []
        ret[k].append(v)

    isl_map.wrap().foreach_point(add_dep)
    return ret
d5e8c8713ff3231b4cd49e881b6021935152ff00
17,614
def condensate_belowdew(Rs, Rv, Rsi, Rvi, Bo, Bg, Bgi, Np, Gp):
    """
    Calculate the parameters for material balance plot of gas-condensate
    reservoirs below dewpoint pressure

    Input:
        array: Rs, Rv, Bo, Bg, Np, Gp
        float: initial Rs (Rsi), initial Rv (Rvi), initial Bg (Bgi)

    Output:
        F (array), Eg (array)
    """
    Btg = ((Bg * (1 - (Rs * Rvi))) + (Bo * (Rvi - Rv))) / (1 - (Rv * Rs))  # in RB/STB
    Bto = ((Bo * (1 - (Rv * Rsi))) + (Bg * (Rsi - Rs))) / (1 - (Rv * Rs))  # in RB/scf
    Gi = 0
    F = (Np * ((Bo - (Rs * Bg)) / (1 - (Rv * Rs)))) \
        + ((Gp - Gi) * ((Bg - (Rv * Bo)) / (1 - (Rv * Rs))))
    Eg = Btg - Bgi
    return (F, Eg)
3a17a29972ff15304fd8ebac0a2446339babde82
17,615
import re


def safe_format(fmt, **kwargs):
    """
    Function that safely formats strings with arbitrary, potentially
    user-supplied format strings.

    Looks for interpolation placeholders like {target} or {{ target }}.
    """
    return re.sub(
        r"\{?\{([^{}]*)\}\}?",
        lambda m: kwargs.get(m.group(1).strip(), m.group(0)),
        fmt
    )
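A usage sketch showing that unknown placeholders are left untouched (example strings are made up):

print(safe_format("Hi {target}, {other}", target="world"))
# Hi world, {other}   <- unknown placeholder survives as-is
print(safe_format("Hi {{ target }}", target="world"))
# Hi world            <- the spaced double-brace form is also matched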
e53c7410175532156be466ee9626ccb1ca387e5c
17,617
def create_exists_nonce_func(cache, key_prefix='nonce:', expires=86400):
    """Create an ``exists_nonce`` function that can be used in hooks and
    resource protector.

    :param cache: Cache instance
    :param key_prefix: key prefix for temporary credential
    :param expires: Expire time for nonce
    """
    def exists_nonce(nonce, timestamp, client_id, oauth_token):
        key = '{}{}-{}-{}'.format(key_prefix, nonce, timestamp, client_id)
        if oauth_token:
            key = '{}-{}'.format(key, oauth_token)
        rv = cache.has(key)
        cache.set(key, 1, timeout=expires)
        return rv

    return exists_nonce
f2f66e1b323205b0ae1f1624b53c440fb09666b3
17,618
import os


def get_files_paths(directory: str, file_type='') -> list:
    """Function prepares list of paths to the files within a given directory.

    Parameters
    ----------
    directory : str
    file_type : str, default=''
        If the default empty string is passed then all files will be
        selected.

    Returns
    -------
    list
    """
    if len(file_type) > 0:
        files = [os.path.join(directory, x) for x in os.listdir(directory)
                 if x.endswith(file_type)]
        return files
    else:
        return [os.path.join(directory, x) for x in os.listdir(directory)]
f77e68d0ab3dcc784eb6f0bd8cfe64cdfe31607c
17,619
def basic_ttr(n_terms, n_words):
    """
    Type-token ratio (TTR) computed as t/w, where t is the number of unique
    terms/vocab, and w is the total number of words.
    (Chotlos 1944, Templin 1957)
    """
    if n_words == 0:
        return 0
    return n_terms / n_words
3d56fd414d6d462c722a2d29bd15bb7ef8bf7559
17,621
from collections import OrderedDict

import torch


def load_statedict(model_path):
    """Loads model state dict.

    Args:
        model_path: model path

    Returns:
        state dict
    """
    print(f"Loading model from {model_path}.")
    state_dict = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    print("Loaded model.")

    # Remove distributed naming if model trained in distributed mode
    model_state_dict = OrderedDict()
    for k, v in state_dict["model"].items():
        if k.startswith("module."):
            name = k[len("module."):]
            model_state_dict[name] = v
        else:
            model_state_dict[k] = v

    return state_dict, model_state_dict
d556db551dca3176ffcfa88af99fc89f36d9b444
17,622
def GetCacheKeyPolicy(client, args, backend_bucket):
    """Returns the cache key policy.

    Args:
        client: The client used by gcloud.
        args: The arguments passed to the gcloud command.
        backend_bucket: The backend bucket object. If the backend bucket
            object contains a cache key policy already, it is used as the
            base to apply changes based on args.

    Returns:
        The cache key policy.
    """
    cache_key_policy = client.messages.BackendBucketCdnPolicyCacheKeyPolicy()
    if (backend_bucket.cdnPolicy is not None
            and backend_bucket.cdnPolicy.cacheKeyPolicy is not None):
        cache_key_policy = backend_bucket.cdnPolicy.cacheKeyPolicy

    if args.cache_key_include_http_header is not None:
        cache_key_policy.includeHttpHeaders = args.cache_key_include_http_header

    if args.cache_key_query_string_whitelist is not None:
        cache_key_policy.queryStringWhitelist = (
            args.cache_key_query_string_whitelist)

    return cache_key_policy
202d1cce49478051255bd48f0b9e117dd19ed63f
17,624
import argparse


def parse_args(default_config_path):
    """
    Parses the commandline arguments with argparse

    :param default_config_path: default path to config file
    """
    parser = argparse.ArgumentParser(
        description='Parse flags to configure the json parsing')
    parser.add_argument("-cp", "--configpath",
                        help="Path to configuration file",
                        default=default_config_path, type=str)
    parser.add_argument("-po", "--plotonly",
                        help="Boolean parameter, if true, only plot",
                        action="store_true")
    parser.add_argument("-nc", "--noconfig",
                        help="Boolean parameter, if true always override config",
                        action="store_true")
    args = parser.parse_args()
    return args
0255d02ed5c4cec3571620e6a69a1a15309e646a
17,625
def multi_powmod(bases, exponents, modulus):
    r"""
    Raise all bases to the respective exponents mod modulus:

    :math:`\prod_{i=1}^{len(bases)} base_i^{exponent_i} \pmod{modulus}`

    :param bases: the bases
    :param exponents: the exponents
    :param modulus: the modulus
    :return: the calculated result
    """
    if len(bases) != len(exponents):
        raise ValueError("bases and exponents don't have the same size")
    result = 1
    for base, power in zip(bases, exponents):
        result = (result * pow(base, power, modulus)) % modulus
    return result
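A quick check against a hand-computed value:

# (3**4 * 5**2) % 7 == 2025 % 7 == 2
print(multi_powmod([3, 5], [4, 2], 7))  # 2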
b8b0bcc32e7938996d20044fcd4e0649273d0ee5
17,626
def GetProcessorSummary(processor):
    """ Internal function to print summary of processor
        params: processor - value representing struct processor *
        return: str - representing the details of given processor
    """
    processor_state_str = "INVALID"
    processor_state = int(processor.state)
    processor_states = {
        0: 'OFF_LINE',
        1: 'SHUTDOWN',
        2: 'START',
        # 3 (formerly INACTIVE)
        4: 'IDLE',
        5: 'DISPATCHING',
        6: 'RUNNING'
    }
    if processor_state in processor_states:
        processor_state_str = "{0: <11s} ".format(
            processor_states[processor_state])

    out_str = "Processor {: <#018x} cpu_id {:>#4x} State {:<s}\n".format(
        processor, int(processor.cpu_id), processor_state_str)
    return out_str
ba3576625a4b9c948bc7cf032a8eee4f43be649f
17,631
def ticks(group):
    """Wrapper function for .add_steps method."""
    pheno, steps, sim_id = group
    pheno.add_steps(steps)
    return pheno, sim_id
f567d9e14d7421fb196921543bab9587ca040fa4
17,632
def update_output_div(input_value):
    """Format the input string for displaying"""
    return 'You\'ve entered "{}"'.format(input_value)
615a671775ed836712978485230403d9a331f366
17,635
def get_xy_coords(storms):
    """
    Takes polygons of storm masks as paired coordinates and returns
    separated x and y coordinates

    Args:
        storms: List of polygon storms [x, y]
    Returns:
        x: list of x coordinates
        y: list of y coordinates
    """
    x, y = [], []
    for polygon in storms:
        x.append(list(polygon.exterior.coords.xy[0]))
        y.append(list(polygon.exterior.coords.xy[1]))
    return x, y
444ba53cf8ffabe1f9e4bea5a7d8a70d6b41a056
17,636
def reducemap(func, sequence, initial=None, include_zeroth=False):
    """
    A version of reduce that also returns the intermediate values.

    :param func: A function of the form x_i_plus_1 = f(x_i, params_i)
        Where:
            x_i is the value passed through the reduce.
            params_i is the i'th element of sequence
            x_i_plus_1 is the value that will be passed to the next step
    :param sequence: A list of parameters to feed at each step of the reduce.
    :param initial: Optionally, an initial value (else the first element of
        the sequence will be taken as the initial)
    :param include_zeroth: Include the initial value in the returned list.
    :return: A list of length len(sequence) (or len(sequence)+1 if
        include_zeroth is True) containing the computed result of each
        iteration.
    """
    if initial is None:
        val = sequence[0]
        sequence = sequence[1:]
    else:
        val = initial
    results = [val] if include_zeroth else []
    for s in sequence:
        val = func(val, s)
        results.append(val)
    return results
7c3fbd5e60777ecaf82ff2d7745aafab879abd10
17,637
def throughput_sampler(throughput_data, start_value, target):
    """Return a function that can efficiently draw samples from
    `throughput_data`."""
    sample_buffer_size = int(
        2 * (target - start_value) / throughput_data["count"].mean()
    )
    sample_buffer: dict = dict(idx=0, buffer=None)

    def get_throughput_sample():
        if sample_buffer["buffer"] is None or sample_buffer["idx"] >= len(
            sample_buffer["buffer"].index
        ):
            sample_buffer["buffer"] = throughput_data["count"].sample(
                sample_buffer_size, replace=True
            )
            sample_buffer["idx"] = 0
        sample_buffer["idx"] += 1
        return sample_buffer["buffer"].iat[sample_buffer["idx"] - 1]

    return get_throughput_sample
791ead6fb3c11ebcd8b9d7acc385b22fb01b454d
17,638
import numpy


def to_uint8(img):
    """
    Cast data type of numpy array to unsigned int8.
    """
    return img.astype(numpy.uint8)
4893d1e85c0dbcf12032580bc5a28860b7d05a93
17,639
def readme():
    """Parse README for long_description."""
    with open('README.md') as fin:
        return fin.read()
9023e60d156840233a03a34e4c8449ddc9bde0e6
17,640
import torch


def get_labels(df, emo_dict):
    """
    Returns the labels according to the emotion dictionary.
    """
    return torch.tensor([emo_dict[label] for label in df["label"].values])
8c4fc33a62c1135376c8b14750d055b71cb64986
17,641
def getDonorTypes(atoms):
    """
    For all atoms with HD neighbors we need to assign a new type:
        N-HD  becomes NX-HD
        NA-HD becomes N2-HD
        OA-HD becomes OX-HD
        SA-HD becomes SX-HD
    """
    def hasHD(atom, atypes):
        for neighbor in atom.iterBonded():
            if atypes[neighbor.getIndex()] == 'HD':
                return True
        return False

    mol = atoms.getAtomGroup().getMolecule()
    adTypes = mol._ag.getData("AD_element")
    subsetADTypes = []
    for a in atoms:
        i = a.getIndex()
        # skip N from backbone else it becomes NX and the AtomSet complains
        # that NX has no H attached since HN is not in the FRatoms
        if adTypes[i] == 'N' and a.getName() != 'N':
            if hasHD(a, adTypes):
                subsetADTypes.append('NX')
            else:
                subsetADTypes.append(adTypes[i])
        elif adTypes[i] == 'NA':
            if hasHD(a, adTypes):
                subsetADTypes.append('N2')
            else:
                subsetADTypes.append(adTypes[i])
        elif adTypes[i] == 'OA':
            if hasHD(a, adTypes):
                subsetADTypes.append('OX')
            else:
                subsetADTypes.append(adTypes[i])
        elif adTypes[i] == 'SA':
            if hasHD(a, adTypes):
                subsetADTypes.append('SX')
            else:
                subsetADTypes.append(adTypes[i])
        else:
            subsetADTypes.append(adTypes[i])
    return subsetADTypes
7e3580bbca1aaf2e05635e39361da32963750a5c
17,642
def get_color():
    """Define some colors to use for plotting ..."""
    # return [plt.cm.Spectral(each)
    #         for each in np.linspace(0, 6, 8)]
    # return ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    return ['pink', 'darkviolet', 'blue', 'teal', 'forestgreen',
            'darkorange', 'red', 'deeppink', 'blueviolet', 'royalblue',
            'lightseagreen', 'limegreen', 'yellowgreen', 'tomato', 'silver',
            'gray', 'black']
c486df8037dd5e550043d3515ac28d2278fe06dc
17,643
def user_exists(cursor, username):
    """
    Test whether a user exists, from its username

    Parameters:
    ==========
    cursor: Psycopg2 cursor
        Cursor of type RealDict in the postgres database
    username: Str
        Name of the user
    """
    SQL = "SELECT count(*) AS nb FROM users WHERE username=%s"
    cursor.execute(SQL, [username])
    res = bool(cursor.fetchone()['nb'])
    return res
c221cbb6dd3c99d1eacfc88c8f2161276680b938
17,646
def modify_leaves(f, ds):
    """Walks a data structure composed of nested lists/tuples/dicts, and
    creates a new (equivalent) structure in which the leaves of the data
    structure (i.e. the non-list/tuple/dict data structures in the tree)
    have been mapped through f."""
    if isinstance(ds, list):
        return [modify_leaves(f, sds) for sds in ds]
    if isinstance(ds, tuple):
        return tuple(modify_leaves(f, sds) for sds in ds)
    if isinstance(ds, dict):
        return {k: modify_leaves(f, sds) for k, sds in ds.items()}
    return f(ds)
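For illustration (data structure made up):

data = {"a": [1, 2, (3, 4)], "b": {"c": 5}}
print(modify_leaves(lambda x: x * 10, data))
# {'a': [10, 20, (30, 40)], 'b': {'c': 50}}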
54df919ef7ebf94a70a603e321fbe6b47b68b7c1
17,648
def netmask_to_cidr(netmask):
    """
    Convert netmask to CIDR format

    Notes:
        code from https://stackoverflow.com/a/43885814

    Args:
        netmask (string): netmask address

    Returns:
        int: cidr value
    """
    return sum([bin(int(x)).count('1') for x in netmask.split('.')])
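Two hand-checkable examples:

print(netmask_to_cidr('255.255.255.0'))  # 24
print(netmask_to_cidr('255.255.240.0'))  # 20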
613c90a814d30d01494203d06a4b7ae231f19311
17,649
from typing import List


def lines_to_list(lines: str) -> List[str]:
    """
    Transform multi-line input to a list.
    """
    return lines.splitlines(keepends=False)
35b3d803066525f645c45702ec1773470aec964f
17,651
def energy_to_wavelength(energy: float):
    """Conversion from photon energy (eV) to photon wavelength (angstroms)"""
    return 1.2398 / energy * 1e4
19c3b56852546330a6343d47fce742556b40e2ed
17,652
def count_classes(labels):
    """Count how often the classes appear."""
    class_dict = {}
    for image in labels:
        for row in image:
            for label in row:
                if label not in class_dict:
                    class_dict[label] = 1
                else:
                    class_dict[label] += 1
    return class_dict
552a9367712580004e78a3ff4e99d7f4a74db402
17,653
import sys

import requests


def get_model_id(options):
    """get_model_id

    Get Model Id from Trello API

    :options: argparse.Namespace arguments
    :returns: str id_model Trello board idModel
    """
    trello_api_url = 'https://api.trello.com/1/board/{}'.format(
        options.trello_board_id
    )
    params = {
        'key': options.trello_api_key,
        'token': options.trello_token,
    }
    trello_response = requests.get(
        trello_api_url,
        params=params
    )
    # note: the original compared with "is not 200", which tests identity,
    # not equality
    if trello_response.status_code != 200:
        print('Error: Can\'t get the idModel. Please check the configuration')
        sys.exit(1)

    board_info_json = trello_response.json()

    return board_info_json['id']
febc0f71937c303d2bfdb02bc0e1f23bf30d18a9
17,654
from collections import Counter


def is_nsfw(labels, filter_list=None, min_confidence=95, min_occurrence=2):
    """
    Return a number of high confidence, re-occurring labels that intersect
    the filtering list.
    """
    filter_list = filter_list or (
        'Explicit Nudity',
        'Graphic Nudity',
        'Graphic Female Nudity',
        'Graphic Male Nudity',
        'Sexual Activity',
    )
    labels = (x['Name'] for x in labels if x['Confidence'] > min_confidence)
    common_labels = {k: v for k, v in Counter(labels).items()
                     if v >= min_occurrence}
    return len(set(common_labels) & set(filter_list))
46147dbb634f1be0fb564c08ad6e436ef134ed04
17,655
import itertools

import torch


def prepare_inputs_for_bert_xlnet(sentences, word_lengths, tokenizer,
                                  cls_token_at_end=False, pad_on_left=False,
                                  cls_token='[CLS]', sep_token='[SEP]',
                                  pad_token=0, sequence_a_segment_id=0,
                                  cls_token_segment_id=1,
                                  pad_token_segment_id=0, device=None):
    """ Loads a data file into a list of `InputBatch`s

    `cls_token_at_end` defines the location of the CLS token:
        - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` defines the segment id associated to the CLS
    token (0 for BERT, 2 for XLNet)

    output: {
        'tokens': tokens_tensor,      # input_ids
        'segments': segments_tensor,  # token_type_ids
        'mask': input_mask,           # attention_mask
        'selects': selects_tensor,    # original_word_to_token_position
        'copies': copies_tensor       # original_word_position
    }
    """
    # sentences are sorted by sentence length
    max_length_of_sentences = max(word_lengths)
    tokens = []
    segment_ids = []
    selected_indexes = []
    for ws in sentences:
        selected_index = []
        ts = []
        for w in ws:
            if cls_token_at_end:
                selected_index.append(len(ts))
            else:
                selected_index.append(len(ts) + 1)
            ts += tokenizer.tokenize(w)
        ts += [sep_token]
        si = [sequence_a_segment_id] * len(ts)
        if cls_token_at_end:
            ts = ts + [cls_token]
            si = si + [cls_token_segment_id]
        else:
            ts = [cls_token] + ts
            si = [cls_token_segment_id] + si
        tokens.append(ts)
        segment_ids.append(si)
        selected_indexes.append(selected_index)
    max_length_of_tokens = max([len(tokenized_text)
                                for tokenized_text in tokens])
    # if not cls_token_at_end:  # bert
    #     assert max_length_of_tokens <= model_bert.config.max_position_embeddings
    padding_lengths = [max_length_of_tokens - len(tokenized_text)
                       for tokenized_text in tokens]
    if pad_on_left:
        input_mask = [[0] * padding_lengths[idx] + [1] * len(tokenized_text)
                      for idx, tokenized_text in enumerate(tokens)]
        indexed_tokens = [[pad_token] * padding_lengths[idx]
                          + tokenizer.convert_tokens_to_ids(tokenized_text)
                          for idx, tokenized_text in enumerate(tokens)]
        segments_ids = [[pad_token_segment_id] * padding_lengths[idx] + si
                        for idx, si in enumerate(segment_ids)]
        selected_indexes = [
            [padding_lengths[idx] + i + idx * max_length_of_tokens
             for i in selected_index]
            for idx, selected_index in enumerate(selected_indexes)]
    else:
        input_mask = [[1] * len(tokenized_text) + [0] * padding_lengths[idx]
                      for idx, tokenized_text in enumerate(tokens)]
        indexed_tokens = [tokenizer.convert_tokens_to_ids(tokenized_text)
                          + [pad_token] * padding_lengths[idx]
                          for idx, tokenized_text in enumerate(tokens)]
        segments_ids = [si + [pad_token_segment_id] * padding_lengths[idx]
                        for idx, si in enumerate(segment_ids)]
        selected_indexes = [
            [0 + i + idx * max_length_of_tokens for i in selected_index]
            for idx, selected_index in enumerate(selected_indexes)]
    copied_indexes = [
        [i + idx * max_length_of_sentences for i in range(length)]
        for idx, length in enumerate(word_lengths)]

    input_mask = torch.tensor(input_mask, dtype=torch.long, device=device)
    tokens_tensor = torch.tensor(indexed_tokens, dtype=torch.long,
                                 device=device)
    segments_tensor = torch.tensor(segments_ids, dtype=torch.long,
                                   device=device)
    selects_tensor = torch.tensor(
        list(itertools.chain.from_iterable(selected_indexes)),
        dtype=torch.long, device=device)
    copies_tensor = torch.tensor(
        list(itertools.chain.from_iterable(copied_indexes)),
        dtype=torch.long, device=device)
    return {'tokens': tokens_tensor, 'segments': segments_tensor,
            'selects': selects_tensor, 'copies': copies_tensor,
            'mask': input_mask}
fe1b20975ed5f14c4e39fbc56bddbfec6a632edf
17,656
from typing import Any
from typing import Iterable


def multi_if_else(
    if_condition_return: Iterable | list[tuple[bool, Any]],
    else_return: Any | None = None,
) -> Any | None:
    """
    Handle a series of :keyword:`if` or :keyword:`elif`.

    Parameters
    ----------
    if_condition_return : list[tuple(bool, Any)]
        Array of tuples containing the condition and result. If the result
        is an :obj:`Exception`, it will be raised.
    else_return : Any, default None
        The final returned result; if the result is an :obj:`Exception`,
        it will be raised.

    Notes
    -----
    Behind the scenes, :func:`multi_if_else` uses multiple :keyword:`if`s,
    not one :keyword:`if` and multiple :keyword:`elif`s. So an
    :keyword:`if`'s condition should be more specific compared to an
    :keyword:`elif`'s.

    :keyword:`elif` version:

    .. code-block:: python

        def rank(score: int) -> str:
            if not isinstance(score, int):
                raise TypeError("the 'score' must be an integer.")

            if 100 >= score >= 90:
                return 'A'
            elif score >= 70:
                return 'B'
            elif score >= 60:
                return 'C'
            elif score >= 30:
                return 'D'
            elif score >= 0:
                return 'E'
            else:
                raise ValueError(
                    "the 'score' range should be between 0 and 100.",
                )

    :keyword:`if` version:

    .. code-block:: python

        def rank(score: int) -> str:
            if not isinstance(score, int):
                raise TypeError("the 'score' must be an integer.")

            if 100 >= score >= 90:
                return 'A'
            if 90 > score >= 70:
                return 'B'
            if 70 > score >= 60:
                return 'C'
            if 60 > score >= 30:
                return 'D'
            if 30 > score >= 0:
                return 'E'

            raise ValueError("the 'score' range should be between 0 and 100.")

    Examples
    --------
    >>> from dtoolkit.util import multi_if_else
    >>> import numpy as np
    >>> array = np.asarray(
    ...     [
    ...         [1, 0],
    ...         [0, 1],
    ...     ]
    ... )
    >>> mask = array == 0
    >>> mask
    array([[False,  True],
           [ True, False]])
    >>> mask_lambda = lambda how: multi_if_else(
    ...     [
    ...         (how == "any", mask.any()),
    ...         (how == "all", mask.all()),
    ...         (how is not None, ValueError(f"Invalid how option: {how}")),
    ...     ],
    ...     TypeError("Must specify how"),
    ... )
    >>> mask_lambda("any")
    True
    >>> mask_lambda("all")
    False

    ``mask_lambda`` is equal to the following, but with the help of
    :func:`multi_if_else` it can be written more easily.

    .. code-block:: python

        def mask_lambda(how, mask):
            if how == 'any':
                return mask.any()
            elif how == 'all':
                return mask.all()
            elif how is not None:
                raise ValueError(f"Invalid how option: {how}")
            else:
                raise TypeError("Must specify how")
    """
    for condition, result in if_condition_return:
        if condition:
            if isinstance(result, Exception):
                raise result
            return result

    if isinstance(else_return, Exception):
        raise else_return

    return else_return
22c1e219ef373d84b49b8cd7c8546ea04cfbb9db
17,657
def sample_function(params: dict):
    """
    An example of a simulation function format that is compatible with the
    sample generation needed for EZK dependence measure estimation.

    The params dictionary holds all the information needed to initialize
    and simulate the system. A perturbing random variable is needed for
    each variable in `state` to perform sampling; this is stored in
    `distributions`. It is recommended that the duration of simulation be
    one of the parameters for tuning the time between variable perturbation
    and focal variable response.
    """
    # print("Calling sampler with state\n{}".format(params['state']))
    # if params['perturbation']:
    #     print("Calling sample_function with perturbation; variables "
    #           "are sampled by random variables of the form: {}".format(
    #               [a.__name__ for a in params['distributions']]))
    # if 'duration' in params:
    #     print("Executing simulation for {} time steps.".format(
    #         params['duration']))
    # # presumably state should change; sample_function just passes state
    # # back into params unchanged
    # return params | {'state': params['state']}

    # return random state
    new_state = []
    for ag in params['state']:
        # new_state.append(np.random.randn(len(ag)).tolist())
        new_state.append(ag)
    # print("{}\n".format(new_state))
    return params | {'state': new_state}
befb6aa14cb469ef0b7f5145c90e19708bc096c2
17,658
def request_to_query(**params):
    """
    Build HTTP request query-string parameters.
    """
    print(params)
    print(type(params))
    qs = []
    for key, value in params.items():
        qs.append("%s=%s" % (key, value))
    print(qs)
    result = "&".join(qs)
    return result
030f009c2f443be174979c468380a4e5468e1bb8
17,659
import argparse


def arg_parser():
    """Return parsed args for this script."""
    desc = """Compute the WER between two 'transcript'-like files. The first
    token of each line should be the ID. The order of the lines doesn't
    matter."""
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("ref_file", type=argparse.FileType('r'))
    parser.add_argument("hyp_file", type=argparse.FileType('r'))
    args = parser.parse_args()
    return args
81471fb27993ff47e933a113c4824c2e3fcaec3e
17,660
import re


def to_camel(s):
    """Convert string s from `snake_case` to `camelCase`"""
    return re.sub(r"(?!^)_([a-zA-Z])", lambda m: m.group(1).upper(), s)
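For illustration — note the (?!^) lookahead preserves a leading underscore:

print(to_camel("snake_case_name"))  # snakeCaseName
print(to_camel("_private_name"))    # _privateName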
28ac46b93230fb567321adf940e0fd37131b4903
17,661
def parse_data(data: str) -> list:
    """
    Takes a data input string, splits and returns the components.

    Example:
        Input:  "1-3 a: abcde"
        Output: [1, 3, "a", "abcde"]
    """
    reqs, text = [i.strip() for i in data.split(":")]
    req_occurances, req_letter = reqs.split()
    req_low, req_high = req_occurances.split("-")

    return [
        int(req_low),
        int(req_high),
        req_letter,
        text,
    ]
d0f974877292a11ef968edf05ca41afdbc7ae5e5
17,662
def _get_nameservers(resolv_conf="/etc/resolv.conf"):
    """Very simple private function to read the on-disk resolv.conf for
    nameserver options.

    Args:
        resolv_conf (str): Absolute path of the on-disk resolv.conf.
            Defaults to '/etc/resolv.conf'.

    Returns:
        list: Returns a list object containing nameserver configuration.
    """
    nameservers = []
    try:
        with open(resolv_conf) as fin:
            lines = fin.read().splitlines()
    except FileNotFoundError:
        nameservers.append("8.8.8.8")
        return nameservers
    for line in lines:
        if "nameserver" in line:
            nameservers.append(line.split(" ")[1])
    if not nameservers:
        nameservers.append("8.8.8.8")
    return nameservers
07211e78454b7e8f8db569640941101f636049ad
17,665
def get_veil_zx_order_from_signed_order(signed_order):
    """Get a Veil-compatible json 0x signed order from a `ZxSignedOrder`
    instance"""
    return {
        "maker_address": signed_order.maker_address_,
        "taker_address": signed_order.taker_address_,
        "fee_recipient_address": signed_order.fee_recipient_address_,
        "sender_address": signed_order.sender_address_,
        "exchange_address": signed_order.exchange_address_,
        "maker_asset_amount": signed_order.maker_asset_amount_,
        "taker_asset_amount": signed_order.taker_asset_amount_,
        "maker_fee": signed_order.maker_fee_,
        "taker_fee": signed_order.taker_fee_,
        "salt": signed_order.salt_,
        "expiration_time_seconds": str(signed_order.expiration_time_seconds_),
        "maker_asset_data": signed_order.maker_asset_data_,
        "taker_asset_data": signed_order.taker_asset_data_,
        "signature": signed_order.signature,
    }
6d319bdabc91d407f02e322b83882f5bf04eb392
17,667
import argparse


def parameter_parser():
    """
    A method to parse up command line parameters.
    By default it gives an embedding of the Twitch Brasilians dataset.
    The default hyperparameters give a good quality representation without
    grid search. Representations are sorted by node identifiers.
    """
    parser = argparse.ArgumentParser(description="Run DANMF.")

    parser.add_argument("--edge-path",
                        nargs="?",
                        default="./input/ptbr_edges.csv",
                        help="Edge list csv.")
    parser.add_argument("--output-path",
                        nargs="?",
                        default="./output/ptbr_danmf.csv",
                        help="Target embedding csv.")
    parser.add_argument("--membership-path",
                        nargs="?",
                        default="./output/ptbr_membership.json",
                        help="Cluster membership json.")
    parser.add_argument("--pre-training-method",
                        nargs="?",
                        default="shallow",
                        help="Pre-training procedure used.")
    parser.add_argument("--iterations",
                        type=int,
                        default=100,
                        help="Number of training iterations. Default is 100.")
    parser.add_argument("--pre-iterations",
                        type=int,
                        default=100,
                        help="Number of layerwise pre-training iterations. Default is 100.")
    parser.add_argument("--seed",
                        type=int,
                        default=42,
                        help="Random seed for sklearn pre-training. Default is 42.")
    parser.add_argument("--lamb",
                        type=float,
                        default=0.01,
                        help="Regularization parameter. Default is 0.01.")
    parser.add_argument("--layers",
                        nargs="+",
                        type=int,
                        help="Layer dimensions separated by space. E.g. 128 64 32.")
    parser.add_argument("--calculate-loss",
                        dest="calculate_loss",
                        action="store_true")
    parser.add_argument("--not-calculate-loss",
                        dest="calculate_loss",
                        action="store_false")

    parser.set_defaults(calculate_loss=False)
    parser.set_defaults(layers=[32, 8])

    return parser.parse_args()
81df17ad6cd636cc13ff87ecfb3f2880b3e989fd
17,669
def orientation(p, q, r):
    """
    Finds the orientation of an ordered set of vertices (p, q, r).

    p: First vertex represented as a tuple.
    q: Second vertex represented as a tuple.
    r: Third vertex represented as a tuple.

    returns:
        0 : Collinear points
        1 : Clockwise points
        2 : Counterclockwise
    """
    val = ((q[1] - p[1]) * (r[0] - q[0])) - ((q[0] - p[0]) * (r[1] - q[1]))
    if val == 0:
        # Collinear
        return 0
    if val > 0:
        # Clockwise
        return 1
    else:
        # Counterclockwise
        return 2
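Three hand-checkable cases:

print(orientation((0, 0), (1, 1), (2, 2)))  # 0 (collinear)
print(orientation((0, 0), (1, 1), (2, 0)))  # 1 (clockwise)
print(orientation((0, 0), (1, 1), (0, 2)))  # 2 (counterclockwise)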
3651c0b6eee3550c427ad03d229d947337e7eed1
17,670
def L1(y_output, y_input):
    """L1 loss function: calculates the sum of the absolute differences
    between the predicted and the input values"""
    return sum(abs(y_input - y_output))
d1b53f4aa2a47b2fe3179c657e6337dcf951a980
17,671
from pathlib import Path


def get_dataset_file(working_directory, resource_name, resource_extension):
    """
    :param working_directory: the directory containing the requested
        resource
    :param resource_name: the name of the resource file in the directory
    :param resource_extension: the file extension of the requested resource
        file. Please note that the extension param must not begin with the
        "." character as it gets already considered in the function
    :return: an object of `pathlib.PosixPath` which can be directly opened
    """
    return Path(working_directory, resource_name + "." + resource_extension)
6a08a04940e0d2e169c8b89f47e14a9fcd646d35
17,672
def expand_test_result_df(df_test):
    """Adds columns to a DataFrame with test results

    Args:
        df_test: DataFrame as produced by trainer.ModelTrainer, i.e. with
            columns 'tp', 'fp', 'fn', 'correct', 'total_examples' and
            'examples_above_threshold'

    Returns:
        the input DataFrame with additional columns for 'precision',
        'recall', 'acc'uracy, 'f1' measure and 'coverage' percentage.
    """
    # print('type of df_test', str(type(df_test)))
    # print('keys in df_test', df_test.keys())
    df = df_test
    epsilon = 0.00001  # avoid division by zero
    df['precision'] = df['tp'] / (df['tp'] + df['fp'] + epsilon)
    df['recall'] = df['tp'] / (df['tp'] + df['fn'] + epsilon)
    df['acc'] = df['correct'] / df['examples_above_threshold']
    df['f1'] = 2 * df['tp'] / (2 * df['tp'] + df['fp'] + df['fn'] + epsilon)
    df['coverage'] = df['examples_above_threshold'] / (df['total_examples'] + epsilon)
    return df
dea64054f8fb372d9777b6bdc9b0064843bbd459
17,673
def is_image_file(filename):
    """
    Check if given file is image file or not

    Parameters
    ----------
    filename: str
        input file path

    Returns
    -------
    img_flag: bool
        flag for image
    """
    # note: the original listed the tif/tiff extensions without a leading
    # dot, which would also match names like 'motif'
    IMG_EXTENSIONS = [
        '.jpg', '.JPG', '.jpeg', '.JPEG',
        '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
        '.tif', '.TIF', '.tiff', '.TIFF',
    ]
    img_flag = any(filename.endswith(extension)
                   for extension in IMG_EXTENSIONS)
    return img_flag
3ddb6589a421b91e52cacb3b0bae75c1d682d981
17,674
def check_collision(board, shape, offset):
    """
    See if the matrix stored in the shape will intersect anything on the
    board based on the offset. Offset is an (x, y) coordinate.
    """
    off_x, off_y = offset
    for cy, row in enumerate(shape):
        for cx, cell in enumerate(row):
            if cell and board[cy + off_y][cx + off_x]:
                return True
    return False
f66e1739abf334c7b90ba5c129d18d67c4978e81
17,675
import os


def walk(top, topdown=True, onerror=None):
    """Recursive directory tree generator for directories.

    Args:
        top: string, a Directory name
        topdown: bool, Traverse pre order if True, post order if False.
        onerror: optional handler for errors. Should be a function, it will
            be called with the error as argument. Rethrowing the error
            aborts the walk. Errors that happen while listing directories
            are ignored.

    Returns:
        Yields, Each yield is a 3-tuple: the pathname of a directory,
        followed by lists of all its subdirectories and leaf files. That
        is, each yield looks like:
        (dirname, [subdirname, subdirname, ...], [filename, filename, ...]).
        Each item is a string.
    """
    return os.walk(top, topdown, onerror)
a3d16256688ae27cb648c19f4a6a61ca1795b810
17,676
def _get_add_scalar_output_quant_param(input_scale, input_zero_point, scalar):
    """
    Determine the output scale and zp of quantized::add_scalar op.
    This is used for mobilenet v3.
    Refer to aten/src/ATen/native/quantized/cpu/qadd.cpp.
    The names of variables are the same as the torch impl.
    """
    q_min = 0
    q_max = 255
    s = input_scale
    z = input_zero_point
    c = scalar
    c_q = round(c / s)

    if q_min > z - c_q:
        s_prime = (float(q_max) - (z - c_q)) / (float(q_max) - q_min) * s
        z_prime = q_min
    elif q_max < z - c_q:
        s_prime = (float(z - c_q) - q_min) / (float(q_max) - q_min) * s
        z_prime = q_max
    else:
        s_prime = s
        z_prime = z - c_q

    return s_prime, z_prime
2da1f77f0c9f4c332882cb004901b0f197546e89
17,677
def get_buckets(client, strip_chars=None):
    """Returns a list of bucket names."""
    bucket_list = [bucket.name for bucket in client.buckets.all()]
    if strip_chars:
        bucket_list = [i.strip(strip_chars) for i in bucket_list
                       if i.strip(strip_chars)]
    return bucket_list
3fa0a07d185299680f25efe8ea7cfaa50944a5f8
17,678
def get_in(coll, path=None, default=None):
    """Returns a value at path in the given nested collection.

    Args:
        coll (object):
        path (str): e.g. 'a.0.b.c'
    """
    if path is None:
        return coll
    for key in path.split('.'):
        try:
            if isinstance(coll, dict):
                coll = coll[key]
            elif isinstance(coll, list):
                coll = coll[int(key)]
            else:
                raise KeyError
        except (KeyError, IndexError, TypeError, ValueError):
            return default
    return coll
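A usage sketch with a made-up nested structure:

cfg = {"a": [{"b": {"c": 42}}]}
print(get_in(cfg, "a.0.b.c"))       # 42
print(get_in(cfg, "a.1.b", "n/a"))  # n/a -- out-of-range index falls back to default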
cc15527a84b9fa0b862aff6add07a851b103a265
17,679
def check_explainer(explainer):
    """
    Check if explainer class corresponds to a shap explainer object.
    """
    if explainer is not None:
        if explainer.__class__.__base__.__name__ != 'Explainer':
            raise ValueError(
                "explainer doesn't correspond to a shap explainer object"
            )
    return explainer
b792e2b865cc3c6db33db5654096f2a18bdfc985
17,680
def dstport_to_icmptc(dstport):
    """
    Destination port to ICMP type and code - definition taken from
    https://www.erg.abdn.ac.uk/users/gorry/course/inet-pages/icmp-code.html
    https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml

    Args:
        dstport `int` destination port number

    Return:
        `int` icmp type, `int` icmp code
    """
    return dstport // 256, dstport % 256
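Flow records encode ICMP type/code as dstport = type * 256 + code, so for example:

print(dstport_to_icmptc(2048))  # (8, 0) -> echo request
print(dstport_to_icmptc(771))   # (3, 3) -> port unreachable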
9a1697431205a18468ef06edec7f86f5056f9419
17,681
import json
from typing import Union


def _format_body(res: str) -> Union[str, dict]:
    """Private function used to return a response body as a dict if it's
    JSON, and as a string otherwise."""
    # remove any extra whitespace or newline characters at beginning or end
    res = res.strip()
    try:
        body = json.loads(res)
    except ValueError:
        # if json.loads fails, it must not be JSON so return it as is
        body = res
    return body
35fbdf93cbc531becca94621a1d041d7a5bd6702
17,682
def _serp_query_string(parse_result):
    """
    Some search engines contain the search keyword in the fragment so we
    build a version of a query string that contains the query string and
    the fragment.

    :param parse_result: A URL.
    :type parse_result: :class:`urlparse.ParseResult`
    """
    query = parse_result.query
    if parse_result.fragment != '':
        query = '{}&{}'.format(query, parse_result.fragment)
    return query
277a1de9409384e5868f9cd8a473da7950e22d2d
17,683
def do_math(op, op1, op2):
    """
    Perform an arithmetic operation.

    :param op: the operator
    :param op1: the first operand
    :param op2: the second operand
    """
    if op == '+':
        return op1 + op2
    elif op == '-':
        return op1 - op2
    elif op == '*':
        return op1 * op2
    elif op == '/':
        return op1 / op2
a72d1274a285fb35de32dbe3737545ee101e6375
17,684
import os
import subprocess


def find_doxypy():
    """Check if doxypy is in the system path or else ask for the location
    of doxypy.py"""
    doxypy_path = ""
    try:
        # first check to see if doxypy is in the system path
        # (the original opened os.devnull read-only; 'w' is needed to
        # discard the child's stdout)
        if subprocess.call(["doxypy", "makedocumentation.py"],
                           stdout=open(os.devnull, 'w')):
            raise OSError()
        doxypy_path = "doxypy"
    except OSError:
        doxypy_path = input("Enter location of doxypy.py: ")
        if not os.path.exists(doxypy_path) or doxypy_path[-9:] != 'doxypy.py':
            print("Invalid path to doxypy")
            exit()
    return doxypy_path
baa18e9d7cf3ac994913fa01c985593d9008c7a9
17,685
def get_destination_filename(path, prefix="t", overwrite=False):
    """
    Get the output file name.

    :param pathlib.Path path: Destination path
    :param str prefix: prefix of filename.
    :param bool overwrite: if True, the original file name is kept
    :return: file name for output
    :rtype: str
    """
    if overwrite:
        name = path.name
    else:
        name = "-".join([prefix, path.name])
    return name
af57d27b04fa1040fa0584ca7e6250101a32d77f
17,687
def add_list(shape, outfile):
    """
    Given a shape that represents an 'Outline' -- OpenOffice's
    representation of a bulleted or numbered list -- attempt to convert the
    elements into a sensible Markdown list, and write to "outfile".
    """
    els = shape["elements"]
    indent = 0

    def item_to_str(item):
        s = (' ' * indent * 4) + "- " + item["String"].strip()
        return s

    # handle first item
    output = [item_to_str(els[0])]

    def dump_output():
        print("\n".join(output) + "\n", file=outfile)

    if len(els) == 1:
        dump_output()
        return

    # handle rest of items
    last_el = els[0]
    for el in els[1:]:
        # int-ify the level if None
        if el["NumberingLevel"] is None:
            el["NumberingLevel"] = 0
        if last_el["NumberingLevel"] is None:
            last_el["NumberingLevel"] = 0

        # new indent
        if el["NumberingLevel"] > last_el["NumberingLevel"]:
            indent += 1
        elif el["NumberingLevel"] < last_el["NumberingLevel"]:
            indent = max(0, indent - 1)
        else:
            pass
        # print("  new indent:", indent)

        if len(el["String"]) > 1:
            output.append(item_to_str(el))
        last_el = el

    dump_output()
81466000144b3cdf0353de23aa3999368ee05c21
17,688
from typing import AsyncGenerator


def mock_subscriber(topic_event) -> AsyncGenerator:
    """A mock subscriber."""
    async def _f():
        yield topic_event

    return _f()
25b65cddb2dfba1ff655e9675e6af4353bd40260
17,690
def filter_count_matrix(count_matrix, n_reads=50000, n_cells=2,
                        cpm_thresh=0.5):
    """
    Remove poorly sequenced cells and genes with low occurrence.

    Filter cells if the total aligned read counts falls below a provided
    threshold. Filter genes with both low counts-per-million and low
    occurrence among cells.

    Args:
        count_matrix (pd.DataFrame): raw read-count matrix.
        n_reads (int): minimum number of read counts a cell must have to
            avoid filtering. Default is 50,000.
        n_cells (int, optional): minimum number of cells required to
            exhibit the minimum expression level to avoid filtering.
            Default is 2.
        cpm_thresh (float, optional): minimum counts-per-million in lowly
            mapped genes. Default is 0.5.

    Returns:
        (pd.DataFrame): filtered dataframe.

    References:
        Cell Filtering:
            Rizzetto, S., Eltahla, A. A., Lin, P., Bull, R., Lloyd, A. R.,
            Ho, J. W. K., … Luciani, F. (2017). Impact of sequencing depth
            and read length on single cell RNA sequencing data of T cells.
            Scientific Reports, 7(1), 12781.
            https://doi.org/10.1038/s41598-017-12989-x
            https://www.nature.com/articles/s41598-017-12989-x
        Gene Filtering:
            Chen Y, Lun ATL and Smyth GK. From reads to genes to pathways:
            differential expression analysis of RNA-Seq experiments using
            Rsubread and the edgeR quasi-likelihood pipeline [version 2;
            referees: 5 approved]. F1000Research 2016, 5:1438
            (doi: 10.12688/f1000research.8987.2)
            https://f1000research.com/articles/5-1438/v2
    """
    # drop cells with low coverage
    cell_counts = count_matrix.sum()
    bad_cells = cell_counts.index.values[cell_counts < n_reads]
    count_matrix.drop(bad_cells, axis=1, inplace=True)
    # drop genes with low expression and low occurrence
    cpm = count_matrix.apply(lambda x: x / cell_counts[x.name] * 10**6,
                             axis=0)
    low_genes = cpm.apply(lambda x: sum(x > cpm_thresh) < n_cells, axis=1)
    low_genes = low_genes.index.values[low_genes]
    return count_matrix.drop(labels=low_genes, axis=0)
66a9f36389afb2b7014a049e52a33c982593a124
17,692
def clean_dict(dictionary):
    """
    Returns a new but cleaned dictionary.

    * Keys with None type values are removed
    * Keys with empty string values are removed

    This function is designed so we only return useful data
    """
    newdict = dict(dictionary)
    for key in dictionary.keys():
        if dictionary.get(key) is None:
            del newdict[key]
        if dictionary[key] == "":
            del newdict[key]
    return newdict
c48f8894b554b7c3b5f8601a9ee7edc9964989e1
17,693
def luhn_checksum(s: str) -> int:
    """Compute the Luhn checksum over a string of digits (0 means the
    string passes the Luhn check)."""
    LUHN_ODD_LOOKUP = (0, 2, 4, 6, 8, 1, 3, 5, 7, 9)  # sum_of_digits(index * 2)

    evens = sum(int(p) for p in s[-1::-2])
    odds = sum(LUHN_ODD_LOOKUP[int(p)] for p in s[-2::-2])
    return (evens + odds) % 10
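Checking against the classic Luhn test number:

print(luhn_checksum("79927398713"))  # 0 -> passes the Luhn check
print(luhn_checksum("79927398710"))  # 7 -> fails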
d28705e84d211282d842e3ea1c8be71b3fb88be8
17,694
def print_pretty_dict(d):
    """
    Render a dictionary like a table in the CLI, one line per key.
    The first line (header) is "Wikipedia  Language Name".

    :param d: The dictionary
    :return: A pretty dictionary for your CLI
    """
    d = dict(sorted(d.items(), key=lambda item: item[0]))
    pretty_dict = "{:<10} {:<10}\n".format('Wikipedia', 'Language Name')
    for key, value in d.items():
        pretty_dict += "{:<10} {:<10}\n".format(key, value)
    return pretty_dict
41c5ae87d4d0d441f367cf74aad06b0450c6e212
17,698
import fnmatch
import os


def find_file_or_subdir_in_dir(pattern, base_dir, matching_names):
    """Common code to return a file or subdir by pattern."""
    matching_names = fnmatch.filter(matching_names, pattern)
    assert len(matching_names) == 1
    return os.path.join(base_dir, matching_names[0])
2c52b8e563edc1b7890d789d21df9c177ab62353
17,700
def markersdates(markers):
    """Returns the list of dates for a list of markers."""
    return [m[4] for m in markers]
108b3f903eb9ca8f9d510821703c5a7704cddc5a
17,701
def solution2(nums):
    """
    graph -> {} number: graph_index; graph[n] = graph_i;
    nums[graph[n]] == n -> it's the root
    find root: n -> graph_i = graph[n] -> nums[graph_i]
    """
    l = len(nums)
    graph = {}
    graph_size = {}

    def root_index(n):
        point_to_n = nums[graph[n]]
        if point_to_n == n:
            return graph[n]
        else:
            return root_index(point_to_n)

    max_size = 0
    for i in range(l):
        n = nums[i]
        if n in graph:
            continue
        root_i = i
        if n - 1 not in graph and n + 1 not in graph:
            graph[n] = i
        elif n - 1 in graph and n + 1 in graph:
            root_i1, root_i2 = root_index(n - 1), root_index(n + 1)
            graph[n] = root_i1
            root_i = root_i1
            if root_i1 != root_i2:
                graph[nums[root_i2]] = root_i1
                graph_size[root_i1] += graph_size[root_i2]
        else:
            # only one in graph
            n_in_graph = n - 1 if n - 1 in graph else n + 1
            graph[n] = graph[n_in_graph]
            root_i = root_index(n)
        # plus count
        graph_size[root_i] = (graph_size.get(root_i) or 0) + 1
        max_size = max(max_size, graph_size[root_i])
    return max_size
5fdd83d1fb68566e742bbc066349f145a561cac0
17,702
def tqdm_wrap(iterable):
    """Use tqdm if it is installed."""
    try:
        # import inside the function: a top-level import would raise before
        # the except clause could ever catch a missing tqdm
        from tqdm import tqdm
        return tqdm(iterable)
    except ModuleNotFoundError:
        return iterable
a7ac31102d75b58105378cf43657f1dbe2f8592e
17,703
import os


def _IsFileOrDirWithFile(path):
    """Determine if a path is a file or a directory with an appropriate
    file."""
    return os.path.isfile(path) or (
        os.path.isdir(path)
        and os.path.isfile(os.path.join(path, 'include.yaml')))
9d4cf7517199eb3f7b7f05aa9d0f1db48122acac
17,705
def kmer_coverage(C, L, k):
    """Converts contig coverage to kmer coverage

    Args:
        * C: Coverage
        * L: Template length
        * k: hash length
    """
    return C * (L - k + 1) / float(L)
b422b92e1c4269e24f43d88844383d56406a189b
17,707
def box(request):
    """
    Parametrized fixture determining whether/how to transform fill_value.

    Since fill_value is defined on a per-test basis, the actual
    transformation (based on this fixture) is executed in _check_promote.

    Returns
    -------
    boxed : Boolean
        Whether fill_value should be wrapped in an np.array.
    box_dtype : dtype
        The dtype to pass to np.array([fill_value], dtype=box_dtype). If
        None, then this is passed on unmodified, and corresponds to the
        numpy default dtype for the given fill_value.

    * (True, None)    # fill_value wrapped in array with default dtype
    * (True, object)  # fill_value wrapped in array with object dtype
    * (False, None)   # fill_value passed on as scalar
    """
    return request.param
b2faac79c6d78fa36342457bde0dcb4204022ab0
17,708
def interface(host):
    """Return an IP address for a client connection given the server host.

    If the server is listening on '0.0.0.0' (INADDR_ANY) or '::'
    (IN6ADDR_ANY), this will return the proper localhost.
    """
    if host == '0.0.0.0':
        # INADDR_ANY, which should respond on localhost.
        return '127.0.0.1'
    if host == '::':
        # IN6ADDR_ANY, which should respond on localhost.
        return '::1'
    return host
868ae36fdc45af5e64e53ed2222c18d04fc0c9c6
17,709
def quote(value):
    """
    Quotes and escapes the given value to pass it to tr64c
    """
    esc = {
        '\\': "\\\\",
        '\n': "\\n",
        '\r': "\\r",
        '\t': "\\t",
        '"': "\\\"",
        '\'': "\\'"
    }
    res = []
    for c in value:
        res.append(esc.get(c, c))
    return '"' + ''.join(res) + '"'
9fc0e7dcab7dd677497b764bf608aa17c5dccfd4
17,710
import os


def getBuildTime(path):
    """Get the modification time for a build folder."""
    return os.path.getmtime(path)
bc7bc1d46e59ee41869d3cb47dc5de7a714e86c8
17,711
import posixpath


def split_path(path):
    """Convert PATH to (parent-path, name), unless it is None."""
    return posixpath.split(path) if path is not None else None
2a1aed6ed265ec8fee20375a0db41f9d9f94df35
17,712