content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def removecommongaps(s1, s2):
    """Remove alignment columns where both sequences have a gap ('-').

    :param s1: first aligned sequence
    :param s2: second aligned sequence, same length as s1
    :returns: tuple (s1, s2) with common-gap columns removed
    :raises ValueError: if the sequences differ in length
    """
    if len(s1) != len(s2):
        raise ValueError('Sequences must be same length')
    # Single pass over the pair instead of zipping the sequences twice.
    kept = [(b1, b2) for b1, b2 in zip(s1, s2) if b1 != '-' or b2 != '-']
    return (
        ''.join(b1 for b1, _ in kept),
        ''.join(b2 for _, b2 in kept),
    )
a73c4227b00203033d0aacebc5d8f378fd5ce009
75,664
def add_position_clsite(modseq, clpos):
    """Adjust a cross-link site position for modification offsets.

    :param modseq: iterable of (position, shifted_position) pairs, in order
    :param clpos: unmodified cross-link position
    :returns: clpos shifted by the offset of the last modification at or
        before the cross-link site
    """
    offset = 0
    for modi in modseq:
        # -1 because the cross-link site is also shifted down by one.
        if clpos < modi[0] - 1:
            break
        # Keep only the most recent modification's length difference.
        offset = modi[1] - modi[0]
    return clpos + offset
48c0ae8bae6cc353ea5ecd344c5c64c983dba8e8
75,665
def warmup_lr(init_lr, step, iter_num):
    """Linearly warm up the learning rate.

    :param init_lr: target learning rate after warm-up
    :param step: current warm-up step
    :param iter_num: total number of warm-up iterations
    :returns: init_lr scaled by the fraction of warm-up completed
    """
    fraction = step / iter_num
    return fraction * init_lr
2dfb2fc82d63052bf84f9815c5ed9acf09f0fb71
75,669
import re
def escape_html_syntax_characters(string):
    """
    Escape the three HTML syntax characters &, <, >.
    & becomes &amp;
    < becomes &lt;
    > becomes &gt;
    """
    # Plain string replacement; no regex machinery needed for fixed text.
    # '&' must be escaped first so the '&' introduced by '&lt;'/'&gt;'
    # is not double-escaped.
    string = string.replace('&', '&amp;')
    string = string.replace('<', '&lt;')
    string = string.replace('>', '&gt;')
    return string
2e07a45c8aa30ca3a7ef0e4a5296d9205de634c6
75,674
def Measurement_timeofday_method_diff(self, probe):
    """Return microseconds elapsed since the previous probe; cache the time.

    The first call returns 0 because no previous value is cached yet.
    """
    current = probe.microsecondsSinceEpoch
    delta = current - self._last if hasattr(self, '_last') else 0
    self._last = current
    return delta
2b4b4985249af7030de651d55d4a873b310417b1
75,677
def read_embeddings(args, graph):
    """Read node embeddings from the file named by ``args.emb_filename``.

    The first line (node/dimension counts) is skipped.  Each remaining line
    is "<node index> <v1> <v2> ..."; indices are mapped to node names taken
    from ``graph.nodes()``.

    :returns: (dict of name -> vector, list of vectors, list of node names)
    """
    with open(args.emb_filename) as handle:
        # Drop the header line with the counts.
        rows = handle.read().splitlines()[1:]
    node_names = list(graph.nodes())
    emb = {}
    for row in rows:
        fields = row.split()
        emb[node_names[int(fields[0])]] = [float(v) for v in fields[1:]]
    return emb, list(emb.values()), node_names
e32e0323c8ea0156a220e0cd143a490871a3061b
75,679
from typing import Tuple def _parse_manifest_file(manifest_file_path: str) -> Tuple[list, list]: """ Parsing manifest file """ audio_paths = list() transcripts = list() with open(manifest_file_path) as f: for idx, line in enumerate(f.readlines()): audio_path, _, transcript = line.split('\t') transcript = transcript.replace('\n', '') audio_paths.append(audio_path) transcripts.append(transcript) return audio_paths, transcripts
98aeebbb0a3a47f67635aa3ac85b59542c31a14f
75,680
def makeNgramModel(tokenlist, n, fp=None):
    """Build (or extend) an N-gram count model as a dictionary.

    :param tokenlist: sequence of tokens
    :param n: N-gram size
    :param fp: optional existing counts dict to update; a fresh dict is
        created when omitted.  (The original's mutable ``{}`` default was
        shared across calls — the classic Python default-argument pitfall.)
    :returns: dict mapping space-joined N-grams to their counts
    """
    if fp is None:
        fp = {}
    for start in range(len(tokenlist) - (n - 1)):
        stringngram = " ".join(tokenlist[start:start + n])
        fp[stringngram] = fp.get(stringngram, 0) + 1
    return fp
1d5f1c36adb36c52d9bbd479c8f328259095f6c8
75,681
import re
def count(item, string, case_sensitive=False):
    """Returns the exact number of how many times `item` is found in `string`.

    :param item: item to count the occurrences of in `string`
    :param string: string to count occurrences of `item` in.
    :param case_sensitive: if set to `True`, the search after `item` is
        case-sensitive.
    """
    # re.escape makes the count literal: an item such as '.' or 'a+' used to
    # act as a regex pattern and inflate the count.  0 (not False) is the
    # idiomatic "no flags" value.
    flags = 0 if case_sensitive else re.I
    return len(re.findall(re.escape(str(item)), string, flags))
5e4e25fc066dbc0a743560484984b22bc52fe0e6
75,686
def as_learning_rate_by_sample(learning_rate_per_minibatch, minibatch_size, momentum=0, momentum_as_unit_gain=False):
    """Convert a per-minibatch learning rate to CNTK's per-sample scale.

    CNTK updates gradients as  g(t+1) = momentum * v(t) + (1-momentum) * grad(t)
    while most other frameworks use  g(t+1) = momentum * v(t), so the rate must
    additionally be divided by (1 - momentum) when momentum is a unit-gain
    factor.

    :param learning_rate_per_minibatch: the current learning rate
    :param minibatch_size: size of the minibatch
    :param momentum: current momentum (only used when momentum_as_unit_gain)
    :param momentum_as_unit_gain: True when momentum is a unit-gain factor
        (CNTK convention) rather than the TensorFlow-style plain factor
    :return: learning rate scaled per sample
    """
    assert learning_rate_per_minibatch > 0, "learning_rate_per_minibatch cannot be < 0"
    assert minibatch_size > 0, "minibatch_size cannot be < 1"
    per_sample = learning_rate_per_minibatch / minibatch_size
    if momentum_as_unit_gain:
        per_sample /= 1. - momentum
    return per_sample
2a07ea0c2541cc29f89c7ab7fe43299aee8ee5fa
75,694
def read_data(data_file):
    """Read whitespace-delimited numeric data points.

    Lines look like ``2.345 0.87 3.141 6.77``; the last column is the
    dependent variable and all columns before it are independent variables.

    :param str data_file: path to training data
    :returns: list of training data instances
    :rtype: list(list(float))
    """
    # `with` guarantees the file handle is closed; the original called
    # open() without ever closing it.
    with open(data_file) as handle:
        return [[float(attribute) for attribute in line.split()]
                for line in handle]
678f596138676728d921ee254e35fc513eeeefa4
75,698
import importlib
def import_modules(modules):
    """Import an iterable of module names, returning the module objects.

    Names that cannot be imported are silently skipped.
    """
    imported = []
    for name in modules:
        try:
            imported.append(importlib.import_module(name))
        except ImportError:
            pass  # skip modules that are not installed
    return imported
ea236dc7e62abda22a88691b4fa1caba55684803
75,699
def suqeuclidean(x, y):
    """Squared Euclidean distance between two 1-D arrays."""
    total = 0.0
    for idx in range(x.shape[0]):
        diff = x[idx] - y[idx]
        total += diff * diff
    return total
9be6d56e3dc4629f17b04a31bb1c69033b1bd237
75,703
import threading
def StrptimeRun(strptime, patched):
    """Checks the given strptime function runs without raising an Exception.

    Returns:
      True on success, False on failure.
    """
    if patched:
        import _strptime  # pylint: disable=unused-import

    # Initialize the flag before any thread can read or set it.  The original
    # only assigned `error` on failure, so the final `return not error` raised
    # NameError whenever every thread succeeded.
    global error
    error = False

    def Target(fn):
        global error  # pylint: disable=global-statement
        try:
            fn('Tue Aug 16 21:30:00 1988', '%c')
        except AttributeError:
            error = True

    threads = []
    for unused_i in range(2):
        t = threading.Thread(target=Target, args=(strptime,))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    return not error
ab29a3dced94eeef3c30fe5bfe6f9e125c17d301
75,704
def filter_columns_by_prefix(columns, prefixes):
    """Filter columns by prefix.

    :param columns: iterable of column names
    :param prefixes: iterable of string prefixes
    :returns: set of columns starting with any of the prefixes
    """
    # str.startswith accepts a tuple of prefixes, replacing the original
    # `True in (generator)` membership scan.
    prefix_tuple = tuple(prefixes)
    return {column for column in columns if column.startswith(prefix_tuple)}
5757a4d1cd9312e42e9bf9e3e17d4358c546a155
75,705
def non_negative_validation(value):
    """Return ``value`` unchanged, raising ValueError if any part is negative.

    Accepts either a single number or a list of numbers.

    :raises ValueError: when the value (or any list element) is negative
    """
    if isinstance(value, list):
        if any(item < 0 for item in value):
            raise ValueError("The Values in the list must not be negative")
        return value
    if value < 0:
        raise ValueError("The Value must not be negative.")
    return value
9cdb18a781819d856b3ae44bea9a030bac941059
75,706
def _recompute_best(results): """_recompute_best. Internal helper function for the AnnealResults class. Computes the best AnnealResult in an AnnealResults object. Parameters ---------- results : AnnealResults object. Returns ------- res : AnnealResult object. The AnnealResult from ``results`` with the lowest value. """ best = None for r in results: if best is None or r.value < best.value: best = r return best
3d2dd370663c3395bc09dfa3fc864c7f7b2f87c4
75,714
def func_bool(x):
    """Constant predicate: always returns True regardless of ``x``."""
    return True
42d6ee553390c8eff1b3194b4b888a1397aee237
75,715
def inv_mod(n: int, p: int):
    """
    Find a inverse of n mod p.

    :param n: Value of n where nx === 1 (mod p)
    :param p: Value of p where nx === 1 (mod p)
    :returns: Value of x where nx === 1 (mod p)
    """
    # Three-argument pow (Python 3.8+) computes the modular inverse directly.
    inverse = pow(n, -1, p)
    return inverse
30847b0869ed8cd4fa425404561233078105eeaf
75,716
import statistics def smooth(data_in, window_size): """Smooths the data, which should be a list, by averaging with the given window size.""" data_in_len = len(data_in) data_out_len = data_in_len - window_size + 1 if data_out_len <= 0: return data_in data_out = [] for i in range(0, data_out_len): val = statistics.mean(data_in[i:i + window_size]) data_out.append(val) return data_out
4ec6eff2e40d2c1804a153d5724b7f241126b844
75,719
def groupBy(keyFn, row_data):
    """Group rows in row_data by the result of keyFn.

    Arguments:
    keyFn -- A function that returns the cell data of the column to group by.
             Needs to be tailored to the structure of row_data.
    row_data -- A table organized as a list of row data structures.

    Returns:
    { 'key_id1': [ row1, row2, ...], 'key_id2': [ row4, ... ], ...}
    """
    grouped = {}
    for row in row_data:
        # setdefault replaces the original get/append/store round-trip.
        grouped.setdefault(keyFn(row), []).append(row)
    return grouped
5a6b4fa6bb7a81884c7ecd72c8599f0850e52c11
75,720
def compose_name(hidden_units, learning_rate, epsilon, lmbda, lr_decay, search_plies=1):
    """Return name for parameter save file based on the hyperparameters."""
    parts = [
        f'N{hidden_units:d}',
        f'-alpha{learning_rate:.3f}',
        f'-lambda{lmbda:.2f}',
        f'-epsilon{epsilon:.5f}',
        f'-dalpha{lr_decay:.6f}',
    ]
    # Search depth only appears when it is non-default.
    if search_plies > 1:
        parts.append(f'-plies{search_plies}')
    return ''.join(parts)
671346332a7e4c63ef0038d43eed3acc6d5cc485
75,721
def get_down_str(down):
    """Converts an integer down to a string.

    :param down: the down number
    :return: "1st"/"2nd"/"3rd", or "" for any other value
    """
    return {1: "1st", 2: "2nd", 3: "3rd"}.get(down, "")
964dd05a5c4e384e00c0d5ea2be6be52cb44bc36
75,723
def drop(num):
    """Produce a sequence with the same elements as the input sequence,
    but omitting the first num elements.
    """
    def dropper(input):
        remaining = num
        for element in input:
            if remaining > 0:
                remaining -= 1
                continue
            yield element
    return dropper
97f89a47382475f81265405c2d7024ad22d3ff3f
75,725
def build_geometry(self):
    """Compute the curves (Lines) needed to plot the Slot.

    The ending point of a curve is the starting point of the next curve
    in the list.

    Parameters
    ----------
    self : SlotW15
        A SlotW15 object

    Returns
    -------
    curve_list: list
        A list of 6 Segment and 5 Arc (None entries filtered out)
    """
    line_dict = self._comp_line_dict()
    ordered_keys = [
        "1-2", "2-3", "3-4", "4-5", "5-6", "6-8",
        "8-9", "9-10", "10-11", "11-12", "12-13",
    ]
    return [line_dict[key] for key in ordered_keys
            if line_dict[key] is not None]
ad5a09221a623e8a03cf138ae73abb3cb4c20d37
75,728
def each(xs: list, f) -> list:
    """each(xs, f)  e.g. xs >> each >> f
    Answers [f(x) for x in xs]"""
    return list(map(f, xs))
cf601609e17949f8c5ef96688ac76176fb0a647c
75,729
def dict_subset(x, include=()):
    """Subset a dict.

    :param x: source mapping
    :param include: keys to keep (defaults to none, yielding an empty dict).
        An immutable tuple default replaces the original mutable ``[]``
        default argument.
    :returns: new dict with only the requested keys
    """
    keep = set(include)
    return {k: v for k, v in x.items() if k in keep}
f278b30d48d623e00bdc669bb08b62ed263f036e
75,730
def zscore_normalize_array(array, mean, std_dev):
    """
    Zscore normalize the numpy array based on the genomic mean and
    standard deviation.

    :param std_dev: genome-wide standard deviation
    :param mean: genome-wide mean
    :param array: input array of bigwig values
    :return: the z-score normalized array
    """
    centered = array - mean
    return centered / std_dev
2b4f19be7311b1e997f982f82b64c5dd50cf6e8b
75,731
def get_final_pop(dict_gen):
    """Extract the final population and its fitness lists from a results dict.

    Parameters
    ----------
    dict_gen : dict
        Dict holding generation number as key and population object as value

    Returns
    -------
    tup_res : tuple
        Results tuple (final_pop, list_ann, list_co2)
    """
    # The highest generation number holds the final population.
    final_pop = dict_gen[max(dict_gen.keys())]
    list_ann = []
    list_co2 = []

    print()
    print('Final population results:')
    print('##############################################################')

    for ind in final_pop:
        ann, co2 = ind.fitness.values[0], ind.fitness.values[1]
        print(ind)
        print('Fitnesses: ' + str(ann) + ', ' + str(co2))
        # Only collect fully-evaluated (float) fitness pairs.
        if isinstance(ann, float) and isinstance(co2, float):
            list_ann.append(ann)
            list_co2.append(co2)

    return (final_pop, list_ann, list_co2)
dac5abdfe1099abd8d2393aa2c158329f4cf33f4
75,733
def simplify_logger_name(logger_name: str):
    """Simple function to reduce the size of the loggers name.

    Each dot-separated module keeps the first letter of each of its
    underscore-separated parts.

    Parameters:
        logger_name (str): Name of the logger to simplify,
            e.g. path.to.my_module

    Examples:
        simplify_logger_name('path.to.my_module') = 'p.t.mm'
    """
    abbreviated = []
    for module in logger_name.split('.'):
        initials = ''.join(part[0] for part in module.split('_'))
        abbreviated.append(initials)
    return '.'.join(abbreviated)
eb67b1002dac4feaeae07803a75531d36f7fcceb
75,737
def parse_wire(line):
    """Parse line to directions with steps creating wire.

    Each comma-separated instruction like 'R8' becomes ('R', 8).
    """
    segments = []
    for instruction in line.split(','):
        direction = instruction[0]
        steps = int(instruction[1:])
        segments.append((direction, steps))
    return segments
74ec6e11b739093c52687f6e1bfed6854c53c607
75,738
def ema(df, n, m):  # exponential moving average
    """
    Wrapper function to estimate EMA.

    :param df: a pandas Series/DataFrame.
    :param n: period; :param m: smoothing weight.
    :return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
    """
    result = df.copy()
    for i in range(1, len(df)):
        # Fixes two defects in the original: `result[i-1]` was a *column*
        # lookup (KeyError on a DataFrame/Series with non-integer labels) and
        # `df.iloc[i-1]` was off by one vs. the docstring formula, which uses
        # the current observation a_t.
        result.iloc[i] = (m * df.iloc[i] + (n - m) * result.iloc[i - 1]) / n
    return result
854593e971036f3a4465948583d4ab21215ac942
75,742
import typing
import requests
import time
def get_response(
        url: str,
        *,
        max_attempts=5,
        **request_kwargs
) -> typing.Union[requests.Response, None]:
    """Return the response, or None when every attempt fails.

    Tries to get the response max_attempts times.

    Args:
        url (str): url string to be retrieved
        max_attempts (int): number of request attempts for the same url
        request_kwargs (dict): kwargs passed to requests.get();
            timeout = 10 [default] unless overridden

    E.g., r = get_response(url, max_attempts=2, timeout=10)
    """
    # Apply the default timeout unless the caller supplied one.
    request_kwargs.setdefault("timeout", 10)
    for _ in range(max_attempts):
        try:
            return requests.get(url, **request_kwargs)
        except requests.RequestException:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; brief backoff before retrying.
            time.sleep(0.01)
    return None
03a98a3b6fad18bd821418b51997135d1f5683f6
75,743
import asyncio
def create_task(coroutine):
    """Schedules a coroutine to be run on the current event loop."""
    loop = asyncio.get_event_loop()
    return loop.create_task(coroutine)
f830613139e7e0e65271f12770a13ec03caac83b
75,744
def get_style(format):
    """Infer style from output format."""
    if format == 'simple-html':
        return 'html'
    if format in ('tex', 'latex', 'pdf'):
        return 'markdown_tex'
    return 'markdown'
d3ac2bdc64bc76cd689b8de5cc824aed9a2e282c
75,747
def normalise_to_max(xarray, yarray):
    """Given x and y arrays, return a y array normalised to 100% at the
    maximum value of the original y array.

    xarray is accepted for API symmetry but does not affect the result.
    """
    peak = max(yarray)
    return (yarray / peak) * 100
123bbcb3879389f34cd5f4aba9efc7ee01d310a0
75,749
def ProfileCurve(type=0, a=0.25, b=0.25):
    """
    ProfileCurve( type=0, a=0.25, b=0.25 )

    Create profile curve

    Parameters:
        type - select profile type, L, H, T, U, Z (type=int)
        a - a scaling parameter (type=float)
        b - b scaling parameter (type=float)

    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n] (type=list)
    """
    newpoints = []
    # Each branch lists the profile outline as [x, y, z] vertices within the
    # [-1, 1] square; `a`/`b` control limb thickness.  Note: `a`/`b` are
    # halved in some branches before use.
    if type == 1:
        # H:
        a *= 0.5
        b *= 0.5
        newpoints = [
            [-1.0, 1.0, 0.0], [-1.0 + a, 1.0, 0.0],
            [-1.0 + a, b, 0.0], [1.0 - a, b, 0.0],
            [1.0 - a, 1.0, 0.0], [1.0, 1.0, 0.0],
            [1.0, -1.0, 0.0], [1.0 - a, -1.0, 0.0],
            [1.0 - a, -b, 0.0], [-1.0 + a, -b, 0.0],
            [-1.0 + a, -1.0, 0.0], [-1.0, -1.0, 0.0]
        ]
    elif type == 2:
        # T:
        a *= 0.5
        newpoints = [
            [-1.0, 1.0, 0.0], [1.0, 1.0, 0.0],
            [1.0, 1.0 - b, 0.0], [a, 1.0 - b, 0.0],
            [a, -1.0, 0.0], [-a, -1.0, 0.0],
            [-a, 1.0 - b, 0.0], [-1.0, 1.0 - b, 0.0]
        ]
    elif type == 3:
        # U:
        a *= 0.5
        newpoints = [
            [-1.0, 1.0, 0.0], [-1.0 + a, 1.0, 0.0],
            [-1.0 + a, -1.0 + b, 0.0], [1.0 - a, -1.0 + b, 0.0],
            [1.0 - a, 1.0, 0.0], [1.0, 1.0, 0.0],
            [1.0, -1.0, 0.0], [-1.0, -1.0, 0.0]
        ]
    elif type == 4:
        # Z:
        a *= 0.5
        newpoints = [
            [-0.5, 1.0, 0.0], [a, 1.0, 0.0],
            [a, -1.0 + b, 0.0], [1.0, -1.0 + b, 0.0],
            [1.0, -1.0, 0.0], [-a, -1.0, 0.0],
            [-a, 1.0 - b, 0.0], [-1.0, 1.0 - b, 0.0],
            [-1.0, 1.0, 0.0]
        ]
    else:
        # L: (default for any other `type` value)
        newpoints = [
            [-1.0, 1.0, 0.0], [-1.0 + a, 1.0, 0.0],
            [-1.0 + a, -1.0 + b, 0.0], [1.0, -1.0 + b, 0.0],
            [1.0, -1.0, 0.0], [-1.0, -1.0, 0.0]
        ]
    return newpoints
aafc553eaa9bed3e00187a074e102c86b3851656
75,750
from pathlib import Path
def path(s):
    """
    Returns a :class:`Path` object for the given string.

    :param str s: The string containing the path to parse
    :returns: A :class:`Path` object representing the path
    """
    # Split into directory part and basename at the last '/'.
    i = s.rfind('/') + 1
    dirname, basename = s[:i], s[i:]
    # Strip trailing slashes from dirname unless it is entirely slashes
    # (i.e. the filesystem root).
    if dirname and dirname != '/' * len(dirname):
        dirname = dirname.rstrip('/')
    # Extension starts at the last '.' of the basename; `i > 0` means a
    # leading '.' (hidden file) is not treated as an extension.
    i = basename.rfind('.')
    if i > 0:
        ext = basename[i:]
    else:
        ext = ''
    # NOTE(review): with pathlib, Path(dirname, basename, ext) *joins* the
    # three strings as path components (e.g. 'dir/file.txt/.txt'), which looks
    # unintended — this probably expects a project-local Path(dirname,
    # basename, ext) constructor.  Verify which Path this import shadows.
    return Path(dirname, basename, ext)
bb4af71ec534f59e6376d903472eae8ffdba0498
75,754
def filter_by_device_name(items, device_names, target_device_name):
    """Filter a list of items by device name.

    Args:
      items: A list of items to be filtered according to their corresponding
        device names.
      device_names: A list of the device names. Must have the same length as
        `items`.
      target_device_name: A `str` representing the desired device name.

    Returns:
      Filtered items from `items`.
    """
    assert len(items) == len(device_names)
    assert all(device_names), "device_names are not all non-empty strings"
    # `endswith` (not `==`) because device names from kernel/op execution can
    # differ slightly from `distribution.extended.worker_devices`.
    return [item for item, name in zip(items, device_names)
            if name.endswith(target_device_name)]
7beca323c953a59650392f60fa38e73fbc62e4b4
75,761
import struct
def read_crc(file):
    """Read a crc32 from a file (4 bytes, little-endian, signed)."""
    raw = file.read(4)
    (crc,) = struct.unpack('<i', raw)
    return crc
49bafb0512c224fa458c16c549dd0a3274d5f328
75,762
import math
def isPointEqual(point1, point2, tol=1e-4):
    """Determine if a Point3D is almost equal to another Point3D.

    Args:
        point1: (Point3D) The point to be checked
        point2: (Point3D) The point to check against
        tol: (float) Relative tolerance for almost-equality

    Returns:
        bool: True if every coordinate is close within tol
    """
    coordinate_pairs = (
        (point1.x, point2.x),
        (point1.y, point2.y),
        (point1.z, point2.z),
    )
    return all(math.isclose(a, b, rel_tol=tol) for a, b in coordinate_pairs)
d103fd3a676b34afac0eaba9aec6b15172c3ff63
75,767
import time
from datetime import datetime
def format_date(timestamp, precision=0):
    """
    Construct an ISO 8601 time from a timestamp.

    There are several possible sources for *timestamp*.

    - time.time() returns a floating point number of seconds since the
      UNIX epoch of Jan 1, 1970 UTC.
    - time.localtime(time.time()) returns a time tuple for the current
      time using the local time representation.
    - datetime.datetime.now() returns a datetime object for the current
      time using the local time representation, but with no information
      about time zone.
    - iso8601.parse_date(str) returns a datetime object for a previously
      stored time stamp which retains time zone information.

    In the first three cases the formatted date will use the local time
    representation but include the UTC offset for the local time.  The
    fourth case the UTC offset of the time stamp will be preserved in
    formatting.

    If *precision* is given, encode fractional seconds with this many
    digits of precision.  This only works if *timestamp* is a datetime
    object or seconds since epoch.
    """
    dt = None           # UTC offset in seconds, if known from the input
    microsecond = 0     # fractional-second part, if known from the input
    # Try converting from seconds to time_struct
    try:
        microsecond = int(1000000*(timestamp-int(timestamp)))
        timestamp = time.localtime(timestamp)
    except TypeError:
        # Not a floating point timestamp; could be datetime or time_struct
        pass
    # Try converting from datetime to time_struct
    if isinstance(timestamp, datetime):
        microsecond = timestamp.microsecond
        tz = timestamp.utcoffset()
        if tz is not None:
            # Preserve the datetime's own UTC offset (fourth case above).
            dt = tz.days*86400 + tz.seconds
        timestamp = timestamp.timetuple()
    # Find time zone offset; tm_isdst may be -1 (unknown), treated as no DST.
    isdst = timestamp.tm_isdst if timestamp.tm_isdst >=0 else 0
    if dt is None:
        # No offset from the input: use the local zone's (negated, since
        # time.timezone is seconds *west* of UTC).
        dt = -(time.timezone,time.altzone)[isdst]
    # Do the formatting
    local = time.strftime('%Y-%m-%dT%H:%M:%S',timestamp)
    sign = "+" if dt >= 0 else "-"
    offset = "%02d:%02d"%(abs(dt)//3600,(abs(dt)%3600)//60)
    # Truncate microseconds to the requested number of digits.
    fraction = ".%0*d"%(precision,microsecond//10**(6-precision)) if precision else ""
    return "".join((local,fraction,sign,offset))
305e001bef5aa91d2524e152552073413d974577
75,768
def joinf(sep, seq):
    """sep.join(seq), omitting falsy entries (None, '', 0); None if all omitted."""
    kept = [item for item in seq if item]
    return sep.join(kept) if kept else None
42f0cae4d624367c1943955d9dfa3b74d102232d
75,771
from datetime import datetime
import pytz
def now() -> datetime:
    """
    Returns the current datetime with the correct timezone information

    >>> isinstance(now(), datetime)
    True
    """
    naive_utc = datetime.utcnow()
    return naive_utc.replace(tzinfo=pytz.utc)
3a1966468cf597050750d94627e4f89c75d1b886
75,772
def is_file_genpath(genpath):
    """
    Determine whether the genpath is a file (e.g., '/stdout') or not
    (e.g., 'command').

    :param genpath: a generalized path
    :return: a boolean value indicating if the genpath is a file.
    """
    return genpath[:1] == '/'
41e6f19a0cedb52de761a5efdbf770aaae373f7d
75,783
def filter_interpolations(base: str, *args) -> str:
    """Filter the interpolations from a string.

    Args:
        base (str): The text to filter
        *args: The interpolations (Memory objects) to filter; each supplies
            a ``repr`` placeholder and a ``value`` replacement
    """
    for memspace in args:
        placeholder, replacement = memspace.repr, str(memspace.value)
        base = str(base).replace(placeholder, replacement)
    return base
4a27707853f32be1a703497a91d21784ee2f2dc4
75,786
def request_value(request, key, default=None):
    """
    Returns first found value, using this order: POST, GET, default.

    :param request: request object exposing POST and GET mappings
    :param key: parameter name to look up
    :param default: value returned when the key is absent from both
    :return: the first found value, or ``default``
    """
    # Check for None explicitly: the original `or` chain skipped legitimate
    # falsy values such as '' or '0' and fell through to GET/default,
    # contradicting the documented "first found" order.
    value = request.POST.get(key)
    if value is None:
        value = request.GET.get(key)
    if value is None:
        return default
    return value
28cc13be18632bee1c1ad43558470bdbaa5100ad
75,787
def get_fitness_score(subject, goal):
    """
    Return the total absolute difference between the subject and the goal.

    In this use case, subject and goal are equal-length lists of numbers;
    a lower score means a closer match.
    """
    # zip + sum replaces the index-based range(len(...)) accumulation loop.
    return sum(abs(g - s) for s, g in zip(subject, goal))
24cd6283d141affd7edff579956274c9c9aee6a4
75,788
def SearchTFProfNode(node, name):
    """Search a node in the tree (depth-first); None when not found."""
    if node.name == name:
        return node
    for child in node.children:
        found = SearchTFProfNode(child, name)
        if found:
            return found
    return None
dd24c7a299dcc7adab7b8bb0409659bb2623f2cc
75,790
import decimal
def round(val, digits, mode=decimal.ROUND_HALF_UP):
    """
    Round a decimal value to the given number of decimal places, using the
    given rounding mode, or the standard ROUND_HALF_UP if not specified.
    """
    exponent = decimal.Decimal("10") ** -digits
    return val.quantize(exponent, mode)
b9500218759328543eff4cd285b04cf6062d8686
75,791
def get_available_resources(threshold, usage, total):
    """Get a map of the available resource capacity.

    :param threshold: A threshold on the maximum allowed resource usage.
    :param usage: A map of hosts to the resource usage.
    :param total: A map of hosts to the total resource capacity.
    :return: A map of hosts to the available resource capacity.
    """
    return {
        host: int(threshold * total[host] - used)
        for host, used in usage.items()
    }
e5eca0a5eb6977d580f74ae6592b13211ac04f37
75,792
def remove_response(stream, pre_filt=(0.01, 0.02, 8.0, 10.0), response_output="DISP"):
    """
    Removes the instrument response.

    Assumes stream.attach_response has been called before.

    :param stream: stream object providing ``remove_response`` (ObsPy-style)
    :param pre_filt: corner frequencies of the pre-deconvolution bandpass
    :param response_output: output units passed through, e.g. "DISP"
    :return: the same stream, modified in place
    """
    # zero_mean/taper disabled: the caller is expected to have pre-processed
    # the trace before response removal.
    stream.remove_response(pre_filt=pre_filt, output=response_output,
                           zero_mean=False, taper=False)
    return stream
a312a30e0c9c45eca2c41118192df5bbdd95c53a
75,794
import hashlib
def sha256(msg):
    """Return the hex digest for a given msg (bytes)."""
    digest = hashlib.sha256(msg)
    return digest.hexdigest()
0597475f92e1183fbcede6ca7d20d89092bff6a6
75,796
def read_messages(message_file):
    """(file open for reading) -> list of str

    Read and return the message_file, with each line separated into a
    different item in a list and the newline character removed.
    """
    return [line.strip('\n') for line in message_file.readlines()]
c3c6a58ed1a85979165d9a71923fe1911f9a4fe2
75,797
def concatenate_qa(prev_qns_text_list, prev_ans_text_list):
    """
    Concatenates two lists of questions and answers.

    Each pair becomes "<q> | <a> || "; pairs are concatenated in order.
    """
    # str.join avoids quadratic repeated string concatenation.
    return "".join(
        q + " | " + a + " || "
        for q, a in zip(prev_qns_text_list, prev_ans_text_list)
    )
0dc6bca0cc84e5b6a06b67304142369c596624cc
75,809
def is_feat_in_sentence(sentence, features):
    """
    Parameters
    ----------
    sentence: str,
        One sentence from the info text of a mushroom species
    features: list of strs
        List of possible features as in dataset_categories.features_list

    Return
    ------
    bool,
        True if sentence contains at least one feature from features and
        else False.
    """
    # any() with a generator replaces the manual early-return loop.
    return any(feature in sentence for feature in features)
5094edcbcad15ea7b1aaa77a32b5aa758f0ac2d6
75,812
import re
def add_asic_arg(format_str, cmds_list, asic_num):
    """
    Add ASIC specific arg using the supplied string formatter.

    New commands are added for each ASIC.  In case of a regex parameter,
    a new regex is created for each ASIC.
    """
    def _expand(template):
        # Expand one "{}" template into its per-ASIC variants.  With a single
        # ASIC the placeholder is simply removed.
        if asic_num == 1:
            return [template.format("")]
        return [template.format(format_str.format(asic))
                for asic in range(0, asic_num)]

    updated_cmds = []
    for cmd in cmds_list:
        if isinstance(cmd, str):
            if "{}" in cmd:
                updated_cmds.extend(_expand(cmd))
            else:
                updated_cmds.append(cmd)
        else:
            # Regex parameter: expand its pattern, then recompile.
            if "{}" in cmd.pattern:
                updated_cmds.extend(re.compile(p) for p in _expand(cmd.pattern))
            else:
                updated_cmds.append(cmd)
    return updated_cmds
652d7b4439e4ad68dee4f125b6b7a7ebe26467c5
75,817
def set_intersect(variable1, variable2, d):
    """
    Expand both variables, interpret them as lists of strings, and return
    the intersection as a flattened string.

    For example:
    s1 = "a b c"
    s2 = "b c d"
    s3 = set_intersect(s1, s2) => s3 = "b c"
    """
    words1 = set(d.getVar(variable1).split())
    words2 = set(d.getVar(variable2).split())
    common = words1.intersection(words2)
    return " ".join(common)
db68885a18f52bdc439ee0cdbd3f756b9e3dc1cb
75,824
from typing import OrderedDict
def read_dat_file(dat_file):
    """
    Read an ASCII ".dat" file from JXP format 'database'.

    Each line looks like "<value> ! <key>".

    Parameters
    ----------
    dat_file : str
        filename

    Returns
    -------
    dat_dict : OrderedDict
        A dict containing the info in the .dat file
    """
    datdict = OrderedDict()
    # `with` guarantees the handle is closed even when a malformed line
    # raises; the original leaked the handle on exception.
    with open(dat_file, 'r') as f:
        for line in f:
            parts = line.split('! ')
            datdict[parts[1].strip()] = parts[0].strip()
    return datdict
37df8d16eeb9e45123c14f11ec5aaa2abfcb7b7a
75,828
from typing import Tuple def _gray_code_comparator(k1: Tuple[int, ...], k2: Tuple[int, ...], flip: bool = False) -> int: """Compares two Gray-encoded binary numbers. Args: k1: A tuple of ints, representing the bits that are one. For example, 6 would be (1, 2). k2: The second number, represented similarly as k1. flip: Whether to flip the comparison. Returns: -1 if k1 < k2 (or +1 if flip is true) 0 if k1 == k2 +1 if k1 > k2 (or -1 if flip is true) """ max_1 = k1[-1] if k1 else -1 max_2 = k2[-1] if k2 else -1 if max_1 != max_2: return -1 if (max_1 < max_2) ^ flip else 1 if max_1 == -1: return 0 return _gray_code_comparator(k1[0:-1], k2[0:-1], not flip)
ba8a23a949b0a92d69574cd525639e83442999c1
75,830
def reinforce_grad(loss):
    """
    A closure to modify the gradient of a nn module. Use to implement
    REINFORCE gradient. Gradients will be multiplied by loss.

    Arguments:
    - loss: Gradients are multiplied by loss, should be a scalar
    """
    def hook(module, grad_input, grad_output):
        # NOTE(review): for a PyTorch full-backward hook, grad_input is a
        # *tuple* of tensors; `grad_input * loss` would then fail (or repeat
        # the tuple for an integer loss) rather than scale each gradient.
        # Confirm how this hook is registered before relying on it.
        new_grad = grad_input * loss
        return new_grad
    return hook
c1dfcf5079e2516785867dd6677ece04b831fcb4
75,834
def build_array(text):
    """Returns an array of parsed lines from the input text.

    Array elements are in the format: (min, max, character, string);
    input lines look like "1-3 a: abcde".
    """
    parsed = []
    with open(text, 'r') as handle:
        for line in handle:
            bounds, char, candidate = line.strip().split()
            low, high = bounds.split('-')
            # char[0] drops the trailing ':' of the policy character.
            parsed.append((int(low), int(high), char[0], candidate))
    return parsed
2a46b5f09bb08bc146c56d94b175a9c0fbc407ab
75,837
def transpose(matrix):
    """Transposes a 2-dimensional (rectangular) list: rows become columns."""
    return [list(column) for column in zip(*matrix)]
7566e59b976cf17f31884d717ce8e1f634f918ce
75,839
def flip_data_str_signs(data):
    """Flip the signs of data string, e.g. '1 2 3' --> '-1.0 -2.0 -3.0'.

    Note that values are parsed and re-rendered as floats.
    """
    flipped = [str(-float(token)) for token in data.split()]
    return ' '.join(flipped)
071509863c8e9616df9eeb20e56b6f52ea090ad2
75,843
def f90float(s):
    """Convert string repr of Fortran floating point (e.g. '1.5d0') to a
    Python double."""
    # Fortran uses 'd'/'D' as the exponent marker where Python uses 'e'.
    normalized = s.lower().replace('d', 'e')
    return float(normalized)
567e7302bd4f28dc252bd75af3851fd7ad3293d7
75,847
import requests
import pickle
def api_request(file, thresh=0.5):
    """
    Post request to serverless backend api where our model is located.

    Receives a csv with the classes and polygons classified by our model.

    Parameters
    ----------
    file: .tiff file
        Tiff file to be classified by our model
    thresh: float
        Threshold applied. To be classified a prediction values has to be
        equal or greater than threshold
    """
    try:
        req = requests.post(
            'https://hxy1cn1sl8.execute-api.us-east-1.amazonaws.com/Prod/segment_tiff',
            data=file,
            params={'threshold': thresh})
        # SECURITY: pickle.loads on a network response executes arbitrary
        # code if the endpoint is ever compromised — prefer a safe format
        # such as JSON/CSV for transport.
        polygons = pickle.loads(req.content)
        polygons.to_csv('src/data/polygons.csv', index=False)
    except requests.RequestException:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt).  Also forward `thresh`, which the original
        # retry silently dropped, resetting it to the default.
        return api_request(file, thresh)
717302d8c8144cb65953298eeef74efa5324a7c0
75,849
def dep_parenreduce(mysplit, mypos=0):
    """Accepts a list of strings, and converts '(' and ')' surrounded items
    to sub-lists:

    >>> dep_parenreduce([''])
    ['']
    >>> dep_parenreduce(['1', '2', '3'])
    ['1', '2', '3']
    >>> dep_parenreduce(['1', '(', '2', '3', ')', '4'])
    ['1', ['2', '3'], '4']
    """
    # Scans forward from mypos, splicing each parenthesized span into a
    # single nested-list element *in place*.
    while mypos < len(mysplit):
        if mysplit[mypos] == "(":
            firstpos = mypos
            mypos = mypos + 1
            while mypos < len(mysplit):
                if mysplit[mypos] == ")":
                    # Replace the '(' ... ')' span with one sub-list element;
                    # reset mypos to the splice point since the list shrank.
                    mysplit[firstpos:mypos+1] = [mysplit[firstpos+1:mypos]]
                    mypos = firstpos
                    break
                elif mysplit[mypos] == "(":
                    # recurse: reduce the nested group first, then continue
                    # scanning the (now shorter) list.
                    mysplit = dep_parenreduce(mysplit,mypos)
                mypos = mypos + 1
        mypos = mypos + 1
    return mysplit
2d40980bd43cf0902cc7a4fe0fbfff35fdac9e8f
75,852
def imdb_iM_table(imodulon_table, cat_order=None):
    """
    Reformats the iModulon table according to iModulonDB's expectations.

    Parameters
    ----------
    imodulon_table : ~pandas.DataFrame
        Table formatted similar to IcaData.imodulon_table
    cat_order : list, optional
        List of categories in imodulon_table.category, ordered as desired

    Returns
    -------
    im_table: ~pandas.DataFrame
        New iModulon table with the columns expected by iModulonDB
    """
    # Keep only the columns the web interface displays; .copy() so later
    # assignments don't touch the caller's table.
    im_table = imodulon_table[
        [
            "name",
            "regulator_readable",
            "function",
            "category",
            "n_genes",
            "exp_var",
            "precision",
            "recall",
        ]
    ].copy()
    im_table.index.name = "k"
    # Missing categories are shown as "Uncharacterized".
    im_table.category = im_table.category.fillna("Uncharacterized")

    if cat_order is not None:
        # Explicit ordering supplied: rank each category by its position.
        cat_dict = {val: i for i, val in enumerate(cat_order)}
        im_table.loc[:, "category_num"] = [
            cat_dict[im_table.category[k]] for k in im_table.index
        ]
    else:
        try:
            # Fall back to a precomputed ordering column when present.
            im_table.loc[:, "category_num"] = imodulon_table["new_idx"]
        except KeyError:
            # Last resort: order by the index itself.
            im_table.loc[:, "category_num"] = im_table.index
    return im_table
d8140b4642961879cbc0601b812aaac302873c24
75,856
def uint_size(value):
    """Returns number of bytes (power of two) to represent unsigned value."""
    assert value >= 0
    bits = 8
    # Double the width until the value fits.
    while value >= (1 << bits):
        bits *= 2
    return bits // 8
73ef3be70f3a9af28e45f0f0ed57830508cb756b
75,858
import re

def github_to_markdown_body(body: str) -> str:
    """
    Generate a markdown body from the GitHub provided one.

    Issue references like ``#123`` become links to the Rucio GitHub tracker,
    angle brackets are escaped, CRLF is normalized, and headings get a blank
    line and one extra level.

    :param body: The markdown body provided by the GitHub Releases.
    :returns: A markdown body.
    """
    issue_ref = re.compile(r"#(\d{1,5})")
    body = issue_ref.sub(r"[#\1](https://github.com/rucio/rucio/issues/\1)", body)
    # Order matters: CRLF must be collapsed before heading lines are matched.
    replacements = (
        ("<", r"\<"),
        (">", r"\>"),
        ("\r\n", "\n"),
        ("\n#", "\n\n##"),
    )
    for old, new in replacements:
        body = body.replace(old, new)
    return body
6a738960f202a805de1a01a2821c6178ec388f54
75,866
def poly_np(x, *coefs):
    """
    Evaluate a polynomial with no constant term at ``x``:

        f(x) = a * x + b * x**2 + c * x**3 + ...

    where ``coefs = (a, b, ...)``.  The ``x * 0`` seed keeps the accumulator
    the same shape/type as ``x``, so scalars and NumPy arrays both work.

    Raises
    ------
    ValueError
        If no polynomial coefficients are provided.
        (Previously a bare ``Exception``; ValueError is still caught by any
        caller handling ``Exception``.)
    """
    if not coefs:
        raise ValueError("You have not provided any polynomial coefficients.")
    # Accumulate c_i * x**(i+1), starting from a zero matching x's type.
    result = x * 0
    for power, c in enumerate(coefs, start=1):
        result += c * x ** power
    return result
4e77a0841478c817ce1904cb1f41c8bbf9238e53
75,871
def symbols_gen(N):
    """Generate single-letter symbols for pretty-printing a system of equations.

    :param N: number of variables
    :return: list of N symbol characters, starting at 'A'
    """
    return [chr(ord('A') + offset) for offset in range(N)]
4874d66082473cc02dd10d3f9e5c3c17aed995fd
75,873
def _neighbors(point): """ Get left, right, upper, lower neighbors of this point. """ i, j = point return {(i-1, j), (i+1, j), (i, j-1), (i, j+1)}
fd90d5c270c68c38a2ac352b0f1f064c1df377da
75,876
def compute_out_degrees(digraph):
    """
    dict -> dict

    Given a directed graph represented as a dictionary mapping nodes to
    their successor collections, return a dictionary mapping each node to
    its outdegree.
    """
    return {node: len(successors) for node, successors in digraph.items()}
f50050974a4e053ec4273e63b45f70a90c65d0f5
75,877
def overlaps_v(text_proposals, index1, index2):
    """
    Calculate the vertical overlap ratio of two text proposals.

    Args:
        text_proposals(numpy.array): Text proposals (boxes as [x0, y0, x1, y1]).
        index1(int): First text proposal.
        index2(int): Second text proposal.
    Return:
        overlap(float32): vertical overlap, intersection height divided by
        the smaller box height.
    """
    box1 = text_proposals[index1]
    box2 = text_proposals[index2]
    height1 = box1[3] - box1[1] + 1
    height2 = box2[3] - box2[1] + 1
    top = max(box1[1], box2[1])
    bottom = min(box1[3], box2[3])
    # Clamp at zero for vertically disjoint boxes.
    intersection = max(0, bottom - top + 1)
    return intersection / min(height1, height2)
7c8f1167e2334b80356db2f3d099a9081ddda6b9
75,878
def getIstioServiceName(service_name, project_id, zone):
    """Return the Istio service name of a certain service."""
    return f"ist:{project_id}-zone-{zone}-cloud-ops-sandbox-default-{service_name}"
c1dde7fb92d8415df1eec6343986cc4f152289fc
75,882
import torch
import math

def gaussian(window_size, sigma):
    """
    Build a 1-D Gaussian kernel as a Tensor of length ``window_size`` with
    standard deviation ``sigma``, normalized so its elements sum to 1.
    """
    center = window_size // 2
    weights = [
        math.exp(-((i - center) ** 2) / (2.0 * sigma ** 2))
        for i in range(window_size)
    ]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
424ead50a8283b57f96f851d4c85be63636f98bd
75,883
import random

def randBytes(b: int = 2) -> bytes:
    """
    Return *b* random bytes.

    :param b: number of bytes to generate
    :return: a ``bytes`` object of the requested length
    """
    return bytes(random.getrandbits(8) for _ in range(b))
51af4722e9710f0cff315a09bb9ce2f6e956ee6a
75,884
from typing import List
from typing import Any

def sort_file_summary_content(data: List[Any]) -> List[Any]:
    """Sort the summary-file entries by the concatenation of their
    identifying fields (branch, host, compiler, versions, mpi, o_g)."""
    fields = ("branch", "host", "compiler", "c_version", "mpi", "m_version", "o_g")

    def sort_key(entry):
        # Same ordering as concatenating the field values in sequence.
        return "".join(entry[name] for name in fields)

    return sorted(data, key=sort_key)
dbcc135bafdb097e00ea25e9cd6172599d5faa43
75,885
import copy

def badmatch(match, badfn):
    """Return a shallow copy of the given matcher whose ``bad`` method is
    replaced by ``badfn``; the original matcher is left untouched."""
    clone = copy.copy(match)
    clone.bad = badfn
    return clone
f9937a4076ede88b25735a0b095e63d7082da620
75,889
import yaml

def update_config(config, updates):
    """Modifies the YAML configurations, given a list of YAML updates.

    :param config: nested dict of configuration values; mutated in place.
    :param updates: a YAML string, or a list of YAML strings, each parsing to
        a mapping whose keys are dot-separated paths (e.g. ``"model.lr: 0.1"``).
    :return: the same ``config`` object, after applying every update.
    """
    # Accept a single update string as a convenience.
    if isinstance(updates, str):
        updates = [updates]
    for update in updates:
        edits = yaml.safe_load(update)
        for k, v in edits.items():
            # Walk down the dot-separated path, creating intermediate
            # dicts as needed, then assign the value at the leaf key.
            node = config
            for ki in k.split('.')[:-1]:
                if ki in node:
                    node = node[ki]
                else:
                    node[ki] = dict()
                    node = node[ki]
            ki = k.split('.')[-1]
            node[ki] = v
    return config
f9c66068226fc44d8fe8f35bb1f21a6d8648b3fb
75,891
from typing import List
from typing import Union
from pathlib import Path

def cli_args(tmpdir) -> List[Union[Path, str]]:
    """
    Fixture simulating a set of CLI arguments.

    NOTE(review): relies on ``requirements.in`` existing in the current
    working directory when the fixture runs.

    Returns:
        List of args: [input file, output folder, top-level dir, ignore tag].
    """
    requirements = Path("requirements.in")
    assert requirements.exists()
    tld = Path(tmpdir)
    return [requirements, tld / "fake_requirements", tld, "linting"]
bbfdc585289dbbdb5cd532b2791bf695cb2f5a91
75,892
def sentinel(name):
    """Return a unique object, suitable as a sentinel, whose repr is *name*."""
    # Build the one-off class dynamically; each call yields a distinct type
    # and instance, just like defining a fresh class statement would.
    attrs = {"__repr__": lambda self: name}
    return type("Sentinel", (object,), attrs)()
9e7d7dd333e7e0544c37744c603eccbbf36a9eea
75,896
import math

def event_prediction(alpha, var, r_t):
    """
    Predict the total number of retweets.

    :param alpha: parameter of the linear regression (scalar, or a sequence
        of per-event values)
    :param var: parameter of the linear regression (variance; scalar or
        sequence matching ``alpha``)
    :param r_t: the total number of tweets at the observation time
    :return: predicted number of retweets (scalar, or list matching ``alpha``)
    """
    # Accept int as well as float for the scalar case; previously an int
    # alpha fell through to the sequence branch and raised TypeError.
    if isinstance(alpha, (int, float)):
        return r_t * math.exp(alpha + var / 2.0)
    return [r_t * math.exp(alpha[i] + var[i] / 2.0) for i in range(len(alpha))]
96b2c6f96f150856ef6de7be70b618095bc069ca
75,897
def create_snapshot_repo(els, reponame, body, verify=True):
    """Function: create_snapshot_repo

    Description:  Creates a repository in Elasticsearch cluster.

    Arguments:
        (input) els -> ElasticSearch instance.
        (input) reponame -> Name of repository.
        (input) body -> Contains arguments for the dump command.
        (input) verify -> True|False - Validate the repository.
        (output) Return exit status of create_repository command.

    """
    # dict() takes a defensive copy before handing the body to the client.
    return els.snapshot.create_repository(
        repository=reponame, body=dict(body), verify=verify
    )
6a7bf809bfc7c9654a1779d45e65634a8266060a
75,898
def calculate_dir(start, target):
    """
    Calculate the direction in which to go to get from start to target.

    start: a tuple representing an (x,y) point
    target: a tuple representing an (x,y) point

    Returns a tuple (dx, dy) whose components are each -1, 0, or 1,
    i.e. the sign of the offset along each axis.
    """
    def sign(value):
        # Classic sign trick: True/False arithmetic yields -1, 0, or 1.
        return (value > 0) - (value < 0)

    return (sign(target[0] - start[0]), sign(target[1] - start[1]))
f728b0bbb80c3726218a73fd54d050eab25408e3
75,900
def rectified_linear_unit(x):
    """Return the ReLU of x, i.e. max(0, x).

    Implemented as multiply-by-mask so it also works elementwise on
    NumPy arrays, not just scalars.
    """
    positive_mask = x > 0
    return x * positive_mask
b59c8baae1cc997ae17188b08117776683c28ba7
75,905
def get_meta_value(meta, *keys, default=None):
    """
    Return a value from metadata, where the given keys define a path in the
    document tree.  Returns ``default`` when the path hits a missing key or
    a falsy intermediate node.
    """
    node = meta
    for key in keys:
        # A falsy node (None, {}, 0, ...) cannot be descended into.
        if not node:
            return default
        try:
            node = node[key]
        except KeyError:
            return default
    return node
89dfbe8ca157e51848a311c49a99de657f9238af
75,906
def as_words(string):
    """Split the string into words

    >>> as_words('\tfred was here ') == ['fred', 'was', 'here']
    True
    """
    # split() with no separator already collapses runs of whitespace and
    # ignores leading/trailing whitespace, so no explicit strip is needed.
    return string.split()
543472e536da2024d118a22575348bbb263abcaf
75,907
def getChildIndex(parent, toFind):
    """
    Return the index of the given child in the given parent, or -1 if it
    is not a child.  This performs a linear search over the sibling chain;
    only element nodes (nodeType == 1) advance the index.
    """
    index = 0
    node = parent.firstChild
    while node:
        # Equality check first, so toFind's own nodeType never matters.
        if node == toFind:
            return index
        if node.nodeType == 1:
            index += 1
        node = node.nextSibling
    return -1
abea7fd879a675d8e14a7d163518c3cc7fe33198
75,912
import itertools

def pairwise(iterable):
    """For a list ``s``, return pairs for consecutive entries.

    For example, a list ``s0``, ``s1``, etc. will produce ``(s0,s1),
    (s1,s2), ...`` and so on.
    See: https://docs.python.org/3/library/itertools.html#recipes."""
    # Two independent iterators; the second is advanced by one via islice.
    left, right = itertools.tee(iterable)
    return zip(left, itertools.islice(right, 1, None))
8e0a3dd02db27c547870b8ada2e848be128690b6
75,915
import torch

def clip_tensor(x, lb=0., ub=1.):
    """
    Clip a tensor elementwise to the interval [lb, ub].

    :param x: input tensor
    :param lb: lower bound (scalar)
    :param ub: upper bound (scalar)
    :return: clipped version of x
    """
    return x.clamp(min=lb, max=ub)
ce6d53080285bf53c118f6f3cc9cc22830d792d1
75,918
def between(data, delim1, delim2):
    """Extract the text between two delimiters.

    Takes the segment after the first ``delim1`` (up to the next ``delim1``,
    if any) and returns its portion before the first ``delim2``.

    Parameters
    ----------
    data : str
        Text to analyse
    delim1 : str
        First delimiter.
    delim2 : str
        Second delimiter.

    Returns
    -------
    str
        Text between delimiters.
    """
    # split (not partition) is deliberate: [1] is bounded by the *second*
    # occurrence of delim1, and a missing delim1 raises IndexError.
    after_first = data.split(delim1)[1]
    return after_first.split(delim2)[0]
781c62d3a925449f9d5bfce1fc744f366e61767f
75,925
import getpass

def ask_user_password(prompt: str) -> str:
    """Prompt on the console and read a password without echoing it."""
    full_prompt = prompt + ": "
    return getpass.getpass(full_prompt)
e0ea187d3f92e02d27d5bba7ad0feba3f8ee12a3
75,930
def decode(minterm, n_variables):
    """
    Convert a minterm number into its 0/1 string representation.

    Only the lowest ``n_variables`` bits are kept, most significant first.

    :param minterm: minterm index to convert
    :param n_variables: number of variables (string length)
    :return: the minterm as a 0/1 string
    :rtype: str
    """
    bits = [
        str((minterm >> shift) & 1)
        for shift in range(n_variables - 1, -1, -1)
    ]
    return ''.join(bits)
7cd014c293859f18e9e59fa992054df762cd77df
75,933
def lisp_string(python_string):
    """
    Convert a string to a Lisp string literal.
    """
    # Backslashes must be escaped before the quotes they might precede.
    escaped = python_string.replace('\\', '\\\\').replace('"', '\\"')
    return f'"{escaped}"'
e50d21f9c2b8e5438679f680773a7a18fd951e7d
75,937
import bisect

def findClosest(a, x):
    """
    Return the index of the value closest to `x` in sorted sequence `a`.

    Assumes ``a`` is non-empty and sorted ascending.  On a tie the lower
    index wins.  (Fixed: the boundary branches previously returned the
    *values* ``a[0]``/``a[-1]`` instead of indices, contradicting both the
    docstring and the interior branches.)
    """
    idx = bisect.bisect_left(a, x)
    if idx == 0:
        return 0
    if idx == len(a):
        return len(a) - 1
    # Here a[idx-1] <= x <= a[idx]; pick whichever neighbor is nearer.
    if a[idx] - x < x - a[idx - 1]:
        return idx
    else:
        return idx - 1
a5d33f353324fafc6fde07b88832e96c9f97e0bb
75,938
def choose(n, k):
    """ return the binomial coefficient of n over k, as an exact integer """
    def rangeprod(lo, hi):
        """ returns the product of all the integers in {lo,lo+1,...,hi}
        (1 for an empty range) """
        res = 1
        for t in range(lo, hi + 1):
            res *= t
        return res

    if n < k:
        return 0
    # Integer division is exact here; true division ('/') returned a float
    # and lost precision for large n.
    return rangeprod(n - k + 1, n) // rangeprod(1, k)
e76f01a39c0c5c731799cb02ead5ae096bd55e02
75,943
def tsv(infile, comment=None):
    """
    Return a generator over a tab-delimited file.

    Args:
        infile: Input file as a file-like object
        comment (str): Rows beginning with this string will be ignored.

    Returns:
        generator: A generator that yields each row in the file as a list.
    """
    def parse(line):
        # Drop the trailing newline only, then split on tabs.
        return line.strip('\n').split('\t')

    if comment is None:
        return (parse(line) for line in infile)
    return (parse(line) for line in infile if not line.startswith(comment))
1588c5fb67cdeda424a3ea76de7d013e68b116f3
75,951
def fix(s: str) -> str:
    """
    Capitalise the first letter of the first word of each '. '-separated
    sentence (str.capitalize also lowercases the rest of the sentence).
    """
    return '. '.join(sentence.capitalize() for sentence in s.split('. '))
2ceb5ea948f47771e259b3f895d49b0689a9e263
75,959