content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_version(diff_file, ix=True):
    """Derive the product version label from a diff file name.

    :param diff_file: path to the diff file
    :param ix: True when the diff was generated by APIx, False for CLIx
        (currently unused by this function)
    :return: a single version ("1.2") for *-comp.yaml files, otherwise a
        from/to label like "1.2-to1.3"
    """
    name_parts = diff_file.split("/")[-1].split("-")
    if "-comp.yaml" in diff_file:
        return name_parts[0]
    return f"{name_parts[0]}-to{name_parts[2]}"
a1d899abdea8ee59c76b6103bd27cb0b40e100cd
49,022
from typing import Optional
from typing import Union
from typing import cast
from typing import Dict


def validate_axis(axis: Optional[Union[int, str]] = 0, none_axis: int = 0) -> int:
    """Normalize *axis* to a numeric axis and validate it.

    None maps to *none_axis*, "index" to 0 and "columns" to 1; anything
    else must already be a valid numeric axis or ValueError is raised.
    """
    aliases: Dict[Optional[Union[int, str]], int] = {
        None: none_axis, "index": 0, "columns": 1
    }
    resolved = aliases.get(axis, axis)
    if resolved not in (none_axis, 0, 1):
        raise ValueError("No axis named {0}".format(resolved))
    return cast(int, resolved)
654cd35d1ddd4ff27c0a37635a211ddfca710c6e
49,024
import torch def so3_log_abs_det_jacobian(x): """ Return element wise log abs det jacobian of exponential map :param x: Algebra tensor of shape (..., 3) :return: Tensor of shape (..., 3) Removable pole: (2-2 cos x)/x^2 -> 1-x^2/12 as x->0 """ x_norm = x.double().norm(dim=-1) mask = x_norm > 1e-10 x_norm = torch.where(mask, x_norm, torch.ones_like(x_norm)) ratio = torch.where( mask, (2 - 2 * torch.cos(x_norm)) / x_norm ** 2, 1 - x_norm ** 2 / 12 ) return torch.log(ratio).to(x.dtype)
82d22cef96578a90d677d4d631dfb2ed7c783d05
49,029
def a1(row_index, column_index):
    """Return the A1 notation for the cell at (row_index, column_index).

    Columns beyond 'Z' repeat a single letter ('AA', 'BB', ...) per the
    original scheme, not spreadsheet-style 'AB' counting.
    """
    alphabet_size = ord('Z') - ord('A') + 1
    repeats, offset = divmod(column_index, alphabet_size)
    column_label = chr(ord('A') + offset) * (repeats + 1)
    return f'{column_label}{row_index + 1}'
d319600fcdddfbe7c0a715b56e8d00b6b9198d84
49,031
def get_E2K_lookup_dict(the_dict, main_key, sub_key):
    """Return a lookup dictionary for the_dict[main_key][sub_key].

    Maps each entry's 'ID' back to its key, skipping entries without an ID.
    Returns an empty dictionary when main_key or sub_key is missing (the
    old version returned None in those cases, contradicting its docstring).
    For use in the post-processing functions.
    """
    subdict = the_dict[main_key].get(sub_key, {}) \
        if the_dict.get(main_key) else {}
    if not subdict:
        return {}
    return {v.get('ID'): k for k, v in subdict.items() if v.get('ID')}
56dbb81592a00e873dc5bda1234377a48bde9e94
49,032
def make_feat_paths(feat_path):
    """Wrap a feature path in a single-element list.

    Args:
        feat_path (str): feature path, or None

    Returns:
        list: [feat_path], or None when feat_path is None
    """
    return None if feat_path is None else [feat_path]
6a7960c86a7f97afbe7b248970fd7f7f8efeda1e
49,034
def annuity_factor(n, i):
    """
    Calculate the annuity factor.

    Args:
        n: depreciation period in years (e.g. 40)
        i: interest rate (e.g. 0.06 for 6%)

    Returns:
        annuity factor, (1+i)**n * i / ((1+i)**n - 1)
    """
    growth = (1 + i) ** n
    return growth * i / (growth - 1)
9efb7062c609c260482a3f870350148518dba5d9
49,036
def trim_to_preferred_protocols(hosts_and_urls):
    """Keep only https/http/ftp URLs, choosing the most preferred protocol
    available for each host (https > http > ftp).

    The old docstring claimed only http and ftp were considered; the code
    has always preferred https first.

    :param hosts_and_urls: iterable of (hostid, [url, ...]) pairs
    :return: [(hostid, url), ...]; hosts with no matching protocol are dropped
    """
    try_protocols = ('https', 'http', 'ftp')
    results = []
    for hostid, hcurls in hosts_and_urls:
        # keep the LAST url seen per protocol, matching the old dict overwrite
        by_protocol = {}
        for hcurl in hcurls:
            for p in try_protocols:
                if hcurl.startswith(p + ':'):
                    by_protocol[p] = hcurl
        url = next((by_protocol[p] for p in try_protocols if p in by_protocol),
                   None)
        if url is not None:
            results.append((hostid, url))
    return results
c4e108e4a650431a39d3a689153f516c1b711143
49,039
def is_invalid_schema(schema, test_value):
    """
    Check *schema* against *test_value*, recursing through nested dicts.

    Returns True when any key expected by *test_value* is missing from
    *schema*, or any non-dict value differs.

    >>> is_invalid_schema({"valid_key": "some_value"}, {"valid_key": "some_value"})
    False
    >>> is_invalid_schema({"invalid_key": "some_value"}, {"valid_key": "some_value"})
    True
    >>> is_invalid_schema(
    ...     {"nested": {"valid_key": "some_value", "another_key": "some_value"}},
    ...     {"nested": {"valid_key": "some_value"}}
    ... )
    False
    """
    if not isinstance(test_value, dict):
        return schema != test_value
    for key, expected in test_value.items():
        if key not in schema or is_invalid_schema(schema[key], expected):
            return True
    return False
894109fc9910fc708d9a8800e1169d6e00876e0d
49,040
def formed_bond_keys(tra):
    """Return the keys of bonds that are formed in the transformation.

    :param tra: pair of (formed bond keys, broken bond keys)
    :return: the formed-bond keys (first element of the pair)
    """
    # dead commented-out debugging code removed
    frm_bnd_keys, _ = tra
    return frm_bnd_keys
7190e308c86f3c453696379e5b914c9665e8b482
49,046
import re


def handle_lambda(call, line, file):
    """Resolve calls that point to <lambda>.

    Looks up the lambda definition on the source line referenced by *call*
    and rewrites the call's entry point to the name of the variable the
    lambda is assigned to.

    :param call: call dictionary
    :param line: line dictionary
    :param file: file dictionary of the file pointed to by the call
    :return: updated call dictionary ({} when the call points at itself)
    """
    if call['entry_point'] != '<lambda>':
        return call
    if line['line_number'] == call['line_number']:
        # the call references its own line; drop it
        return {}
    target = call['line_number']
    # find the name of the variable defined on that source line
    assigned = re.search(r'(?<=\s)(\w*)(?=\s*=\s*lambda)',
                         file['lines'][target - 1]['code'])
    if assigned is not None:
        call['entry_point'] = assigned.group(0)
    return call
ff9c99f123596d7554bebdeb8c607fd8864ef1a4
49,047
import collections


def get_month_most_posts(dates):
    """Return the month (format YYYY-MM) occurring most often in *dates*.

    :param dates: list of datetime objects
    """
    months = collections.Counter(
        f"{d.year}-{str(d.month).zfill(2)}" for d in dates
    )
    return months.most_common(1)[0][0]
6c71301657e45529706688cdb51e4bbe43284d41
49,049
import math


def get_data_file(lon, lat):
    """
    Return the name of the HGT tile covering the given point.

    Credit: Aatish Neupane
    Link: https://librenepal.com/article/reading-srtm-data-with-python/

    :param lat: The latitude of the desired point.
    :param lon: The longitude of the desired point.
    :return: The data file name, e.g. "N50W115.hgt".
    :raise ValueError: If lat/lon are not within supported bounds.
    """
    lat_floor = math.floor(lat)
    lon_floor = math.floor(lon)
    if not (48 <= lat_floor < 60) or not (110 < lon_floor <= 121):
        raise ValueError("Point does not fall within supported bounds")
    return "N{}W{}.hgt".format(lat_floor, lon_floor)
c454df12af58fc1174fec591d5d32b3b97f85118
49,053
def initialDataLists(D):
    """
    Initialize the grid, scalars, and vectors lists used by convertToVtk.

    :param D: dict with 'scalar' and 'vector' sub-dicts
    :return: [grid, scalars, vectors] where scalars/vectors hold one fresh
        empty list per scalar/vector key
    """
    grid = []
    scalars = [[] for _ in D["scalar"].keys()]
    vectors = [[] for _ in D["vector"].keys()]
    return [grid, scalars, vectors]
96dab994f29c79d874fd4be55cc96757a3b7c8f4
49,055
def get_list_of_new_entries(device_entries: list, file_entries: list) -> list:
    """
    Compare log entries from the local logfile with those from the device.

    Note: entries taken from *device_entries* are popped off its tail, so
    the caller's list is mutated (matching the original behaviour).

    Args:
        device_entries (list): LogEntry objects fetched from the device
        file_entries (list): LogEntry objects from the local logfile

    Raises:
        N/A

    Returns:
        new_entries (list): device LogEntry objects not yet present in the
        local logfile, sorted by timestamp (oldest first)
    """
    # no local log yet (or it is empty): everything on the device is new
    if not file_entries:
        return device_entries
    # nothing on the device: nothing new
    if not device_entries:
        return []
    newest_known = file_entries[-1].timestamp
    new_entries = []
    # walk the device log backwards until we hit an already-known entry;
    # the emptiness guard fixes an IndexError when EVERY device entry is new
    while device_entries and device_entries[-1].timestamp > newest_known:
        new_entries.append(device_entries.pop())
    # restore chronological order (new entries last)
    new_entries.sort(key=lambda entry: entry.timestamp)
    return new_entries
baa16c9999f42e6d71a1d6bc62305f42239b61b0
49,058
def get_region_start(region: str) -> int:
    """Return the 1-based start position encoded in a region string.

    Accepts "contig", "contig:start-end", and "contig:start"; a bare
    contig starts at position 1.  (The old version crashed with
    ValueError on "contig:start" without an end.)
    """
    if ":" not in region:
        return 1
    _, start_end = region.split(":", 1)
    start = start_end.split("-", 1)[0]
    return int(start)
4dc1392815560d6e3886addaef7837ecf21069ba
49,059
def instjoin(*instnames):
    """Return hierarchical instance names from instance name components

    >>> instjoin('I1','VS')
    'I1.VS'
    >>> instjoin('','VS')
    'VS'
    """
    non_empty = (name for name in instnames if name)
    return '.'.join(non_empty)
3bd842de4bc6026d41d36267d2cca1b6d9d4b575
49,066
def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
    """Return True when the original and new TTL and record data both match."""
    return old_ttl == new_ttl and old_record_data == new_record_data
982f8d5b9f6c72b95a5d6b76d4909eb89906e3df
49,074
def htmlDataset(dataset=None, title=""):
    """Generate an HTML table from a dataset.

    :param dataset: mapping with a 'DATA' entry; ``dataset['DATA']`` is
        assumed to be indexable by the keys it yields when iterated (e.g.
        a dict keyed by row number), with row key 0 holding the header
        row — TODO confirm with callers.
    :param title: table caption text
    :return: HTML string

    NOTE(review): the old signature used a mutable default (``dataset={}``);
    an empty/omitted dataset raises KeyError('DATA') either way.
    """
    if dataset is None:
        dataset = {}
    content = "<TABLE cellpadding=5> <caption align=top>" + title + " </caption><TR></TR><TR></TR>"
    for row in dataset['DATA']:
        content += "<TR>"
        header = row == 0  # row key 0 is rendered as the header row
        for col in dataset['DATA'][row]:
            if header:
                content += "<TH align=left bgcolor=#BBDB88>" + col + "</TH>"
            else:
                content += "<TD align=left bgcolor=#FFFAB2>" + col + "</TD>"
        content += "</TR>"
    content += "</TABLE>"
    return content
871c44487fadacdb8c255389a57065b0887df1c6
49,075
def clamp(n, lower, upper):
    """
    Clamp *n* into the range [lower, upper].

    :param n: Number to clamp
    :param lower: Lower bound
    :param upper: Upper bound
    :return: Clamped number
    """
    capped = min(n, upper)
    return max(lower, capped)
df283f640cfee371b44d8ab33c90ac959f3601e2
49,078
import six


def GetLabels(args, client, instance_properties=False):
    """Build the labels message for an instance.

    Args:
        args: namespace with a ``labels`` dict (or None).
        client: compute client exposing the generated proto messages.
        instance_properties: use InstanceProperties.LabelsValue instead of
            Instance.LabelsValue.

    Returns:
        A LabelsValue message with key/value pairs sorted by key, or None
        when no labels were given.
    """
    labels_cls = client.messages.Instance.LabelsValue
    if instance_properties:
        labels_cls = client.messages.InstanceProperties.LabelsValue
    if not args.labels:
        return None
    properties = [
        labels_cls.AdditionalProperty(key=key, value=value)
        for key, value in sorted(six.iteritems(args.labels))
    ]
    return labels_cls(additionalProperties=properties)
f32e135a211c151d97062e5535e7b0149f081ea9
49,080
def get_video_parts(video_path):
    """Given a full path to a video, return its parts.

    Expects paths shaped like ".../<train_or_test>/<classname>/<filename>".
    Negative indexing makes this robust to extra leading directories, which
    the old fixed parts[1..3] indexing silently mis-handled.

    :return: (train_or_test, classname, filename_no_ext, filename)
    """
    parts = video_path.replace('\\', '/').split('/')  # adapt for Windows
    filename = parts[-1]
    classname = parts[-2]
    train_or_test = parts[-3]
    filename_no_ext = filename.split('.')[0]
    return train_or_test, classname, filename_no_ext, filename
9f514909f398b0abf5b929c5ca384971168a268e
49,084
def find_cal_by_type(standards, std_type):
    """
    :param standards: list of CalEvent objects
    :param std_type: string, one of 'high', 'mid', 'low'
    :return: the first CalEvent whose standard_used matches, or None
    """
    for event in standards:
        if event.standard_used == std_type:
            return event
    return None
f87e011534e6aa16c1c508bcafd334d7886f5568
49,085
import re


def match_mm3_angle(mm3_label):
    """Match an MM3* angle label: one whitespace or lowercase letter, then '2'.

    Returns the re.Match object, or None when the label doesn't match.
    """
    # raw string fixes the invalid '\s' escape warning in the old literal
    return re.match(r'[\sa-z]2', mm3_label)
bb9d3a41e9b0aabb6efe36efc251ceb83c134911
49,086
from typing import Any


def _float_val(col: Any) -> float:
    """Convert marker data to float so it fits in a typed np.array.

    The marker column is float when it holds a 0.0 value and str only for
    an actual marker value, which maps to 1.0.
    """
    return 1.0 if isinstance(col, str) else float(col)
8bd8636a21bb7e72eb6f585702462a34a63575a5
49,090
def __collatz_recursivo(numero: int, pasos: list) -> list:
    """Recursively compute the Collatz sequence, returning the steps taken.

    :param numero: current value (positive integer)
    :param pasos: accumulator list of steps, mutated in place
    :return: *pasos* with the full sequence down to 1 appended
    """
    pasos.append(numero)
    if numero == 1:
        return pasos
    siguiente = numero // 2 if numero % 2 == 0 else numero * 3 + 1
    return __collatz_recursivo(siguiente, pasos)
249a5de371528e5fe0f3aee247a0ce525461ca32
49,092
def get_multiple_ranges(service, spreadsheet_id, range_names):
    """
    Fetch multiple ranges from Google Sheets in one batch request.

    :param service: Authenticated sheets service object
    :param spreadsheet_id: Id of the spreadsheet
    :param range_names: Ranges to get
    :return: list of value ranges (empty when the response has none)
    """
    request = service.spreadsheets().values().batchGet(
        spreadsheetId=spreadsheet_id, ranges=range_names
    )
    response = request.execute()
    return response.get('valueRanges', [])
36bf548fe3d444909854317490379e5c64a0da33
49,094
def payload_satisfies(pred):
    """Lift *pred* so it applies to the payloads of Interval arguments.

    The returned function expects one or more Intervals (as many as
    ``pred`` expects) and applies ``pred`` to their payloads instead of
    the Intervals themselves.

    Arg:
        pred: The predicate to wrap.

    Returns:
        A function that applies ``pred`` to payloads.
    """
    def payload_pred(*intervals):
        payloads = [interval.payload for interval in intervals]
        return pred(*payloads)
    return payload_pred
8d758587d9a361740cfcb88c9ea29b7bea6f1ff7
49,096
def get_eso_file_version(raw_version):
    """Return eso file version as an integer (i.e.: 860, 890)."""
    version = raw_version.strip()
    first_space = version.index(" ")
    digits = version[first_space + 1:first_space + 6].replace(".", "")
    return int(digits)
d456ce65585a8aad0c08d9e53b6aeb77132a0d49
49,109
def have_readme(answer):
    """Report whether the project has a README.

    :param answer: mapping with a 'check_readme' entry
    :return: the stored 'check_readme' value
    """
    readme_flag = answer['check_readme']
    return readme_flag
47e963d36a18211e40580dae26e1c44b82f12375
49,111
def int_to_bytes(x, length):
    """Convert a big-endian integer to a bytearray of fixed *length*.

    High-order bytes beyond *length* are silently truncated, matching the
    original modulo behaviour.
    """
    result = bytearray(length)
    value = int(x)
    for pos in reversed(range(length)):
        result[pos] = value & 0xFF
        value >>= 8
    return result
f8cfceca1c51483115c7d2d877b9d4a6c7c5fef9
49,117
def strip_linebreaks(s):
    """Strip excess (empty) line breaks from a string."""
    kept_lines = [line for line in s.split(u'\n') if line]
    return u"\n".join(kept_lines)
537b835608e8697da4d9c05f1983fe39ef09b980
49,119
def find_output_node(nodetree):
    """Return the first OUTPUT-type node that has any linked input, else None."""
    for node in nodetree.nodes:
        if not node.type.startswith('OUTPUT'):
            continue
        if any(sock.is_linked for sock in node.inputs):
            return node
    return None
f3dafd1873baacdc1568a3ab6d7d8d70602127ec
49,120
import socket


def is_unused_port(port, protocol):
    """
    Check whether *port* is free to bind on 127.0.0.1.

    Args:
        port (int): port (0 asks the OS for any free port)
        protocol (str): application protocol (tcp or udp)

    Return:
        bool: True when the port could be bound, False otherwise.

    Raises:
        KeyError: if *protocol* is not 'tcp' or 'udp'.
    """
    sock_types = {
        'udp': socket.SOCK_DGRAM,
        'tcp': socket.SOCK_STREAM,
    }
    sock = socket.socket(family=socket.AF_INET, type=sock_types[protocol])
    try:
        sock.bind(('127.0.0.1', port))
        return True
    except socket.error:
        return False
    finally:
        # close on every path; the old code leaked the fd when bind failed
        sock.close()
6a40d4a1b9882d1f8cf2d9bfa56b59e2f81c7737
49,121
def _make_rv_params_variable(rv, **params):
    """
    Wrap the random variable *rv* so it accepts time-dependent parameters.

    When any parameter is callable, return a function of t that evaluates
    the callable parameters at t; otherwise build the rv immediately.
    """
    if not any(callable(x) for x in params.values()):
        return rv(**params)

    def rv_at(t):
        evaluated = {k: (x(t) if callable(x) else x) for k, x in params.items()}
        return rv(**evaluated)
    return rv_at
a497962b67f88a9bc01855e695d17a0aceaa8958
49,124
def _call_initialize(klass):
    """Class decorator: invoke ``_initialize`` as soon as the class is created."""
    klass._initialize()
    return klass
367642e8c53bf1bf4d7e8625dbe60938a5b0d7ff
49,126
import torch


def tsp_get_min_dist(dist_matrix):
    """
    Takes a distance matrix dist_matrix of shape (n,n) or (bs,n,n)
    Returns the mean minimal distance between two distinct nodes for each
    of the *bs* instances (a scalar when a single (n,n) matrix is given).
    """
    # Accept lists / numpy arrays by converting to a tensor up front.
    if not isinstance(dist_matrix, torch.Tensor):
        try:
            dist_matrix = torch.tensor(dist_matrix)
        except Exception:
            raise ValueError(f"Type {type(dist_matrix)} could not be broadcasted to torch.Tensor")
    # A single (n, n) matrix is promoted to a batch of one and unpacked at the end.
    solo = False
    if len(dist_matrix.shape) == 2:
        solo = True
        dist_matrix = dist_matrix.unsqueeze(0)
    bs, n, _ = dist_matrix.shape
    max_dist = torch.max(dist_matrix)
    # Mask the diagonal with the global maximum so a node's zero
    # self-distance never wins the row-wise minimum below.
    eye_mask = torch.zeros_like(dist_matrix)
    eye_mask[:] = torch.eye(n)
    dist_modif = dist_matrix + max_dist * eye_mask
    min_dist = torch.min(dist_modif, dim=-1).values
    # NOTE(review): torch.mean requires a float dtype — integer matrices
    # will raise here; confirm callers always pass floats.
    min_mean = torch.mean(min_dist, dim=-1)
    if solo:
        min_mean = min_mean[0]
    return min_mean
1eea80f43b062567b58d9938016e6dce6c46e731
49,127
def get_size_and_path(line):
    """From an 'ls -l' line, return column 4 (size, as int) and column 8 (path)."""
    cols = line.split()
    return int(cols[4]), cols[8]
73d1d66b6d759b2e238b4140ca91581c5e303724
49,137
def _as_float(string, default: float = 0.0):
    """Return the first whitespace-delimited token of *string* as a float.

    Falls back to *default* when the stripped string is empty.
    """
    first_token = string.strip().partition(" ")[0]
    return float(first_token or default)
e0d199ab8e71f9e87174f4fb78cdd351ccfc69d0
49,139
def legendre_symbol(a, p):
    """
    Compute the Legendre symbol of *a* modulo the odd prime *p*.

    :param a: number
    :param p: prime number
    :return: 1 if *a* has a square root modulo p, -1 otherwise
        (0 when p divides a).
    """
    euler_criterion = pow(a, (p - 1) // 2, p)
    return -1 if euler_criterion == p - 1 else euler_criterion
8a0238b2a4e89c36b1bd0f6a5a99d656c3a52eab
49,141
def FE_concatenate_multiple_columns(df, cols, filler=" ", drop=True):
    """
    Combine multiple string columns into a single NLP text column named
    'combined', suitable for further TFIDF/BERT-style processing.

    Inputs
    ---------
    df: pandas dataframe
    cols: string columns to concatenate into the combined column
    filler: string (default " ") joined between the values
    drop: default True. When True the source columns are dropped; when
        False they are kept.

    Outputs:
    ----------
    df: a copy of the input with the new ['combined'] column added.
    """
    result = df.copy(deep=True)
    result['combined'] = result[cols].apply(
        lambda row: filler.join(row.values.astype(str)), axis=1
    )
    if drop:
        result = result.drop(cols, axis=1)
    return result
35d77da9562ee40e20d049d45a548e71c454a18b
49,142
def turning(player_data, p, t):
    """Return True when player *p* changed angle between ticks t-1 and t.

    >>> player_data = {}
    >>> player_data['a'] = pd.DataFrame({'tick':[4,5],'angle':[10,10]}, [4,5])
    >>> turning(player_data, 'a', 5)
    False
    >>> turning(player_data, 'a', 6)
    False
    >>> player_data['a'] = pd.DataFrame({'tick':[4,5],'angle':[10,11]}, [4,5])
    >>> turning(player_data, 'a', 5)
    True
    """
    # NOTE(review): `t in player_data[p]['tick']` tests membership in the
    # Series *index*, not its values; it works because the frames are
    # indexed by tick (see doctest) — confirm that invariant for all callers.
    if not (t in player_data[p]['tick']) or not ((t-1) in player_data[p]['tick']):
        return False
    # any angle difference beyond float noise counts as a turn
    if abs(player_data[p].loc[t,'angle'] - player_data[p].loc[t-1,'angle']) > 1e-8:
        return True
    return False
45edcfd85799e4204c08af0fc4910794981f118c
49,145
def get_nlat_nlon(n_nodes, lonlat_ratio):
    """Infer the (n_lat, n_lon) shape of a flattened equiangular grid.

    Parameters
    ----------
    n_nodes : int
        Total number of nodes in the 1D data (n_lat * n_lon).
    lonlat_ratio : int
        lonlat_ratio = n_longitude rings / n_latitude rings.  A ratio of 2
        means the equiangular grid has the same resolution in latitude and
        longitude.
    """
    height = int((n_nodes / lonlat_ratio) ** 0.5)  # latitude rings
    width = int((n_nodes * lonlat_ratio) ** 0.5)   # longitude rings
    if height * width != n_nodes:
        # int() truncation may have broken the factorisation; repair
        # whichever dimension divides n_nodes evenly.
        if n_nodes % height == 0:
            width = n_nodes // height
        if n_nodes % width == 0:
            height = n_nodes // width
    assert height * width == n_nodes, f'Unable to unpack nodes: {n_nodes}, lonlat_ratio: {lonlat_ratio}'
    return height, width
644b11b2439c503840580c634e994014bf3812e0
49,150
def list2str(l):
    """
    Convert a list to the concatenation of its items' string forms.

    :param l: list
    :returns: string
    """
    return ''.join(str(item) for item in l)
94e70d371f4c81c08dbdd7d2a583b9c2e68500a8
49,152
def is_valid_color(color, threshold):
    """
    Return True when the r, g and b components of *color* (r, g, b, a)
    all exceed *threshold*.
    """
    r, g, b = color[0], color[1], color[2]
    return r > threshold and g > threshold and b > threshold
3391779e0977225f7340f50a823610b4dd22876b
49,158
import csv


def readCSV(path2File):
    """
    Return a list of links to pdfs.

    Assumes the first column of the csv file holds the links.
    """
    with open(path2File, newline='') as csvfile:
        return [record[0] for record in csv.reader(csvfile)]
0918fbbaf580cdf4221551b7cac58e4add129c76
49,159
import asyncio


def run_in_loop(future):
    """Run a co-routine to completion on the default event loop.

    Exceptions are printed and then re-raised.
    """
    try:
        loop = asyncio.get_event_loop()
        outcome = loop.run_until_complete(future)
    except Exception as ex:
        print("Exception: {}".format(ex))
        raise
    return outcome
7bc36ae9d406a20bb3d8aaa0bd34356c98251ad3
49,160
def fun(s):
    """Validate an email address of the form username@website.extension.

    Rules:
      * username: letters, digits, dashes and underscores (non-empty)
      * website: letters and digits only (non-empty)
      * extension: letters only, at most 3 characters

    Args:
        s (str): Email address to check

    Returns:
        (bool): Whether email is valid or not
    """
    if s.count("@") != 1 or s.count(".") != 1:
        return False
    user, _, domain = s.partition("@")
    website, dot, extension = domain.partition(".")
    if not dot:
        # the single '.' sits in the username; the old code raised
        # ValueError here on the domain unpack
        return False
    if not user.replace("-", "").replace("_", "").isalnum():
        return False
    if not website.isalnum():
        return False
    # the old code used isalnum() here, wrongly accepting digits in the
    # extension; the spec allows letters only
    return extension.isalpha() and len(extension) <= 3
d5a3e4de4010bf71b646270d901f87b526fb92e9
49,162
import base64


def decode_bytes(s: str) -> bytes:
    """Decode base64-encoded content, skipping the leading character.

    Args:
        s: Encoded binary content; the first character is dropped before
           decoding — presumably a format/tag marker written by the
           matching encoder (TODO confirm).

    Returns:
        Decoded binary data.

    Raises:
        binascii.Error: If the remaining data isn't valid base64.
    """
    payload = s[1:]
    return base64.b64decode(payload)
d8572387012e51d7eb230b3d09a1e6bfdabcaac0
49,165
def ctof(temp_c):
    """Convert a temperature from degrees Celsius to Fahrenheit."""
    scale = 9 / 5
    return temp_c * scale + 32
4357cb6d355d14c11b21bfe0bd4efc5c1e3b4703
49,167
def rename_cols(data):
    """Rename the columns of a DataFrame by capitalizing them."""
    capitalizer = str.capitalize
    return data.rename(columns=capitalizer)
b6aced2882b12feeed5dc3494125b4c8e68d2fc5
49,168
import re


def is_esemble_id(name):
    """
    Return True when *name* looks like a stable Ensembl gene ID.

    Stable IDs have the form ENS[species prefix][feature type prefix]
    [eleven-digit number]; this pattern only covers gene ('G') IDs.
    """
    return bool(re.match("ENS.*G[0-9]{11}", name))
ec96712499d064a19b0aa719e504f3317fc1fcac
49,172
def addouter(C, b, factor=1):
    """Add in place `factor` times the outer product of vector `b`,
    without any dimensional consistency checks.
    """
    n_rows = C.shape[0]
    n_cols = C.shape[1]
    for r in range(n_rows):
        scaled = factor * b[r]
        for c in range(n_cols):
            C[r, c] += scaled * b[c]
    return C
ded8354eb180fe50bd6aab9f03f3128f27889deb
49,176
def product_rule(u, v, du, dv):
    """The product rule of calculus: d/dx (uv) = u dv + v du."""
    return du * v + dv * u
7f5861e2c6d081f51b2949b272b04c4161eac0d2
49,178
def rgb(red, green, blue):
    """Make a tkinter compatible RGB color string ("#rrggbb")."""
    return f"#{red:02x}{green:02x}{blue:02x}"
e33394c24728847d420991d5633a239b8684bdb8
49,182
def transform_dict(d, xformer):
    """
    Transform values of *d* in place using *xformer*, a dict whose keys
    match keys in *d* and whose values are callables applied to the
    corresponding values of *d*.  Keys absent from *d* are ignored.
    """
    for key, transform in xformer.items():
        if key in d:
            d[key] = transform(d[key])
    return d
f38fc198671a8bf706fed6a57ecda07c8ab08321
49,186
def float_else_zero(sstring):
    """Return the string converted to float; zero when conversion fails.

    :param sstring: String to be converted
    :return: ``float(sstring)`` if ``sstring`` can be converted to float
        (e.g. ``"3.14"``), else ``0.0``
    """
    try:
        return float(sstring)
    except ValueError:
        # return a float zero so the return type is consistent
        # (the old code returned the int 0 here)
        return 0.0
6c51c811574664ef2e19e4f6978075c2f92da9e1
49,187
def _local_name(element):
    """Return the XML element's tag with any '{namespace}' prefix stripped."""
    tag = element.tag
    if tag and tag[0] == '{':
        return tag.rpartition('}')[2]
    return tag
0cf7b7d2aa3571679e49a4a5fb18c7e7a4193683
49,188
def build_event_info(info, time):
    """Return a copy of the event info dict with a created_at timestamp added."""
    event_info = dict(info)
    event_info['created_at'] = time
    return event_info
a7faa2d3798d2692310af16e84099d7b86fa84f0
49,192
def aggregate(sequence, func, seed=None):
    """
    Apply an accumulator function over a sequence.

    Args:
        sequence: iterable
            Items to fold.
        func: callable
            Accumulator expecting two arguments (res, next).
        seed: any
            Initial value; when None the first item seeds the fold.

    Returns:
        any: the final accumulated value (seed for an empty sequence).
    """
    accumulated = seed
    needs_seed = seed is None
    for item in sequence:
        if needs_seed:
            accumulated = item
            needs_seed = False
        else:
            accumulated = func(accumulated, item)
    return accumulated
30318589a7073226d547d9131754a44b3d20f0f7
49,193
import re


def sanitize_license_plate(number):
    """Normalize a plate number to uppercase [A-Z0-9]+, dropping dashes/spaces.

    Returns None when anything other than letters and digits remains.
    """
    cleaned = re.sub(r'[ -]', '', number.strip().upper())
    if re.match(r'^[A-Z0-9]+$', cleaned):
        return cleaned
    return None
f2c89d3bfffd9056a74cfcd4e7aef8892a7eb8f9
49,197
def with_last_multiplicity(template_layers_config, multiplicity):
    """
    Change the multiplicity of the last layer in a layered micro-service
    application, dividing its average work by *multiplicity* so the average
    service time for a user request stays constant.

    Returns a new list; the input list and its elements are not mutated.
    """
    layers = list(template_layers_config)
    last = layers[-1]
    layers[-1] = last._replace(
        average_work=last.average_work / multiplicity,
        multiplicity=multiplicity,
    )
    return layers
ab27b4dd2f6d329cbe3275310ceb806dc7c95a47
49,199
def greater_than_previous_counter(iterable: list) -> int:
    """
    Count positions where element i+1 is greater than element i
    (elements are compared as ints).
    """
    return sum(
        1 for previous, current in zip(iterable, iterable[1:])
        if int(current) > int(previous)
    )
f5f34dbdf0779c4e1a695a9d31431a3513ace40c
49,202
import struct


def bytes_to_fp32(bytes_data, is_big_endian=False):
    """
    Unpack 4 bytes as an IEEE-754 float32.

    :param bytes_data: bytes
    :param is_big_endian: is big endian or not, default is False.
    :return: fp32
    """
    fmt = '>f' if is_big_endian else '<f'
    (value,) = struct.unpack(fmt, bytes_data)
    return value
a2b22a6bedcc060b63d160482bdadbe042fbdda2
49,206
def load_attribute_from_file(file_path: str):
    """
    Load attributes from a file of whitespace-separated floats.

    :param file_path: path to file
    :return: list of attribute lists, one per line
    """
    with open(file_path) as handle:
        return [[float(token) for token in line.split()] for line in handle]
cc98cf28ab41c6c06a358fb833dda071aaa7688f
49,207
def mph(mps):
    """Convert a speed from meters per second to miles per hour."""
    meters_per_mile = 0.0254 * 12.0 * 5280.0  # inch -> foot -> mile
    return mps * (3600.0 / meters_per_mile)
93ab7c27e55b16587cfa1315c77a453c5487889d
49,208
import zlib


def adler32(filepath, blocksize=2**20):
    """
    Return the Adler-32 checksum of a file as an 8-digit lowercase hex string.

    `blocksize` adjusts how much of the file is read into memory at a time.
    This is useful for large files.
        2**20 = 1024 * 1024 = 1 MiB
        2**12 = 4 * 1024    = 4 KiB
    """
    csum = 1  # Adler-32 seed value
    with open(filepath, 'rb') as afile:
        buf = afile.read(blocksize)
        while buf:
            csum = zlib.adler32(buf, csum)
            buf = afile.read(blocksize)
    # From the documentation:
    # > Changed in version 3.0: Always returns an unsigned value.
    # > To generate the same numeric value across all Python versions and
    # > platforms, use crc32(data) & 0xffffffff.
    csum &= 0xffffffff
    # '08x' zero-pads to 8 hex digits; replaces the old string-slicing hack
    return format(csum, '08x')
9e299e752664dc71a29a6393331751d49327ecc1
49,211
def make_batches(data, bsz):
    """Split *data* into consecutive batches of at most *bsz* examples."""
    starts = range(0, len(data), bsz)
    return [data[start:start + bsz] for start in starts]
cbbc388ba58074a22a4b94ec94c81ac303443054
49,215
import re


def api_result_to_release_format(api_df, id_lookup_dict=None, verbose=False):
    """
    Reindex a PyCAP API result to an NCANDA release format.

    REDCap API, when used with PyCAP, returns results as a DataFrame indexed
    by NCANDA ID (study_id - X-00000-Y-0) and combined event + arm
    (redcap_event_name).  Release files are instead indexed by XNAT ID
    (NCANDA_S0?????; mri_xnat_id in Redcap).  This function will:

    1. Convert Redcap IDs to NCANDA SIDs using id_lookup_dict (as generated
       by `get_id_lookup_from_demographics_file`) or the `mri_xnat_sid`
       column (if present in api_df),
    2. Drop Redcap IDs that cannot be converted that way,
    3. Separate event and arm into individual columns with
       release-compatible names,
    4. Return a DataFrame indexed by release primary keys
       (subject, arm, visit).

    :raises IndexError: when neither id_lookup_dict nor an mri_xnat_sid
        column is available.
    """
    df = api_df.copy(deep=True)
    # move the (study_id, redcap_event_name) index into regular columns
    df.reset_index(inplace=True)
    if id_lookup_dict:
        df['subject'] = df['study_id'].map(id_lookup_dict)
    elif 'mri_xnat_sid' in df.columns:
        df['subject'] = df['mri_xnat_sid']
    else:
        raise IndexError("You must supply id_lookup_dict, or api_df has to "
                         "have the mri_xnat_sid column")
    # rows whose study_id had no NCANDA SID mapping are dropped below
    nan_idx = df['subject'].isnull()
    if verbose:
        study_id_nans = df.loc[nan_idx, 'study_id'].tolist()
        print ("Dropping study IDs without corresponding NCANDA SID: "
               + ", ".join(study_id_nans))
    df = df[~nan_idx]
    # split "event_arm_N" into separate visit and arm columns
    df[['visit', 'arm']] = (df['redcap_event_name']
                            .str.extract(r'^(\w+)_(arm_\d+)$'))

    def clean_up_event_string(event):
        """
        Convert a Redcap event name to an NCANDA release visit name when
        possible; otherwise return the original string.  Intended to be
        passed to pd.Series.map.
        """
        # NOTE: Only accounts for full Arm 1 events
        match = re.search(r'^(baseline|\dy)', event)
        if not match:
            return event
        elif re.match('^\d', match.group(1)):
            return "followup_" + match.group(1)
        else:
            return match.group(1)

    df['visit'] = df['visit'].map(clean_up_event_string)

    def clean_up_arm_string(arm):
        """
        Convert a Redcap arm name to an NCANDA release arm name when
        possible; otherwise return the original string.  Intended to be
        passed to pd.Series.map.
        """
        arm_dict = {'arm_1': 'standard',
                    'arm_2': 'recovery',
                    'arm_3': 'sleep',
                    'arm_4': 'maltreated'}
        if arm not in arm_dict:
            return arm
        else:
            return arm_dict[arm]

    df['arm'] = df['arm'].map(clean_up_arm_string)
    return df.set_index(['subject', 'arm', 'visit'])
aacdc5c177ff7026dea8bc399c67bd78287c7e4f
49,219
from typing import List
from typing import Tuple


def swap_rows(matrix: List[List[float]], rows: Tuple[int, int]) -> List[List[float]]:
    """
    Mutate *matrix* by swapping the two rows named in *rows*, and return it.

    Preconditions:
    - len(matrix) > 0
    - all(len(row) > 0 for row in matrix)
    - every row has the same length as the first
    - all([0 <= i < len(matrix) for i in rows])
    """
    first, second = rows
    matrix[first], matrix[second] = matrix[second], matrix[first]
    return matrix
666df5872d468086dc97614dfc39007e182cd7bc
49,220
def flatten_requirement(requirement):
    """
    Return only the package name from a requirement.

    Arguments:
        requirement (pkg_resources.Requirement): A requirement object.

    Returns:
        string: Package name (the ``key`` attribute).
    """
    package_name = requirement.key
    return package_name
9a2e493a97763417aef41d18405c609e65e28875
49,224
from typing import Callable
from datetime import datetime


def parse_time(string: str, parser: Callable = datetime.strptime) -> datetime:
    """
    Parse a timestamp with or without fractional seconds.

    :param string: date and time as a string
    :param parser: function converting (string, format) to datetime
    :return: datetime.datetime
    :raises ValueError: when no known format matches
    """
    known_formats = ["%Y-%m-%dT%H:%M:%S.%fZ", '%Y-%m-%dT%H:%M:%SZ']
    for fmt in known_formats:
        try:
            return parser(string, fmt)
        except ValueError:
            continue
    raise ValueError('Invalid time format in string %s' % string)
c8a937842cf8878a3442a53ae8fd5dc780404daf
49,225
import math


def betalambda(mass, freq, w):
    """Return the value of beta*lambda for a beam particle.

    Arguments:
        mass(double): mc^2 of beam particle in MeV
        freq(double): frequency in MHz
        w(double): Kinetic energy in MeV
    """
    speed_of_light = 2.99792458e8  # m/s
    wavelength = speed_of_light / (freq * 1.0e6)
    gamma = 1.0 + w / mass
    beta = math.sqrt(1.0 - 1 / (gamma * gamma))
    return beta * wavelength
75ac561912822bb28c6fbbefbf7a6ea930ad291f
49,227
def edits0(word):
    """
    Return the set of strings zero edits away from *word* (the word itself).
    """
    zero_edit_words = {word}
    return zero_edit_words
06836ba7da2a02eb7d9e9d1a2d12d74fc10f95e8
49,235
def getTokenToTokenPrice(orfeed_i, tokenSrc, tokenDst, dex, amount_src_token=1):
    """Get the rate for swapping tokenSrc to tokenDst on a given DEX.

    Args:
        orfeed_i (OrFeed): The instance of the OrFeed class
        tokenSrc (Symbol): Symbol of the source token
        tokenDst (Symbol): Symbol of the destination token
        dex (str): The DEX where the rate is requested
        amount_src_token (int, optional): Amount of source token.
            Defaults to 1 source-token unit.

    Returns:
        Dict: all relevant info about the request, including the price
    """
    price = orfeed_i.getExchangeRate(tokenSrc, tokenDst, dex, amount_src_token)
    pair = tokenSrc + "-" + tokenDst
    return {
        "tokenSrc": tokenSrc,
        "tokenDst": tokenDst,
        "tokenPair": pair,
        "provider": dex,
        "price": price,
    }
1a0299f03a1e002f5c2a3a613a54443cd6ac4f07
49,243
def _replace_comments(s):
    """Replace MATLAB comment markers ('%') with Python ones ('#') in *s*."""
    return s.replace('%', '#')
0420a4cc2f54fea3e2b3e35e185758cd826380c7
49,245
def get_indels_regions(read):
    """Get indel region start and end positions of a read.

    Returns the gaps between the read's aligned blocks, plus the (possibly
    empty) flanks between the reference start/end and the first/last block,
    as a list of (start, end) tuples.
    """
    blocks = read.get_blocks()
    gaps = [(read.reference_start, blocks[0][0])]
    # Each gap spans from the end of one aligned block to the start of the next.
    for (_, left_end), (right_start, _) in zip(blocks, blocks[1:]):
        gaps.append((left_end, right_start))
    gaps.append((blocks[-1][1], read.reference_end))
    return gaps
342c48d936d7c4264d087904eba0023dd4f0610d
49,246
def handle400error(ns, message):
    """Abort the current request via *ns* with HTTP 400 (bad arguments).

    The message is exposed both as ``status`` and as a string ``statusCode``.
    """
    status_code = "400"
    return ns.abort(400, status=message, statusCode=status_code)
f7a0ae35337b38dfb49e6f197c8e67ab4508ead4
49,247
def group(merge_func, tokens):
    """Concatenate adjacent tokens for which the merge function returns true.

    ``merge_func`` accepts two tokens and returns a boolean indicating whether
    they should be merged. Helper for tokenise(string, ..).
    """
    if not tokens:
        return []
    grouped = [tokens[0]]
    for tok in tokens[1:]:
        if merge_func(grouped[-1], tok):
            # Merge into the most recent output token.
            grouped[-1] += tok
        else:
            grouped.append(tok)
    return grouped
4912e4a20b2313b34617cd9dd23bd33fe5b9e4bc
49,257
def assign_scores(game):
    """Cleanup games without linescores to include team score based on team totals."""
    for side in game['team'].values():
        needs_score = 'score' not in side
        if needs_score and 'totals' in side:
            side['score'] = side['totals']['source__B_R']
    return game
87eb0d1d4453ab5734a7ce7c10d8d8828fc34071
49,259
def intersect(a, b):
    """Identifies the elements in A that are in B."""
    shared = set(a)
    shared.intersection_update(b)
    return list(shared)
3f704d653ed5f2b552a55f3eef95db7569718037
49,268
def getText(targetFile):
    """Return a string containing the contents of the target file"""
    with open(targetFile, mode="rt", encoding="utf-8") as handle:
        return handle.read()
90e70c5a7fc1796b0fd28fc4f52d602cf1c89b27
49,274
def get_centre(bounds):
    """Get the centre of the object from the bounding box.

    *bounds* is assumed to be (xmin, xmax, ymin, ymax, zmin, zmax); the three
    midpoints are returned.
    NOTE(review): the invalid-input case returns six ``None`` values although
    the success case returns three — kept for caller compatibility; confirm.
    """
    if len(bounds) != 6:
        return [None] * 6
    # hi - (hi - lo) / 2 is the midpoint (lo + hi) / 2 of each axis pair.
    return [bounds[hi] - (bounds[hi] - bounds[hi - 1]) / 2.0
            for hi in range(1, len(bounds), 2)]
ffa873400eb35957c08da523abcc1a0fea3b664d
49,275
def delay_from_foffsets(df, dfd, dfdd, times):
    """Return the delays in phase caused by offsets in frequency (df),
    and two frequency derivatives (dfd, dfdd) at the given times in seconds.
    """
    # Taylor expansion of phase: df*t + dfd*t^2/2 + dfdd*t^3/6.
    return df * times + dfd * times ** 2 / 2.0 + dfdd * times ** 3 / 6.0
1977b71f5bb5cafce55da7435141c6ff0db29e67
49,277
def get_pulsar_producer_stage(pipeline_builder, topic):
    """Create and return a Pulsar Producer origin stage depending on execution mode for the pipeline."""
    producer = pipeline_builder.add_stage('Pulsar Producer', type='destination')
    producer.set_attributes(data_format='TEXT',
                            text_field_path='/text',
                            topic=topic)
    return producer
7e2aa226445dfa979cfdab043b67fbf32aedadd1
49,278
def incrementAtIndex(valueList, index, max):
    """Increment valueList[index] in place if it is below *max*.

    Returns True if the value incremented.
    """
    # NOTE: parameter name shadows the builtin max(); kept for caller
    # compatibility (it is part of the public signature).
    before = valueList[index]
    if valueList[index] < max:
        valueList[index] += 1
    return valueList[index] != before
b835fe8e7078e5ea245f7115c639084412e928d5
49,282
def hack_ncbi_fasta_name(pipe_name):
    """Turn 'gi|445210138|gb|CP003959.1|' into 'CP003959.1' etc.

    For use with NCBI provided FASTA and GenBank files to ensure contig
    names match up. Or Prokka's *.fna and *.gbk files, turning
    'gnl|Prokka|contig000001' into 'contig000001'
    """
    # NCBI style: gi|<gi>|<db>|<accession>|  -> take the accession field.
    if pipe_name.startswith("gi|") and pipe_name.endswith("|"):
        return pipe_name.split("|")[3]
    # Prokka style: gnl|<project>|<contig>   -> take the contig field.
    if pipe_name.startswith("gnl|") and pipe_name.count("|") == 2:
        return pipe_name.split("|")[2]
    return pipe_name
3b384b101a63fc2babd2b7b26b603565d71a8496
49,283
def photo_pull(req, id_num, img_name):
    """Creates path to image based on name and redMapper id number.

    Args:
        req: the http request (unused; kept for the view signature)
        id_num: the redmapperID of the image galaxy
        img_name: the name of the desired image

    Returns:
        Path to desired image
    """
    return f"static/data/{id_num}/{id_num}-{img_name}"
0c35ea26385b408dbcf40445ec161c16f8cf9f69
49,288
def read_data(fname, ignore_docstart=False):
    """Read data from any files with fixed format.

    Each line of the file is space-separated token information, starting with
    the token itself; sentences are separated by an empty line.
    e.g. 'Apple NP (NP I-ORG' could be one line.

    Args:
        fname (str): file path for reading data.
        ignore_docstart (bool): drop single-line sentences (e.g. -DOCSTART-).

    Returns:
        sentences (list): list of sentences; each sentence is a list of token
        information in format [token, feature_1, ..., feature_n, tag_label].
    """
    sentences = []
    current = []
    with open(fname) as handle:
        for raw in handle:
            if not raw.strip():
                # Sentence boundary: flush the current sentence unless we're
                # skipping docstart-only sentences (exactly one token line).
                if current and (not ignore_docstart or len(current) > 1):
                    sentences.append(current)
                current = []
            else:
                current.append(raw.strip().split())
    # Flush a trailing sentence that was not followed by a blank line.
    if current:
        sentences.append(current)
    return sentences
71be54b8b9e3a762f13ce9ba10e1f7a885032e2e
49,290
def seqs_dic_count_chars(seqs_dic):
    """Count how many times each character appears across all sequences.

    >>> seqs_dic = {'s1': 'ABCC', 's2': 'ABCD'}
    >>> seqs_dic_count_chars(seqs_dic)
    {'A': 2, 'B': 2, 'C': 3, 'D': 1}
    """
    assert seqs_dic, "given seqs_dic empty"
    counts = {}
    for sequence in seqs_dic.values():
        for ch in sequence:
            counts[ch] = counts.get(ch, 0) + 1
    assert counts, "cc_dic empty"
    return counts
aafd059c4c100d755b3e3e10e99b4449eb9e360d
49,291
import pathlib


def unique_directories(files):
    """Returns a list of directories (pathlib.Path objects) for the files
    passed without repetitions."""
    parents = {pathlib.Path(name).parent for name in files}
    return list(parents)
326ed8b251b21fb36f03c5ddc7edd0b18918e868
49,297
from typing import Any
from typing import Callable


def noop_decorator(*args: Any, **kwargs: Any) -> Any:
    """Return function decorated with no-op; invokable with or without args.

    >>> @noop_decorator
    ... def func1(x): return x * 10

    >>> @noop_decorator()
    ... def func2(x): return x * 10

    >>> @noop_decorator(2, 3)
    ... def func3(x): return x * 10

    >>> @noop_decorator(keyword=True)
    ... def func4(x): return x * 10

    >>> check_eq(func1(1) + func2(1) + func3(1) + func4(1), 40)
    """
    applied_directly = len(args) == 1 and callable(args[0]) and not kwargs
    if not applied_directly:
        # Invoked with decorator arguments: ignore them and return ourselves
        # so the subsequent call receives the function.
        return noop_decorator
    target: Callable[[Any], Any] = args[0]
    return target
16333bd7c21885ff894efc2a8fa6008bbbbc1b43
49,301
from typing import Mapping
from typing import List
from typing import Any


def transpose_dict_of_lists(dict_of_lists: Mapping[str, list], keys: List[str]) \
        -> List[Mapping[str, Any]]:
    """Takes a dict of lists, and turns it into a list of dicts."""
    # The first key determines the row count; all lists are assumed equal
    # length — TODO confirm with callers.
    length = len(dict_of_lists[keys[0]])
    rows = []
    for row_index in range(length):
        rows.append({key: dict_of_lists[key][row_index] for key in keys})
    return rows
b51323fcc31aa41ee8a5c333ad36e5d2fdae2b85
49,303
import math


def gauss_legendre_1(max_step):
    """Float number implementation of the Gauss-Legendre algorithm, for
    max_step steps. Returns an approximation of pi."""
    a, b = 1.0, 1.0 / math.sqrt(2)
    t, p = 0.25, 1.0
    for _ in range(max_step):
        a_next = (a + b) / 2.0
        b_next = math.sqrt(a * b)
        t_next = t - p * (a - a_next) ** 2
        p_next = 2.0 * p
        a, b, t, p = a_next, b_next, t_next, p_next
    return ((a + b) ** 2) / (4.0 * t)
c48fb524c7c5b3aeb14fe247deaa4c329bc2b405
49,304
def parse_clan_file(clan_list):
    """Parse a list of Rfam clan accessions.

    clan_list: A plain .txt file containing a list of Rfam Clan Accessions,
        one per line.

    return: A list of clan accessions (blank lines are skipped).
    """
    # 'with' guarantees the handle is closed even if reading raises;
    # filtering falsy strings drops blank/trailing lines that the previous
    # version returned as '' entries.
    with open(clan_list, 'r') as fp:
        return [line.strip() for line in fp if line.strip()]
c5869235750902876f10408e73bbf675316d130c
49,312
def c2f(celsius):
    """Convert Celcius to Farenheit"""
    return celsius * 9.0 / 5.0 + 32
bbe693bf2fa529a3b50793796c2b7b89edb683a6
49,314
from pathlib import Path


def get_plot_folder(folder_path: str) -> Path:
    """Ensure a folder for plots exists, creating parent directories if necessary.

    :param folder_path: path of the folder to create/return.
    :return: the folder as a ``Path``.
    """
    folder = Path(folder_path)
    # exist_ok=True avoids the check-then-create race of the previous
    # exists()/mkdir() pair (FileExistsError if another process creates the
    # directory between the check and the mkdir).
    folder.mkdir(parents=True, exist_ok=True)
    return folder
dd95e7089a377f94593e0d4ead360f14a9268a7a
49,315
def source_location_to_tuple(locpb):
    """Converts a SourceLocation proto into a tuple of primitive types.

    Returns None for a None input or when all three fields are empty/zero.
    """
    if locpb is None:
        return None
    fname, lineno, func = locpb.file(), locpb.line(), locpb.function_name()
    if not (fname or lineno or func):
        return None
    return fname, lineno, func
cac9f13bcdccab65eeaed94955e3fc5f48193f4b
49,316
def _data_types_from_dsp_mask(words): """ Return a list of the data types from the words in the data_type mask. """ data_types = [] for i, word in enumerate(words): data_types += [j + (i * 32) for j in range(32) if word >> j & 1] return data_types
a0c10a96ce8d6ca0af3156ee147de8571f605447
49,322
def get_expected_number_of_faces(off_file):
    """Finds the expected number of faces in an OFF file.

    Used to check this matches the number of items in a pixel mapping list.

    :param off_file: The OFF file contents as a string.
    :return: The number of faces, or None if no counts line is found.
    """
    # Skip the "OFF" header line, then find the first line that is neither
    # blank nor a comment: "<n_vertices> <n_faces> <n_edges>".
    for line in off_file.split("\n")[1:]:
        stripped = line.strip()
        if not stripped:
            # The previous version raised IndexError on blank lines
            # (line[0] on ""); skip them instead.
            continue
        if not stripped.startswith("#"):
            return int(stripped.split()[1])
    return None
8486fa165f43d924c6dd17b2670d75f8091256d1
49,323