Columns: content (string, lengths 39 to 9.28k), sha1 (string, length 40), id (int64, 8 to 710k)
def circular_ll(l):
    """
    Given a circular linked list, return the beginning of the circular loop.
    Returns False if the list is linear.

    This is a toughie! Observations:
    1. We use two pointers, one moving twice as fast as the other. If the two
       meet again, we have a loop.
    2. If they both started from the same place, they will meet at the start
       (the fast ptr will have made two laps by the time the slow ptr makes one!).
    3. If they didn't start at the same place, but say the fast ptr started
       k steps ahead, then they will meet k steps before the start of the loop.

    So we just run these two pointers; the place where they meet is k steps
    before the start of the loop. Also, this k is the head start the fast ptr
    got before the slow ptr entered the loop, so the head is k steps away from
    the start of the loop. Reset the slow ptr back to head and leave the fast
    ptr at the meeting place. Since both these starting points are k steps away
    from the loop start, by running both pointers AT THE SAME PACE, we are
    guaranteed that they will meet again at the loop start.
    """
    fast = l.head
    slow = l.head

    # Run once to make them meet
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
        # Compare node identity, not data: distinct nodes may hold equal data.
        if fast is slow:
            break

    # Handle the case of no circularity (fast may itself be None here)
    if not fast or not fast.next:
        return False

    # Set slow back to head; advance both pointers at the same pace
    slow = l.head
    while slow is not fast:
        slow = slow.next
        fast = fast.next
    return fast.data
78845c5faa7cb16fee678e519bc3dbfe378f1978
469,374
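A minimal usage sketch for circular_ll above; the Node and LinkedList classes are hypothetical stand-ins, assuming only that the list object exposes .head and nodes expose .next and .data.

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

class LinkedList:
    def __init__(self, head):
        self.head = head

# Build 1 -> 2 -> 3 -> 4 -> back to 2 (the loop starts at the node holding 2)
a, b, c, d = Node(1), Node(2), Node(3), Node(4)
a.next, b.next, c.next, d.next = b, c, d, b
print(circular_ll(LinkedList(a)))  # 2 (data at the loop start)

e = Node(1)                        # a single linear node
print(circular_ll(LinkedList(e)))  # False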
def make_graph(chrom, start, end, flank=150):
    """
    Make a long deletion graph

    :param chrom: chromosome name
    :param start: start coordinate (first deleted base)
    :param end: end coordinate (last deleted base)
    :param flank: flank length
    :return: paragraph dict
    """
    assert end - start + 1 >= 2 * flank
    target_region_l = "%s:%i-%i" % (chrom, max(1, start - flank - 1), start + flank + 1)
    target_region_r = "%s:%i-%i" % (chrom, max(1, end - flank - 1), end + flank + 1)
    lf_pos = "%s:%i-%i" % (chrom, max(1, start - flank - 1), max(1, start - 1))
    mid_l_pos = "%s:%i-%i" % (chrom, start, start + flank - 1)
    mid_r_pos = "%s:%i-%i" % (chrom, max(1, end - flank), max(1, end - 1))
    rf_pos = "%s:%i-%i" % (chrom, end + 1, end + flank + 1)
    graph = {
        "sequencenames": ["REF", "DEL"],
        "target_regions": [target_region_l, target_region_r],
        "nodes": [
            {"name": "source", "sequence": "NNNNN"},
            {"name": "LF", "reference": lf_pos},
            {"name": "MID_L", "reference": mid_l_pos},
            {"name": "MID_R", "reference": mid_r_pos},
            {"name": "RF", "reference": rf_pos},
            {"name": "sink", "sequence": "NNNNN"},
        ],
        "edges": [
            {"from": "source", "to": "LF"},
            {"from": "source", "to": "MID_R"},
            {"from": "LF", "to": "RF", "sequences": ["DEL"]},
            {"from": "LF", "to": "MID_L", "sequences": ["REF"]},
            {"from": "MID_R", "to": "RF", "sequences": ["REF"]},
            {"from": "MID_R", "to": "sink"},
            {"from": "RF", "to": "sink"},
        ],
        "paths": [
            {"nodes": ["LF", "MID_L"], "path_id": "REF|1", "sequence": "REF",
             "nucleotide_length": 2 * flank},
            {"nodes": ["MID_R", "RF"], "path_id": "REF|2", "sequence": "REF",
             "nucleotide_length": 2 * flank},
            {"nodes": ["LF", "RF"], "path_id": "DEL|1", "sequence": "DEL",
             "nucleotide_length": 2 * flank},
        ],
    }
    return graph
0d0b3c84f057f945b6c79993619b0a6de9053824
93,266
def normalize(df, col_name, replace=True):
    """Normalize a numeric column in a DataFrame.

    The normalization is done with the max-min equation:

        z = (x - min(x)) / (max(x) - min(x))

    replace -- set to False to return a new DataFrame instead of editing
    this one in place.
    """
    col = df[col_name]
    norm_col = (col - col.min()) / (col.max() - col.min())
    if replace:
        df[col_name] = norm_col
        return df
    else:
        norm_df = df.copy()
        norm_df[col_name] = norm_col
        return norm_df
e9e458977677f09ce53f08a8e83b004ce0c043e4
682,802
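A quick usage sketch for normalize above, assuming pandas is installed; the frame contents are illustrative.

import pandas as pd

df = pd.DataFrame({"score": [0.0, 5.0, 10.0]})
out = normalize(df, "score", replace=False)
print(out["score"].tolist())  # [0.0, 0.5, 1.0]
print(df["score"].tolist())   # original left untouched: [0.0, 5.0, 10.0]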
def colorize(value: str, is_warning: bool) -> str:
    """
    Utility to set a color for the output string when it exceeds the threshold.

    Args:
        value: String to be output.
        is_warning: Whether it exceeds the threshold.

    Returns:
        colorized string output
    """
    if is_warning:
        return termcolors.make_style(  # type: ignore
            fg=get_config()["PRINT_THRESHOLDS"]["COLOR"]
        )(value)
    return value
4150f4c333f16694e2ddfd8c5a478ab8518d4b39
539,934
def find_standard_output_styles(labels):
    """
    Function to find some standardised colours for the outputs we'll typically
    be reporting on - here only incidence and prevalence, but can easily be
    extended to accommodate more.

    Args:
        labels: List containing strings for the outputs that colours are needed for.

    Returns:
        yaxis_label: Unit of measurement for outcome
        title: Title for subplot
    """
    yaxis_label = {}
    title = {}
    if 'incidence' in labels:
        yaxis_label['incidence'] = 'Per 100,000 per year'
        title['incidence'] = 'Incidence'
    if 'prevalence' in labels:
        yaxis_label['prevalence'] = 'Per 100,000'
        title['prevalence'] = 'Prevalence'
    return yaxis_label, title
b1e0efa2f444cb613456f86e96113650f67407d3
301,149
def ms(x):
    """Convert seconds to milliseconds"""
    return 1000 * x
4191154d1d72731d66f63bea2b689e41982cff02
175,806
def dot_to_underscore(string):
    """Replace every dot with an underscore in a string."""
    return string.replace('.', '_')
a04237a33331077260ec40fd59086e1f9c463738
239,026
import networkx as nx


def create_networkx_undirected_graph(net, unique_source, unique_sink):
    """
    Create a NetworkX undirected graph from a Petri net, returning also
    correspondences for the unique source and the unique sink places that
    were discovered

    Parameters
    -------------
    net
        Petri net
    unique_source
        Unique source place
    unique_sink
        Unique sink place

    Returns
    -------------
    graph
        NetworkX graph
    unique_source_corr
        Correspondence in the NetworkX graph of the unique source place
    unique_sink_corr
        Correspondence in the NetworkX graph of the unique sink place
    inv_dictionary
        Correspondence between NetworkX nodes and Petri net entities
    """
    graph = nx.Graph()
    dictionary = {}
    inv_dictionary = {}
    for place in net.places:
        value = len(dictionary)
        dictionary[place] = value
        inv_dictionary[value] = place
        graph.add_node(dictionary[place])
    for transition in net.transitions:
        value = len(dictionary)
        dictionary[transition] = value
        inv_dictionary[value] = transition
        graph.add_node(dictionary[transition])
    for arc in net.arcs:
        graph.add_edge(dictionary[arc.source], dictionary[arc.target])
    unique_source_corr = dictionary[unique_source] if unique_source in dictionary else None
    unique_sink_corr = dictionary[unique_sink] if unique_sink in dictionary else None
    return graph, unique_source_corr, unique_sink_corr, inv_dictionary
b047bf19c2e49eb5ca4aa4f06568e311ae2a6a6b
92,208
def _get_cve_id(full_cve):
    """Return the CVE id from the full CVE JSON dictionary.

    Args:
        full_cve (dict): Full CVE data as a JSON dictionary from API call.

    Returns (str): CVE id (e.g. CVE-2021-26068).
    """
    try:
        return full_cve['id']
    except KeyError:
        return None
70b96bc83f7185ebf2aa1f20901de5bb271ffdf2
232,757
def get_distance(particle):
    """Gets the Manhattan distance"""
    return abs(particle[0][0]) + abs(particle[0][1]) + abs(particle[0][2])
cc939a283b4623d4450ca4fb446b544bce014475
339,050
def safe_pathname(filename: str) -> str:
    """Generate a safe pathname out of the string passed"""
    return "".join(
        [c for c in filename if c.isalpha() or c.isdigit() or c == " "]
    ).rstrip()
5d59b4a2dfc46f0318968755b1a8557417fa57dd
419,750
def get_route_table_output_interface(device, table, route):
    """Get route table output interface

    Args:
        device (obj): Device object
        table (str): Table name
        route (str): Route IP address

    Returns:
        output_interface (str)
    """
    # Example parsed dictionary:
    # {'table_name': {'inet.3': {'active_route_count': 5001,
    #                            'destination_count': 5001,
    #                            'hidden_route_count': 0,
    #                            'holddown_route_count': 0,
    #                            'routes': {'200.0.0.0/32': {'active_tag': '*',
    #                                                        'age': '00:01:29',
    #                                                        'metric': '1',
    #                                                        'next_hop': {'next_hop_list': {1: {'best_route': '>',
    #                                                                                           'mpls_label': 'Push 574',
    #                                                                                           'to': '106.187.14.121',
    #                                                                                           'via': 'ge-0/0/1.0'}}},
    #                                                        'preference': '9',
    #                                                        'protocol_name': 'LDP'}},
    #                            'total_route_count': 5001}}}
    out = device.parse('show route table {table} {route}'.format(
        table=table, route=route))
    output_interface = out.q.contains(
        '.*{route}.*'.format(route=route), regex=True).get_values('via', 0)
    if not output_interface:
        return None
    return output_interface
929eb0f81bb631398affd9f79c921649efdf6723
202,329
def isValid(s):
    """Determine if the given string is valid according to Sherlock

    Parameters
    ----------
    s : str
        String

    Return
    ------
    response : str
        'YES' if it is a valid string else 'NO'
    """
    chars = list(s)
    freq = {}
    occ = {}
    for c in chars:
        freq[c] = freq.get(c, 0) + 1
    for v in freq.values():
        occ[v] = occ.get(v, 0) + 1
    if len(occ) > 2:
        return 'NO'
    if len(occ) == 2:
        occ = dict(sorted(occ.items(), key=lambda x: x[1]))
        val1 = list(occ.values())[0]
        key1 = list(occ.keys())[0] - 1
        key2 = list(occ.keys())[1]
        if not (val1 == 1 and (key1 == key2 or key1 == 0)):
            return 'NO'
    return 'YES'
46f4618f71e4394e995822bd9ffa2f4ff8fcfb8e
541,785
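A few illustrative calls for isValid above; the expected outputs were traced by hand through the function.

print(isValid('abc'))        # 'YES' - every character appears once
print(isValid('aabbccddd'))  # 'YES' - dropping one 'd' equalises all counts
print(isValid('aabbcd'))     # 'NO'  - no single removal can fix the counts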
def parse_typedefs_file(typedefsFile):
    """
    Parses the typedefs file containing the typedefs defined in the input
    ontology file. These typedefs will be considered the valid relationships
    that terms may use.
    """
    typedefs = []
    typedefsFh = open(typedefsFile)
    for typedef in typedefsFh:
        typedefs.append(typedef.strip())
    typedefsFh.close()
    return typedefs
6adc1971e77bf012af027b72a14dca182cff3bde
180,293
def class_name_to_function_name(name):
    """Convert a python class name in CamelCase to a lower case
    function_name with underscores."""
    function_name = ""
    for char_idx, char in enumerate(name):
        if char == char.upper() and char_idx > 0 and name[char_idx - 1] == name[char_idx - 1].lower():
            function_name += "_"
        function_name += char.lower()
    return function_name
64bc6303053821c4778a6d0f9205dc735e1fc334
626,115
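Two hand-traced calls for class_name_to_function_name above; note that an underscore is only inserted when an uppercase letter follows a lowercase one, so runs of capitals stay joined.

print(class_name_to_function_name("CamelCase"))   # camel_case
print(class_name_to_function_name("HTTPServer"))  # httpserver - the capital run is not split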
def trans_color(color, alpha):
    """
    Makes solid color semi-transparent.

    Returns:
        (int, int, int, int)
            Red, green, blue and alpha channels.
    """
    if color[3] != 1.:
        return color
    return color[0], color[1], color[2], alpha
0c31de6bf1f939f303f3ddde133b1f0cf0acbda7
290,253
import functools
import warnings


def silence_warnings(func):
    """Function decorator that silences/ignores all Python warnings in the
    wrapped function.

    Example:
        >>> @silence_warnings
        >>> def foo():
        >>>     warnings.warn("this will not appear")
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            return func(*args, **kwargs)
    return inner
f803991c452a14b4c47e1efe25b2d0f4f86d7f2b
333,559
def cprint(*args, color=33, **kwargs):
    """Print with pretty colors, so it's easy to find."""
    start_escape = '\x1b[{}m'.format(color)
    args = [start_escape] + list(args) + ['\x1b[0m']
    return print(*args, **kwargs)
27b46a022af67315313bfecddb9ebb83b63f6aca
391,212
def read_queries(file, interactive=False):
    """Return list of reference and generated queries"""
    query_list = []
    # The with-statement closes the file; no explicit close() is needed.
    with open(file, 'r') as src:
        for line in src.readlines():
            if interactive is True:
                reference, gen = line.strip('\n').split(",")
            else:
                # output files by fairseq-generate contain an ID code as
                # first element, which can be omitted
                _, reference, gen = line.strip('\n').split(",")
            query_list.append([reference, gen])
    return query_list
4efe991fa364ede10da23a141ed988df6134e7c9
673,233
import types
import traceback


def str_traceback(error, tb):
    """Returns a string representation of the traceback."""
    if not isinstance(tb, types.TracebackType):
        return tb
    return ''.join(traceback.format_exception(error.__class__, error, tb))
41f14d4af2c8d0d7a43ee55f5008e4dec7fd1a97
296,397
def cone(toplexes, subcomplex, coneVertex="*"):
    """Construct the cone over a subcomplex. The cone vertex can be renamed
    if desired. The resulting complex is homotopy equivalent to the quotient
    by the subcomplex."""
    return toplexes + [spx + [coneVertex] for spx in subcomplex]
6b1328a2f7c32988666b0c7039efd0ce6ecdffef
7,937
def sanitize_branch_name(branch_name: str) -> str:
    """
    Replace potentially problematic characters in the provided string,
    a branch name.

    E.g. copr says: Name must contain only letters, digits, underscores,
    dashes and dots.
    """
    # https://stackoverflow.com/questions/3411771/best-way-to-replace-multiple-characters-in-a-string
    offenders = "!@#$%^&*()+={[}]|\\'\":;<,>/?~`"
    for o in offenders:
        branch_name = branch_name.replace(o, "-")
    return branch_name
6b40b61e99c9a819953f5ff8b5e06be9be75ddc5
408,656
def quaternion_inv(q):
    """
    Inverse of quaternion q

    Args:
        q: (qw, qx, qy, qz)
    """
    w, x, y, z = q
    d = w*w + x*x + y*y + z*z
    q_inv = (w/d, -x/d, -y/d, -z/d)
    return q_inv
037b02581b886f7dd247d96f6410df4a4f9ad7c5
313,751
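A quick sanity check for quaternion_inv above: multiplying a quaternion by its inverse should give the identity (1, 0, 0, 0). The quaternion_mul helper here is written just for the demo.

def quaternion_mul(q, r):
    # Hamilton product of two (w, x, y, z) quaternions
    w1, x1, y1, z1 = q
    w2, x2, y2, z2 = r
    return (w1*w2 - x1*x2 - y1*y2 - z1*z2,
            w1*x2 + x1*w2 + y1*z2 - z1*y2,
            w1*y2 - x1*z2 + y1*w2 + z1*x2,
            w1*z2 + x1*y2 - y1*x2 + z1*w2)

q = (1.0, 2.0, 3.0, 4.0)
print(quaternion_mul(q, quaternion_inv(q)))  # ~ (1.0, 0.0, 0.0, 0.0)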
def parse_data(input_str: str) -> tuple:
    """
    Parse the input data.

    Args:
        input_str (str): the input data

    Returns:
        N, K, sentences
    """
    line_list = input_str.strip().split('\n')
    sentences = []
    n, k = 0, 0
    for line in line_list:
        line = line.strip()
        if not line:
            continue
        temp_ls = line.split(' ')
        if len(temp_ls) == 2:
            n, k = int(temp_ls[0]), int(temp_ls[1])
        else:
            sentences.append([int(i) for i in temp_ls])
    return n, k, sentences
d3ddd75b231e2080cdd73347226933d515225e06
553,995
def create_pattern_neighbors(width, n_states=2):
    """
    This is a private function that returns the weights for calculating a
    unique number for each different neighborhood pattern in a random
    Boolean network.

    Parameters
    ----------
    width : int
        Neighborhood size.
    n_states : int
        Number of discrete states in a cell.

    Returns
    -------
    out : list
        List of weights of the neighbors.
    """
    return [n_states**p for p in range(width)[::-1]]
815a9e31aba64390d0fb5c9b89f0359c96a1b720
145,074
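A short illustration of create_pattern_neighbors above: the weights are positional place values, so the dot product of a neighborhood with them yields a unique pattern index.

weights = create_pattern_neighbors(3)        # [4, 2, 1] - binary place values
neighborhood = [1, 0, 1]
index = sum(w * s for w, s in zip(weights, neighborhood))
print(index)                                 # 5, i.e. the pattern 0b101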
def _get_enz_states(er_mech: str) -> list:
    """
    Given a string with the enzyme mechanism written as a sequence of
    elementary reactions, produces a list with the enzyme state numbers and
    metabolites that bind/unbind to the respective enzyme states.

    Args:
        er_mech: string with elementary reactions that describe the enzyme mechanism.

    Returns:
        List with all enzyme state numbers and substrates.
    """
    er_mech = er_mech.replace('<->', '')
    er_mech = er_mech.replace('+', '')
    er_mech_list = er_mech.split()
    enz_states_dic = {}
    enz_states_list = []
    state_i = 1
    for i, entry in enumerate(er_mech_list):
        if entry.startswith('E_'):
            if entry not in enz_states_dic.keys():
                enz_states_dic[entry] = state_i
                enz_states_list.append([state_i])
                state_i += 1
            else:
                enz_states_list.append([enz_states_dic[entry]])
            if not er_mech_list[i + 1].startswith('E_'):
                enz_states_list[-1].append(er_mech_list[i + 1])
    return enz_states_list
0a2be33ce4a48a558b2fd9a0ef66d00742f45d76
664,733
def to_curl_command(request):
    """Convert a requests prepared request to a curl command"""
    command = "curl -i -X {method}{headers} -d '{data}' '{uri}'"
    method = request.method
    uri = request.url
    data = request.body
    if data is None:
        data = ''
    headers = ["{0}: '{1}'".format(k, v) for k, v in request.headers.items()]
    headers = " -H ".join(headers)
    if headers:
        headers = " -H {} ".format(headers)
    return command.format(method=method, headers=headers, data=data, uri=uri)
3fa8d039f56525841e11ceaaf248823728f3870b
516,034
import re


def join_lines(src, before, after, sep=" "):
    """
    Remove the newline and indent between a pair of lines where the first
    ends with ``before`` and the second starts with ``after``, replacing it
    by the ``sep``.
    """
    # Wrap each character in [...] so it is matched literally in the regex.
    before_re = "][".join(before).join("[]")
    after_re = "][".join(after).join("[]")
    regex = "\n\\s*".join([before_re, after_re])
    return re.sub(regex, sep.join([before, after]), src)
c11ace588e83edf4ea9447a2b1f043f01a07ffeb
684,232
def check_for_kill(comm, message):
    """Return True IFF a broadcast-kill message has been written to S3."""
    try:
        comm.messages.get("broadcast-kill")  # 12x cheaper than list
        print(message)
        return True
    except comm.messages.client.exceptions.NoSuchKey:
        return False
098e54262b37419f9cf02c90fb040e18651b49ce
472,502
from typing import List


def __split_label(string: str) -> List[str]:
    """
    Return the parts of a label statement, split on ":"; the first element
    is the name of the label.

    :param string:
    :return:
    """
    return string.split(":")
c66eeeff8ce5a4f00e7f3798a7d45d48f214846a
210,386
def _deckhand_render_exception_msg(errors):
    """Helper function to create deckhand render exception msg.

    Parameters:
        errors: List of errors provided by deckhand render.

    Returns:
        string: formulated error message.
    """
    err_msg = ''
    for err in errors:
        if isinstance(err, tuple) and len(err) > 1:
            err_msg += ': '.join(err) + '\n'
        else:
            err_msg += str(err) + '\n'
    return err_msg
47b4a6fa2f522031c70287f2079cd61057be5863
142,171
import stat


def is_dir(attrs):
    """Returns True if attrs is the stat attrs for a directory, False otherwise."""
    return stat.S_ISDIR(attrs.st_mode)
802e94039508060eed65981ebe028fb5178e80a1
144,976
from bs4 import BeautifulSoup


def to_soup(html_source, parser='html.parser'):
    """Convert HTML source (text) to soup object.

    parser can be:
        * html.parser (Python's html.parser)
        * lxml (lxml's HTML parser) -- FASTEST
        * xml (lxml's XML parser)
        * html5lib
    """
    return BeautifulSoup(html_source, parser)
7e0a891d40a651ecf1d5a6dbe6731d0539cbe3b2
248,137
def has_unknown_shape(shape):
    """Check if shape list has unknown dimension."""
    return any(s is None or s < 0 for s in shape)
7cdebb41c84725036b2920b80b711720feea4a99
188,510
import json


def json_minimal(data):
    """Get JSON data in minimal form."""
    return json.dumps(data, separators=(",", ":"))
e4b76f5decb0060f86071fed1a6fecf55012daff
664,910
def user_id(msg):
    """
    Use the userid from the msg as the user search criteria.

    Args:
        msg (errbot.backends.base.Message): The message from the bot that
            triggered the LDAP lookup

    Returns:
        str: The user ID that will be used to search LDAP
    """
    if hasattr(msg.frm, "userid"):
        return msg.frm.userid
    raise ValueError("Cannot determine user ID from msg.userid")
43c61915eca4c13d64e9e8a081e9a67fa97437ab
628,184
def cpu_bound(callable_):
    """
    Marks callable as mainly cpu-bound and safe for running off the MainThread.
    """
    callable_.__blocking_type__ = 'cpu'
    return callable_
77213b52b4226a90e25e63bc1576ec51fb79350c
612,730
def int_to_hex(num):
    """Convert an int to a hexadecimal string"""
    num = int(num)
    vals = {
        10: 'a',
        11: 'b',
        12: 'c',
        13: 'd',
        14: 'e',
        15: 'f',
    }
    if num < 0:
        sign = '-'
        num = -num
    else:
        sign = ''
    if num < 10:
        return sign + str(num)
    if num < 16:
        return sign + vals[num]
    # Use floor division: plain "/" would produce a float in Python 3.
    return sign + int_to_hex(num // 16) + int_to_hex(num % 16)
6d62ceb245defecc9f857928b010dbf6ff2120f3
640,315
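A spot check for int_to_hex above against Python's built-in formatting:

for n in (0, 9, 15, 255, 48879, -4096):
    assert int_to_hex(n) == format(n, 'x'), n
print(int_to_hex(48879))  # beef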
def read_box_line(line, box, dim):
    """Read box info from a line."""
    raw = line.strip().split()
    box[f'{dim}lo'] = float(raw[0])
    box[f'{dim}hi'] = float(raw[1])
    if len(raw) > 2:
        if dim == 'x':
            box['xy'] = float(raw[2])
        elif dim == 'y':
            box['xz'] = float(raw[2])
        else:
            box['yz'] = float(raw[2])
    return box
5987d2b8f46ec736c9f00ab2f4959590f48acddb
422,253
def get_dimensions_by_order(dims_in, dataset):
    """Get dimensions.

    Parameters
    ----------
    dims_in: int or list of int
        the dimensions by numerical order
    dataset: sidpy.Dataset

    Returns
    -------
    dims_out: list of dimensions
    """
    if isinstance(dims_in, int):
        dims_in = [dims_in]
    dims_out = []
    for item in dims_in:
        if isinstance(item, int):
            if item in dataset._axes:
                dims_out.append([item, dataset._axes[item]])
    return dims_out
3430f045ed57e3d98aec15ffb7298d1c727bee27
16,522
def InvertRelativePath(path):
    """Given a relative path like foo/bar, return the inverse relative path:
    the path from the relative path back to the origin dir.

    E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
    should always produce the empty string."""
    if not path:
        return path
    # Only need to handle relative paths into subdirectories for now.
    assert '..' not in path, path
    depth = len(path.split('/'))
    return '/'.join(['..'] * depth)
6c081ac25b3d84bbf8c73940cfd0703afdfbcc3e
160,663
def split_div_mul(v):
    """Returns the base, div, and mul factor from a symbolic shape constant."""
    if "*" in v:
        arr = v.split("*")
        if len(arr) != 2:
            raise ValueError(f"Too many mults in features {v}.")
        v, mul = arr[0], int(arr[1])
    else:
        mul = 1
    if "%" in v:
        arr = v.split("%")
        if len(arr) != 2:
            raise ValueError(f"Too many divs in features {v}.")
        v, div = arr[0], int(arr[1])
    else:
        div = 1
    return v, div, mul
a1310b1ee2baa4b505c8fb621329894ed7f11cc2
461,763
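Hand-checked examples for split_div_mul above; '*' carries the multiplier and '%' carries the divisor:

print(split_div_mul("d0"))      # ('d0', 1, 1)
print(split_div_mul("d0*4"))    # ('d0', 1, 4)
print(split_div_mul("d1%2*3"))  # ('d1', 2, 3)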
def get_new_ext(path, ext):
    """Returns the given file path, with the file extension changed to the
    given extension."""
    if not ext.startswith('.'):
        ext = '.' + ext
    # os.path.splitext did not behave as expected here -> splitting the
    # extension off manually.
    index = path.rfind('.')
    if index == -1:
        # No existing extension: just append the new one instead of
        # silently truncating the last character.
        return path + ext
    asset_path = path[:index]
    asset_path = asset_path + ext
    return asset_path
04f8636b7b06f57e71ba495121f035818a87be42
608,141
def human_readable_size(byte_size):
    """Return human-readable size string, using base-10 prefixes."""
    if byte_size < 10**3:
        return f'{byte_size}B'
    if byte_size < 10**6:
        return f'{byte_size / 10**3:.1f}kB'
    if byte_size < 10**9:
        return f'{byte_size / 10**6:.1f}MB'
    return f'{byte_size / 10**9:.1f}GB'
944414a4a2816c31bdb74bd3c7fd78d4cf834719
267,732
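A few sample outputs for human_readable_size above:

print(human_readable_size(512))            # 512B
print(human_readable_size(2_500))          # 2.5kB
print(human_readable_size(3_200_000))      # 3.2MB
print(human_readable_size(7_100_000_000))  # 7.1GB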
from typing import List


def has_extension(filename: str, extensions: List[str]) -> bool:
    """Check whether a file has one of the specified extensions

    :param filename: The file to check
    :param extensions: The file extensions it should have, e.g. ['.md', '.ipynb']
    :return: True if the extension of the filename is in the list of
        specified allowed extensions
    """
    # Keep the leading dot so the comparison matches entries like '.md'.
    extension = '.' + filename.split('.')[-1]
    return extension in extensions
5b010697bbe3091acba53895585600e03d18706b
423,445
import hashlib


def zgosti(geslo, sol):
    """Return the hash of the password for the given salt.

    Uses the PBKDF2_HMAC key-derivation function with the SHA256 hash
    and 100000 iterations.
    """
    return hashlib.pbkdf2_hmac('sha256', geslo.encode('utf-8'), sol, 100000)
0da3bbf21cf4c1c6517ddf5a5e987cdd63f67f48
485,308
from typing import Dict
from typing import Tuple


def check_negative_frequencies(mol: Dict) -> Tuple[bool, None]:
    """
    Check if a molecule contains any negative frequencies.

    Args:
        mol (Dict): A dictionary representing a molecule entry in LIBE

    Returns:
        Tuple[bool, None]: if the molecule has a negative/imaginary
            frequency, then return True; otherwise, return False
    """
    if mol["vibration"] is None:
        return False, None
    if any([x < 0 for x in mol["vibration"]["frequencies"]]):
        return True, None
    else:
        return False, None
fb20c9d77881aebf867c0ec4e19dcad76a207f7d
196,236
import json


def read_list_from_file(filename):
    """Reads a json-formatted list of strings from *filename*"""
    assert filename.endswith('.json')
    file_list = json.load(open(filename))
    assert isinstance(file_list, list)
    for s in file_list:
        assert isinstance(s, str)
    return file_list
5e1af861da2ff0700751d620eb2dc1258c33aa77
399,355
from pathlib import Path
from typing import Optional


def load_latest_model(model_folder: Path) -> Optional[Path]:
    """Finds the latest model inside the folder where all the models are saved.

    Args:
        model_folder (Path): folder containing the models

    Returns:
        Path: file path of the most recent model
    """
    model_cps = [
        filename
        for filename in model_folder.parent.iterdir()
        if filename.name.startswith(model_folder.name)
    ]
    if len(model_cps) == 0:
        return None
    return sorted(model_cps)[-1]
62818ca57b6e2e4ae97890c30eca1e21b6fd0d45
639,432
def rsplit_int(s):
    """Splits off the largest substring of digits from the right of a string."""
    if s and s[-1].isdigit():
        x, y = rsplit_int(s[:-1])
        return x, y + s[-1]
    else:
        return s, ""
b5df8c88b382e2d1cb37f7c8a0c1167cb7b798f2
340,182
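Hand-traced calls for rsplit_int above:

print(rsplit_int("abc123"))  # ('abc', '123')
print(rsplit_int("2024"))    # ('', '2024')
print(rsplit_int("abc"))     # ('abc', '')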
def pad_list(listA, val=-1, length=None):
    """Pad list of lists with 'val' such that all lists have the same length.

    Parameters
    ----------
    listA : list
        List of lists of different sizes.
    val : number, optional
        Value to pad the lists.
    length : number, optional
        Total length of the list.

    Returns
    -------
    list
        A list of lists with the same size.

    Examples
    --------
    Pad an uneven list of lists with a value.

    >>> from dbcollection.utils.pad import pad_list
    >>> pad_list([[0,1,2,3],[45,6],[7,8],[9]])  # pad with -1 (default)
    [[0, 1, 2, 3], [45, 6, -1, -1], [7, 8, -1, -1], [9, -1, -1, -1]]
    >>> pad_list([[1,2],[3,4]])  # does nothing
    [[1, 2], [3, 4]]
    >>> pad_list([[],[1],[3,4,5]], 0)  # pad lists with 0
    [[0, 0, 0], [1, 0, 0], [3, 4, 5]]
    >>> pad_list([[],[1],[3,4,5]], 0, 6)  # pad lists with 0 of size 6
    [[0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [3, 4, 5, 0, 0, 0]]
    """
    # pad lists with the value in order to have all lists of the same size
    assert isinstance(listA, list), 'Input must be a list. Got {}, expected {}' \
        .format(type(listA), type(list))

    # get the size of the biggest list
    if length:
        max_size = length
    else:
        max_size = len(max(listA, key=len))

    # pad all lists with the padding value
    return [l + [val] * int(max_size - len(l)) for l in listA]
427b674261e071a773cd5b2cf434e38f25cf0148
389,220
def remove_character_at(s, idx):
    """Removes the character from s at index idx, returning the remaining string

    str, int -> str

    >>> remove_character_at("boats", 2)
    'bots'
    """
    return s[:idx] + s[idx+1:]
abc7bedb33c5c9e024dd8cf5830f3b3ee8b08f42
30,708
def within_time_period(t, time_period):
    """Check if time is in the time period.

    Argument:
        t = given time in format (half, min, sec)
        time_period = tuple of (start_time, end_time) in format (half, min, sec)

    Return:
        boolean
    """
    start_time = time_period[0]
    end_time = time_period[1]
    assert start_time[0] == end_time[0], "TIME PERIOD NOT IN THE SAME HALF !"
    if t[0] == start_time[0]:
        t_sec = t[1]*60 + t[2]
        start_sec = start_time[1]*60 + start_time[2]
        end_sec = end_time[1]*60 + end_time[2]
        if t_sec >= start_sec and t_sec <= end_sec:
            return True
    return False
b996ed5b2d49c95faebae558bf064df02dde4867
687,211
def plural(n: int, def_one: str = '', def_many: str = 's') -> str:
    """
    Returns the plural form of a noun based on the number n.

    By default, this function assumes a simple "s" can be used to make a noun
    plural. If the plural word needs to change completely, the two options may
    be given in the optional arguments.

    Example: plural(15, 'criterion', 'criteria') returns 'criteria'.

    :param n: The number on which to determine if a plural form is needed or not.
    :param def_one: The string to return if the number is one.
    :param def_many: The string to return if the number is not one.
    :return: Returns the string to use for the plural form.
    """
    if n == 1:
        return def_one
    else:
        return def_many
f1882280e45b33ae7b213db9bdfa3cc27bd63590
304,850
def getCurrentCats(imagepage):
    """Get the categories currently on the image."""
    result = []
    for cat in imagepage.categories():
        result.append(cat.title(withNamespace=False))
    return list(set(result))
a389f75b4a0cdaf4d22df65d7f8d104084d8b2cd
602,698
import copy


def prune_unused_indices(symbolic_operator):
    """
    Remove indices that do not appear in any terms.

    Indices will be renumbered such that if an index i does not appear in any
    terms, then the next largest index that appears in at least one term will
    be renumbered to i.
    """
    # Determine which indices appear in at least one term
    indices = []
    for term in symbolic_operator.terms:
        for op in term:
            if op[0] not in indices:
                indices.append(op[0])
    indices.sort()

    # Construct a dict that maps the old indices to new ones
    index_map = {}
    for index in enumerate(indices):
        index_map[index[1]] = index[0]

    new_operator = copy.deepcopy(symbolic_operator)
    new_operator.terms.clear()

    # Replace the indices in the terms with the new indices
    for term in symbolic_operator.terms:
        new_term = [(index_map[op[0]], op[1]) for op in term]
        new_operator.terms[tuple(new_term)] = symbolic_operator.terms[term]

    return new_operator
d060c6473993892bd798780ad3911339d4b08199
522,461
def generate_cb_choices(items, checked=False):
    """Generates checkbox entries for lists of strings.

    :items: python list that shall be converted
    :checked: if true, selections will be checked by default
    :returns: A list of dicts with name keys
    """
    return [{'name': m, 'checked': checked} for m in items]
962e32134496708ef97e5fcebdd1358d5df87094
280,325
def parse_gender(gender):
    """
    Parse gender, 1 for male, 0 for female.

    :param gender:
    :return:
    """
    if not gender:
        return None
    gender = gender.lower()
    if gender == 'male' or gender == 'm':
        return 1
    elif gender == 'female' or gender == 'f':
        return 0
    else:
        return None
d6b6ec728bd935ff298bddff8d90af9a082f4b04
359,348
def float_to_python(self, value):
    """
    Convert a 'float' field from solr's xml format to python and return it.
    """
    return float(value)
74bc65259288261c47f0168c586b1bff7a7ae615
59,958
import torch


def get_flat_params_from(model):
    """
    Get the flattened parameters of the model.

    Args:
        model: the model from which the parameters are derived

    Return:
        flat_params: the flattened parameters
    """
    params = []
    for param in model.parameters():
        params.append(param.data.view(-1))
    flat_params = torch.cat(params)
    return flat_params
4acc67cadabed4863367e7c6ef59cf04fe132315
666,294
def get_jwt_from_request(request):
    """
    Fetch the JSON Web Token from a ``request`` object's ``META`` attribute.
    The token is in the ``Authorization`` header with the ``Bearer `` prefix.

    **Parameters**

    ``request``
        Django ``request`` object
    """
    return request.META.get("HTTP_AUTHORIZATION", " ").split(" ")[1]
dc1a251ad75acb32ffa71a552fce05739d1bb67a
338,693
import torch


def compute_boxes(scores, offsets, scale, threshold):
    """Return bounding boxes, scores, offsets, and batch indices in matrix.

    PNet acts like a 12x12 convolution with stride 2, so need to convert
    bounding box indices back to original image coordinates.

    Arguments
    ---------
    scores : torch.Tensor size [n, 1, h, w]
        score for face presence at each image location
    offsets : torch.Tensor size [n, 4, h, w]
        offsets for each image location to recover full image coordinates
    scale : float
        scaling of original image prior to PNet application
    threshold : float
        minimum score value for inclusion

    Returns
    -------
    bounding_boxes : torch.Tensor size [num_boxes, 10]
        Each row is a single bounding box. Column 0 is batch index.
        Columns 1 - 4 are bounding box top left and bottom right coordinates.
        Column 5 is score for that box. Columns 6 - 10 are offset values.
    """
    stride = 2
    kernel_size = 12

    detection_indices = (scores > threshold).nonzero(as_tuple=True)
    batch_ix = detection_indices[0]
    if batch_ix.size()[0] == 0:
        return None

    offsets_ = offsets[batch_ix, :, detection_indices[1], detection_indices[2]]
    h_ix, w_ix = [stride * d + 1 for d in detection_indices[1:]]
    scores_ = scores[batch_ix, detection_indices[1], detection_indices[2]]
    bounding_boxes = torch.stack([batch_ix.to(torch.float32),
                                  torch.round(w_ix / scale),
                                  torch.round(h_ix / scale),
                                  torch.round((w_ix + kernel_size) / scale),
                                  torch.round((h_ix + kernel_size) / scale),
                                  scores_,
                                  offsets_[:, 0],
                                  offsets_[:, 1],
                                  offsets_[:, 2],
                                  offsets_[:, 3]], dim=1)
    return bounding_boxes
5b5b0f2a979ef6f6601416d165f88102d161e793
327,233
import re


def parse_compound_ids(field: str):
    """
    parse_compound_ids() uses regular expressions to extract the KEGG
    compound IDs from a product or substrate field in a KEGG record field.

    Args:
        field (str): name of field that contains KEGG compound IDs in a string

    Returns:
        list: contains parsed KEGG compound IDs
    """
    cpd_list = []
    # matches 'CPD:' chars exactly and captures 'C' + any following digits (\d+);
    # a raw string avoids the invalid-escape-sequence warning.
    regex = r'CPD:(C\d+)'
    for entry in field:
        ids = re.findall(regex, str(entry), re.IGNORECASE)
        for i in ids:
            cpd_list.append(i)
    return cpd_list
38bccbc63f031bd2cf6f4364bbc6e02240814f6b
246,779
def air_density(temp, patm, pw=0):
    """
    Calculates the density of air by means of the universal gas law as a
    function of air temperature and atmospheric pressure.

        m / V = [Pw / (Rw * T)] + [Pd / (Rd * T)]

    where:
        Pd: partial pressure of dry air, Patm - Pw [Pa]
        Rw: specific gas constant for water vapour [Rw = 461.495 J/kg/K]
        Rd: specific gas constant for dry air [Rd = 287.058 J/kg/K]
        T: air temperature [K]
        m/V: density of air [kg/m³]

    Parameters
    ----------
    temp : float
        Air temperature [K].
    patm : float
        Atmospheric pressure [Pa].
    pw : float
        Vapour pressure [Pa]. Default to 0 Pa (dry air).

    Returns
    -------
    float
        Air density [kg/m³].
    """
    # specific gas constants for dry air and water vapour [J / (kg K)]
    rd, rw = 287.058, 461.495
    pd = patm - pw
    return (pd / (rd * temp)) + (pw / (rw * temp))
1af7afbf562fec105566a2c934f83c73f0be1173
705,812
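A worked check for air_density above: for dry air at standard pressure and 20 °C, the textbook value is about 1.204 kg/m³.

rho = air_density(temp=293.15, patm=101325)
print(round(rho, 3))  # 1.204, i.e. 101325 / (287.058 * 293.15)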
def SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
    """
    Reads the given input stream and splits it into chunks based on
    information extracted from individual lines.

    Arguments:
        - fnProcessLine: Called on each line with the text and line number.
          Must return a triplet, composed of the name of the chunk started on
          this line, the data extracted, and the name of the architecture this
          test applies to (or None to indicate that all architectures should
          run this test).
        - fnLineOutsideChunk: Called on attempt to attach data prior to
          creating a chunk.
    """
    lineNo = 0
    allChunks = []
    currentChunk = None

    for line in stream:
        lineNo += 1
        line = line.strip()
        if not line:
            continue

        # Let the child class process the line and return information about it.
        # The _processLine method can modify the content of the line (or delete
        # it entirely) and specify whether it starts a new group.
        processedLine, newChunkName, testArch = fnProcessLine(line, lineNo)
        # Currently, only a full chunk can be specified as architecture-specific.
        assert testArch is None or newChunkName is not None
        if newChunkName is not None:
            currentChunk = (newChunkName, [], lineNo, testArch)
            allChunks.append(currentChunk)
        if processedLine is not None:
            if currentChunk is not None:
                currentChunk[1].append(processedLine)
            else:
                fnLineOutsideChunk(line, lineNo)
    return allChunks
29ddf3d9a8f374240c20c60d0e2330cfdf4394ba
298,382
import requests


def _is_temporal_problem(exception):
    """
    Checks if the obtained exception is temporary and if the download attempt
    should be repeated.

    :param exception: Exception raised during download
    :type exception: Exception
    :return: `True` if exception is temporary and `False` otherwise
    :rtype: bool
    """
    return isinstance(exception, (requests.ConnectionError, requests.Timeout,
                                  requests.exceptions.ChunkedEncodingError))
ac64bd648385bb4e2d67e60ae02e2adf9d73ad89
473,125
def _delete_full_gap_columns(full_gaps, sequence, start, end):
    """
    Return the sequence without the full gap columns.

    >>> full_gaps = [False, True, False, False, False, True]
    >>> _delete_full_gap_columns(full_gaps, "Q-V-Q-", 1, 6)
    'V-Q'
    """
    cleaned_seq = []
    for i in range(start, end):
        if not full_gaps[i]:
            cleaned_seq.append(sequence[i])
    return ''.join(cleaned_seq)
a6f997fb1a596955cc35b85983a97459e1f8bd47
324,392
def _combine_texts_to_str(text_corpus, ignore_words=None):
    """
    Combines texts into one string.

    Parameters
    ----------
    text_corpus : str or list
        The texts to be combined.
    ignore_words : str or list
        Strings that should be removed from the text body.

    Returns
    -------
    texts_str : str
        A string of the full text with unwanted words removed.
    """
    if isinstance(ignore_words, str):
        words_to_ignore = [ignore_words]
    elif isinstance(ignore_words, list):
        words_to_ignore = ignore_words
    else:
        words_to_ignore = []

    if isinstance(text_corpus[0], list):
        flat_words = [text for sublist in text_corpus for text in sublist]
        flat_words = [
            token
            for subtext in flat_words
            for token in subtext.split(" ")
            if token not in words_to_ignore
        ]
    else:
        flat_words = [
            token
            for subtext in text_corpus
            for token in subtext.split(" ")
            if token not in words_to_ignore
        ]

    return " ".join(flat_words)
3d74ceea04cd559f32a93b1b89b8bf4e82fb6ef1
377,866
def _remove_nulls(data, skip=None):
    """Remove all null/None/empty values from a dict or list, except those
    listed in skip."""
    if isinstance(data, dict):
        new_dict = {}
        for key, val in data.items():
            new_val = _remove_nulls(val, skip=skip)
            if new_val is not None or (skip is not None and key in skip):
                new_dict[key] = new_val
        return new_dict
    elif isinstance(data, list):
        new_list = []
        for val in data:
            new_val = _remove_nulls(val, skip=skip)
            if new_val is not None:
                new_list.append(new_val)
        return new_list
    # Could delete required but empty blocks - services, etc.
    # elif hasattr(data, "__len__") and len(data) <= 0:
    #     return None
    else:
        return data
c30190def2540974c817a8919378015e4c72a43c
418,896
def is_node_equal(node_1, node_2):
    """
    Check if two expression AST nodes are equal, since pycparser doesn't
    provide such a property.

    :param node_1: First expression node
    :param node_2: Second expression node
    :return: Boolean
    """
    # naive comparison, can be optimized
    return node_1.__repr__() == node_2.__repr__()
cbb96902e50ae90c169b306fc9126df0f7c8c74e
467,083
def expand_recurring(number, repeat=5):
    """
    Expands a recurring pattern within a number.

    Args:
        number(tuple): the number to process in the form:
            (int, int, int, ... ".", ..., int, int, int)
        repeat: the number of times to expand the pattern.

    Returns:
        The original number with recurring pattern expanded.

    Example:
        >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3)
        (1, '.', 0, 9, 9, 9, 9)
    """
    if "[" in number:
        pattern_index = number.index("[")
        pattern = number[pattern_index + 1:-1]
        number = number[:pattern_index]
        number = number + pattern * (repeat + 1)
    return number
25bec2962fc451fda7e387fa7d867b1d89519ec9
528,033
def asoctal(s):
    """Convert the given octal string to an actual number."""
    return int(s, 8)
fdffd7a434ffc546fcaa4583143ee1d20151c829
472,192
def echo(value):
    """Returns whatever we get"""
    return value
2029be2aebc65e23ff82bf31d7577f0966cac112
212,939
def serializeRegressor(tree):
    """
    Convert a sklearn.tree.DecisionTreeRegressor into a JSON-compatible format
    """
    LEAF_ATTRIBUTES = [
        'children_left',
        'children_right',
        'threshold',
        'value',
        'feature',
        'impurity',
        'weighted_n_node_samples',
    ]
    TREE_ATTRIBUTES = ['n_classes_', 'n_features_', 'n_outputs_']
    encoded = {
        'nodes': {},
        'tree': {},
        'n_leaves': len(tree.tree_.threshold),
        'params': tree.get_params(),
    }
    for attr in LEAF_ATTRIBUTES:
        encoded['nodes'][attr] = getattr(tree.tree_, attr).tolist()
    for attr in TREE_ATTRIBUTES:
        encoded['tree'][attr] = getattr(tree, attr)
    return encoded
d94f8cde0144cd842175480332a398def8e19ae8
25,012
def remove_problematic_columns(x_df, columns_to_remove):
    """Return new df with problematic columns removed."""
    return x_df.drop(columns_to_remove, axis=1)
491e80f4e1ac2bfb93c8afcadd70f5b206a4b90c
527,378
def generate_polish_exp(nodes):
    """
    Parameters
    ----------
    nodes : list / arr_like
        Nodes of the netlist.

    Returns
    -------
    exp : list
        A Polish Expression.
    """
    exp = []
    for i in range(len(nodes)):
        exp.append(nodes[i])
        if i != 0:
            if i < (len(nodes) // 2):
                exp.append('V')
            else:
                exp.append('H')
    return exp
859076139f311ffbf7d80ebf65d20726e5e93d9d
228,031
import requests


def test_steam_api_key(api_key=None):
    """
    Test if api_key is valid for the steam api.

    Parameters
    ----------
    api_key (int): api steam key

    Returns
    -------
    (bool): True if valid
    """
    # According to https://developer.valvesoftware.com/wiki
    # /Steam_Web_API#GetGlobalAchievementPercentagesForApp_.28v0001.29
    # Build the URL with implicit string concatenation so no stray
    # whitespace from a line continuation ends up inside it.
    url_test = ('http://api.steampowered.com/ISteamUser'
                '/GetPlayerSummaries/v0002/'
                f'?key={api_key}&steamids=76561197960435530')
    return requests.get(url_test).status_code == 200
1e0efec20eb7180fd7c8157f154a0076f419869e
74,323
def select_and_combine(i, obj, others, combine_func, *args, **kwargs):
    """Combine `obj` and an element from `others` at `i` using `combine_func`."""
    return combine_func(obj, others[i], *args, **kwargs)
71908e7f5fcbac4e466e14acf7ed2d7576bcb864
366,992
def build_response(content):
    """Builds bot response"""
    response = content
    response += "\n***\n^(I am a bot and I byte | [source](https://github.com/jasmaa/shiritori-bot))"
    return response
e99e730ae5a355db8aa9bcd45abc23d05bf648c0
607,246
def is_scalar_type(param_type: str) -> bool:
    """Tests if a parameter type is a scalar DeCoP value (e.g. boolean).

    Args:
        param_type (str): The type name of the parameter.

    Returns:
        bool: True if the type is scalar, false if it is complex.
    """
    return param_type.lower() in {'boolean', 'integer', 'real', 'string', 'binary'}
b59e1f703362a476b6f046111a41bf76a875d25b
218,224
def get_plugin_name(plugin):
    """
    Return the PyPI name of the given plugin.
    """
    return 'taxi-' + plugin
9002607babc935afe2096d667ed567c3b10e80f3
630,849
def _is_cuda(*args):
    """Returns True if any of the arguments is on a CUDA device, False otherwise."""
    for arg in args:
        if arg.is_cuda:
            return True
    return False
68ae140afebe0bde8c2d10d2a3c37195475e3138
97,660
def to_float(string):
    """
    Convert string to float

    >>> to_float("42.0")
    42.0
    """
    try:
        number = float(string)
    except (TypeError, ValueError):
        # a bare except would also swallow KeyboardInterrupt and friends
        number = 0.0
    return number
51400568c8575b9663af0f861f4bf4ecc0b3d179
412,151
def next_token(mention):
    """
    Compute the token following a mention.

    Args:
        mention (Mention): A mention.

    Returns:
        The tuple ('next', TOKEN), where TOKEN is the (lowercased) token
        following the mention. If no such token exists, set TOKEN to 'NONE'.
    """
    next_t = mention.get_context(1)
    if next_t:
        return "next", next_t[0].lower()
    else:
        return "next", "NONE"
fc3d6e3bada571edc959e07889d34a11e5a5068f
375,083
from typing import List
from typing import Optional


def fetch_one(result_set: List[List]) -> Optional[List]:
    """
    Returns the first record returned from a query. If the result set is
    empty then None is returned.

    :param result_set: a list of records returned from AuroraDatabase.query (list)
    :return: (list|None)
    """
    return result_set[0] if result_set else None
6308fe918830b962a3dc58e7105f073861e038a1
328,322
def pr(vp, vs):
    """
    Computes the Poisson ratio

    Parameters
    ----------
    vp : array
        P-velocity.
    vs : array
        S-velocity.

    Returns
    -------
    pr : array
        Poisson ratio.
    """
    vpvs = vp / vs
    pr = 0.5 * ((vpvs**2 - 2) / (vpvs**2 - 1))
    return pr
bec82f868b847b85e39c90016f6787e20faa91ae
42,788
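A worked check for pr above: for a Poisson solid, vp/vs = sqrt(3), which gives a Poisson ratio of exactly 0.25.

import math

print(pr(math.sqrt(3.0), 1.0))  # 0.25: 0.5 * (3 - 2) / (3 - 1)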
from typing import List


def flatten_string_list(l: List[List[str]]) -> List[str]:
    """Flatten a list of lists of str.

    Args:
        l (List[List[str]]): the nested list to flatten

    Returns:
        List[str]: the flattened list
    """
    return [item for sublist in l for item in sublist]
2051adf16139fa013b6ef566e512e4c830673788
519,566
def normalize(df):
    """
    Normalizes columns per wiki for a selected number of features (listed in
    the 'cols' variable).

    :param df: DataFrame whose columns are to be normalized
    :return: DataFrame
    """
    cols = [
        "editors",
        "major_edits",
        "anonymous_edits",
        "pls",
        "transcluded_in",
    ]
    for col in cols:
        if col in df.columns:
            df[col + "_norm"] = (
                df[col] / df.groupby("dbname")[col].transform("sum")
            ) * 100
            del df[col]
    return df
f06953632433d14cc8891bf36977da2c45bf1649
509,758
def update_playbook_task_name(playbook):
    """
    Update the name of each playbook task to match the playbookName it is
    running.

    :param playbook: playbook dict loaded from yaml
    :return: updated playbook dict
    """
    for task_id, task in playbook.get("tasks", {}).items():
        if task.get("type") == "playbook":
            task["task"]["name"] = task["task"]["playbookName"]
    return playbook
f04bdfd2b6bab0963f0beb8daf615d723b3a12ff
312,965
def exponentiate(base: int, exponent: int):
    """
    Calculates an exponentiation.

    Returns
    -------
    result : int
        Exponentiation. The result of the calculation
    """
    # Any base to the power 0 is 1; the loop below assumes exponent >= 1.
    if exponent == 0:
        return 1
    result = base
    for i in range(1, exponent):
        result = result * base
    return result
adaf258e10ba4cf4d1fe3a7994ac7737e4255874
276,240
def crop_image(img, x1, y1, x2, y2):
    """Returns the cropped image specified by x1, y1, x2, y2"""
    return img[y1:y2, x1:x2]
1e00397a87d7ed80eb9b122e329aaa00cd5a81c1
322,389
def _concat(*lists):
    """Concatenates the items in `lists`, ignoring `None` arguments."""
    concatenated = []
    for lst in lists:
        if lst:
            concatenated += lst
    return concatenated
a1eea1c074fe1eee1ca454899bf9dec2719a333e
699,813
def school_abbreviation(name):
    """
    Creates the abbreviation used for the school based on the long name.
    Removes intermediary words.

    :param name: (String) Long name for the school
    :return: (String) abbreviated name for the school
    """
    name = name.split()
    abbv = ""
    no_lst = ["of", "the", "in", "at"]
    # Filter instead of calling list.remove() while iterating, which skips
    # elements and only drops the first occurrence of each word.
    name = [item for item in name if item.lower() not in no_lst]
    for item in name:
        abbv += item[0].lower()
    return abbv
2a8d7b16b65d36b534256a0af055d50f976d77af
309,097
def is_prod_of_two_3_digit_num(n):
    """Determine whether n is the product of two 3-digit numbers."""
    result = False
    for i in range(100, 1000):
        if n % i == 0 and n // i in range(100, 1000):
            result = True
            break
    return result
db0cb1b3ae1ecb8b15d01582f8c0599ce00ce766
20,647
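A quick check for is_prod_of_two_3_digit_num above, using the classic Project Euler 4 palindrome 906609 = 913 x 993:

print(is_prod_of_two_3_digit_num(906609))  # True:  913 * 993
print(is_prod_of_two_3_digit_num(999999))  # False: 999 * 999 = 998001 < 999999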
def find_zero_constrained_reactions(model):
    """Return list of reactions that are constrained to zero flux."""
    return [
        rxn
        for rxn in model.reactions
        if rxn.lower_bound == 0 and rxn.upper_bound == 0
    ]
52c3f45ea21f3efc0459c62011fcd59c0c1cb5e6
440,855
import torch


def _squash(input_tensor, dim=2):
    """
    Applies norm nonlinearity (squash) to a capsule layer.

    Args:
        input_tensor: Input tensor. Shape is [batch, num_channels, num_atoms]
            for a fully connected capsule layer or
            [batch, num_channels, num_atoms, height, width] or
            [batch, num_channels, num_atoms, height, width, depth] for a
            convolutional capsule layer.

    Returns:
        A tensor with same shape as input for output of this layer.
    """
    epsilon = 1e-12
    norm = torch.linalg.norm(input_tensor, dim=dim, keepdim=True)
    norm_squared = norm * norm
    return (input_tensor / (norm + epsilon)) * (norm_squared / (1 + norm_squared))
715b5819498d4c3a7c40c623fc9a40d2fcfb3773
695,397
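A small demo of _squash above, showing that the output vector norms are squashed into [0, 1) while the directions are preserved:

import torch

x = torch.randn(2, 8, 16)          # [batch, num_channels, num_atoms]
y = _squash(x, dim=2)
print(torch.linalg.norm(y, dim=2).max() < 1.0)  # tensor(True)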
def bit(number, index):
    """Return the indexth bit of number."""
    return (number >> index) & 1
a08882b99caa6ebd321c2cf677ffb26eb4aba4c8
106,626
def const(con):
    """Define a constant mapping for elastic search index.

    This helper may be used to define index mappings, where the indexed value
    is always set to a specific constant.

    Example:

    .. code-block:: python

        mapping = {'field': const('I am a constant')}
    """
    return lambda obj: con
71b9a36887f0d022e230c0d7a2b3fd25530d0638
397,178
def make_parse_func(func, vocab):
    """Create a function that calls func and parses the output in vocab."""
    def parse_func(t):
        return vocab.parse(func(t)).v
    return parse_func
d13da779ee3f90e53bd0332e4ccf294ce7fd5796
239,355
import torch


def flatten_feature_map(feature_map):
    """
    Flatten input tensor from [batch, y, x, f] to [batch, y*x*f]

    Args:
        feature_map: tensor with shape [batch, y, x, f]

    Returns:
        reshaped_map: tensor with shape [batch, y*x*f]
    """
    # Use torch's shape API; the original called the TensorFlow-style
    # get_shape()/ndims, which torch tensors do not provide.
    if feature_map.dim() == 4:
        (batch, y, x, f) = feature_map.shape
        prev_input_features = int(y * x * f)
        resh_map = torch.reshape(feature_map, [-1, prev_input_features])
    elif feature_map.dim() == 2:
        resh_map = feature_map
    else:
        raise ValueError("Input feature_map has incorrect ndims")
    return resh_map
99dc93d7e0e21d19e8252c6a1106e208ac4937ec
620,349