content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def create_result(json_data, start, fps):
    """Transform a YOLO prediction dict into a Veritone object result.

    :param json_data: dict with 'topleft'/'bottomright' corner dicts
        (keys 'x'/'y'), plus 'confidence' and 'label'.
    :param start: frame/segment start index; emitted in the 'start' field.
    :param fps: frames per second, used to derive the 'end' timestamp.
    :return: dict in the Veritone object format.
    """
    left = json_data['topleft']['x']
    top = json_data['topleft']['y']
    width = json_data['bottomright']['x'] - left
    height = json_data['bottomright']['y'] - top
    return {
        'boundingPoly': {
            'left': left,
            'top': top,
            'width': width,
            'height': height
        },
        # float() accepts plain Python numbers as well as numpy scalars;
        # the original .astype(float) crashed on non-numpy confidences.
        'confidence': float(json_data['confidence']),
        'found': json_data['label'],
        'start': start * 1000,  # in ms
        # NOTE(review): 'start' is scaled by 1000 while 'end' also divides
        # by fps — looks inconsistent; confirm intent against callers.
        'end': int((start + 1) * 1000 / fps),
        'type': 'object',
    }
9130b5319cd1e7e7bbc7523a294b14e74461ffd4
60,974
def poly_eval(coefs, x):
    """Evaluate a polynomial at *x*.

    Coefficients in *coefs* are ordered from the lowest power to the
    highest; *x* may be any ring element supporting ``*`` and ``+``.
    """
    total = 0
    for power, coef in enumerate(coefs):
        total = coef * (x ** power) + total
    return total
59cf7049e382c1e3827266875bbe896e07cf9688
60,978
def new_polygon(biggon, ratio):
    """Return the smaller polygon whose corners split each edge of
    *biggon* by the given *ratio*.

    Each new vertex lies on the segment from vertex i to vertex i+1
    (wrapping around), at the fraction given by *ratio*.
    """
    count = len(biggon)
    smallgon = []
    for idx in range(count):
        head = biggon[idx]
        tail = biggon[(idx + 1) % count]
        smallgon.append((head[0] * ratio + tail[0] * (1 - ratio),
                         head[1] * ratio + tail[1] * (1 - ratio)))
    return tuple(smallgon)
765d14bf265b2d659d74cf3bab502776905bb19a
60,987
def rem_comment(line):
    """Strip an inline ``#`` comment (and trailing whitespace) from *line*."""
    code, _sep, _comment = line.partition("#")
    return code.rstrip()
86907c71b7e285e98345b28be39c2646401eeccc
60,989
def extract_minor(chord):
    """Detect a trailing 'm' (minor) specifier on a chord name.

    Returns (True, chord_without_m) for minor chords, otherwise
    (False, chord) unchanged. E.g. Am -> (True, 'A'); B# -> (False, 'B#').
    """
    is_minor = chord[-1] == 'm'
    return (True, chord[:-1]) if is_minor else (False, chord)
2a687030cae49a223b335a64d27e48bcdd7471e7
60,990
import math


def polygon_area(n_sides, side_len):
    """Find the area of a regular polygon.

    :param n_sides: number of sides
    :param side_len: length of polygon sides
    :return: area of polygon

    >>> round(polygon_area(4, 5))
    25
    """
    perimeter = n_sides * side_len
    # apothem = distance from center to the midpoint of a side
    apothem = side_len / (2 * math.tan(math.pi / n_sides))
    return perimeter * apothem / 2
fba3ae96771892eb100d887775dbc4f6636f7b8c
60,991
import base64


def decode_from_b16(b16encoded: bytes) -> str:
    """Decode base-16 bytes into a UTF-8 string.

    >>> decode_from_b16(b'48656C6C6F20576F726C6421')
    'Hello World!'
    >>> decode_from_b16(b'')
    ''
    """
    raw_bytes = base64.b16decode(b16encoded)
    return raw_bytes.decode("utf-8")
4b8ee5333ade64b922fbff1060c5747b285451fb
60,993
def yes_no(flag: bool):
    """Render a truthy/falsy *flag* as a human-readable "Yes"/"No"."""
    if flag:
        return "Yes"
    return "No"
13f8c0c1fed55819f27f1527114b0dc7e60e3c73
60,998
def make_breakable(text, maxlen):
    """Make *text* breakable by chunking words longer than *maxlen*.

    Words (space-separated) longer than maxlen are split into maxlen-sized
    pieces joined back with spaces, so the text can wrap anywhere.
    """
    pieces = []
    for word in text.split(" "):
        if len(word) > maxlen:
            pieces.extend(word[i:i + maxlen] for i in range(0, len(word), maxlen))
        else:
            pieces.append(word)
    return " ".join(pieces)
ab85cf4ad33d48198bd679bcb8627c423a763079
60,999
def refresh_device(device):
    """Task that refreshes a device.

    :param device: device to be refreshed.
    :return: whatever device.refresh() returns (e.g. an SNS response)
    """
    response = device.refresh()
    return response
49785f94b4f935b8ba8d91827b0da69bd04a0829
61,002
import inspect


def get_caller_name(depth=2, mod=True, cls=False, mth=False):
    """
    Gets the name of the calling module/class/method with format
    [module][.class][.method]

    :param int depth: the depth of the caller if passing through multiple
        methods (optional)
    :param bool mod: include module in name (optional)
    :param bool cls: include class in name (optional)
    :param bool mth: include method in name (optional)
    :return: caller name as [module][.class][.method]
    :rtype: str
    """
    stack = inspect.stack()
    start = 0 + depth
    if len(stack) < start + 1:
        # not enough frames on the stack to look that far up
        return ''
    parent_frame = stack[start][0]
    name = []
    module = inspect.getmodule(parent_frame)
    if module and mod:
        name.append(module.__name__)
    if cls and 'self' in parent_frame.f_locals:
        # class of the bound instance in the caller's frame, if any
        name.append(parent_frame.f_locals['self'].__class__.__name__)
    if mth:
        codename = parent_frame.f_code.co_name
        if codename != '<module>':  # module-level frames carry no method name
            name.append(codename)
    # drop frame references explicitly to break reference cycles
    del parent_frame, stack
    return '.'.join(name)
f5cf6f464a4a5e1edefc94b5a239d2a17aabe9d5
61,005
import math


def getKLDivergence(P, Q):
    """Compute the KL-divergence D(P || Q) of two same-length distributions.

    Terms with p == 0 contribute nothing; all entries must be >= 0.
    """
    assert len(P) == len(Q)
    divergence = 0
    for p, q in zip(P, Q):
        assert p >= 0
        assert q >= 0
        if p > 0:
            divergence += p * math.log(p / q)
    return divergence
e942150f42e58e82d421e68362b2969be0c7e7fd
61,008
from typing import List def _generate_avails_keys(future_len: int) -> List[str]: """ Generate availabilities keys (one per future step) Args: future_len (int): how many prediction in the future data has Returns: List[str]: a list of keys """ return [f"avail_{i}" for i in range(future_len)]
b6bd5d32914faf4eed8fce0fa7c066da9ac64043
61,012
import socket


def open_port(host, port):
    """Test whether a TCP port is open at *host*. Return boolean.

    :param host: hostname or IP address to probe
    :param port: TCP port number
    :return: True if a connection succeeded, False otherwise
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((host, port))
        sock.shutdown(2)
        return True
    except OSError:
        # connection refused / unreachable / timed out — port not open.
        # (narrowed from a bare except that also swallowed KeyboardInterrupt)
        return False
    finally:
        # the original leaked the socket on the failure path
        sock.close()
84f18c7c1ccab6b1a73296135cff34be34ebfcb7
61,018
def identify_candidates(all_sso, min_obs=40, dist_cutoff=10): """Identify the objects which might be lightcurve determination candidates. Parameters ---------- all_sso: pd.DataFrame The alert observations of the SSOs. min_obs: int, opt Minimum number of observations to consider an object a candidate for lightcurve determination. Default 40. dist_cutoff: int, opt The maximum ssdistnr value in the alert == arcseconds between measured position and expected position. Default 10. Returns ------- list of str List of the object names. """ # Pull out the list of objects with many observations, within @dist_cutoff of the attributed sso. objs = all_sso.query('ssdistnr < @dist_cutoff').groupby('ssnamenr')[['jd']].count().query('jd > %d' % min_obs) names = objs.sort_values('jd', ascending=False) objnames = names.index.values print(f'# Found {len(objnames)} objects with more than {min_obs} observations') return objnames
70551a357a89f19b829fe5457694ba67e99bab0d
61,021
def get_annotation_namespace(annotation):
    """Extract the namespace from a payload annotation.

    e.g. '@Message.ExtendedInfo' -> 'Message'

    :param annotation: the payload annotation
    :type annotation: str
    :return: the namespace
    """
    # rpartition keeps the full string when '@' is absent (tail == input)
    _, _, qualified = annotation.rpartition('@')
    return qualified.rsplit('.', 1)[0]
8a08457273cf1298491fe7569da251fc174ab7b1
61,026
def fmt_time(value):
    """Format a duration in seconds for display.

    < 60 s   -> seconds (1 decimal for floats, plain for ints)
    < 3600 s -> "X m Y s"
    >= 3600  -> "X h Y m Z s"
    Values in (0, 0.1) get 3 decimals; None renders as "N/A".

    :param value: seconds or None
    :return: formatted string
    :raises TypeError: for non-numeric input
    """
    if value is None:
        return "N/A"
    if not isinstance(value, (int, float)):
        raise TypeError(f"Expected int or float, got {type(value)}")
    if 0 < value < 0.1:
        return f"{value:.3f} s"
    if value < 60:
        return f"{value} s" if isinstance(value, int) else f"{value:.1f} s"
    if value < 3600:
        return f"{value // 60:.0f} m {value % 60:.0f} s"
    hours = value // 3600
    minutes = (value % 3600) // 60
    seconds = value % 60
    return f"{hours:.0f} h {minutes:.0f} m {seconds:.0f} s"
2db88f2ef1ee0b7b1d69be3cedb80f4abe240ad4
61,029
def strategy(history, memory):
    """Orannis's strategy: cooperate, but defect when the opponent defected
    last round OR when they cooperated for each of the last three rounds.

    *history* is a 2 x n array-like (row 1 = opponent's moves, 1=cooperate,
    0=defect). Returns (choice, state) where state is always None.
    """
    num_rounds = history.shape[1]
    defect = False
    if num_rounds >= 1 and history[1, -1] == 0:
        defect = True
    if num_rounds >= 3 and all(history[1, -k] == 1 for k in (1, 2, 3)):
        defect = True
    return (0 if defect else 1), None
e50186bf34b151c92058b40b189b86052e65d268
61,031
def nextChar(char):
    """Increment an alphanumeric character within its own range
    (a-z, A-Z or 0-9).

    Returns a tuple (new_char, wrapped) where wrapped is True when the
    increment looped back to the first character of the range.
    (Docstring translated from the original French.)
    """
    wrapped = False
    if char.isalpha():
        if char.isupper():
            # ASCII 65..90 ('A'..'Z')
            if ord(char) + 1 <= 90:
                char = chr(ord(char) + 1)
            else:
                char = 'A'
                wrapped = True
        elif char.islower():
            # ASCII 97..122 ('a'..'z')
            if ord(char) + 1 <= 122:
                char = chr(ord(char) + 1)
            else:
                char = 'a'
                wrapped = True
        else:
            # alphabetic but neither upper nor lower (e.g. titlecase)
            print("invalid char")
    elif char.isdigit():
        # ASCII 48..57 ('0'..'9')
        if ord(char) + 1 <= 57:
            char = chr(ord(char) + 1)
        else:
            char = '0'
            wrapped = True
    return (char, wrapped)
319ae042f0fd95c2a7acd9b9e212ac656d335113
61,033
def ordered_deduplicate(sequence):
    """Return *sequence* as a tuple with later duplicates dropped,
    preserving the order of first occurrence.

    >>> ordered_deduplicate([1, 2, 3, 1, 32, 1, 2])
    (1, 2, 3, 32)
    """
    seen = set()
    unique = []
    for item in sequence:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return tuple(unique)
f007e7a9b047ee3fc919e879c8a0ec22e1a8495b
61,035
import sys


def decode_response(resp, definition):
    """Decode *resp.content* into text when we know how to handle it.

    On Python 3, attempts a default (utf-8) decode of the content and
    falls back to the raw bytes when decoding is not possible. The
    *definition* argument is accepted for interface compatibility.

    Replaces the old six.PY3 guard with a stdlib version check, dropping
    the third-party dependency; the bare except is narrowed to the two
    errors the decode can actually raise.
    """
    if sys.version_info[0] >= 3:
        try:
            return resp.content.decode()
        except (UnicodeDecodeError, AttributeError):
            return resp.content
    return resp.content
8e63a3a55d529f9c3df5ab0ce8520b75da6a6525
61,041
import json


def get_manifest(config_digest, layer_digest):
    """Return a dummy OCI image manifest (as a JSON string) containing a
    config entry and a single image layer with the given digests."""
    manifest = {
        "schemaVersion": 2,
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "size": 7023,
            "digest": config_digest,
        },
        "layers": [
            {
                "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
                "size": 32654,
                "digest": layer_digest,
            }
        ],
        "annotations": {"com.example.key1": "peas", "com.example.key2": "carrots"},
    }
    return json.dumps(manifest)
bb4e8b0643fe291bb63feb7f76440bdbdeb30d03
61,043
def remove_new_lines(in_string):
    """Collapse newlines: join the stripped lines with single spaces.

    Falsy inputs (None, "") are returned unchanged.
    """
    if not in_string:
        return in_string
    return " ".join(line.strip() for line in in_string.splitlines())
5576745123f462c35c6a452a4a43f7daa91adc64
61,047
def prompt(usage: str, order: str, aspect: str, context: list[str], question: str) -> str:
    """Build a GLUCOSE-style language-model prompt.

    Parameters
    ----------
    usage : str
        Inference task: `general` (question is a story sentence) or
        `premise` (question is an inferred result).
    order : str
        Inference direction: `forward` (deduce from question) or
        `backward` (hypothesize from question).
    aspect : str
        GLUCOSE dimension: `causal`, `emotional`, `spatial`,
        `possession`, or `miscellaneous`.
    context : list[str]
        Story sentences the inference is based upon (each is stripped).
    question : str
        The question the inference intends to answer (stripped).

    Returns
    -------
    str
        A prompt for the language model.
    """
    joined_context = ' '.join(sentence.strip() for sentence in context)
    return (f"glucose: {usage} {order} {aspect} "
            f"context: {joined_context} "
            f"premise: {question.strip()}")
67333f8ebe9c43bba8372acee304fc5de3564df0
61,048
def language_check(message, texts):
    """Return True when *message* is one of the supported languages in *texts*."""
    return message in texts
5a4e7fc8ae78c2e6043fef6a4b5d6eebda199039
61,049
def copy_graph(graph):
    """Return a copy of *graph* with every adjacency set duplicated,
    so mutating the copy's sets does not affect the original."""
    return {node: set(neighbors) for node, neighbors in graph.items()}
d7c31da77eb742d4167801f60ecfcd9fd330e8e0
61,051
def points_equal(a, b):
    """Return True when the two [x, y] points agree coordinate-by-coordinate.

    Iterates over len(a) indices, like the original loop (so *b* must be
    at least as long as *a*).
    """
    return all(a[i] == b[i] for i in range(len(a)))
cbcee4c2a5d9e34520416a6655956176b80883ba
61,053
def count_pairs(sets):
    """Compute the number of unordered element pairs across disjoint sets.

    For set sizes s1..sk this equals the sum over i < j of si * sj.
    """
    elements_so_far = 0
    pairs = 0
    for size in sets.sizes():
        pairs += elements_so_far * size
        elements_so_far += size
    return pairs
9cc92c3759486ffb63e1ad5f6a31ad48687c4f55
61,057
import hashlib


def filter_hash(data, alg='md5'):
    """Filter for hashing data. E.g.::

        - echo: {from: '{{ my_var | hash("sha1") }}', to: two.output}

    :param data: string data to hash (encoded as utf-8 before hashing)
    :param alg: hashlib algorithm name to use
    :return: hex digest of *data*
    :raises ValueError: if *alg* is not a hashlib algorithm
    """
    if not hasattr(hashlib, alg):
        # BUG FIX: the original interpolated `data` into this message,
        # leaking the payload instead of naming the bad algorithm.
        raise ValueError('Unknown algorithm: ' + alg)
    digest = getattr(hashlib, alg)()
    digest.update(data.encode())
    return digest.hexdigest()
dcba4d76e956f82823197fa0b30e0f24b348b31a
61,058
import yaml


def read_yaml(filename):
    """Read and return the parsed contents of a YAML file.

    :param filename: path to the YAML file (opened as UTF-8 text)
    :return: parsed data (dict/list/scalar depending on the document)
    """
    with open(filename, 'r', encoding='utf-8') as yaml_file:
        return yaml.load(yaml_file, Loader=yaml.FullLoader)
f68b95a9c3ed18628ee46e2cfcd4997168952099
61,059
def convert_data_to_numbers(tokens, smiles):
    """Prepare RNN data: map each SMILES symbol to its index in *tokens*.

    :param tokens: list of the tokens used
    :param smiles: list of SMILES strings to convert
    :return: (x_train, y_train) where y_train is x_train shifted left by
        one position with a trailing 0 appended per sequence
    """
    x_train = [[tokens.index(symbol) for symbol in smile] for smile in smiles]
    y_train = [sequence[1:] + [0] for sequence in x_train]
    return x_train, y_train
a520ba5828186a79b6c615ebc175e2fedb638cf1
61,060
import struct


def i32(data):
    """Return an int32 composed from the first 4 bytes (little-endian).

    Both halves are unpacked as signed 16-bit shorts and recombined as
    (high << 16) + low, matching the original composition exactly.
    """
    low = struct.unpack_from('<h', data, 0)[0]
    high = struct.unpack_from('<h', data, 2)[0]
    return (high << 16) + low
dcff2aea361e2da4aa012a6d9fbcdec05c43233e
61,061
def get_op_slice_sizes(op_slices):
    """Return OpSlice sizes for a list of list of OpSlice.

    The outer list has an element per op; each inner list holds the
    OpSlice that compose that op.

    Args:
        op_slices: List of list of OpSlice.

    Returns:
        List of list of OpSlice sizes, outer list having one entry per op.
    """
    return [[op_slice.slice.size for op_slice in op] for op in op_slices]
681fb9468165ae5ca66390d510541ff1c95858a6
61,066
def get_satellite_name(tle):
    """Return the first TLE line with spaces and dashes replaced by underscores."""
    translation = str.maketrans(" -", "__")
    return str(tle[0]).translate(translation)
a6fb6598681ba93dd7cea1c36e2a02fc4da68ba5
61,068
def _get_bucket(url): """ Retrieves the bucket based on the URL :param string url: URL to parse :return: bucket name :rtype: string """ first_slash_index = url.find('/') return url[:first_slash_index]
2f938703ce72adac9fabdec207d5b210c7bc3de1
61,079
def bigquery_serialize_datetime(py_datetime):
    """Serialize a python datetime into a format BigQuery accepts.

    BigQuery canonical format:
    'YYYY-[M]M-[D]D[( |T)[H]H:[M]M:[S]S[.DDDDDD]]'
    https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#canonical-format_3

    Args:
        py_datetime (datetime.datetime): The date to convert.

    Returns:
        (str): The serialized date.
    """
    # deserialize with datetime.strptime(serialized_str + '00', '%Y-%m-%d %H:%M:%S.%f')
    bq_format = '%Y-%m-%d %H:%M:%S.%f'
    return py_datetime.strftime(bq_format)
7d362503195e1ac003f530afa5c25450dff1c086
61,084
def make_all_seqs(l):
    """Return every possible sequence of length *l* over A/C/G/T/N.

    Emits a warning for l > 8 since the result grows as 5**l.
    """
    if l > 8:
        print("Warning - large barcodes detected!")
        print("It may be faster to use option '--dont_build_reference'!")
    alphabet = ['A', "C", "G", "T", "N"]
    seqs = alphabet
    # extend each existing prefix by one letter, l-1 times
    for _ in range(l - 1):
        seqs = [prefix + letter for prefix in seqs for letter in alphabet]
    return (seqs)
875152c28bddee735ca1157de15746008fbfd53d
61,085
from typing import Optional
import re


def search_via_regex(text_in: str, pattern: str) -> Optional[dict]:
    """Search *pattern* (case-insensitive, bounded by non-word chars or the
    string edges) and return a dict with 'text_before', 'text_match',
    'text_after' — or None when the text is empty or nothing matches."""
    if not text_in:
        return None
    bounded = r'(?P<before>^|\W)(?P<match>' + pattern + r')(?P<after>\W|$)'
    hit = re.search(bounded, text_in, flags=re.IGNORECASE)
    if hit is None:
        return None
    # only the first match is reported
    start, end = hit.span()
    return {
        'text_before': text_in[:start] + hit.group("before"),
        'text_match': hit.group("match"),
        'text_after': hit.group("after") + text_in[end:],
    }
acd8b55cea69ceb9461c0e832e6ae9869f131747
61,086
from typing import List


def predict_with_threshold(probabilities, vague_threshold=0.5) -> List[int]:
    """Classify requirements as vague (1) or not (0) using a custom threshold.

    Each probability entry is [not_vague_p, vague_p]; an item counts as
    vague when its second entry reaches *vague_threshold*.

    Args:
        probabilities: The classification probabilities.
        vague_threshold: Threshold to count as vague (default 0.5).

    Returns:
        List[int]: 0 if not vague, 1 if vague, per input entry.
    """
    labels = []
    for probability in probabilities:
        labels.append(int(probability[1] >= vague_threshold))
    return labels
d167c86b5e3d631cb602454cb0387f4ea9d4ed67
61,087
def format_seconds(time_seconds):
    """Format a number of seconds as "Xd Yh Zm Ws".

    Works by repeated subtraction, so fractional seconds survive into
    the seconds field unchanged.
    """
    remaining = time_seconds
    days = hours = minutes = 0
    day_len = 60 * 60 * 24
    hour_len = 60 * 60
    while remaining >= 60:
        if remaining >= day_len:
            remaining -= day_len
            days += 1
        elif remaining >= hour_len:
            remaining -= hour_len
            hours += 1
        else:
            remaining -= 60
            minutes += 1
    return f"{days}d {hours}h {minutes}m {remaining}s"
e548a9c53701e728dd526712f204c947db8910cb
61,088
import re


def clean_str(text):
    """Tokenization/string cleaning for a dataset: replace backslashes and
    quote characters with spaces, then strip and lowercase."""
    for noisy in (r"\\", r"\'", r"\""):
        text = re.sub(noisy, " ", text)
    return text.strip().lower()
cd07713711c87ad0b0ef3d22355666a89363da84
61,091
def deep_merge(a, b):
    """Merge two dictionaries recursively.

    Keys present in both are merged recursively when both values are
    dicts; otherwise *b*'s value wins. Returns a new dict.
    """
    merged = {}
    for key, value in a.items():
        if key in b:
            other = b[key]
            if isinstance(value, dict) and isinstance(other, dict):
                merged[key] = deep_merge(value, other)
            else:
                merged[key] = other
        else:
            merged[key] = value
    for key, value in b.items():
        if key not in a:
            merged[key] = value
    return merged
7d627bd64fa478641c9408b85b36d7d78c19a020
61,094
import hashlib
import base64


def code_challenge(verifier):
    """Generate a PKCE S256 code challenge from *verifier* (bytes).

    Implements BASE64URL(SHA256(verifier)) without padding, per
    RFC 7636 section 4.2.
    """
    sha = hashlib.sha256(verifier).digest()
    encoded = base64.urlsafe_b64encode(sha)
    return encoded.rstrip(b"=")
81523ecffb3ef0bfcf09cb4d7dc8d79f52ee3681
61,099
from typing import List, Dict
import itertools


def reification_master(sort_name: str, sort_args: List[str], domainObj_dict: Dict[str, List[str]]) -> List[str]:
    """Reify the given sort (event/fluent) w.r.t. the domain objects.

    :param sort_name: name of the sort (e.g. "load")
    :param sort_args: list of arguments to the sort
    :param domainObj_dict: maps domain-object names to their instances;
        args not found in the dict are used literally
    :return: list of reified atoms, e.g. ["load_a1_a2", ...]
    """
    if not sort_args:
        return [sort_name]
    # every argument expands to its instances, or to itself when unknown
    instance_pools = [domainObj_dict.get(arg, [arg]) for arg in sort_args]
    atoms = []
    for combo in itertools.product(*instance_pools):
        atoms.append("_".join((sort_name,) + combo))
    return atoms
5d3ddb24979d0718397d5160af37e0e1776d7ffb
61,100
import tarfile


def what_comp(filename):
    """Guess a tar archive's compression type from its path suffix.

    Currently suffix-based; could be updated to use file magic instead.

    :param filename: pathlib.Path (anything exposing a .suffix attribute)
    :return: "GZIP", "BZ2", "XZ", "LZ4", "ZSTD", or None for a plain tar
    :raises Exception: when the suffix is unknown and the file is not a
        tar archive
    """
    suffix = filename.suffix.lower()
    compression_by_suffix = {
        ".gz": "GZIP", ".tgz": "GZIP",
        ".bz2": "BZ2",
        ".xz": "XZ", ".lzma": "XZ",
        ".lz4": "LZ4",
        ".zst": "ZSTD",
    }
    if suffix in compression_by_suffix:
        return compression_by_suffix[suffix]
    if tarfile.is_tarfile(filename):
        # a tar without compression — nothing to decompress
        return None
    # BUG FIX: the original message contained a literal "(unknown)"
    # placeholder instead of the offending path.
    raise Exception(f"{filename} has unknown compression or not tar file")
72e9f8c9b65097c9280c78c41d67a0b6cb6a14dc
61,102
def check_strands(hsp_objs, fwd_strand):
    """Return True if any hsp in the list is not on the expected strand.

    An hsp counts as forward when its hit_frame is non-negative; the
    result is True when any hsp disagrees with *fwd_strand*.
    """
    mismatch_found = False
    for hsp in hsp_objs:
        on_forward = hsp.hit_frame >= 0
        if on_forward != fwd_strand:
            mismatch_found = True
    return mismatch_found
2ba991c4f592b22213113e2d90035943b8f4e99f
61,104
import random


def get_random_value(world, feature, rng=None):
    """Sample a random value for *feature* according to its domain.

    :param World world: the PsychSim world in which the feature is defined.
    :param str feature: the named feature.
    :param random.Random rng: the random state used to sample values.
    :return: a random value from the feature's domain, or None for an
        unrecognized domain.
    """
    assert feature in world.variables, 'World does not contain feature \'{}\''.format(feature)
    var = world.variables[feature]
    domain = var['domain']
    if rng is None:
        rng = random.Random()
    if domain is float:
        return rng.uniform(var['lo'], var['hi'])
    if domain is int:
        return rng.randint(var['lo'], var['hi'])
    if domain is list or domain is set:
        return rng.choice(var['elements'])
    if domain is bool:
        return bool(rng.randint(0, 1))
    return None
b067f610cd5696b552a7abde501ab7272b0df7a0
61,109
import math


def GetPrintPrecision(tolerance):
    """Returns int, number of decimals to display in a format string.

    tolerance - float target difference between 2 values

    {0:10.{precision}f}.format(10.0, precision=GetPrintPrecision(tolerance))
    """
    # BUG FIX: the original tested `tolerance >= 0`, which made every
    # positive tolerance return 0 and left the log10 branch reachable
    # only for non-positive values (where log10 raises). Tolerances of
    # one or more need no decimal places.
    if tolerance >= 1:
        return 0
    # log10 of a tolerance in (0, 1) is negative; negate and round up to
    # get the number of decimal places needed to resolve it.
    width = math.log10(tolerance)
    return math.ceil(-width)
f45d082442400b400b85bf4a6ab1fd54303e047f
61,110
def get_pk_column(self):
    """Return the primary key column of this query's model metadata."""
    meta = self.query.get_meta()
    return meta.pk.column
143d0cd7383186631f615eeaf4d9c67579c7067e
61,112
from types import FunctionType


def get_class_methods(class_item=None):
    """Return the names of plain functions defined directly on *class_item*.

    :param class_item: Class item to introspect
    :type class_item: object
    :returns: list -- class method names
    """
    return [attr_name
            for attr_name, attr_value in class_item.__dict__.items()
            if isinstance(attr_value, FunctionType)]
3a3ca1e72cc5101d92472b566a80ca10ef4a5fbc
61,113
import re


def parse_error_message(stderr_data):
    """Parses the stderr data and returns only the message with ERROR level."""
    messages = stderr_data.split("\n")
    result = ""
    # Each line of message should be: date, time, log level, line number, message.
    # E.g. 2017-10-03 18:25:19.742351: E file.cc: xx xxxx.
    # We only want the message with ERROR level.
    pattern = r"(\d+-\d+-\d+) (\d+:\d+:\d+.\d+:) ([EWI]) (\S+) (.*)"

    def _get_log_level_and_message(line):
        # Returns (level, message) for a well-formed log line, or
        # (None, line) for lines with no level prefix (continuations).
        match = re.match(pattern, line)
        if match is None:
            return None, line
        else:
            return match.group(3), match.group(5)

    # Tracks whether the previous prefixed line was an error, so that
    # un-prefixed continuation lines are attributed correctly.
    previous_is_error = False
    for message in messages:
        log_level, message_wthout_timestamp = _get_log_level_and_message(message)
        if log_level == "E":
            result += message_wthout_timestamp
            result += "\n"
            previous_is_error = True
        elif log_level == "W" or log_level == "I":
            previous_is_error = False
        else:
            # No log level, part of multiline message.
            if previous_is_error:
                result += message
                result += "\n"
    return result
fc20a300faa584176d8f0aad464f7256644f7165
61,115
def filter_dataframe(dataframe, by_column, list_of_values, boolean):
    """Filter *dataframe* by membership of *by_column* in *list_of_values*.

    Rows are kept when membership equals *boolean* (True keeps members,
    False keeps non-members). Returns a copy with a reset index.
    """
    filtered = dataframe.copy()
    keep_mask = filtered[by_column].isin(list_of_values) == boolean
    filtered = filtered[keep_mask]
    filtered.reset_index(inplace=True, drop=True)
    return filtered
07c7142d167accdc42a7a960ed1af51582e0923a
61,119
from typing import Dict


def delete_project_from_config(project: str, config: Dict) -> Dict:
    """Remove *project*'s entry from *config* in place and return it.

    Raises KeyError when the project is absent, like the original pop.
    """
    del config[project]
    return config
e939f314b323cb20c2c7a9f3b1e3cf65cb2c5c4b
61,126
def get_cluster_name(cluster_config_path):
    """Extract the cluster name from a config path:
    path/to/cluster_name.yaml -> cluster_name

    Only the first comma-separated path is considered.
    """
    first_path = cluster_config_path.split(',')[0]
    basename = first_path.split('/')[-1]
    return basename.replace('.yaml', '').replace('.yml', '')
0515fe5e198058c476b73518820a84e1cd368302
61,136
import itertools


def groupby(attribute, nodes):
    """Group *nodes* by an attribute value.

    Sorts the nodes by ``node.get(attribute)`` and returns the
    itertools.groupby generator of (attribute_value, nodes) pairs.
    """
    def key_of(node):
        return node.get(attribute)

    ordered = sorted(nodes, key=key_of)
    return itertools.groupby(ordered, key=key_of)
eaeb1470f2c4e8eac408b60e1001d68302caacab
61,141
def add_until_100(array: list):
    """Recursively sum the list, skipping any element that would push the
    running total past 100."""
    if not array:  # base case
        return 0
    head = array[0]
    rest_sum = add_until_100(array[1:])
    candidate = head + rest_sum
    return rest_sum if candidate > 100 else candidate
123b4d7e9e7d833055ae84692f8dec119cf03317
61,143
def Ky(beta_liq, beta_vapor, m_distrib):
    """Calculate the overall mass-transfer coefficient Ky.

    1/Ky = 1/beta_vapor + m_distrib/beta_liq

    Parameters
    ----------
    beta_liq : float
        Mass-transfer coefficient of the liquid, [kmol / (m**2 * s)]
    beta_vapor : float
        Mass-transfer coefficient of the vapor, [kmol / (m**2 * s)]
    m_distrib : float
        Distribution coefficient, [dimensionless]

    Returns
    -------
    float
        The mass-transfer coefficient Ky, [kmol / (m**2 * s)]

    References
    ----------
    Dytnerskiy, p. 194, eq. 5.8
    """
    resistance = (1 / beta_vapor) + (m_distrib / beta_liq)
    return resistance ** (-1)
c47ff7a890842b2cdb00ec05102df069590251cd
61,149
def char_name(c):
    """Return a descriptive name for a character.

    Whitespace/control characters get a readable name ('space', 'tab',
    ...); everything else is returned as-is. Only the first character of
    *c* is considered; no type checking is done.

    Parameters:
        c: a string containing the character
    """
    special_names = {
        ' ': 'space',
        '\t': 'tab',
        '\n': 'newline',
        '\r': 'carriage return',
        '\f': 'formfeed',
        '\v': 'vertical tab',
        '\b': 'backspace',
        '\a': 'bell',
    }
    first = c[0]
    return special_names.get(first, first)
7a8004dc13858c2232cc3fcecb149d866c57b415
61,151
def parse_schema_key(key):
    """Unpack a schema key tuple into (dtype, rule, repeat).

    key = (name, dtype[, rule[, repeat]]); rule defaults to '.*' and
    repeat defaults to ''.
    """
    _, dtype = key[:2]
    rule = key[2] if len(key) > 2 else '.*'
    repeat = key[3] if len(key) > 3 else ''
    return dtype, rule, repeat
eaade5737a80e2f82fb5175838ae623a6e09df19
61,153
from typing import List


def namespaces_of(name: str) -> List[str]:
    """Return the list of namespaces enclosing *name*, root first.

    The name itself is not included; an empty name yields just ["/"].
    """
    if not name:
        return ["/"]
    segments = [seg for seg in name.split("/") if seg]
    prefixes = ["/"]
    for depth in range(1, len(segments)):
        prefixes.append("/" + "/".join(segments[:depth]))
    return prefixes
fb3f06f9795c76a07a1013401e06176abeb9105c
61,154
import re


def get_label_pattern(label):
    """Compile a case-insensitive, fully-anchored regex from *label*,
    turning each %...% placeholder into a ``(.*)`` capture group."""
    body = re.sub(r'%.*?%', r'(.*)', label)
    return re.compile('^' + body + '$', re.IGNORECASE)
19a113acb7226e16677b342460feee70ef82ad15
61,158
def add_ground_truths(classifications_df, cazy_dict):
    """Add CAZy ground-truth CAZyme/non-CAZyme labels to the predictions df.

    :param classifications_df: pandas dataframe of prediction-tool
        CAZyme/non-CAZyme classifications; one row per unique protein,
        one column per tool (protein accessions as the index).
    :param cazy_dict: mapping whose keys are the accessions CAZy counts
        as CAZymes.

    Return the df with the ground truths added as a new 'CAZy' column
    (1 = in cazy_dict, 0 = not).
    """
    ground_truths = []
    for accession in classifications_df.index:
        try:
            cazy_dict[accession]
        except KeyError:
            ground_truths.append(0)
        else:
            ground_truths.append(1)
    classifications_df["CAZy"] = ground_truths
    return classifications_df
c2f48f77ad693f4168265b6a7107a4d93539f952
61,159
from functools import reduce
import operator


def add_multi(xs):
    """Left-fold the elements of *xs* with ``+``:
    xs -> xs[0] + xs[1] + ... + xs[len(xs)-1]

    Works for any type supporting ``+`` (numbers, strings, lists);
    raises TypeError for an empty sequence, as reduce does.
    """
    return reduce(operator.add, xs)
2584ecfd65942efb7f8bea738848f04fceef6dda
61,161
def argmax(values):
    """Return the index of the largest value in *values* (first on ties).

    Raises ValueError for an empty sequence, like max().
    """
    indexed = enumerate(values)
    winner = max(indexed, key=lambda pair: pair[1])
    return winner[0]
e177e140ead2e221de8ec03c410facc587ca39a1
61,172
def basic_dict_get(dic):
    """Return the entry stored under the "value" key of *dic*
    (KeyError when absent)."""
    key = "value"
    return dic[key]
a58d566ec968e62cec37d51c5d52c6da3c8c6ba9
61,179
def fletcher(barray):
    """Return the two Fletcher-16 running sums (a, b) of a byte array."""
    assert isinstance(barray, (bytes, bytearray))
    a = b = 0
    for byte in barray:
        a = (a + byte) % 255
        b = (a + b) % 255
    return (a, b)
435001b608e2e30065f83211b585e80edfc153ae
61,182
from typing import Any def _member_filter(member: Any, instance_type: Any) -> bool: """Return True if the member matches the filters. Args: member: class data- or method-member instance_type: optional instance type Returns: bool: True if the member matches the applied filter """ return (instance_type is None or isinstance(member, instance_type))
4be26548bf79bb387f6ce92281ce1a6cea2dec8a
61,183
def get_hydro_node_injections(generators, nodes, dispatch): """Get hydro node injections Parameters ---------- generators : pandas DataFrame Generator information nodes : pandas DataFrame Node information dispatch : pandas DataFrame Dispatch at each node Returns ------- hydro : pandas DataFrame Hydro generation at each node """ # Hydro generators mask_hydro = generators['FUEL_CAT'].isin(['Hydro']) generators[mask_hydro] # Hydro dispatch at each node hydro = (dispatch .T .join(generators.loc[mask_hydro, 'NODE'], how='left') .groupby('NODE').sum() .reindex(nodes.index, fill_value=0)) hydro['level'] = 'hydro' return hydro
85562965562c6aa806c896cf296ad86850ac3ce8
61,191
def AdjustsTemplate(changes):
    """Returns True if there's any template-level change in *changes*."""
    return any(change.adjusts_template for change in changes)
600843b36e04558217b3ec1fd03180ff8e56138b
61,192
def is_weekend_or_monday(date):
    """Check whether *date* falls on a weekend or Monday.

    Args:
        date: datetime.datetime obj, the date to check.

    Returns:
        Bool indicating if it is a weekend or Monday.
    """
    # weekday(): Monday == 0 ... Sunday == 6
    weekday = date.weekday()
    return weekday in (0, 5, 6)
cb92eb5a9d303803e4f8518a6e51d0d909fd3f09
61,197
def pipe(*funcs, name=None, doc=None):
    """Create a pipeline of functions.

    The first function receives the original arguments; each remaining
    function takes the previous function's return value:

        P = pipe(f, g, h)
        P(x)  # equivalent to h(g(f(x)))

    The created function's name and docstring can be set via the
    keyword-only 'name' and 'doc' arguments.
    """
    def composed(*args, **kwargs):
        steps = iter(funcs)
        value = next(steps)(*args, **kwargs)
        for step in steps:
            value = step(value)
        return value

    composed.__name__ = name or "pipe({})".format(", ".join(f.__name__ for f in funcs))
    composed.__doc__ = doc
    return composed
1bf7ac1cd588bc44e616c58afe4f6ccd05a616fc
61,198
import random


def generate_credit_card(card_type=None):
    """Generate a random 16-digit credit card number string.

    Args:
        card_type: String value, either "Visa" or "Mastercard" (any case).
            If omitted, one of the two is chosen at random.

    Returns:
        String containing the credit card number, or None if the card type
        is not recognised.
    """
    if not card_type:
        card_type = random.choice(['VISA', 'Mastercard'])
    kind = card_type.lower()
    if kind == "visa":
        prefix = '4'
    elif kind == "mastercard":
        prefix = '5'
    else:
        return None
    # Both bounds are 15-digit, so the suffix always has 15 digits.
    return prefix + str(random.randint(123242323223432, 986879876876876))
9ff54df80d7b112e8618b684a699c2de88e18f79
61,204
def attribute_type(attribute_helper, attribute_name):
    """Use the attribute helper to return the attribute type as a string.

    :param attribute_helper: wrapper Class used to extract attribute information
    :param attribute_name: name of the attribute to type
    :return: string form of the attribute's type, or None if unknown
    """
    raw_type = attribute_helper.get_type(attribute_name)
    return str(raw_type) if raw_type is not None else None
e11ea4914db7e9d06a0bfb0930a4939cc4a50a7a
61,206
def get_name(line):
    """Parse a line from /proc/pid/maps and return the mapping name.

    The line has the format:
        55e88a5ab000-55e88a7c8000 r-xp 00068000 08:02 14434818  /usr/bin/nvim
    The last field can contain spaces, so the line cannot simply be split;
    instead, skip the first five space-delimited fields and keep the rest.
    """
    remainder = line
    for _ in range(5):
        remainder = remainder[remainder.index(' ') + 1:]
    return remainder.strip()
3959299c3bd41d877f1a01504770bee54a62a058
61,208
def _v3_dot_ ( s , other ) : """``dot''-product of two 3-vectors >>> v3 = ... >>> other = ... >>> print 'Dot is ' , p3.Dot ( other ) """ res = s.X ( ) * other.X ( ) res += s.Y ( ) * other.Y ( ) res += s.Z ( ) * other.Z ( ) return res
ed9f2045c576f325699b08b29413bf0e19c86625
61,210
def make_example_id(*, claim_id, wikipedia_url, sentence_id, scrape_type):
    """Create a string example id for claim-evidence pairs.

    Args:
        claim_id: Fever claim id
        wikipedia_url: The wikipedia url of the evidence
        sentence_id: The sentence id of the evidence
        scrape_type: The scrape that this evidence came from

    Returns:
        A string example id with the four fields joined by '@'
    """
    fields = (claim_id, wikipedia_url, sentence_id, scrape_type)
    return '@'.join(str(field) for field in fields)
163381d40ab7ea502aa9fc3dbb317c823268e420
61,211
def compute_georange(geomean, geosd, count):
    """Compute the geometric range of one geometric standard deviation
    around the geometric mean.  Return the geometric range, or 0.0 when
    the count is zero or the geometric standard deviation is non-positive."""
    if count <= 0 or geosd <= 0.0:
        return 0.0
    return geomean * geosd - geomean / geosd
45347c9cec104e0cc82c43d69c9665dfa5e79804
61,212
import configparser


def getconfig(account_type='demo'):
    """Read 'config.ini' and return the configuration needed for the IG API.

    ---

    Keyword argument:
        account_type -- "demo" (by default) or "live"

    ---

    Return:
        proxy user, proxy password, API key, IG identifier,
        IG password, IG account
    """
    cfg = configparser.ConfigParser()
    cfg.read('config.ini')
    proxy = cfg['proxy']
    account = cfg[account_type]
    return (proxy['user'], proxy['password'],
            account['key'], account['identifier'],
            account['password'], account['account'])
8c3c01654924f906da8d095c89d5dd57d7d20ad9
61,213
def max_profit(a):
    """Return the maximum profit from buying at one price in the list `a`
    and selling at a later price.

    e.g. [2, 5, 1, 3, 10] -> 9 (buy at 1, sell at 10)
         [4, 3, 2, 1]     -> 0 (prices always decreasing: never trade)
    """
    if len(a) == 1:
        return 0
    best = 0
    cheapest = float("inf")
    for price in a:
        # Best sale so far is at today's price against the cheapest buy seen.
        best = max(best, price - cheapest)
        cheapest = min(cheapest, price)
    return best
d0e5f0214dd0d4f6da385a0f3eed458307e62bde
61,214
def InvertBoolean(boolean):
    """Return the logical negation of the value passed in."""
    return False if boolean else True
bd7c85ae0122835cd19d7ab1b6156365be8d168b
61,216
def convertRunOptionsToSEDict(options):
    """Converts tuflow command line options to scenario/event dict.

    Tuflow uses command line options (e.g. -s1 blah -e1 blah) to set scenario
    values which can either be provided on the command line or through the
    FMP run form. The TuflowLoader can use these arguments but requires a
    slightly different setup.

    This function converts the command line string into the scenario and
    event dictionary expected by the TuflowLoader.

    Args:
        options(str): command line options.

    Return:
        dict - {'scenario': {'s1': blah}, 'event': {'e1': blah}}

    Raises:
        AttributeError: if both -s and -s1 or -e and -e1 occur in the options
            string. -x and -x1 are treated as the same variable by tuflow and
            one of the values would be ignored.
    """
    if ' -s ' in options and ' -s1 ' in options:
        raise AttributeError('Cannot use both -s and -s1 in options')
    # Bug fix: this previously tested for ' -e2 ', so the -e/-e1 clash
    # described in the docstring was never detected.
    if ' -e ' in options and ' -e1 ' in options:
        raise AttributeError('Cannot use both -e and -e1 in options')

    outvals = {'scenario': {}, 'event': {}}
    vals = options.split(" ")
    for i, val in enumerate(vals):
        if val.startswith('-s'):
            outvals['scenario'][val[1:]] = vals[i + 1]
        elif val.startswith('-e'):
            outvals['event'][val[1:]] = vals[i + 1]
    return outvals
ff70d83ac928fffbc1e4d701def353394e7bd9f8
61,221
def factorize(n):
    """Return the list of prime factors of positive integer n, with
    multiplicity and in ascending order."""
    factors = []
    divisor = 2
    while divisor <= n:
        if n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        else:
            divisor += 1
    return factors
84ea99c73ed25b6df41be0284aab3f764755450b
61,222
def next_collatz_seq(current_number):
    """Return the next collatz sequence number after current_number.

    Returns 0 once the sequence has reached 1 (used as a terminator);
    otherwise n/2 for even n and 3n + 1 for odd n.
    """
    if current_number == 1:
        return 0
    if current_number % 2 == 0:
        # Bug fix: use floor division so even steps return an int;
        # the original ``/`` returned a float (e.g. 4 -> 2.0).
        return current_number // 2
    return current_number * 3 + 1
4b6ce80ecd76f68d3733670155f7509386c2fa71
61,225
def convert_position(position):
    """Convert position to 'absolute' position! (B7 becomes (6, 1))

    The leading letter (A-J) selects the column; the trailing number is the
    1-based row, converted to 0-based.
    """
    column = 'ABCDEFGHIJ'.index(position[0])
    row = int(position[1:]) - 1
    return row, column
05d13a99d68684407827b4c3bdbe19dad98238b9
61,226
def key_value_arg_type(arg):
    """Validate/transform an argparse 'type' argument of the form
    'key=value' into a (key, value) tuple.

    Raises ValueError (which argparse reports as a usage error) when no
    '=' is present.
    """
    key, value = arg.split("=", 1)
    return key, value
51e32b522f809b0bb8e8f469964576fefd55538c
61,229
def get_puns(fpath):
    """Read the file at *fpath* and return its lines as a list of puns."""
    with open(fpath) as handle:
        return handle.read().splitlines()
f88f3d8776aa72dca711f2ec330c73aca7c1d960
61,230
import unicodedata


def strip_unicode(text):
    """Replace accented unicode characters with their ASCII equivalents
    (e.g., replace é with e).

    Characters with no ASCII base form are dropped entirely.
    The parameter was renamed from ``str`` because it shadowed the builtin.
    """
    # Documentation for normalize function:
    # https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize
    # NFD normalization splits each accented character into its base latin
    # character (e.g. 'e') plus a separate combining mark.  Encoding to
    # ASCII with errors='ignore' then drops everything non-ASCII — i.e. the
    # combining marks — and the result is decoded back to a str.
    return (unicodedata.normalize('NFD', text)
            .encode('ascii', 'ignore')
            .decode('utf-8'))
7924195ad04fcda25d6b2ad3a39ad3eb4b63d938
61,232
import re


def html_id_ok(objid, html5=False):
    """Check whether objid is valid as an HTML id attribute.

    If html5 == True, then use the more liberal html5 rules
    (any string without whitespace); otherwise require the HTML4 form:
    a letter followed by letters, digits, '-', '.', ':' or '_'.
    """
    # Bug fix: the patterns are now raw strings; the original plain-string
    # '\s' etc. were invalid escape sequences (DeprecationWarning, and a
    # SyntaxError in future Python versions).
    if html5:
        return not re.search(r'\s', objid)
    else:
        return bool(re.match(r"^[a-zA-Z][a-zA-Z0-9\-\.\:\_]*$", objid))
3b7c21546fe99bfce9916eb923afef9ae6b91aba
61,233
def permute_cycle_tz(tz, cycle):
    """Rotate a daily cycle by a given number of hours.

    This is a way to convert from local time to UTC.

    Parameters
    ----------
    tz : String
        String corresponding to the timezone offset in whole hours
        (half-hour timezones are not considered)
    cycle : List of length N_HOUR_DAY
        Hourly scaling factors to be permuted

    Returns
    -------
    Permuted cycle scaling factors
    """
    offset = int(tz)

    # Negative list indices wrap around, which performs the rotation for
    # non-negative offsets.  For sufficiently negative offsets the index
    # falls off the front of the list (IndexError), so 24 hours are added
    # to wrap explicitly.
    try:
        return [cycle[offset - k] for k in range(len(cycle), 0, -1)]
    except IndexError:
        return [cycle[offset - k + 24] for k in range(len(cycle), 0, -1)]
dbbe01cb9e42fcbe98e59c68611ee3e01f55da00
61,235
def convert_to_mixed_fraction(number, denominators):
    """Convert a float to the components of a mixed-fraction representation.

    Returns the closest fractional representation using the provided
    denominators.  For example, 4.500002 becomes the whole number 4,
    numerator 1 and denominator 2.

    Args:
        number (float): number to convert
        denominators (iter of ints): denominators to use, default [1 .. 20]

    Returns:
        whole, numerator, denominator (int): integers of the mixed fraction,
        or None if no denominator gets within 0.01 of the fractional part
    """
    whole = int(number)
    if whole == number:
        return whole, 0, 1  # exact whole number, no fraction
    fraction = abs(number - whole)
    candidates = denominators if denominators else range(1, 21)
    for denominator in candidates:
        numerator = fraction * denominator
        if abs(numerator - round(numerator)) < 0.01:  # 0.01 accuracy
            return whole, int(round(numerator)), denominator
    return None
bc3f111ff5b9cf68b37234b42d9daea2824c2bf4
61,237
def closeable(class_):
    """Makes a class with a close method able to be a context manager.

    This decorator is a great way to avoid having to choose between the
    boilerplate of __enter__ and __exit__ methods, versus the boilerplate
    of using contextlib.closing on every with statement.

    Args:
        class_: The class being decorated.

    Returns:
        The same class, with __enter__/__exit__ added in place.

    Raises:
        ValueError: If class didn't have a close method, or already
            implements __enter__ or __exit__.
    """
    if 'close' not in class_.__dict__:
        # coffee is for closers
        raise ValueError('Class does not define a close() method: %s' % class_)
    if '__enter__' in class_.__dict__ or '__exit__' in class_.__dict__:
        # Bug fix: the original concatenated str + class object, which
        # raised TypeError instead of the intended ValueError.
        raise ValueError('Class already defines __enter__ or __exit__: %s'
                         % class_)
    class_.__enter__ = lambda self: self
    # `and None` makes __exit__ return a falsy value so exceptions propagate.
    class_.__exit__ = lambda self, t, v, b: self.close() and None
    return class_
7d5ce6a7366a8c5e263495d5ba6921d217b92767
61,241
def compare_trace(trace, sol):
    """Compare TRACE with the SOLUTION trace SOL and return the turn number
    where the two traces first differ, or -1 if the traces are the same.
    """
    for turn, (state, sol_state) in enumerate(zip(trace, sol)):
        if not state.is_correct(sol_state):
            return turn
    # No mismatch within the common prefix: if the lengths differ, the
    # traces diverge at the end of the shorter one.
    if len(trace) != len(sol):
        return len(trace)
    return -1
16241272339d9dbf9d2d59a0a2c21027f448d790
61,243
def is_cache_resource(resource):
    """Return whether *resource* is a cacheFile resource, i.e. its tags
    include all of "maya", "node" and "cacheFile"."""
    tags = set(resource.get("tags", []))
    return {"maya", "node", "cacheFile"} <= tags
dae56ee36a290be58af943aeeed477bfeeabbc85
61,245
import inspect


def get_number_parameters(func):
    """Return the number of parameters declared by *func*."""
    signature = inspect.signature(func)
    return len(signature.parameters)
7a32c9466755fb60bcf7d0a207c9fa83a91a9754
61,246
def clean_string(text: str):
    """Collapse whitespace and replace MS Office special characters in a
    string.

    Args:
        text (str):

    Returns:
        str: Cleaned string
    """
    # Collapse all runs of whitespace into single spaces first.
    collapsed = ' '.join(text.split())
    # Same replacement chain as before: drop carriage returns and periods,
    # turn newlines and non-breaking spaces into spaces, soft hyphens into '-'.
    for old, new in (('\r', ''), ('.', ''), ('\n', ' '),
                     (u'\xa0', u' '), (u'\xad', u'-')):
        collapsed = collapsed.replace(old, new)
    return collapsed.strip()
64990c668e7aa8507ed9c0bfa79ce8941d54b89e
61,249
def SexoFP_(kin_RE,kout_RE,Vspine):
    """Returns the fixed point of the exocytosis event size Sexo, i.e. the
    number of AMPARs delivered to the spine membrane during one exocytosis
    event.

    Parameters
    ----------
    kin_RE : float
        Rate at which AMPAR containing endosomes enter the spine
        (dynamics of exocytosis event size Sexo).
    kout_RE : float
        Rate at which AMPAR containing endosomes leave the spine.
    Vspine : float
        Spine volume.

    Returns
    -------
    float
        Fixed point of the exocytosis event size Sexo.
    """
    # Steady state: influx/outflux ratio scaled by spine volume.
    return (kin_RE / kout_RE) * Vspine
1a3723293fcd058e713653703a922321ae89e269
61,250
def get_item_absolute_limit(page, per_page):
    """Get the total possible number of items up to and including the
    given zero-based *page*."""
    return (page + 1) * per_page
94c2fc0cb1d0af3eabc23775ef03865df82d6c0b
61,254
def tree_map(f, tr) :
    """Apply *f* recursively to every node of a tree represented as
    ``(value, (child, child, ...))`` tuples."""
    value, children = tr[0], tr[1]
    return (f(value), tuple(tree_map(f, child) for child in children))
ec8c12b09cb4fedf61085802cd4d8b659e5903c5
61,260
def _get_recent_value_counts(column, num_x): """Get the the number of occurrences of the x most recent values in a datetime column. Args: column (pd.Series): data to use find value counts num_x (int): the number of values to retrieve Returns: value_counts (list(dict)): a list of dictionary with keys `value` and `count`. Output is sorted in descending order based on the value counts. """ datetimes = getattr(column.dt, "date") frequencies = datetimes.value_counts(dropna=False) values = frequencies.sort_index(ascending=False)[:num_x] df = values.reset_index() df.columns = ["value", "count"] df = df.sort_values(["count", "value"], ascending=[False, True]) value_counts = list(df.to_dict(orient="index").values()) return value_counts
c795c07d078570a6812c05e9d51cbcf5fea98ee4
61,264
def _resolve_nat_element(element_or_ip_address): """ NAT elements can be referenced by either IP address or as type Element. Resolve that to the right dict structure for the rule :param str,Element element_or_ip_address: Element or IP string :rtype: dict """ try: src = {'element': element_or_ip_address.href} except AttributeError: src = {'ip_descriptor': element_or_ip_address} return src
e3a33e99c416d19f5f9d656cb6b92ed67d25df9c
61,265