content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def _CalculateTimeDiff(date1, date2): """Calculate a time difference string based on two provided datetimes. Args: date1: A datetime.datetime object. date2: A datetime.datetime object. Returns: A string indicating the time difference between the two datetimes. """ timediff = abs(date1-date2) diff_seconds = timediff.seconds days, remainder = divmod(diff_seconds, 24*3600) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) if days > 0: return '%02d Days, %02dh:%02dm:%02ds' % (days, hours, minutes, seconds) else: return '%02dh:%02dm:%02ds' % (hours, minutes, seconds)
367b37a926a3397a844ec91b81d7ef59f53269ce
98,950
def _sign(x): """ Return the sign of x INPUTS ------ x: number value to get the sign of OUTPUTS ------- s: signed int -1, 0 or 1 if negative, null or positive """ if (x > 0): return 1 elif (x == 0): return 0 else: return -1
f5c712fb4b03863469d9aa7e7d910b2d56ad92cf
98,952
def not_the_same(u1, u2):
    """Returns whether the users share the same id

    :u1: User to compare
    :u2: User to compare
    :returns: Bool
    """
    return not (u1["id"] == u2["id"])
0417527a7a28553fb1bde1130823b9b8bf95b7f0
98,954
def update_vocab_info(vocab, word_counts):
    """Return dict of { word : {'count', 'index'} }."""
    info = {}
    for index, word in enumerate(vocab):
        info[word] = {'count': word_counts[word], 'index': index}
    return info
a7271175af5aad6d766c6db1c16e2574ee6a901b
98,957
def get_fileroot(perc_file: str) -> str:
    """
    Get the root of the Percolator result file.

    Parameters
    ----------
    perc_file
        The result file to obtain the fileroot from.

    Returns
    -------
    The file root, including the path and "percolator".
    """
    # Drop the last three dot-separated components (e.g. "psms", "txt" ...).
    parts = perc_file.split(".")
    return ".".join(parts[:-3])
cb05efcbf7b4806ca6a66edf697405bb299a4fcd
98,961
def sign(n):
    """Returns the sign of n.

    >>> sign (4)
    1
    >>> sign (-6.9)
    -1
    >>> sign (0)
    0
    """
    if n == 0:
        return 0
    return 1 if n > 0 else -1
c719bf1c17865dd36a80fd18d9df96fd0390d8b3
98,964
def normalized_path(path):
    """Replaces ``\\`` in paths on Windows with ``/``."""
    backslash = '\\'
    return path.replace(backslash, '/')
edfcb42b5177130fc1f9a07e3dbd6e9c3eb7d093
98,970
def collect_node_positions(nodes):
    """
    Creates a list of node position tuples for nodes.

    :param nodes: nodes dict in skeleton data structure from amiramesh reader.
    :return: list of node positions.
    """
    # BUG FIX: dict.iteritems() was removed in Python 3; the index was
    # unused anyway, so iterate the values directly (same iteration order).
    return [node.position() for node in nodes.values()]
1771028188124081f474ceb3ecbdff0903dff373
98,971
def retrieve_from_cosmos_db(db_handler, vault_id):
    """
    Retrieve vault from CosmosDB defined in db_handler

    :param db_handler: handler for CosmosDB
    :param vault_id: vault ID to be retrieved
    :return: Vault
    """
    # Thin delegation; the handler performs the fuzzy lookup.
    return db_handler.find_fuzzy_vault(vault_id)
c69302240556980a35811777e9bbd81fca19310d
98,972
import math


def fade(given_value: float) -> float:
    """Smoothing [0, 1] values.

    Parameters:
        given_value: float [0, 1] value for smoothing

    Returns:
        smoothed [0, 1] value

    Raises:
        ValueError: if input not in [0, 1]
    """
    if given_value < 0 or given_value > 1:
        raise ValueError('expected to have value in [0, 1]')
    # Perlin fade polynomial: 6t^5 - 15t^4 + 10t^3.
    quintic = math.pow(given_value, 5)
    quartic = math.pow(given_value, 4)
    cubic = math.pow(given_value, 3)
    return 6 * quintic - 15 * quartic + 10 * cubic
a2e26436400bd0a8c84be62382b6f8c52f2e7d37
98,974
def descale(data, data_max, data_min):
    """Reverse normalization

    Args:
        data (np.array): Normalized data
        data_max (float): max value before normalization
        data_min (float): min value before normalization

    Returns:
        [np.array]: Reverse-Normalized data
    """
    span = data_max - data_min
    return data * span + data_min
e078dc5eb9613a9106dd2210f467011525dcb646
98,977
from typing import Optional
from typing import List
from typing import Tuple


def check_bracketed(word: str, brackets: Optional[List[Tuple[str, str]]] = None) -> bool:
    """
    Checks whether a given string is surrounded by brackets.

    Parameters
    ----------
    word : str
        Text to check for final brackets
    brackets: List[Tuple[str, str]]], optional
        Brackets to check, defaults to
        [('[', ']'), ('{', '}'), ('<', '>'), ('(', ')')]

    Returns
    -------
    bool
        True if the word is fully bracketed, false otherwise
    """
    pairs = brackets if brackets is not None else [("[", "]"), ("{", "}"), ("<", ">"), ("(", ")")]
    return any(word.startswith(pair[0]) and word.endswith(pair[-1]) for pair in pairs)
b16c1f9a6695b2c5a37c6313c92a2f42256197fe
98,979
def _flatten_vault(obj):
    """Flatten the structure of the vault into a single dict.

    :param obj: a vault object exposing the top-level attributes read below
        plus nested ``billing`` and ``bind_rules`` sub-objects (project
        type — schema assumed from the attribute accesses; confirm against
        the SDK's vault model).
    :return: a plain ``dict`` with the nested billing/bind_rules fields
        hoisted to the top level.
    """
    data = {
        # Top-level vault attributes, copied verbatim.
        'id': obj.id,
        'name': obj.name,
        'auto_bind': obj.auto_bind,
        'auto_expand': obj.auto_expand,
        'backup_policy_id': obj.backup_policy_id,
        'created_at': obj.created_at,
        'description': obj.description,
        'project_id': obj.project_id,
        'provider_id': obj.provider_id,
        'user_id': obj.user_id,
        # Billing sub-object hoisted to the top level; note that
        # 'operation_type' is sourced from billing.protect_type.
        'status': obj.billing.status,
        'operation_type': obj.billing.protect_type,
        'object_type': obj.billing.object_type,
        'spec_code': obj.billing.spec_code,
        'size': obj.billing.size,
        'consistent_level': obj.billing.consistent_level,
        'charging_mode': obj.billing.charging_mode,
        'is_auto_pay': obj.billing.is_auto_pay,
        'is_auto_renew': obj.billing.is_auto_renew,
        # Only the tags of bind_rules are kept.
        'bind_rules': obj.bind_rules.tags,
        'resources': obj.resources,
        'tags': obj.tags,
    }
    return data
c968d21232f4487e911b468aa5a09b74bbf1e617
98,980
import json


def create_control_message(reg_id, msg_type, msg_id):
    """Creates a JSON message body for an ack/nack/control message"""
    payload = {'to': reg_id,
               'message_type': msg_type,
               'message_id': msg_id}
    return json.dumps(payload)
32a98e9e1ad9f013f077c3d08c5d7d52d97e19e1
98,982
def test_path(path): """ Takes a path and checks if it resolves to "/" / -> True /./ -> True /<dir>/../ -> True /<anything> -> False """ length = len(path) # Handle single slash if length == 1 and path == "/": return True # Strip trailing slash if path[-1] == "/": path = path[:-1] # Not portable to non-unix split = path.split("/")[1:] loc = 0 for thing in split: if thing == ".": # Same directory continue elif thing == "..": # One level up loc -= 1 else: loc += 1 if loc == 0: return True return False
01077fbc38f0fa4fecdcc792b80fe3462755b1e8
98,983
def _get_fields(errors): """Parses an API call error response and retrieves paths in the API call response and return it as a list of string messages. Args: errors (list): The list of errors returned from the unsuccessful API call Returns: A list of string paths e.g ["person.age"] """ return list(map(lambda error: (error.get("path", []) and ".".join(error.get("path",[]))) or "", errors))
4f99115f3ed9d0880871757b0ff31ff4f78c115f
98,986
def relative_time_in_days(end_date, start_date):
    """Returns the difference between dates in day unit."""
    try:
        return (end_date - start_date).days
    except ValueError:
        # Fall back to zero when the subtraction cannot be performed.
        return 0
3568559373bae2399f7b528b4e46a1545f13f4d6
98,988
import click


def is_group(obj):
    """
    Examine the object to determine if it is wrapping a click.Group

    :param obj: the object in question
    :returns: if the object is a click.Group in some fashion
    """
    # Using __self__ of the bound method keeps this safe across weirdly
    # decorated groups.
    if not hasattr(obj, 'command'):
        return False
    return isinstance(obj.command.__self__, click.Group)
26f01f19ae328656f8d1161ded754f48132d4d76
98,991
def reversed_edges(path):
    """
    Returns a list with edges reversed.

    e.g. [(1, 2), (2, 3)] returns [(2, 1), (3, 2)]. Used to check
    directionality of edges

    Args:
        path - a path, as a sequence of edges

    Returns:
        A list of reversed edges.
    """
    return [edge[::-1] for edge in path]
5c678addb81ff2d75dbf52f121705d5ce10c3fe6
99,004
def label_lflank(row):
    """
    Substitute the 0 counts by NA if the only left flanking junction(s)
    is/are ambiguous
    """
    ambiguous_only = (row['lfl_reads'] == 0) & (row['lfl_junctions_ambi'] > 0)
    return "NA" if ambiguous_only else row['lfl_reads']
a38e31ac397ead8a410a4f5430998e9f88d1ec60
99,008
from typing import Callable


def bundle_callables(*callables: Callable) -> Callable:
    """
    Bundle many callables into a single function.

    Callables will be invoked in the order given.
    """
    def bundle_callables_inner(*args, **kwargs):
        """Call all callables in order."""
        for target in callables:
            target(*args, **kwargs)

    return bundle_callables_inner
f691941d904b064718c124d78c0419db8661c4c7
99,009
import operator


def hamming(str1, str2):
    """Hamming distance between two strings.

    Counts positions at which corresponding characters differ. As before,
    pairing stops at the shorter input, so extra trailing characters are
    silently ignored.
    """
    # IDIOM FIX: sum over the map directly — the old [i for i in map(...)]
    # built a throwaway intermediate list for no benefit.
    return sum(map(operator.__ne__, str1, str2))
a3a2398f967d71dd1d44febb5833f0f7e9dc5c59
99,011
def aggregate_norm_comparison(factor_df, M_matrix):
    """
    Aggregate factor information with cosine similarity matrix, which
    contains the true norm.

    :param factor_df: pandas DataFrame with at least 'kernel', 'factor' and
        'iter' columns plus numeric columns to sum — assumed from the
        groupby below; TODO confirm against callers.
    :param M_matrix: pandas DataFrame carrying the same three key columns;
        columns shared with the aggregation get '_linear'/'_factor' suffixes.
    :return: the per-(kernel, factor, iter) sums of ``factor_df`` merged on
        index with ``M_matrix`` (default inner join).
    """
    return factor_df.groupby(['kernel', 'factor', 'iter']).agg('sum').merge(
        M_matrix.set_index(['kernel', 'factor', 'iter']),
        left_index=True, right_index=True,
        suffixes=('_linear', '_factor')
    )
060dcd08e2147be49d40d04af802daf3286cfe02
99,018
import pickle


def unpickle_model(file_location):
    """
    Loads model from previously saved .pkl file.

    :param file_location: path to the .pkl file to load
    :return: the unpickled object
    """
    with open(file_location, 'rb') as model_file:
        model = pickle.load(model_file)
    return model
dbe06e03ac6350e44bc48866ed6953f1fd727b39
99,022
import math


def rev_pentagonal(n):
    """Reverse pentagonal.

    Return 0 for n < 0. For a pentagonal n, return the positive index k with
    k*(3k-1)/2 == n; otherwise return the negated (floor-divided) candidate.
    """
    if n < 0:
        return 0
    discriminant = 1 + 24 * n
    delta = int(math.sqrt(discriminant))
    # n is pentagonal iff the discriminant is a perfect square whose root
    # is congruent to 5 mod 6 (delta == 6k - 1).
    if delta * delta == discriminant and delta % 6 == 5:
        return (delta + 1) // 6
    # Note the precedence: the negation applies before the floor division.
    return -(delta + 1) // 6
43bcbac5c1e0ac498089063f22df79840043f68a
99,023
def eq(max_x, x):
    """Returns equally separated decreasing float values from 1 to 0
    depending on the maximum value of x

    Parameters
    ----------
    max_x : int
        maximum x value (maximum number of steps)
    x : int
        current x value (step)

    Returns
    -------
    float
        y value

    Example
    -------
    >>> eq(3, 0)
    1.0
    >>> eq(3, 1)
    0.6666666666666667
    >>> eq(3, 3)
    0.0
    """
    slope = -1 / max_x
    return (slope * x) + 1
c0076295dadf280db472f32d664eeca3a49a1780
99,024
from typing import Counter


def count_ngrams(text, n):
    """
    Counter of symbol ngrams

    Inputs:
    - text: str, some text
    - n: int, length of symbol ngram

    Output:
    - ngrams: Counter, ngram -> number of times it was in text
    """
    return Counter(text[start:start + n] for start in range(len(text) - n + 1))
5a90eb16f6b904fac71ec58d11e96dc1cc69ece7
99,025
def length(mention):
    """
    Compute length of a mention in tokens.

    Args:
        mention (Mention): A mention.

    Returns:
        The tuple ('length', LENGTH), where LENGTH is the length of the
        mention in tokens. The length is stored as a string.
    """
    token_count = len(mention.attributes["tokens"])
    return "length", str(token_count)
64c13215004bbecf1976188005881abaf242dcab
99,026
def parse_systemd_envfile(filepath):
    """Parse systemd environment file into a dict."""
    result = dict()
    with open(filepath, 'r') as handle:
        for raw_line in handle:
            line = raw_line.strip()
            # Skip blank lines and comments.
            if line == '' or line.startswith("#"):
                continue
            # Split only on the first '=' so values may contain '='.
            key, value = line.split('=', 1)
            result[key.strip()] = value.strip()
    return result
5d01cd26d223a448898a15d537a2a3f1f2aa0c4c
99,027
def format_time_str(time_s):
    """Format time in seconds to timestring in HH:MM:SS format"""
    total_seconds = int(time_s)
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{:d} : {:02d} : {:02d}'.format(hours, minutes, seconds)
33df5a9338827af5e186c1ef1e45814529932e4d
99,028
import unicodedata


def remove_accents(text):
    """Remove accents from a string"""
    try:
        # Bytes input is decoded first; str has no .decode and falls through.
        text = text.decode("utf-8")
    except (UnicodeEncodeError, AttributeError):
        pass
    # Right single quote is often used as an apostrophe; normalise it before
    # stripping non-ASCII characters.
    text = text.replace("’", "'")
    decomposed = unicodedata.normalize("NFKD", text)
    return decomposed.encode("ascii", "ignore").decode()
c45e5f3864cf91bcc56608f37bdb9e31fc9a0074
99,029
import re


def extract_number(line):
    """
    Extracts the phone number from a vCard-file line, by removing everything
    but numbers and '+'

    Arguments:
        line {string} -- the line to extract the phone number from

    Returns:
        string -- the phone number
    """
    # Everything after the first ':' is the value portion of the vCard line.
    value = line[line.index(":") + 1:].rstrip()
    return re.sub('[^0-9+]', '', value)
8ee8a63e09b15426525dbc0058ce6027cbbc0bae
99,032
import ast


def node_is_result_assignment(node: ast.AST) -> bool:
    """
    Args:
        node: An ``ast`` node.

    Returns:
        bool: ``node`` corresponds to the code ``result =``, assignment to
        the ``result`` variable.
    """
    # Annotated assignment: `result: T = ...`
    if isinstance(node, ast.AnnAssign):
        return node.target.id == "result"  # type: ignore
    # Plain assignment: exactly one simple-name target called `result`.
    if not isinstance(node, ast.Assign):
        return False
    targets = node.targets
    if len(targets) != 1 or not isinstance(targets[0], ast.Name):
        return False
    return targets[0].id == "result"
d6fe2332f55f7caef933a5e782fc1db84c84e0ef
99,039
import json


def return_json_file_contents(filename):
    """
    Simple function for returning the contents of the input JSON file
    """
    try:
        with open(filename) as json_data:
            return json.load(json_data)
    except IOError:
        print("File not found: "+filename)
        return None
0702cd6914eb5ba9d0e14a0559466708c3a356eb
99,042
def out_of_bounds(test_list, idx, val):
    """Solution to exercise C-1.23.

    Attempt to write ``val`` into ``test_list`` at index ``idx``. If the
    index is out of bounds, catch the resulting exception and print:
    "Don't try buffer overflow attacks in Python!"
    """
    try:
        test_list[idx] = val
    except IndexError:
        print("Don't try buffer overflow attacks in Python!")
    return test_list
a3eb7771fe43065b7a29090b554465a192aaae1b
99,045
def ensure_ascii(in_string):
    """Remove any non-ASCII characters from the input string

    Parameters
    ----------
    in_string : str
        Input string

    output_string : str
        String with non-ASCII characters removed
    """
    # Encoding with 'ignore' drops every code point outside ASCII.
    return in_string.encode('ascii', 'ignore').decode()
6440cb66d6bee0af7cd2e93052636598e553ec19
99,049
def create_table(src, dst):
    """Create a translation table from two strings.

    Given strings src="ABC" and dst="abc", this returns a table which maps
    'A' to 'a', 'B' to 'b', and 'C' to 'c'. The strings must be of equal
    length.
    """
    assert len(src) == len(dst)
    table = {}
    for source_char, target_char in zip(src, dst):
        table[ord(source_char)] = ord(target_char)
    return table
3d64fd9ca0741865005bd0f18f894ecb1a920e35
99,052
import contextlib
import bz2
import pickle


def load_collection(file_to_load):
    """
    Load file file_to_load and return its collection, where:
    collection_data = {'TITLE': title, 'DESC': desc, 'INFO': info, 'DATA': data}
    """
    with contextlib.closing(bz2.BZ2File(file_to_load, 'rb')) as compressed:
        return pickle.load(compressed)
3b6da5850c3f91e906af7c198d05997cc1a29e9d
99,056
import csv


def extractor(filename):
    """Return a tuple containing two lists of the Activity title and
    Activity type(s)

    Args:
    - filename: string of the filename

    Returns:
    Tuple with:
    - Lists of Activity Names
    - Lists of Activity Types
    """
    assert filename.endswith('.csv'), 'File must be a csv file!'
    names = []
    types = []
    with open(filename, 'r') as cas_file:
        for row in csv.reader(cas_file, delimiter=','):
            names.append(row[0])
            # Keep only the non-zero type codes as ints.
            types.append([int(cell) for cell in row[1:] if int(cell)])
    return (names, types)
dd886dab33bb79972af6d913eedceaa0e9eb7d7c
99,063
def choices(*args):
    """Simple helper to create choices=(('Foo','Foo'),('Bar','Bar')).

    :param args: the choice labels.
    :return: a tuple of (label, label) pairs.
    """
    # BUG FIX: on Python 3, zip() returns a one-shot iterator, so the result
    # could only be consumed once and did not match the documented tuple
    # form. Materialise it (backward compatible: still iterable).
    return tuple(zip(args, args))
14237e2da7b9dd7c6366eb76b1437fc8db2cdcc7
99,067
def _atomise_author(author): """ Convert author from whatever it is to a dictionary representing an atom:Person construct. """ if type(author) is dict: return author else: if author.startswith("http://") or author.startswith("www"): # This is clearly a URI return {"uri" : author} elif "@" in author and "." in author: # This is most probably an email address return {"email" : author} else: # Must be a name return {"name" : author}
cef106a2b764b37ede1f81df7d74f7d930fda5b5
99,070
import re


def is_version_number(text):
    """
    Does this token look like a semantic versioning string?
    http://semver.org/

    It's ambiguous if this is a version number or just a number.

    >>> is_version_number('32')
    False

    This is more likely a version number.

    >>> is_version_number('2.2')
    True
    >>> is_version_number('0.2.1')
    True
    >>> is_version_number('1.2-alpha')
    True
    >>> is_version_number('1.2.3-rc1')
    True
    >>> is_version_number('1.3.1.Final')
    True
    >>> is_version_number('1.11.0.RELEASE')
    True
    >>> is_version_number('0.1-SNAPSHOT')
    True
    """
    # VERBOSE regex: whitespace and #-comments inside the pattern are
    # ignored. Requires at least MAJOR.PATCH (so a bare integer fails),
    # with an optional leading 'v', optional minor number, and any number
    # of '-' or '.'-separated tag components.
    return bool(re.match(r'''
        v?
        \d+                          # Major number
        (?: [.] \d+)?                # Minor number
        [.] \d+                      # Patch number
        (?: [\-.](?:\w+|rc[.]?\d+))* # Tag
        ''', text, re.VERBOSE | re.IGNORECASE))
d1c26427741cae73022907771cd9b94028842bdd
99,071
import math def _phi(x): """Cumulative density function for the standard normal distribution """ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
4c681132799d881001b848f5799e36de5ddbec93
99,072
def _cast_types(args): """ This method performs casting to all types of inputs passed via cmd. :param args: argparse.ArgumentParser object. :return: argparse.ArgumentParser object. """ args.x_val = None if args.x_val == 'None' else int(args.x_val) args.test_size = float(args.test_size) args.dual = (args.dual in ['True', "True", 'true', "true"]) args.tol = float(args.tol) args.C = float(args.C) args.fit_intercept = (args.fit_intercept in ['True', "True", 'true', "true"]) args.intercept_scaling = float(args.intercept_scaling) args.class_weight = None if args.class_weight == 'None' else {} args.random_state = None if args.random_state == 'None' else int(args.random_state) args.max_iter = int(args.max_iter) args.verbose = int(args.verbose) args.warm_start = (args.warm_start in ['True', "True", 'true', "true"]) args.n_jobs = None if args.n_jobs == 'None' else int(args.n_jobs) args.l1_ratio = None if args.l1_ratio == 'None' else float(args.l1_ratio) # --------------- # return args
40285a3b93606b6503f5ad1486fb61cc61d32a8e
99,081
def _es_down_template(request, *args, **kwargs): """Returns the appropriate "Elasticsearch is down!" template""" return 'search/down.html'
ccff299d7f7ab62b8bbd387b563d11eec176428b
99,086
def my_linear_model(B, x):
    """Linear function for the regression.

    Args
        B (1D array of 2): Input 1D polynomial parameters (0=constant, 1=slope)
        x (array): Array which will be multiplied by the polynomial

    Returns
    -------
    An array = B[1] * (x + B[0])
    """
    offset = B[0]
    slope = B[1]
    return slope * (x + offset)
775bb4b36005c2704ecf163e2df1e3c19231959a
99,087
def _match_url_start(ref, val): """ Checks that the val URL starts like the ref URL. """ ref_parts = ref.rstrip("/").split("/") val_parts = val.rstrip("/").split("/")[0:len(ref_parts)] return ref_parts == val_parts
45a7476e9d2422c8503383610c32c56956e0a803
99,088
def purify_preferences(preferences: str, separator: str = ' ') -> list:
    """Strip the leading count token from a preferences string.

    Args:
        preferences (str): Preferences string of type L(ike) or D(islike);
            its first token is the number of entries that follow.
        separator (str, optional): Separator for splitting the preferences
            string. Defaults to ' '.

    Returns:
        list: The preference tokens, or ['none'] when the count is zero.
    """
    tokens = preferences.split(separator)
    if int(tokens[0]) == 0:
        return ['none']
    return tokens[1:]
bf7cdb9a4d58cf477bd4a6714f03923f2a206eb1
99,090
def ip(T, x, c):
    """
    Performs intrinsic plasticity

    Parameters:
        T: array
            The current thresholds
        x: array
            The state of the network
        c: Bunch
            The parameter bunch
    """
    # In-place update: nudge thresholds toward the target rate h_ip at
    # learning rate eta_ip (+= matters for array arguments).
    T += c.eta_ip * (x - c.h_ip)
    return T
cc9f53a2be38f9c33429509c6acb76af21ea1403
99,091
def _compute_intersection(w1, w2): """ Compute intersection of window 1 and window 2""" col_off = max(w1.col_off, w2.col_off) row_off = max(w1.row_off, w2.row_off) width = min(w1.col_off+w1.width, w2.col_off+w2.width) - col_off height = min(w1.row_off+w1.height, w2.row_off+w2.height) - row_off return col_off, row_off, width, height
0e91609a84f6f5a281356f1b3724229a20ea3f4a
99,094
def calculate_total_number_of_travel_requests_in_timetables(timetables):
    """
    Calculate the total number of travel_requests in timetables.

    :param timetables: [timetable_document]
    :return: total_number_of_travel_requests: int
    """
    return sum(len(timetable.get('travel_requests')) for timetable in timetables)
6b6404cecb11ac0ee3cd0510fdef52fb415f47f5
99,102
import torch


def nanmean(signals):
    """Computes the mean, ignoring nans

    Arguments
        signals (torch.tensor [shape=(batch, time)])
            The signals to filter

    Returns
        filtered (torch.tensor [shape=(batch, time)])
    """
    nan_mask = torch.isnan(signals)
    # Zero out nans on a copy so they contribute nothing to the sum, then
    # divide by the count of valid (non-nan) entries per row.
    cleaned = signals.clone()
    cleaned[nan_mask] = 0.
    valid_counts = (~nan_mask).float().sum(dim=1)
    return cleaned.sum(dim=1) / valid_counts
a3d391160f290cc2547a700990359a5e89c0ed6e
99,107
import importlib
import inspect


def _ensure_class(class_or_class_name, compatible_class=None):
    """
    Ensure that the supplied value is either a class or a fully-qualified
    class name.

    :param class_or_class_name: A class or a fully-qualified class name.
    :param compatible_class: If specified then the class must be, or
        (eventually) derive from, this class.
    :return: The class represented by class_or_class_name.
    :raises ImportError: if the named class is not found in its module.
    :raises ModuleNotFoundError: if the module path cannot be resolved.
    :raises TypeError: if the resolved object is not a class.
    :raises ValueError: if the class does not derive from compatible_class.
    """
    target_class = class_or_class_name
    if isinstance(class_or_class_name, str):
        # Split "pkg.mod.Class" into module path and class name.
        name_parts = class_or_class_name.split('.')
        module_name = '.'.join(name_parts[:-1])
        target_class_name = name_parts[-1]
        # Raises ModuleNotFoundError if we can't resolve part of the module path
        target_module = importlib.import_module(module_name)
        target_class = getattr(target_module, target_class_name, None)
        if not target_class:
            raise ImportError("Class not found: '{}'.".format(class_or_class_name))
    if not inspect.isclass(target_class):
        raise TypeError("'{}' is not a class.".format(class_or_class_name))
    if compatible_class and not issubclass(target_class, compatible_class):
        raise ValueError(
            "Class '{}' does not derive from '{}.{}'.".format(
                class_or_class_name,
                compatible_class.__module__,
                compatible_class.__name__
            )
        )
    return target_class
f29b84de215459f694b2f48b890389f3f0fe2334
99,110
import torch


def rgb2yuv(rgb, device):
    """
    Convert RGB image into YUV
    https://en.wikipedia.org/wiki/YUV

    NOTE(review): assumes the input is scaled to [-1, 1] (it is rescaled to
    [0, 255] below) and is channels-first (N, 3, H, W), given the
    transpose(1, 3) — confirm against callers. The output is scaled to
    [0, 1] and keeps the input's shape.
    """
    # Rescale [-1, 1] -> [0, 255] before applying the conversion matrix.
    rgb = ((rgb + 1) / 2) * 255.0
    rgb_ = rgb.transpose(1, 3)  # input is 3*n*n default
    A = torch.tensor([[0.299, -0.14714119, 0.61497538],
                      [0.587, -0.28886916, -0.51496512],
                      [0.114, 0.43601035, -0.10001026]]).to(device)  # from Wikipedia
    # Contract the channel axis with the conversion matrix, then restore
    # the original axis order.
    yuv = torch.tensordot(rgb_, A, 1).transpose(1, 3)
    yuv = yuv / 255.0
    return yuv
689990a31bc6d02348c6427c1478e09d26da7f1d
99,111
def shebang_to_require(shebang, use_bytes=True):
    """Convert shebang to the format of requirement.

    If the use_bytes argument is set to False, executable path is returned
    as a string instead of the default bytes type."""
    # First whitespace-separated token minus the leading '#!' characters.
    executable_path = shebang.split()[0][2:]
    return executable_path.encode() if use_bytes else executable_path
6edd3f4367ae9b7275cf80ad3f768b2d6e58708d
99,114
def get_output_path_from_s3_url(s3_url):
    """
    transform s3 output path (from beam runs spreadsheet) into path to s3
    output that may be used as part of path to the file.

    s3path = get_output_path_from_s3_url(s3url)
    beam_log_path = s3path + '/beamLog.out'
    """
    index_prefix = "s3.us-east-2.amazonaws.com/beam-outputs/index.html#"
    direct_prefix = "beam-outputs.s3.amazonaws.com/"
    return s3_url.strip().replace(index_prefix, direct_prefix)
6b2a307d730dfdec38196b65a332b5671d21927e
99,116
def get_dtypes_and_required_cols(get_dtypes):
    """
    Get OED column data types and required column names from JSON.

    :param get_dtypes: method to get dict from JSON
    :type get_dtypes: function
    """
    dtypes = get_dtypes()
    col_dtypes = {}
    required_cols = []
    # Only string-typed columns are kept; of those, 'R' marks required ones.
    for name, meta in dtypes.items():
        if meta['py_dtype'] != 'str':
            continue
        col_dtypes[name] = meta['py_dtype']
        if meta['require_field'] == 'R':
            required_cols.append(name)
    return col_dtypes, required_cols
9242f4685df7393b79c77fb1a5a175a92d368dd2
99,117
def copy_fill_settings(da_in, da_out):
    """
    propagate _FillValue and missing_value settings from da_in to da_out

    return da_out
    """
    # get() yields None when the key is absent, matching the explicit
    # None assignment of the original branch.
    da_out.encoding['_FillValue'] = da_in.encoding.get('_FillValue')
    if 'missing_value' in da_in.encoding:
        da_out.attrs['missing_value'] = da_in.encoding['missing_value']
    return da_out
71401d838fb89e828f32da3950c6f0b048f49159
99,122
import threading def _startThread(fn): """ Start a daemon thread running the given function. """ thread = threading.Thread(target=fn) thread.setDaemon(True) thread.start() return thread
8743838bb4aa4846b1fb4ae1fb03f1cef63dc981
99,127
import string
import random


def random_string(lenght):
    """
    Generate a random string of lowercase letters and digits

    Parameters
    ----------
    lenght: int
        Lenght of the resulting string

    Returns
    -------
    str
        The random string
    """
    if not isinstance(lenght, int):
        raise ValueError("Lenght must be an integer")
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(lenght))
061d38d4fff4fd2014c676875b3634d4439ff15c
99,136
def sql_from_filename(filename):
    """
    Given a filename return the SQL it contains.
    This function exists to support unit testing.

    :param filename: path to exiting file containing SQL
    :return: str: sql commands to execute
    """
    with open(filename, 'r') as sql_file:
        contents = sql_file.read()
    # A trailing newline is always appended.
    return contents + "\n"
da965543f6ed2fafdb3717237c80457bf690e3ff
99,138
import math


def bilinear_interp(array, point):
    """Interpolate between the values at the integer-positions around the
    given float-position in the given array."""
    x = point[0]
    y = point[1]
    x0 = math.floor(x)
    y0 = math.floor(y)
    x1 = x0 + 1
    y1 = y0 + 1
    # Weight each of the four surrounding cells by the opposite distances.
    return (array[x0, y0] * (x1 - x) * (y1 - y)
            + array[x0, y1] * (x1 - x) * (y - y0)
            + array[x1, y0] * (x - x0) * (y1 - y)
            + array[x1, y1] * (x - x0) * (y - y0))
6a40f5b9c32d41e69acb48580847845b6e2c7a02
99,140
import hashlib


def md5_checksum_stream(s, buffer_size=8192):
    """
    calculate md5 for a stream in case of a large file that cannot be hold
    in memory

    :param s: stream opened for binary reading
    :param buffer_size: number of bytes read per chunk
    :return: hex digest string
    """
    digest = hashlib.md5()
    # Walrus keeps the read/check/update cycle in one place.
    while chunk := s.read(buffer_size):
        digest.update(chunk)
    return digest.hexdigest()
b810e99a06aa21f797293a928e544fc1348a58b4
99,142
from typing import Sequence
from typing import Tuple


def get_ddl_schema(fields: Sequence[Tuple[str, str]]) -> str:
    """Get the ddl style schema from (name, dtype) fields.

    Parameters
    ----------
    fields : sequence of tuple
        List of (name, dtype) tuples, similar to the output of
        pyspark.sql.DataFrame.dtypes.

    Returns
    -------
    str
        The ddl schema.
    """
    # IDIOM FIX: joining with ',\n' directly produces the same string as
    # appending a comma to every line and slicing the last one off — and
    # also handles the empty-fields case without negative slicing.
    return ',\n'.join(f'{name} {dtype}' for name, dtype in fields)
9fcab4887a93b6872d09a90d85ddc48fb7fb041f
99,143
def hms_to_seconds(h: int, m: int, s: int) -> int:
    """Convert h:m:s to seconds."""
    return h * 3600 + m * 60 + s
c3cfe75966918af5fa1de92b7a8e0dece39b2f88
99,146
def contains_placeholder(tokens):
    """check for placeholders tokens"""
    placeholders = ("⦅ph_ent_uri#1⦆", "⦅ph_unk#1⦆")
    return any(p in tokens for p in placeholders)
0b81f63c639be83b59180d68c0a7b4232049c0ee
99,147
def is_parallel(last, current):
    """
    Returns True if the motion between last and current notes is parallel.
    """
    first_motion = last[0] - current[0]
    second_motion = last[1] - current[1]
    # Parallel motion: both voices move in the same (non-zero) direction.
    if first_motion < 0 and second_motion < 0:
        return True
    return first_motion > 0 and second_motion > 0
2a47e50f23fd15a91908fe0d7448e3c9555ae0ae
99,151
def hexstrings2int(hexstrings):
    """list of hex strings to list of integers

    >>> hexstrings2int(["0000", "0001", "FFFF"])
    [0, 1, 65535]
    """
    return [int(value, base=16) for value in hexstrings]
6da1ba1c35786fe4a4bc2b40c7f0072f8e94c197
99,152
from typing import List import difflib def _get_case_insensitive_close_matches( word: str, possibilities: List[str], n: int = 3, cutoff: float = 0.6 ) -> List[str]: """A case-insensitive wrapper around difflib.get_close_matches. Args: word : A string for which close matches are desired. possibilites : A list of strings against which to match word. n : the maximum number of close matches to return. n must be > 0. cutoff : Possibilities that don't score at least that similar to word are ignored. Returns: The best (no more than n) matches among the possibilities are returned in a list, sorted by similarity score, most similar first. Examples: >>> from ee_extra.utils import _get_case_insensitive_close_matches >>> _get_case_insensitive_close_matches("mse", ["MSE", "ERGAS"]) ["MSE"] """ lower_matches = difflib.get_close_matches( word.lower(), [p.lower() for p in possibilities], n, cutoff ) return [p for p in possibilities if p.lower() in lower_matches]
a5d8fd48b38b320445b79f1bdb9f390a075e861f
99,153
def _var(x, ddof=0):
    """
    Calculate variance for an array

    Uses Welford's algorithm[1] for online variance calculation.

    Parameters
    ----------
    x : array-like
        The data
    ddof : int
        Degrees of freedom

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    """
    n = 0.0
    mean_ = M2 = 0.0
    # Single pass: update the running mean and the sum of squared
    # deviations (M2) per Welford's recurrence.
    for x_ in x:
        n += 1
        delta = x_ - mean_
        mean_ += delta / n
        delta2 = x_ - mean_
        M2 += delta * delta2
    # NOTE(review): max(n - ddof, 0) prevents a *negative* denominator, but
    # a zero denominator (n <= ddof, e.g. empty input with ddof=0) still
    # raises ZeroDivisionError — confirm whether that is intended.
    return M2 / max(n - ddof, 0)
e9d2180d4f894a8ab92f15f170e794ef772c210f
99,155
def check_claim_attrs(claim_attrs, expected_claim):
    """
    Check if field 'attrs' in gotten claim matches with expected claim json.

    :param claim_attrs: value of field 'attrs' in gotten claim.
    :param expected_claim: dict mapping keys to sequences whose first
        element is the expected value.
    :return: True of False.
    """
    return all(claim_attrs[key] == expected[0] for key, expected in expected_claim.items())
924838a2ceee64f73dc7f85d94e73e0ee46f59b7
99,157
async def health_check():
    """
    Check service health
    """
    return dict(status='ok')
877979c81a4251b013d5bf44c3c75d2fbc1cfc56
99,162
import math


def numdigits(a):
    """Return the number of digits in the positive integer a.

    :param a: a non-negative integer (0 counts as one digit).
    :return: the count of decimal digits.
    :raises ValueError: if a is negative.
    """
    # BUG FIX: math.log(a, 10) is computed in floating point and can round
    # just past an integer for large powers of 10 (off-by-one results on
    # some platforms, e.g. around 10**15). len(str(a)) is exact for any
    # size of int and also covers the a == 0 case.
    if a < 0:
        raise ValueError("numdigits() requires a non-negative integer")
    return len(str(a))
182797c87af5378c7a23fbe3f9055627a449ad44
99,163
def _slice_matrix(mat, idx_first_residue=1, residue_min=1, residue_max=None):
    """
    Slice a matrix given the lower and upper bond in parameters.

    The matrix has to be a numpy array with one row per residue.
    The slice will occurs on the rows and the sub-matrix is returned.

    Parameters
    ----------
    mat : numpy array
        the matrix to slice
    idx_first_residue: int
        the index of the first residue in the matrix (row 0 corresponds to
        this residue number)
    residue_min: int
        the lower bound of residue frame (1-based, inclusive)
    residue_max: int
        the upper bound of residue frame (1-based, inclusive)

    Returns
    -------
    sub_mat: numpy 2D array
        the matrix sliced

    Raises
    ------
    IndexError
        when something is wrong about the lower/upper bound
    """
    # Default to the full matrix when no upper bound is given.
    if residue_max is None:
        residue_max = mat.shape[0]
    # Sanity checks
    if residue_min <= 0 or residue_max <= 0:
        raise IndexError("Index start at 1")
    if residue_min >= residue_max:
        raise IndexError("Lower bound > upper bound")
    # Check if the parameters residue_min and residue_max are in the range of the matrix
    # Take in account the optional offset (idx_first_residue)
    # range of indexes of the matrix
    residues_idx = range(idx_first_residue, mat.shape[0] + idx_first_residue)
    if residue_min not in residues_idx or residue_max not in residues_idx:
        raise IndexError("Index out of range")
    # Slice the matrix according to the index of first residue, residue_min and residue_max
    # (+1 makes the upper bound inclusive).
    return mat[residue_min - idx_first_residue: residue_max - idx_first_residue + 1]
ac60ec86564d8581ad2993d44e8d9781b19fc2d1
99,164
def add_gctx_to_out_name(out_file_name):
    """
    If there isn't a '.gctx' suffix to specified out_file_name, it adds one.

    Input:
        - out_file_name (str): the file name to write gctx-formatted output
          to. (Can end with ".gctx" or not)

    Output:
        - out_file_name (str): the file name to write gctx-formatted output
          to, with ".gctx" suffix
    """
    suffix = ".gctx"
    if out_file_name.endswith(suffix):
        return out_file_name
    return out_file_name + suffix
0cb90a0b6203a6162d8cdfb2c39daf8190a41a40
99,166
def max_retry(count):
    """
    Set the maximum number of time a task can be retried before being
    disabled as per Luigi retry policy.
    """
    def apply_retry_count(cls):
        # Stamp the retry policy onto the task class and hand it back.
        cls.retry_count = count
        return cls

    return apply_retry_count
db272e043c622930764f5e24894b89d820138914
99,171
def is_leaf_pbtype(xml_pbtype):
    """
    Returns true when the given pb_type is a leaf.

    Leaf pb_types are recognized by carrying a "blif_model" attribute.
    """
    assert xml_pbtype.tag == "pb_type", xml_pbtype.tag
    return xml_pbtype.get("blif_model") is not None
04206693e0681e52c75c11d9bb78e3805aa6f478
99,177
def _suppress_none(val): """Returns an empty string if None is passed in, otherwide returns what was passed in""" if val is None: return '' return val
9ecfe67d344fa28ee7ce1135903c9aed73f1fd04
99,178
def read_file(archive):
    """Read the file with the URLs, one URL per line.

    Trailing whitespace (including the newline) is stripped from each line.
    """
    with open(archive, 'r') as handle:
        return [line.rstrip() for line in handle]
58ba4659746e64bc6ffdf67d150c1cbce203dc9d
99,179
def get_vowels_in_word(word):
    """Return vowels in string word--include repeats."""
    return "".join(ch for ch in word if ch in "aeiou")
64f40b259a2e91fd51a3c151e60c7e1509ba2be6
99,180
def dataset_predict(model, data_set, role):
    """
    Run `model.predict` on the subset of `data_set` identified by `role`.

    :param model: tf.keras.models.Model. Pre_model.
    :param data_set: GHData.
    :param role: int. Data set id.
    :return: tuple (labels, predictions). Labels and predictions of the same
        shape.
    :raise: ValueError when the requested subset has no data or no labels.
    """
    data, labels, _ = data_set.get_dataset(role)
    if data is None or labels is None:
        raise ValueError('no data')
    return labels, model.predict(data)
e19f25ef80755db1c265279dd2fd8ce6cc369bd0
99,182
def generate_cutoffs_dict(generate_cutoffs):
    """Return a factory building stringency -> cutoffs dictionaries for a
    family with the specified stringencies."""
    def _generate_cutoffs_dict(family, stringencies=('normal',)):
        """Return a dictionary of cutoffs for a given family."""
        # One generate_cutoffs call per requested stringency level.
        return {stringency: generate_cutoffs(family)
                for stringency in stringencies}
    return _generate_cutoffs_dict
e62210cddb13eb8f165ff4185fa106787f1b931a
99,183
def create_sample_dists(cleaned_data, y_var=None, categories=None):
    """
    Build sample distributions from the data for hypothesis testing.

    NOTE(review): this is currently a stub -- the t-test/z-test sampling
    logic has not been implemented yet, so an empty list is returned.

    :param cleaned_data: the cleaned input data
    :param y_var: The numeric variable you are comparing
    :param categories: the categories whose means you are comparing
    :return: a list of sample distributions to be used in subsequent t-tests
    """
    # Avoid a shared mutable default argument; normalize None to a fresh list.
    if categories is None:
        categories = []

    htest_dfs = []

    # Main chunk of code using t-tests or z-tests

    return htest_dfs
9873653a9aedb7b07e949bdb2cc5442e4de5d902
99,190
def _WordPrefixes(words): """Returns a set with all the strings that can become longer words.""" word_prefixes = set() for word in words: for i in range(len(word)-1): word_prefixes.add(word[:i+1]) return word_prefixes
f0041d0830d328fb5d7bebd7ec1b7096e6c711d8
99,191
def auto_value(x,default):
    """
    auto_value(x,default) returns x, unless x is None, in which case it
    returns default
    """
    return default if x is None else x
0d58682e9213f0d3f031b8708474f7caac2f1aa0
99,198
def count_of_keys_available(file_name):
    """
    Count the number of non-blank and non-comment lines in a file.
    By comment, we mean the lines starting with '#'.
    (Blank means the line contains only spaces and/or a newline.)

    :param file_name: string
    :return: int
    """
    # The `with` block releases the handle; the previous version also called
    # f.close() redundantly inside it and materialized the whole file as a
    # list before filtering.
    with open(file_name, 'r') as f:
        return sum(
            1 for line in f
            if line.strip(' \n') != '' and line[0] != '#'
        )
518cf051eae84b1712ffb2f8340ef3d6b6e6b26b
99,202
def nvmf_get_targets(client):
    """Get a list of all the NVMe-oF targets in this application

    Returns:
        An array of target names.
    """
    # Thin JSON-RPC wrapper: delegate entirely to the client.
    method = "nvmf_get_targets"
    return client.call(method)
47b9a2b4eaf8fc70a8947740b461d90c93986150
99,203
def convert_ms(duration_ms: int) -> str:
    """
    Converts milliseconds to a string representation of a timestamp (M:SS).

    The duration is rounded to the nearest whole second *before* being split
    into minutes and seconds, so a value like 59.6 s renders as "1:00" rather
    than the invalid "0:60" the previous round-after-divmod logic produced.
    """
    total_seconds = int(round(duration_ms / 1000))
    minutes, seconds = divmod(total_seconds, 60)
    # :02d pads seconds, removing the old str-concatenation type hack.
    return f"{minutes}:{seconds:02d}"
00218c36e84ee06f1133f021cf13f95a5aa955b3
99,207
def time_string(delta, hours, minutes, seconds, delim, always_show=True):
    """ Convert seconds into the format specified

    `hours`, `minutes` and `seconds` are printf-style templates for each
    component; components equal to zero are omitted unless `always_show`.
    """
    t_hours, remainder = divmod(delta, 3600)
    t_minutes, t_seconds = divmod(remainder, 60)

    parts = []
    for template, amount in ((hours, t_hours),
                             (minutes, t_minutes),
                             (seconds, t_seconds)):
        if always_show or amount > 0:
            parts.append(template % amount)
    return delim.join(parts)
89453cc2e48073156eaf77df9cee65305906061b
99,208
def simplify_county_names(dframe):
    """
    Removes the words 'County' and 'Parish' and extra white space from the
    County field of a dataframe (modified in place and returned).
    """
    county = dframe['County']
    county = county.str.replace('County', '')
    county = county.str.replace('Parish', '')
    dframe['County'] = county.str.strip()
    return dframe
625959eafe281d2183d738aefcaa3bc7c6321ea1
99,210
def fibonacci(n):
    """
    Takes an integer n and returns the n-th term of Fibonacci series.

    :param n: positive integer (1-based; fibonacci(1) == fibonacci(2) == 1)
    :return: the n-th Fibonacci number
    :raises ValueError: if n is not a positive integer
    """
    # The previous try/except only *evaluated* a boolean expression, which
    # can never raise, so invalid input was silently accepted. Validate
    # explicitly instead.
    if not isinstance(n, int) or n < 1:
        raise ValueError("Please input a positive integer.")
    # Iterative computation: O(n) instead of the exponential naive recursion.
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
76b60c27d84c429e10ac5a8ee1ac5425c90c54c7
99,212
def greatest_subarray_sum(nums: list[int], inverse_mode: bool = False) -> int:
    """
    Find the greatest subarray sum in nums, or find the smallest subarray
    sum using the inverse_mode (Kadane's algorithm in both directions).
    """
    # Pick the comparison direction once instead of branching per step.
    pick = min if inverse_mode else max

    best = running = nums[0]
    for value in nums[1:]:
        # Either extend the current subarray or start fresh at `value`.
        running = pick(running + value, value)
        best = pick(best, running)
    return best
7be5e7bd81f52cb01b158ca05fa5b6fffbe238f5
99,217
import configparser


def get_aws_setting(name=''):
    """
    Gets a setting specified in the AWS section of the settings.ini file.

    On a missing section or key, prints an error and exits the process
    (behavior kept from the original implementation).

    :param name: One of the variable names in the AWS section of settings.ini
    :return: String
    """
    config = configparser.ConfigParser()
    config.read('settings.ini')
    try:
        # Single lookup; the previous version fetched the value twice.
        return config['AWS'][name]
    except KeyError:
        # The old message referred to get_aws_path(); this function is
        # get_aws_setting().
        print(f'\'{name}\' is not a valid argument for get_aws_setting(). Exiting.')
        exit()
9ad94ab895fffdfa95b9477309d3b124eb4d71f5
99,220
from typing import List


def solve(weights: List[int], profits: List[float], capacity: int) -> List[List[float]]:
    """
    Solves given knapsack instance with dynamic programming and returns
    the full solution matrix.

    :param weights: Weights of items
    :param profits: Profits of items
    :param capacity: Capacity of knapsack instance
    :return: Solution matrix as list of lists of floats; cell f[j][c] holds
        the maximum possible profit using items 0..j with capacity c
    """
    assert len(weights) == len(profits)
    n: int = len(weights)  # items are numbered 0 to n-1

    # Base row: only item 0 is available.
    first_row: List[float] = [
        profits[0] if weights[0] <= c else 0 for c in range(capacity + 1)
    ]
    f: List[List[float]] = [first_row]

    for j in range(1, n):
        prev = f[-1]
        row: List[float] = []
        for c in range(capacity + 1):
            if weights[j] > c:
                # Item j does not fit: inherit the best without it.
                row.append(prev[c])
            else:
                # Best of skipping vs. taking item j.
                row.append(max(prev[c], prev[c - weights[j]] + profits[j]))
        f.append(row)
    return f
ff16df93777bb59fa14d2aba4841415e5a13a4fe
99,221
def swap_rows(mat_in, row, col):
    """
    Swap a matrix row with a lower row when the pivot element is zero.

    If mat_in[row][col] is already non-zero, the matrix is returned
    untouched. Otherwise the first row below `row` with a non-zero element
    in the same `col` is swapped in (in place). Because a row swap flips
    the sign of the determinant, a multiplier of -1 is returned in that
    case. If the whole column (from `row` down) is zero, the original
    matrix is returned with multiplier 1.

    :param mat_in: list of lists, i.e. a matrix
    :param row: int, row index in a matrix
    :param col: int, column index in a matrix
    :return: tuple, a list of lists and a multiplier that is either 1 if no
        swapping has been done, or -1 if swapping was done.
    """
    # Pivot already usable: nothing to do.
    if mat_in[row][col] != 0:
        return mat_in, 1

    # Look below for a row carrying a non-zero entry in the same column.
    for candidate in range(row + 1, len(mat_in)):
        if mat_in[candidate][col] != 0:
            mat_in[row], mat_in[candidate] = mat_in[candidate], mat_in[row]
            return mat_in, -1

    # Entire column below (and at) `row` is zero: leave the matrix as-is.
    return mat_in, 1
a7f55e938a3bfb0aaf79453583fcd6b70b8c190e
99,223
import codecs
import json


def read_json_line(json_path, encoding='utf-8', limit=None):
    """
    Read a JSON-lines file: one JSON object per line.

    :param json_path: path to the input file
    :param encoding: text encoding of the file
    :param limit: optional maximum number of lines to read
    :return: list of the decoded JSON objects
    """
    json_obj_list = []
    cnt = 0
    # `with` guarantees the handle is closed; the previous version opened
    # the file and never closed it.
    with codecs.open(json_path, encoding=encoding) as f_in:
        for line in f_in:
            cnt += 1
            # Progress logging kept from the original implementation.
            if cnt % 100000 == 10:
                print("File Reading line %d" % cnt)
            if limit is not None and cnt > limit:
                break
            line = line.strip().replace("\n", "")
            json_obj_list.append(json.loads(line))
    return json_obj_list
284847da84f532cebc8494b1c1d49471f6c8ef31
99,224
def item_action(item):  # noqa
    """Returns actionnable method for the specified item.

    Args:
        item (any): Any object with a compile method.

    Returns:
        function: An actionnable method.
    """
    def action(self, element=None):
        """Compile `item` against `element`, append the result, chain self."""
        compiled = item.compile(element)
        self.append(compiled)
        return self
    return action
0dc01a996c218160abe470c333064f8e097e09d5
99,226
def phrase(*values):
    """
    create a phrase use generator values

    :param values: possible values (zero-argument callables)
    :return: a callable producing a string combining the different values
    """
    def build():
        return " ".join(value() for value in values)
    return build
30f3229cdd76995d9ec375918137a15bbb1d103e
99,228
def index(x, index: int = 0):
    """
    returns its input indexed by index value.

    Parameters
    ----------
    index : int
        The index value (defaults to the first element).
    """
    element = x[index]
    return element
faae6ebb4cf7a609630b403649cba5dc318ae00a
99,231
def client_color_to_rgb(value: int):
    """Converts a color number from Tibia's client data to a RGB value.

    Parameters
    ----------
    value: :class:`int`
        A numeric value representing a color.

    Returns
    -------
    int:
        The hexadecimal color represented."""
    # Valid palette indices are 0..215 (a 6x6x6 color cube).
    if not 0 <= value <= 215:
        return 0
    # Each channel takes a value in {0..5} scaled by 0x33 per step.
    red = (value // 36) * 0x33
    green = (value // 6 % 6) * 0x33
    blue = (value % 6) * 0x33
    return (red << 16) + (green << 8) + (blue & 0xFF)
945232751dac1d3316c50a23608c2041ee02e754
99,236