content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def hill_func(x, a, b, c, d):
    """Evaluate the Hill sigmoid at x.

    a : sigmoid low level
    b : sigmoid high level
    c : approximate inflection point
    d : slope of the sigmoid
    """
    ratio = (c / x) ** d
    return a + (b - a) / (1.0 + ratio)
fa73e9c929ac9e7e192e26db5719559d7234294a
118,035
def merge_events(items0, items1, data0, data1):
    """Merge events.

    Merge two dataframes based on given items. The ``items`` should be a
    subset of the ``data``s index.

    Parameters
    ----------
    items0 : list
    items1 : list
    data0 : pandas.DataFrame
    data1 : pandas.DataFrame

    Returns
    -------
    merged : pandas.DataFrame
        Row i pairs items0[i] with items1[i]; the original index values
        are preserved in the 'index0' and 'index1' columns.
    """
    # BUG FIX: work on copies so the caller's dataframes are not mutated
    # (the original added 'index0'/'index1' columns in place).
    data0 = data0.copy()
    data1 = data1.copy()

    data0["index0"] = data0.index
    data0 = data0.loc[data0.index.intersection(items0)]
    data0 = data0.reindex(items0)
    data0.index = range(len(data0))

    data1["index1"] = data1.index
    data1 = data1.loc[data1.index.intersection(items1)]
    data1 = data1.reindex(items1)
    data1.index = range(len(data1))

    merged = data0.join(data1)
    return merged
9bae0dd15182fca15664beb672a6e273417db72d
118,036
def select_extinction(extinction, way="min"):
    """
    For each star, keep exactly one extinction value.

    Parameters
    ----------
    extinction : list of tuples
        A list returned by the extinction function; element 0 of each
        tuple is the star id, the last element is the extinction value.
    way : string
        How to pick the single value per star: "min" (default) or "max".
    """
    # Group the entries by star id.
    grouped = {}
    for entry in extinction:
        grouped.setdefault(entry[0], []).append(entry)
    pick = min if way == "min" else max
    # Iterate via set() to reproduce the original traversal order.
    return [pick(grouped[star_id], key=lambda e: e[-1])
            for star_id in set(grouped)]
ed685d988be55ca08fbb6b5643e57439b941ad49
118,037
def to_byte(word: int) -> int:
    """
    Mask ``word`` down to its lowest byte (8 bits).

    >>> hex(to_byte(0x12345678))
    '0x78'
    >>> to_byte(257)
    1
    >>> hex(to_byte(0x00001101))
    '0x1'

    :param word: integer to mask
    :return: the low 8 bits of ``word``
    """
    assert isinstance(word, int), 'Argument is not of type int'
    low_byte = word & 0xFF
    return low_byte
52f7edb8c25f0c8841c0609c8b9d9d2641021afd
118,041
import codecs def _decode_string_escape_py3(str_): """ Python3 string escape Do not use directly, instead use decode_string. """ # Based on: http://stackoverflow.com/a/4020824 return codecs.decode(str_, "unicode_escape")
232ed33e02f1ff4b409b568b5a4337631561b79d
118,042
def is_create_required(event):
    """
    Indicates whether a create is required in order to update.

    :param event: CloudFormation custom-resource event to check
    """
    if event['RequestType'] == 'Create':
        return True
    new = event['ResourceProperties']
    old = event.get('OldResourceProperties', new)
    priority_changed = old.get('Priority', 0) != new.get('Priority', 0)
    listener_changed = old.get('ListenerArn', None) != new.get('ListenerArn', None)
    return priority_changed or listener_changed
f2739f09c045acd8e755a6822e063c6daac6769d
118,043
def date_to_term(date):
    """Return the term string for the given date.

    Before June it is the spring term ("V"); June, July, or before the
    20th of August it is the summer term/vacation ("S"); otherwise it is
    the fall term ("H").

    :return: the term letter followed by the last two digits of the year.
    """
    if date.month < 6:
        term = "V"
    elif (date.month, date.day) < (8, 20):
        term = "S"
    else:
        term = "H"
    return f"{term}{date.year % 100}"
d60b3ef8cfc8b32e0fd5593183d8d20ae86b11bd
118,046
def lr_schedule0(epoch): """Learning Rate Schedule Learning rate is scheduled to be reduced after 10, 20, 30, 40 epochs. Called automatically every epoch as part of callbacks during training. # Arguments epoch (int): The number of epochs # Returns lr (float32): learning rate """ lr = 1e-1 if epoch > 180: lr *= 0.5e-3 elif epoch > 160: lr *= 1e-3 elif epoch > 120: lr *= 1e-2 elif epoch > 10: lr *= 1e-1 print('Learning rate: ', lr) return lr
932ce82b02fc771d860dd53952a5b2792d9a99db
118,048
from typing import Dict
from typing import Tuple


def cluster_expenses(d: Dict[str, float], total_expenses: float, min_quota: float = 0.025) \
        -> Tuple[Dict[str, float], float]:
    """
    Combines all expenses that account for less than min_quota into one
    category 'other'.

    Args:
        d: dict that contains the labels and the expenses
        total_expenses: total expenses of that period
        min_quota: partition value

    Returns:
        Tuple: the new dictionary with new category 'other' and the total
        expenses.  'other' is always present in the result, even if zero.
    """
    threshold = min_quota * total_expenses
    d['other'] = 0.
    for key in list(d.keys()):
        # BUG FIX: the original also visited the 'other' bucket itself,
        # doubling it and then deleting it (losing the folded expenses)
        # whenever the accumulated sum stayed below the threshold.
        if key == 'other':
            continue
        if d[key] < threshold:
            d['other'] += d[key]
            del d[key]
    return d, total_expenses
60df043146ed963534323545cb14a83445b21e73
118,051
from typing import Tuple


def fix_start_end_swap(start: float, end: float) -> Tuple[float, float]:
    """Return (start, end) ordered so the smaller value comes first.

    Args:
        start: The parsed start time in milliseconds.
        end: The parsed end time in milliseconds.

    Returns:
        A tuple with the lower value at the first position and the larger
        value at the last position.
    """
    lower = min(start, end)
    upper = max(start, end)
    return lower, upper
e944feae63a0d3cebbf63883b7bb3ab0d80c3671
118,052
def _neighbor_keys_from_bond_keys(key, bnd_keys): """ Determine neighbor keys of an atom from the bond keys """ nkeys = [] for bnd_key in bnd_keys: if key in bnd_key: nkey, = bnd_key - {key} nkeys.append(nkey) return frozenset(nkeys)
2cf82e8d506eec64203077a063e2b37fc4413e4b
118,054
def complete_list_with(dest, source, update):
    """
    Calls *update()* for every missing element in *dest* compared to
    *source*.  The update function receives the respective element from
    *source* as an argument.  Modifies and returns *dest*.
    """
    # Only the tail of *source* beyond the current length is missing.
    for missing in source[len(dest):]:
        dest.append(update(missing))
    return dest
5401d99cb029d271b78193d21388ed14792e3e1d
118,057
def _lower(text): """Convert the supplied text to lowercase""" return text.lower()
fd381b0d95644b4c83fb9d90e3c24ffeb994d459
118,059
import torch


def rgb2gray(img: torch.Tensor) -> torch.Tensor:
    """Convert an RGB image tensor to gray scale.

    NOTE(review): the original docstring said "single image (not batched)",
    but the indexing treats dim 1 as the channel axis, which looks like a
    batched (N, C, H, W) layout -- confirm with callers.

    source: https://stackoverflow.com/questions/14330/rgb-to-monochrome-conversion
    """
    red, green, blue = img[:, 0], img[:, 1], img[:, 2]
    return 0.2125 * red + 0.7154 * green + 0.0721 * blue
cb52bc3ccbb04c31089134262800f84656b963fc
118,066
def idstr(ident):
    """Returns a string for the given identifier.

    The identifier bytes are rendered as big-endian hex without the '0x'
    prefix, e.g. b'\\x1f' -> 'ident:1f'.
    """
    value = int.from_bytes(ident, 'big')
    # BUG FIX: the old hex(...).lstrip('0x') stripped the *characters*
    # '0' and 'x', which turned hex(0) == '0x0' into an empty string.
    # '%x' formatting keeps the '0' and is identical for all other values.
    return 'ident:%x' % value
a08c004d034067bfd75110745d5214d7da93ba57
118,070
def _split_left_right(name): """Split record name at the first whitespace and return both parts. RHS is set to an empty string if not present. """ parts = name.split(None, 1) lhs, rhs = [parts[0], parts[1] if len(parts) > 1 else ''] return lhs, rhs
04869178d80e79650bde2567c2a6df40376c3d8a
118,076
def filter_uncontested(votes, threshold):
    """
    Filter elections that are effectively uncontested.

    If `threshold` votes is won by any party, the election is considered
    uncontested and removed; remaining missing values are dropped.
    """
    uncontested_mask = votes > threshold
    contested = votes[~uncontested_mask]
    return contested.dropna()
e36f9c5d4997e90be8274317b89f41bdd65a0bb9
118,078
def locate_chapters_and_recipes(directory_of_this_script):
    """
    Returns a list of chapters and a dictionary of
    chapter -> list of recipes.
    """
    root = directory_of_this_script.parent
    chapters = sorted(path.parts[-1] for path in root.glob('chapter-*'))
    # chapter 15 is different and "hardcoded", so we exclude it from the
    # list of chapters to process.
    # BUG FIX: list.remove raised ValueError when 'chapter-15' was absent;
    # only remove it when it is actually present.
    if 'chapter-15' in chapters:
        chapters.remove('chapter-15')
    recipes = {}
    for chapter in chapters:
        recipes[chapter] = sorted(
            path.parts[-1] for path in (root / chapter).glob('recipe-*'))
    return chapters, recipes
6504121cb07e2749461a68e34e496738e593d495
118,082
def _GetOperationByNameDontThrow(graph, name): """Returns an Operation with the given name. Args: graph: Graph where to look for the operation. name: String, name of Operation to return. Returns: The Operation with the given name. None if the name does not correspond to any operation in the graph """ try: return graph.get_operation_by_name(name) except KeyError: return None
5c5c71c453f8b48336ae6123b754cd887b9abdcd
118,085
def fbfill(x):
    """Forward-fill, then backward-fill missing values."""
    forward_filled = x.ffill()
    return forward_filled.bfill()
81673c18c0c36b7a29ecaaba852b8f0732748018
118,087
def message_level_number(message_level):
    """Map a message level tag to its numeric level (0 when unknown)."""
    levels = {
        "error(parsing)": 1,
        "error(2)": 2,
        "error(mac)": 2,
        "error(3)": 3,
    }
    return levels.get(message_level, 0)
4af3f112fddf9d4b003fc66523991030c2c51dd4
118,092
from typing import List
from typing import Dict
import heapq


def top_anomalies(points: List[Dict], n=10) -> List[Dict]:
    """Return the ``n`` points with the largest "severity" values."""
    def severity(point):
        return point["severity"]

    return heapq.nlargest(n, points, key=severity)
a7ed3495070e3c8b913137736b1cb841c93443db
118,093
def _peek_last(stack): """Returns the top element of stack or None""" return stack[-1] if stack else None
0638d3afb504f2e12c54a7a001b1a19045039385
118,101
def euclidean(feat, query, featl2norm=None, qryl2norm=None):
    """Squared Euclidean distance between query rows and feature rows.

    Args:
        feat: N x D feature matrix
        query: Q x D feature vector
        featl2norm: 1 x N vector of precomputed squared norms (optional)
        qryl2norm: Q x 1 vector of precomputed squared norms (optional)

    Returns:
        dist: Q x N matrix of squared distances
    """
    dotprod = query.dot(feat.T)
    # ||q - f||^2 = ||q||^2 - 2 q.f + ||f||^2; norms are computed lazily.
    if qryl2norm is None:
        qryl2norm = (query ** 2).sum(1).reshape(-1, 1)
    if featl2norm is None:
        featl2norm = (feat ** 2).sum(1).reshape(1, -1)
    return qryl2norm + featl2norm - 2 * dotprod
926d13a54be7335daaac93c18f4e184b9aeaa1db
118,105
def _ApplySizeLimit(regions, size_limit): """Truncates regions so that the total size stays in size_limit.""" total_size = 0 regions_in_limit = [] for region in regions: total_size += region.size if total_size > size_limit: break regions_in_limit.append(region) return regions_in_limit
ada9054f48f414fca37c513515295948c2e14d26
118,107
import re


def get_year(text):
    """Parse the four-digit year out of a date string.

    Example formats:
     - May 19, 2007
     - Jan 7, 2011, 4:25 PM
    """
    first_match = re.findall(r', [0-9]{4}', text)[0]
    # Drop the leading ", " to leave just the digits.
    return first_match[2:]
b0f70cbeebf23043b24f2999b6f3af8fb12b629e
118,108
def class_name(obj):
    """
    Return the lowercase class name of any object's class, especially
    that of a Django model instance.
    """
    return type(obj).__name__.casefold()
99c3bc50ea444940dbcd0978c6e46cb7028b9c15
118,109
def str_to_bytes(s):
    """Translate string or bytes to bytes."""
    # Strings are UTF-8 encoded; anything else is passed through.
    return s.encode("UTF-8") if isinstance(s, str) else s
a34bd844cc35fcd815e2af3329dda8effbaf5999
118,114
import re

# Matches a //___NOPYTHON[[ ... //]]___NOPYTHON block plus trailing whitespace.
_NOPYTHON_RE = re.compile(r"\/\/___NOPYTHON\[\[.*?\/\/\]\]___NOPYTHON\s*",
                          flags=re.DOTALL)


def cleanNoPython(data: str) -> str:
    """Remove NOPYTHON blocks."""
    return _NOPYTHON_RE.sub("", data)
e65f6514ca59e9d3c7312155566ac3a961492ccd
118,118
from typing import List


def move_string_to_the_end(row: List[str], string: str) -> List[str]:
    """Move every occurrence of ``string`` to the end of the list.

    The number of list elements does not change, only their position.
    """
    kept = [item for item in row if item != string]
    padding = [string] * (len(row) - len(kept))
    return kept + padding
292ed8c8daefd509c738f630c3182401d27708b2
118,121
import hashlib


def compute_md5(file_to_checksum, chunk_size=1024*64):
    """Computes MD5 checksum of specified file.

    file_to_checksum should be an open, readable, file handle, with its
    position at the beginning, i.e. so that .read() gets the entire
    contents of the file.

    NOTE: under python3, the file should have been open in binary mode
    ("rb") so that bytes (not strings) are returned when iterating over
    the file.
    """
    md5gen = hashlib.md5()
    chunk = file_to_checksum.read(chunk_size)
    while chunk:
        md5gen.update(chunk)
        chunk = file_to_checksum.read(chunk_size)
    return md5gen.hexdigest()
5d0cd4a7724e105cff4fb3e81d488a2f0cda239d
118,133
def create_object(path, content_type='image/png', bytes=4096,
                  hash='fcfc6539ce4e545ce58bafeeac3303a7',
                  last_modified='2016-08-27T23:12:22.993170'):
    """Creates a fake swift object."""
    # Key order matches the original literal for any order-sensitive caller.
    return dict(hash=hash,
                last_modified=last_modified,
                name=path,
                content_type=content_type,
                bytes=bytes)
6c0c1173c957dbbf2b70e7050347a385be74bd59
118,135
def add_to_module(module, name=None):
    """Decorator to register a function or class on ``module``.

    :param module: object to attach the decorated value to
    :param name: explicit attribute name; falls back to the decorated
        object's ``__name__`` when falsy
    """
    def wrapper(value):
        attr = name or getattr(value, '__name__', None)
        if attr:
            setattr(module, attr, value)
        return value
    return wrapper
aa11f1fc867f20eb468bdb7aabd48b91a7ce9bfa
118,140
def dirty(graph, implicit=True):
    """
    Return a set of all dirty nodes in the graph, including (by default)
    implicitly dirty nodes.
    """
    # Explicitly dirty: nodes whose attribute dict flags 'dirty'.
    dirty_nodes = {node for node, attrs in graph.node.items()
                   if attrs.get('dirty', False)}
    if not implicit:
        return dirty_nodes
    # Implicitly dirty: every package that depends on a dirty package
    # (edges are reversed, so dependents show up as predecessors).
    dependents = set()
    for node in dirty_nodes:
        dependents.update(graph.predecessors(node))
    return dirty_nodes | dependents
5ecc0bb661aa0565d320851ed260e1a0a2acde55
118,143
def filtrar_entre(valores, menor, maior):
    """
    Build a list with the numbers from 'valores' lying in the interval
    ['menor', 'maior') — closed on the lower bound, open on the upper.

    Parameters: a list of floats and the two limits.
    Returns: the filtered list.
    """
    return [valor for valor in valores if menor <= valor < maior]
d237a40fc82b9cec4bc9052a20cc0064c5dabcf1
118,144
from typing import Iterable


def munch(iterable: Iterable, n: int):
    """
    Consume an iterable in sequential n-length chunks (any leftover
    shorter than n is dropped).

    >>> [list(x) for x in munch([1, 2, 3, 4], 2)]
    [[1, 2], [3, 4]]

    :param iterable: sequence to iterate.
    :param n: int - Chunk length.
    :return: Iterator
    """
    # zip pulls from the SAME iterator n times per output tuple.
    shared = iter(iterable)
    return zip(*([shared] * n))
5a4a8bf421613d8ec7ad0b5cbff28c68f9629132
118,148
import re


def extract_flag_from_string(str, var_name):
    """Parse a flag value from a string formatted as a flagfile is.

    Args:
        str (str): String expected to be in the format of a flagfile,
            i.e. one "--name=value" entry per line.
        var_name (str): Name of the flag.

    Returns:
        _: Value corresponding to the flag. If the flag does not exist
        exactly once in the flagfile, an empty result will be given.
    """
    # BUG FIX: escape var_name so regex metacharacters in a flag name
    # (e.g. dots) cannot change the match.  Also corrected the docstring,
    # which documented parameters "str"/"flag_name" instead of the actual
    # "str"/"var_name".
    m = re.findall(f"--{re.escape(var_name)}=(.*)", str)
    return m[0] if len(m) == 1 else ""
9b75b4ff57b46026910e03af69d5fdf31bc0514b
118,150
def any_dont_exist(paths):
    """Return True if any path in list does not exist."""
    return any(not path.exists() for path in paths)
1846bd54e24bae019a55c4cf0cf55375d979d9ef
118,152
def cols_to_stack(columns, rows, cols):
    """Takes a 2D array of columns, reshapes to cols along 3rd axis

    The reverse function of stack_to_cols

    Args:
        columns (ndarray): 2D array of columns of data
        rows (int): number of rows of original stack
        cols (int): number of columns of original stack

    Returns:
        ndarray: a 3D array where each output[idx, :, :] was column idx

    Raises:
        ValueError: if input shape is not 2D

    Example:
        >>> a = np.arange(18).reshape((2, 3, 3))
        >>> cols = stack_to_cols(a)
        >>> print(cols)
        [[ 0  1  2  3  4  5  6  7  8]
         [ 9 10 11 12 13 14 15 16 17]]
        >>> print(np.all(cols_to_stack(cols, 3, 3) == a))
        True
    """
    # Docstring fix only: the Args block named a non-existent parameter
    # ("stacked") and described `cols` as a row count; the return value is
    # 3D, not 2D.  The computation is unchanged.
    if len(columns.shape) != 2:
        raise ValueError("Must be a 2D ndarray")
    return columns.reshape((-1, rows, cols))
40342fbb2651045f1f8b9e06556c678516358910
118,154
def nombre_aretes(matrice):
    """Return the number of edges in the graph given by its (assumed
    symmetric) adjacency matrix."""
    n = len(matrice)
    entries = sum(1 for i in range(n) for j in range(n)
                  if matrice[i][j] == 1)
    # Each undirected edge appears twice in a symmetric matrix.
    return entries // 2
13262de15fd2d5a8018a63e8dabb529169d5c35a
118,156
def make_stat_flag_data(cls_dt):
    """Derive the customer status flag from the account-closing date.

    :param cls_dt: closing date; truthy when the account was closed
    :return: status flag — "c" (closed) when a closing date is present,
        otherwise "n" (normal)
    """
    return "c" if cls_dt else "n"
891fe5b340488cb8ef147647df8ecf44f8f7a498
118,157
import json


def user_exists(uname):
    """Checks if user exists in the passfile (one JSON record per line)."""
    with open('passfile.txt', 'r') as f:
        return any(json.loads(line)['username'] == uname for line in f)
ab730e72af8a18da8a43f71e3683704e69df3dc0
118,160
import json


def device_info(dev, testbed_obj, showcmd='show version', save_to_json=False):
    """Connect to a testbed device, run a show command, and optionally save
    the parsed output.

    Connects to ``dev`` in the instantiated testbed ``testbed_obj`` and
    executes ``showcmd`` ('show version' by default).  When ``save_to_json``
    is True (-s on the command line) the parsed response is written to a
    JSON file named ``<device name>.json`` in the current working
    directory; by default nothing is saved.

    :param dev: the testbed device to query
    :param testbed_obj: the testbed object
    :param showcmd: what show command to execute
    :param save_to_json: write the response to a JSON file when True
    :return: the device object and the parsed show-command response
    """
    target = testbed_obj.devices[dev]
    target.connect()
    parsed = target.parse(showcmd)
    print(f"Response from {dev} is of type {type(parsed)} and length {len(parsed)}")
    print(parsed)
    print()
    print(json.dumps(parsed, indent=4))
    print(parsed.keys())
    if save_to_json:
        out_name = f"{dev}.json"
        with open(out_name, 'w', encoding='utf-8') as fh:
            json.dump(parsed, fh, ensure_ascii=False, indent=4)
        print(f"\nFILE SAVED: Saved Response to JSON file {out_name}")
    return target, parsed
0b0597a53a1d3919d2c5e314b3ddbe4d40678253
118,161
def td_format_seconds_6(td_object):
    """Format a timedelta as a compact 6-character clock string.

    Hours -> seconds; input must be < 10h.
    Returns strings like '9h99.99' / ' 905'.
    """
    total = int(td_object.total_seconds())
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    if hours > 0:
        return f'{hours:1d}h{minutes:02d}{seconds:02d}'
    if hours != 0:
        # Negative hour count: minutes keep the space-padded width.
        return f'{hours:1d}h{minutes:2d}{seconds:02d}'
    return f'{minutes:2d}{seconds:02d}'
f97ebca2ad7b7ce42c3d06a4f3953ff84476d2ab
118,164
def keep_resources(request):
    """
    Presence of `keep_resources` retains the AWS resources created by
    cloudformation during the test run.  By default, the resources are
    deleted after the run.
    """
    config = request.config
    return config.getoption("--keep-resources")
217b9e2ba86cd54ae2a19bc4eed05ffd6896bca2
118,165
def flatten_list(L):
    """
    Flattens the given list (recursively).

    Remark: to also allow tuples, use ``isinstance(element, (list, tuple))``.
    To exclude subclasses of list, use ``type(element) == list`` instead.
    """
    result = []
    for element in L:
        if not isinstance(element, list):
            result.append(element)
            continue
        result.extend(flatten_list(element))
    return result
b2e4588d46bb4e6243bb0a827d3dd484db6619c2
118,166
def _dataframe_to_dict(dataframe): """ Converts pandas dataframe to dictionary. """ return dataframe.to_dict("records")
4324a483831f42c47982c7533fd8140679996180
118,172
import csv


def read_ship_db_file(csv_file: str) -> list:
    """Reads the MID_DB_FILE file into a `list` of row dicts."""
    fields: list = ["MMSI", "name", "unk", "vtype"]
    with open(csv_file, 'r', encoding='ISO-8859-1') as csv_fd:
        return list(csv.DictReader(csv_fd, fields))
03c9e1a313d603f3cf2b3c59995fac62e3bf9891
118,173
def extract_query_string_parameters(event):
    """
    Returns the query string parameters from the request.

    The format is a sequence of two-element tuples (name, value).
    Supports regular and multivalue querystringparameters.

    :param event: API Gateway / Lambda proxy event dict
    :return: list of (name, value) tuples
    """
    # BUG FIX: Lambda proxy events carry an explicit None (not a missing
    # key) when no parameters were sent, so .get(key, {}) could still
    # yield None; `or {}` covers both cases.
    single = event.get("queryStringParameters") or {}
    # BUG FIX: copy before merging so the caller's event dict is not
    # mutated.
    multi = dict(event.get("multiValueQueryStringParameters") or {})
    for name, value in single.items():
        if name not in multi:
            multi[name] = [value]
    return [(name, value)
            for name, values in multi.items()
            for value in values]
163529cbcc09beddff77a6292137469b685369fc
118,178
from datetime import timezone


def utc_timestamp(dt):
    """Convert a naive datetime (interpreted as UTC) into a Unix timestamp.

    :param dt: naive ``datetime.datetime``
    :return: integer seconds since the epoch

    The previous implementation pulled in the third-party ``pytz`` package
    just to attach UTC; ``datetime.timezone.utc`` is the stdlib equivalent
    and attaching UTC involves no DST rules, so the result is identical.
    """
    return int(dt.replace(tzinfo=timezone.utc).timestamp())
a6c7180392c16fd201cd09628e4cfbada0420004
118,179
from typing import Any
from typing import Hashable


def is_hashable(obj: Any) -> bool:
    """Return True if *obj* counts as hashable, according to Python.

    The Hashable ABC check amounts to verifying that
    ``type(obj).__hash__`` is not None.
    """
    return isinstance(obj, Hashable)
296b2b9241a70c2672879d4d90027aa62064e926
118,181
import time


def sleep_a_while(t: int) -> str:
    """Sleep for the requested number of seconds.

    Args:
        t (int): seconds to sleep

    Returns:
        str: sleeping description
    """
    time.sleep(t)
    description = f"sleep for {t} seconds"
    return description
d5f2a8ecd5c2115d1fc66278c7a8183030a72ba9
118,183
def prettySize(bytes):
    """Format a byte count for pretty display."""
    size = float(bytes)
    kb = 1024.0
    if size < kb:
        return '<1KB'
    if size < kb * kb:
        return '%dKB' % (size / kb)
    return '%.1fMB' % (size / kb / kb)
e4efe6b6525d2f6aa8d285b324bc50ac29244df1
118,186
def etcd_generate_run_command(etcd_config):
    """
    Generate the run command for etcd from a config dictionary.

    :param etcd_config: The config dictionary
    :return: A list of strings representing the running command
    """
    listen = etcd_config["listen"]
    adv = etcd_config["advertise"]
    adv_peer_url = f"http://{adv['address']}:{adv['peer_port']}"

    # Fundamental args
    command = [
        etcd_config["exe"],
        "--name", etcd_config["name"],
        "--data-dir", etcd_config["data_dir"],
        "--listen-peer-urls", f"http://{listen['address']}:{listen['peer_port']}",
        "--listen-client-urls", f"http://{listen['address']}:{listen['client_port']}",
        "--initial-advertise-peer-urls", adv_peer_url,
        "--advertise-client-urls", f"http://{adv['address']}:{adv['client_port']}",
    ]

    # If the instance should run as a proxy
    if "proxy" in etcd_config:
        command += ["--proxy", etcd_config["proxy"],
                    "--proxy-refresh-interval", "10000"]

    # If strict reconfig mode is on
    if etcd_config.get("strict_reconfig", False):
        command.append("--strict-reconfig-check")

    # If cluster information exists: two types are possible, init and join
    if "cluster" in etcd_config:
        cluster = etcd_config["cluster"]
        if cluster["type"] == "init":
            # When initializing, either discovery mode and token or peer
            # members must be given
            if "discovery" in cluster:
                command += ["--discovery", cluster["discovery"]]
            else:
                command += ["--initial-cluster-state", "new", "--initial-cluster"]
                # If a member argument is given, use it.  Otherwise, assume
                # this instance is the only one in the cluster.
                if "member" in cluster:
                    command.append(cluster["member"])
                else:
                    command.append(f"{etcd_config['name']}={adv_peer_url}")
        elif cluster["type"] == "join":
            # When joining, the list of cluster members must be given
            command += ["--initial-cluster-state", "existing",
                        "--initial-cluster", cluster["member"]]

    return command
f22b8b5b6cc253d7fbf117579ccf649dce2fb834
118,187
import json


def ParseJsonStats(logline):
    """Parse json stats logged before compiler_proxy quitting.

    Args:
      logline: string (in json form)
    Returns:
      json object, or None if parsing failed
    """
    try:
        return json.loads(logline)
    except ValueError as ex:
        message = 'failed to parse stats as json. stats=%s error=%s' % (logline, ex)
        print(message)
        return None
63bd628aa95ed5fe4d35bdd5cee59b8c203f24a2
118,190
def cmap(i, j, n):
    """Given a pair of feed indices, return the pair index.

    Parameters
    ----------
    i, j : integer
        Feed index.
    n : integer
        Total number of feeds.

    Returns
    -------
    pi : integer
        Pair index.
    """
    # The mapping is symmetric, so order the pair instead of recursing.
    if i > j:
        i, j = j, i
    return (n * (n + 1) // 2) - ((n - i) * (n - i + 1) // 2) + (j - i)
caa0c4a55e51296af34bf05188796ecd839896c8
118,193
def next_letter(letter):
    """Return the uppercase letter after ``letter``, wrapping Z -> A.

    Example: next_letter('A') -> 'B'.

    @param letter is the letter you are starting with
    @returns letter + 1 in the alphabet
    """
    code = ord(letter)
    if code < 65:        # below 'A'
        return 'Z'
    if code >= 90:       # 'Z' or beyond wraps to 'A'
        return 'A'
    return chr(code + 1)
b177dea090b843e1612819fdccbad58f4ed604ab
118,195
def normal_time_averaged_sound_intensity(time_averaged_sound_intensity, unit_normal_vector):
    """
    Normal time-averaged sound intensity :math:`I_{n,T}`.

    :param time_averaged_sound_intensity: Time-averaged sound intensity :math:`\\mathbf{I}_T`.
    :param unit_normal_vector: Unit normal vector :math:`\\mathbf{n}`.

    .. math:: I_{n,T} = \\mathbf{I}_T \\cdot \\mathbf{n}
    """
    intensity = time_averaged_sound_intensity
    # Project the intensity vector onto the unit normal.
    return intensity.dot(unit_normal_vector)
b85df57ba11585ec8ca407f2d757ce385315d4d2
118,197
import struct


def pack_uint_bytes(val: int) -> bytes:
    """
    Pack a non-negative integer into the smallest network-order buffer
    (1, 2, 4 or 8 bytes) that can hold it.

    :param val: the integer.
    :return: the buffer.
    """
    for limit, fmt in ((0xFF, '!B'), (0xFFFF, '!H'), (0xFFFFFFFF, '!I')):
        if val <= limit:
            return struct.pack(fmt, val)
    return struct.pack('!Q', val)
eef82a18c478ba77a2cfbe83f55465c522f788ec
118,198
import re


def protocol_and_address(address):
    """
    Split a Channel Address into its Protocol and Address pieces.

    Parameters
    ----------
    address : str
        The address from which to remove the address prefix.

    Returns
    -------
    protocol : str
        The protocol used. None in case the protocol is not specified.
    addr : str
        The piece of the address without the protocol.
    """
    prefix_match = re.match('.*?://', address)
    if prefix_match is None:
        return None, address
    prefix = prefix_match.group(0)
    # Protocol is the prefix minus the trailing '://'.
    return prefix[:-3], address.replace(prefix, '')
b20295f4288739359e02e74759ba7e9b876882f3
118,200
def _parse_single_arg(function_name, additional_parameter, args, kwargs): """ Verifies that a single additional argument has been given (or no additional argument, if additional_parameter is None). Also verifies its name. :param function_name: the name of the caller function, used for the output messages :param additional_parameter: None if no additional parameters should be passed, or a string with the name of the parameter if one additional parameter should be passed. :return: None, if additional_parameter is None, or the value of the additional parameter :raise TypeError: on wrong number of inputs """ # Here all the logic to check if the parameters are correct. if additional_parameter is not None: if len(args) == 1: if kwargs: raise TypeError("{}() received too many args".format( function_name)) additional_parameter_data = args[0] elif len(args) == 0: kwargs_copy = kwargs.copy() try: additional_parameter_data = kwargs_copy.pop( additional_parameter) except KeyError: if kwargs_copy: raise TypeError("{}() got an unexpected keyword " "argument '{}'".format( function_name, kwargs_copy.keys()[0])) else: raise TypeError("{}() requires more " "arguments".format(function_name)) if kwargs_copy: raise TypeError("{}() got an unexpected keyword " "argument '{}'".format( function_name, kwargs_copy.keys()[0])) else: raise TypeError("{}() received too many args".format( function_name)) return additional_parameter_data else: if kwargs: raise TypeError("{}() got an unexpected keyword " "argument '{}'".format( function_name, kwargs.keys()[0])) if len(args) != 0: raise TypeError("{}() received too many args".format( function_name)) return None
b2f509a15fdbf063139ea4d8de720ca8e111c715
118,207
def override_dict(defaults, overrides):
    """
    Override a dictionary with values in another dictionary, descending
    recursively into matching nested dictionaries.

    :param dict defaults: The original values.
    :param dict overrides: The overriding values.
    :return: The modified ``defaults`` dictionary.
    :rtype: Dict
    """
    for key, value in overrides.items():
        if not isinstance(value, dict):
            defaults[key] = value
            continue
        # Only an existing dict can serve as the base for a nested merge.
        base = defaults.get(key)
        if not isinstance(base, dict):
            base = {}
        defaults[key] = override_dict(base, value)
    return defaults
a85d66a48b451f5c67e6ab3408f6dd4dd9995683
118,211
def _normalize_params(image, width, height, crop): """ Normalize params and calculate aspect. """ if width is None and height is None: raise ValueError("Either width or height must be set. Otherwise " "resizing is useless.") if width is None or height is None: aspect = float(image.width) / float(image.height) if crop: raise ValueError("Cropping the image would be useless since only " "one dimention is give to resize along.") if width is None: width = int(round(height * aspect)) else: height = int(round(width / aspect)) return (width, height, crop)
eb4f0b5b39dc9925d6d18c6bf7bb517a7db9199a
118,215
def simple_ma(close, period=10):
    """
    Arithmetic mean of prices over the given number of past days; usually
    calculated to identify the trend direction of a stock.

    :param close: closing price
    :param period: specified period (default: 10)
    :return: simple moving average
    """
    rolling_window = close.rolling(window=period)
    return rolling_window.mean()
718ec800ec9213532a155cf0cb0db6df256abc9e
118,217
import types


def new_module(name):
    """new_module(name) -> module

    Create a new module. Do not enter it in sys.modules.
    The module name must include the full package name, if any.
    """
    # BUG FIX: __builtins__ is the builtins *module* only in __main__; in
    # an imported module it is a dict, so __builtins__.__class__(name)
    # would construct a dict there.  types.ModuleType is the portable
    # spelling.  (This is not used in pyjs.)
    return types.ModuleType(name)
12cea443cb105bda9d3fdf572ed679d0c361ff0e
118,218
from typing import Tuple


def coco_to_percent(rect: Tuple[float, float, float, float], shape: Tuple[int, int]) -> Tuple[
    float, float, float, float]:
    """Normalize a bounding box to [0, 1] coordinates.

    Args:
        rect: (x_min, y_min, x_max, y_max) in absolute coordinates
        shape: (x_net_len, y_net_len) — the reference width and height

    Returns:
        (x_min, y_min, x_max, y_max) with x values divided by shape[0]
        and y values by shape[1], so all belong to [0, 1]
    """
    x_min, y_min, x_max, y_max = rect
    width, height = shape
    return x_min / width, y_min / height, x_max / width, y_max / height
72e98d31a7a1fd58946f5f2f6364f543758a0b58
118,228
def get_unmet_demand_hours(solution, carrier='power', details=False):
    """
    Get information about unmet demand from ``solution``.

    Parameters
    ----------
    solution : solution container
        NOTE(review): accessed like an xarray Dataset with an 'e' variable
        over dims c (carrier), y (technology) and x (location), plus a
        per-timestep ``time_res`` array — confirm against the producer.
    carrier : str, default 'power'
    details : bool, default False
        By default, only the number of hours with unmet are
        returned. If details is True, a dict with 'hours', 'timesteps',
        and 'dates' keys is returned instead.
    """
    # Unmet demand per timestep for this carrier, summed over locations.
    unmet = (solution['e']
             .loc[dict(c=carrier, y='unmet_demand_' + carrier)]
             .sum(dim='x')
             .to_pandas())
    # Count of timesteps with any unmet demand.
    timesteps = len(unmet[unmet > 0])
    # Total time resolution over those timesteps, i.e. affected hours.
    hours = solution.time_res.to_pandas()[unmet > 0].sum()
    if details:
        return {'hours': hours, 'timesteps': timesteps,
                'dates': unmet[unmet > 0].index}
    else:
        return hours
4289e0c6ef93ea3ac362a8098f8791db3ea4949d
118,236
def dict_to_progress_file(file_dict: dict, log_path: str) -> bool:
    """
    Turns a dictionary into a delphin progress file.

    :param file_dict: Dictionary holding the information for the progress
        file, with parallel lists under 'simulation_time', 'real_time'
        and 'percentage'
    :param log_path: Path to were the progress file should be written
    :return: True
    """
    spaces = 15

    def _pad(value) -> str:
        # Right-align a value in a fixed-width column of `spaces` chars.
        text = str(value)
        return ' ' * (spaces - len(text)) + text

    # BUG FIX: use a context manager so the file handle is closed even if
    # formatting raises part-way through (the original leaked it).
    with open(log_path + '/progress.txt', 'w') as file_obj:
        file_obj.write(' Simtime [s] \t Realtime [s]\t Percentage [%]\n')
        for i in range(len(file_dict['simulation_time'])):
            percentage = file_dict['percentage'][i]
            if int(percentage) == 100:
                pct_str = _pad('1e+02')
            elif percentage == int(percentage):
                # Whole-number percentages are printed without a decimal.
                pct_str = _pad(int(percentage))
            else:
                pct_str = _pad(percentage)
            file_obj.write(_pad(file_dict['simulation_time'][i]) + '\t' +
                           _pad(file_dict['real_time'][i]) + '\t' +
                           pct_str + '\n')
    return True
82235244990fef172574fd3fda42547474ffb446
118,240
def get_halfway_point(p1, p2):
    """Return the midpoint of the segment p1-p2 as an (x, y) tuple."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    return (x1 + x2) / 2, (y1 + y2) / 2
34d25eeb0356267e5d956431df1a17c284fdd896
118,242
def default_case(experiment_code, participantid, project_id):
    """Build a minimal 'case' record linked to the given experiment and project."""
    case = {
        "type": "case",
        "experiments": {"submitter_id": experiment_code},
        "submitter_id": participantid,
    }
    # Clinical attributes default to "unknown" for a minimal case.
    case["primary_site"] = "unknown"
    case["disease_type"] = "unknown"
    case["project_id"] = project_id
    return case
65cf20a0ddc2114c83dc859a966a00a9fe48b3e8
118,245
def build_file_unique_id(doc):
    """Set doc['unique_id'] to '<bucket_id>_<file_id>' and return the same doc."""
    bucket = doc['bucket_id']
    file_id = doc['file_id']
    doc['unique_id'] = '{0}_{1}'.format(bucket, file_id)
    return doc
ae2db3946acd5f459b513158727bd59b25c70577
118,246
def get_unique_elts(seq, keep_left_most=True):
    """
    Remove duplicates from ``seq`` while preserving order.

    Args:
        seq (iterable): iterable of hashable objects (must support ``reversed``
            when ``keep_left_most`` is False)
        keep_left_most (bool, optional, default True): if True, keep the first
            occurrence of each repeated element; otherwise keep the last

    Returns:
        (list): elements of ``seq`` with repeats removed
    """
    # dict preserves insertion order (Python 3.7+), so fromkeys dedupes
    # while keeping the first occurrence of each key.
    if keep_left_most:
        return list(dict.fromkeys(seq))
    # Dedup the reversed sequence (keeps last occurrences), then restore order.
    return list(reversed(list(dict.fromkeys(reversed(seq)))))
f41cacf8ef2cb9a1ab0e60bcdf03caed5c2e5aa0
118,251
import hashlib


def checksum(data):
    """
    Compute the SHA-512/256 checksum of arbitrary binary input.

    Args:
        data (bytes): data as bytes

    Returns:
        bytes: 32-byte digest of the data
    """
    # hashlib.new accepts the initial data directly, avoiding a separate update().
    return hashlib.new('sha512_256', data).digest()
8fb92c0874add30be402e0f962644eae1d701a56
118,253
import six


def _read_xml_element(element, xml_ns, tag_name, default=""):
    """
    Helper method to read the text content of a child element.

    Looks up the child ``<xml_ns + tag_name>`` of ``element`` and returns its
    text as unicode, decoding from UTF-8 if necessary (Python 2 byte strings).
    Returns ``default`` when the child is missing or has no usable text.
    """
    try:
        text = element.find(xml_ns + tag_name).text
        # six.text_type is `unicode` on Py2 and `str` on Py3.
        if isinstance(text, six.text_type):
            return text
        else:
            # NOTE: if the child exists but its text is None, .decode raises
            # AttributeError and we fall through to `default` below.
            return text.decode('utf-8')
    except AttributeError:
        # Raised both when find() returns None (missing child) and when
        # text is None — either way, fall back to the default.
        return default
c3e7f33aefb673b7132601150e63715895a39117
118,256
def parse_csv_line(line):
    """
    Split a line of comma-separated values into a list of values.

    A field wrapped in double quotes has the quotes removed (checked before
    whitespace stripping, matching the original behavior), then each field
    is stripped of surrounding whitespace.

    Fixes: the original indexed ``x[0]``/``x[-1]`` unconditionally, which
    raised IndexError on empty fields (e.g. ``"a,,b"``), and mangled a field
    consisting of a single ``"`` character into an empty string. A length
    guard handles both cases.

    :param line: one line of CSV text
    :return: list of parsed field strings
    """
    parsed_line = []
    for field in line.split(","):
        # Only strip quotes when there are genuinely two of them.
        if len(field) >= 2 and field[0] == "\"" and field[-1] == "\"":
            field = field[1:-1]
        parsed_line.append(field.strip())
    return parsed_line
3120e3e04480bceedf03a92ac457cebe5cbd545e
118,257
import torch


def sample_from_gaussian(mu, log_sig):
    """Draw one reparameterized sample from N(mu, diag(exp(log_sig)^2))."""
    std = torch.exp(log_sig)
    noise = torch.randn_like(log_sig)
    return mu + std * noise
975c14c352fa16cb74cf9b3487443012d360a080
118,258
def halfstr(s):
    """
    Split ``s`` into its two halves.

    Raises ValueError if the string has odd length.
    Returns a two-element list [first_half, second_half].
    """
    n = len(s)
    if n % 2:
        raise ValueError('string {0!r} not of even length'.format(s))
    mid = n // 2
    return [s[:mid], s[mid:]]
9ddc24e46fd18f1f82510bd3683e3b1984a740cc
118,259
def get_name(df):
    """
    Find :class:`~pandas.DataFrame` name.

    :param df: Input
    :type df: pandas.DataFrame or pandas.Series
    :return: Name or None if name does not exist
    :rtype: str or None
    """
    # getattr with a default mirrors the hasattr-then-access pattern.
    return getattr(df, 'name', None)
ec744f314d5a9e4504f68e163c8b6636800f0de6
118,263
import torch


def place_tensor(tensor):
    """
    Move a tensor to the GPU when CUDA is available; otherwise return it
    unchanged on the CPU.
    """
    return tensor.cuda() if torch.cuda.is_available() else tensor
c27f08d97a59974dfc33e367d3c5a0724fb8b7e7
118,264
import string


def column_name(column):
    """Convert a 0-based column index to a spreadsheet-style name.

    e.g. column=0 -> 'A', 25 -> 'Z', 26 -> 'AA'.
    """
    letters = string.ascii_uppercase
    quotient, remainder = divmod(column, 26)
    if quotient == 0:
        return letters[remainder]
    # Recurse on the higher-order "digits"; the -1 accounts for the fact
    # that this numbering has no zero digit ('A' is both 0 and the 1st unit).
    return column_name(quotient - 1) + letters[remainder]
f87e88e5d9e249bc9dae68e5cd015624fc94eda8
118,272
def parse_password(data: str) -> tuple[int, int, str, str]:
    """Parse a policy line like '1-3 a: abcde' into (num1, num2, letter, password)."""
    range_part, letter_part, password = data.split()
    bounds = range_part.split("-")
    # letter_part looks like 'a:'; drop the trailing colon.
    return int(bounds[0]), int(bounds[1]), letter_part[:-1], password
64c7e706c34f98a0719865b05e8e712c5969472f
118,274
def extract_data(data: str) -> tuple:
    """
    Extract polymer template and pair insertion rules.

    Returns (polymer, rules) where rules maps each pair to its insertion.
    """
    polymer, rule_block = data.split('\n\n')
    rules = {}
    for rule_line in rule_block.splitlines():
        pair, insertion = rule_line.split(' -> ')
        rules[pair] = insertion
    return polymer, rules
fd85896e1cd469ffa70aa47a7de3587843b38dd1
118,275
def set_speed_num(speed_num: int) -> str:
    """
    Set the speed number (Knitout extension).

    Valid speed numbers are integers in the range 0-15 inclusive.
    Raises ValueError for out-of-range values.
    """
    # Guard clause: reject out-of-range values up front.
    if not 0 <= speed_num <= 15:
        raise ValueError('int speed_num (val: {}) must in range 0-15.'.format(speed_num))
    return 'x-speed-number {}'.format(speed_num)
5618efb815dc65d922d6f214fd1ff9b6601f6e3d
118,280
def shorten(x, length):
    """Truncate x to at most `length` characters, ending with '..' if cut."""
    if len(x) <= length:
        return x
    return x[:length - 2] + '..'
4fb6ba2272b8443b05a49fb8dbba3e1adb0c597f
118,281
def Newcombprecangles(epoch1, epoch2):
   """
----------------------------------------------------------------------
Purpose:   Calculate precession angles for a precession in FK4, using
           Newcombs method (Woolard and Clemence angles)
Input:     Besselian start epoch1 and Besselian end epoch2
Returns:   Angles zeta, z, theta in degrees
Reference: ES 3.214 p.106
Notes:     Newcomb's precession angles for old catalogs (FK4),
           see ES 3.214 p.106
           Input are Besselian epochs!
           Adopted accumulated precession angles from equator
           and equinox at B1950 to 1984 January 1d 0h according
           to ES (table 3.214.1, p 107) are:
           zeta=783.7092, z=783.8009 and theta=681.3883
           The Woolard and Clemence angles (derived in this routine)
           are:
           zeta=783.70925, z=783.80093 and theta=681.38830
           (same ES table)
           This routine found (in seconds of arc):
           zeta,z,theta = 783.709246271 783.800934641 681.388298284
           for t1 = 0.1 and t2 = 0.133999566814
           using the lines in the example
Examples:  >>> be1 = 1950.0
           >>> be2 = celestial.epochs("F1984-01-01")[0]
           >>> print [x*3600 for x in celestial.Newcombprecangles(be1, be2)]
           [783.70924627097793, 783.80093464073127, 681.38829828393466]
----------------------------------------------------------------------
   """
   # Time arguments: millennia of tropical years since B1850.
   t1 = (epoch1-1850.0)/1000.0    #1000 tropical years
   t2 = (epoch2-1850.0)/1000.0
   tau = t2 - t1

   # Polynomial coefficients for zeta_a (arcsec), Woolard & Clemence.
   d0 = 23035.545; d1 = 139.720; d2 = 0.060; d3 = 30.240; d4 = -0.27; d5 = 17.995
   a0 = d0 + t1*(d1+d2*t1); a1 = d3 + d4*t1; a2 = d5
   zeta_a = tau*(a0+tau*(a1+tau*a2))

   # Polynomial coefficients for z_a (arcsec); shares a0 terms with zeta_a.
   d0 = 23035.545; d1 = 139.720; d2 = 0.060; d3 = 109.480; d4 = 0.39; d5 = 18.325
   a0 = d0 + t1*(d1+d2*t1); a1 = d3 + d4*t1; a2 = d5
   z_a = tau*(a0+tau*(a1+tau*a2))

   # Polynomial coefficients for theta_a (arcsec).
   d0 = 20051.12; d1 = -85.29; d2 = -0.37; d3 = -42.65; d4 = -0.37; d5 = -41.80
   a0 = d0 + t1*(d1+d2*t1); a1 = d3 + d4*t1; a2 = d5
   theta_a = tau*(a0+tau*(a1+tau*a2))
   # Convert arcseconds to degrees before returning.
   # Return values in degrees
   return zeta_a/3600.0, z_a/3600.0, theta_a/3600.0
8ce4fca6eeb16fd0a1bf042ece3ed5826d51b821
118,285
def record_edf_annotations_to_sec_items(raw_edf_annotations):
    """
    Pair annotation texts with their start times in seconds.

    rec = hdf['record-0']
    annotation_items = record_edf_annotations_to_sec_items(rec['edf_annotations'])
    # returns (text, <start time (sec)>) pairs
    """
    # Decode the byte-encoded annotation texts to unicode strings.
    texts = [raw.decode("utf-8") for raw in raw_edf_annotations["texts"][:]]
    # starts are in units of 100 ns: divide by 10**7 to get seconds.
    starts_sec = raw_edf_annotations["starts_100ns"][:] / 10000000
    return zip(texts, starts_sec)
6a333d41b83717bb027f866ab6ecd5bb3b9b3768
118,289
def mk2mass_del(mk):
    """mk2mass_del - M_K to mass, Delfosse et al. (2000).

    Usage: mass = mk2mass_del(mk)

    Where mk is absolute CIT K magnitude and mass is in solar masses. This
    version is the original polynomial from the paper.

    NOTE: the range of the parameters is not checked to ensure the relation
    is used within the domain of applicability, this is left to the user.

    References: Delfosse et al. (2000) A&A 364 217
    """
    # Polynomial coefficients in ascending powers of mk.
    coeffs = (1.8, 6.12, 13.205, -6.2315, 0.37529)
    # Horner evaluation, highest power first (same operation order as the
    # original nested expression, so results are bit-identical).
    poly = 0.0
    for c in reversed(coeffs):
        poly = poly * mk + c
    return 10.0 ** (1.0e-3 * poly)
0ab6765385df9c4b7575eaf1da4ada75de72c9d8
118,291
def transform_ratio(value):
    """
    Map a size ratio onto a scale symmetric around 0.

    A ratio of 2 (twice the size) ends up the same distance from 0 as a
    ratio of 0.5 (half the size): 'how many times larger or smaller than
    the reference'.
    """
    if value < 1:
        return 1 - 1 / value
    return value - 1
bb438f708a0ea21998a74699f5a5c847f7a50b27
118,294
def flattenDict(dict):
    """
    Invert a dict of aggregated values: each element of each value list
    becomes a key mapped back to its original (aggregating) key.
    """
    # NOTE(review): the parameter name shadows the builtin `dict`; kept
    # unchanged so keyword callers are not broken.
    inverted = {}
    for group, members in dict.items():
        for member in members:
            inverted[member] = group
    return inverted
dca39ff54b6026c119a7adcc2314c33cffc91158
118,295
import math


def compute_idfs(documents):
    """
    Given a dictionary of `documents` that maps names of documents to a list
    of words, return a dictionary that maps words to their IDF values.

    Any word that appears in at least one of the documents should be in the
    resulting dictionary.
    """
    # For each word, collect the set of documents that contain it.
    docs_containing = {}
    for doc_name, words in documents.items():
        for word in words:
            docs_containing.setdefault(word, set()).add(doc_name)
    total_docs = len(documents)
    # IDF = ln(total documents / documents containing the word).
    return {word: math.log(total_docs / len(names))
            for word, names in docs_containing.items()}
509f4f33d0fa57cc17ecb692f629b1f033019402
118,297
def _is_conflict_exception(e): """ Check whether the botocore ClientError is ConflictException. :param e: botocore exception :type e: ClientError :return: True if e is ConflictException """ error_code = e.response['Error']['Code'] return error_code == 'ConflictException'
05d6aee3786ee44f0bf94f53ca342f68778f44a7
118,301
def line_error(params, args):
    """
    Residual of observations against the line model y = m*x + b.

    :param params: sequence whose first two entries are (m, b)
    :param args: (x, y) pair of inputs and observations
    :return: y - (m*x + b), the observation minus the model estimate
    """
    inputs, observed = args
    slope = params[0]
    intercept = params[1]
    estimate = slope * inputs + intercept
    return observed - estimate
5b920c418f25874901904553d517a7ca510146da
118,302
def input_type(form_field):
    """
    Template filter returning the class name of the form field's widget.
    """
    widget = form_field.field.widget
    return type(widget).__name__
19668b78ed77c38d8f84197d6cb3afdee94c1342
118,303
def getVal(item):
    """
    Sort key for items: weight per unit of value.

    Args:
        item: A dictionary with entries "weight" and "value"

    Returns:
        The item's weight divided by its value
    """
    weight = item["weight"]
    value = item["value"]
    return weight / value
9a7621177b578a83a0109cb6aaa9b720fd7e93af
118,304
import re

# Matches any run of two or more backticks.
_MULTI_BACKTICK = re.compile(r"`{2,}")


def replace_double_inline_code_marks(text: str) -> str:
    """Collapse runs of two or more backticks (e.g. ``) into a single `.

    Fixes: the original pattern "(`+\\b)|(\\b`+)" was written in a non-raw
    string, so "\\b" was the literal backspace character (\\x08) rather than
    a word-boundary, and the substitution never matched real backticks.
    """
    return _MULTI_BACKTICK.sub("`", text)
29978384deb490e3c7553242592f9b774370ff2e
118,305
import random


def get_random_chip(piece_stack):
    """
    Remove and return a random chip from the given stack.

    :param piece_stack: List of pieces (mutated: the chosen chip is removed)
    :return: the randomly chosen chip
    """
    # Scale a single random.random() draw to an index (same RNG consumption
    # as before, so seeded behavior is unchanged).
    index = int(random.random() * len(piece_stack))
    chosen = piece_stack[index]
    piece_stack.remove(chosen)
    return chosen
e8b328f789be9fd7179a97c924ffe844c20c78d9
118,306
def prettify_colnames(df):
    """
    Clean up column names for readability and ease of access.

    PARAMETERS
        df : Dataframe whose columns are cleaned (dataframe)

    OUTPUTS :
        1. Returns a list of cleaned column names (stripped, lowercased,
           spaces replaced by underscores) for the given dataframe
    """
    return [col.strip().lower().replace(" ", "_") for col in df.columns]
55a2d0ba5b97d8006a90b9a9d2bc498fe2905c93
118,317
def dictstr(d):
    """Stringify a dict in the form 'k=v, k=v ...' instead of '{k:v, ...}'.

    Fixes: the original called ``d.iteritems()``, which exists only in
    Python 2 and raises AttributeError on Python 3; ``d.items()`` works
    on both.
    """
    return ", ".join("%s=%r" % (k, v) for k, v in d.items())
5984f7fb7adc1cfab2e11ab36e4fd6e54746c7be
118,318
def check_valid(input_password):
    """
    Decide whether a password is acceptable for this training program.

    "Invalid" here means we don't want to train on it; additional grammar
    checks may exclude more passwords later — these are just likely
    universal rejections.

    Returns True if the password is valid, False if invalid.
    """
    # Reject empty passwords outright.
    if not input_password:
        return False

    # Tabs are used as separators when the grammar is saved to disk, so
    # passwords containing tabs would corrupt the on-disk format.
    if "\t" in input_password:
        return False

    # Reject ASCII control characters (0x00-0x1F), e.g. LineFeed.
    if any(chr(code) in input_password for code in range(0x0, 0x20)):
        return False

    # UTF-8 Line Separator also causes problems downstream.
    if u"\u2028" in input_password:
        return False

    return True
6ae09635a734875267dfe34a692df7555097e481
118,319
def get_value(query):
    """
    Return the single scalar from an executed single-column query.

    :param query: The executed query object (must expose ``first()``)
    :return: The value in the first column of the first row
    """
    first_row = query.first()
    return first_row[0]
ee2b7f700412ce8cde2240a5708825bc322a0273
118,320