content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def translate_relaxation(quantity: str) -> str:
    """Map the name of a dynamic quantity onto the name of its relaxation.

    Args:
        quantity: The name of the dynamic quantity.

    Returns:
        The relaxation name, or the input unchanged when no mapping exists.
    """
    known_relaxations = {
        "alpha": "max_alpha_time",
        "gamma": "max_gamma_time",
        "com_struct": "tau_F",
        "msd": "diffusion_constant",
        "rot1": "tau_R1",
        "rot2": "tau_R2",
        "struct": "tau_S",
    }
    if quantity in known_relaxations:
        return known_relaxations[quantity]
    return quantity
b8b3bc80613ca58af961eb08bb059bb5081e3eae
64,008
def parse_piccolo_url(url):
    """Parse a URL of the form ``server:[port][/piccolo_lib]``.

    Port is expected to be an integer; a non-integer port is silently
    ignored. Missing components come back as ``None``.

    :param url: URL string to parse
    :return: tuple ``(server, port, piccolo_lib)``
    """
    pieces = url.strip().split('/')
    lib = pieces[1] if len(pieces) > 1 else None
    host_parts = pieces[0].split(':')
    host = host_parts[0]
    port = None
    if len(host_parts) > 1:
        try:
            port = int(host_parts[1])
        except ValueError:
            # not an integer, silently ignore
            port = None
    return (host or None, port, lib or None)
a19317217884dca3035112a6e968935d81e20458
64,015
def _pick_counters(log_interpretation): """Pick counters from a dictionary possibly containing step and history interpretations.""" for log_type in 'step', 'history': counters = log_interpretation.get(log_type, {}).get('counters') if counters: return counters else: return {}
9afa112d566e6a427fba2776c115444a02253135
64,019
def coerce_row_to_dict(schema, row):
    """
    >>> from datashape import dshape
    >>> schema = dshape('{x: int, y: int}')
    >>> coerce_row_to_dict(schema, (1, 2))  # doctest: +SKIP
    {'x': 1, 'y': 2}

    Idempotent

    >>> coerce_row_to_dict(schema, {'x': 1, 'y': 2})  # doctest: +SKIP
    {'x': 1, 'y': 2}
    """
    # Already a dict: nothing to do (idempotence).
    if isinstance(row, dict):
        return row
    return {field: value for field, value in zip(schema[0].names, row)}
89ea04b6b73a8b7218a4cf2a1b584d2db583379c
64,020
import pickle


def pickle_load(name, extension='.pkl'):
    """Load data with pickle.

    Parameters
    ----------
    name: str
        Path to load from (includes dir, excludes extension).
    extension: str, optional
        File extension.

    Returns
    -------
    Contents of file path.
    """
    # Context manager guarantees the file handle is closed even when
    # pickle.load raises; the previous open()/close() pair leaked the
    # handle on error.
    with open(name + extension, 'rb') as infile:
        return pickle.load(infile)
8946ebbb28ac25fdc18bd98327ee9210eff8b2cc
64,023
def onlist(listtocheck, item):
    """
    Check if something is on a list.

    :type listtocheck: list
    :param listtocheck: The list to check.

    :type item: object
    :param item: The item to check if on the list.
    """
    # Membership test delegates to the container's __contains__.
    found = item in listtocheck
    return found
d48a347d8e21a7c22407a6d9a30acb51c2f99d38
64,030
def get_percent(part, whole):
    """Return what percentage `part` is of `whole`, rounded to 2 decimals.

    A zero `whole` yields 0.0 instead of dividing by zero.
    """
    if whole == 0:
        ratio = 0.0
    else:
        ratio = 100 * float(part) / float(whole)
    return round(ratio, 2)
df36a60d32ea8fad065c4a873c78e9aa7b801632
64,035
from typing import Tuple
import re


def extract_special_terms(description: str) -> Tuple[str, ...]:
    """
    Extract all words (space delimited) which presumably cannot be part of an
    natural language sentence. These are usually code fragments and names of
    code entities, or paths.
    """
    def _is_special(token: str) -> bool:
        # Strip common surrounding punctuation before testing.
        core = token.rstrip(").,;:?!\"'").lstrip("(")
        # Non-word characters or a capital past the first position are
        # unlikely in ordinary prose.
        return bool(re.search(r"\B[A-Z]", core) or re.search(r"\W", core))

    return tuple(token for token in description.split() if _is_special(token))
34673db91a44db89ab9f3e915850a13cc1c42585
64,041
def lower_keys(x):
    """Recursively make all keys lower-case"""
    if isinstance(x, dict):
        return {key.lower(): lower_keys(value) for key, value in x.items()}
    if isinstance(x, list):
        return [lower_keys(item) for item in x]
    # Scalars pass through unchanged.
    return x
f7c4537e09f3900c369a7c67a307b8b064e9e9ba
64,046
def valueOfLength(size):
    """Returns a valid JSON value for name update of the given length in bytes."""
    prefix = '{"text": "'
    suffix = '"}'
    # Room left for the filler once the JSON scaffolding is accounted for.
    filler = size - len(prefix) - len(suffix)
    assert filler > 0
    result = prefix + 'x' * filler + suffix
    assert len(result) == size
    return result
ad1ae6be48e9f204890de8f9ca62967b3b4ee36d
64,047
import typing
import pathlib


def size_of_directory(directory: str, units='bytes') -> typing.Union[int, float]:
    """
    Returns the size of directory. It ignores the size of directories.

    Credits: derived from https://stackoverflow.com/a/55659577/3967334

    Parameters
    ----------
    directory : str
        Path to directory
    units: str
        One of `bytes` (default), `kilobytes`, `megabytes`, `gigabytes`

    Returns
    -------
    out: float
        Size
    """
    # the exponent needed in the denominator when doing the conversion
    units_conversion_exponent = {'bytes': 0, 'kilobytes': 1, 'megabytes': 2, 'gigabytes': 3}
    # rglob('*') also yields sub-directories; their inode sizes must not be
    # counted (the docstring promises directories are ignored), so only
    # regular files contribute.
    size = sum(
        entry.stat().st_size
        for entry in pathlib.Path(directory).rglob('*')
        if entry.is_file()
    )
    return size / 1024 ** units_conversion_exponent[units]
0702849c8662010db4ff0cb24e7fc34520c05746
64,050
from typing import Dict


def response_success(response: Dict) -> bool:
    """
    Parse a response from a request issued by any botocore.client.BaseClient
    to determine whether the request was successful or not.

    :param response:
    :return: boolean
    :raises: KeyError if the response is not an AWS response
    """
    # A malformed (non-AWS) response dict deliberately raises KeyError here.
    code = int(response["ResponseMetadata"]["HTTPStatusCode"])
    # A zero code is a failure; otherwise only the 2xx range is a success.
    return bool(code) and 200 <= code < 300
467e2e3f8e2279c936d447dda0a499ed4b5792cf
64,051
def check_b4_adding(word, gpat, ypat, unused):
    """
    Return True if:
        gpat indicates that a letter is green and a letter does not match
        ypat indicates that a letter is yellow and this letter matches
            or this letter is not found in the word
        the letter matches a letter known to be unused
    @param word String word to check
    @param gpat String Green pattern
    @param ypat String yellow pattern
    @param unused String unused letters
    @return True/False
    """
    # Green check: a fixed letter must match the same position in the word.
    for pos, green in enumerate(gpat):
        if green != '' and green != word[pos]:
            return True
    # Yellow check: a yellow letter may not sit at this position, and it
    # must occur somewhere in the word.
    for pos, yellows in enumerate(ypat):
        if yellows == '':
            continue
        for yellow in yellows:
            if yellow == word[pos] or yellow not in word:
                return True
    # Finally, the word may not contain any letter known to be unused.
    return any(letter in unused for letter in word)
5a146ceb9bdc2b9712945ca98d93339d7e6fee4f
64,054
def get_info(ads):
    """Get information on a list of AndroidDevice objects.

    Args:
        ads: A list of AndroidDevice objects.

    Returns:
        A list of dict, each representing info for an AndroidDevice objects.
    """
    infos = []
    for device in ads:
        infos.append(device.device_info)
    return infos
12da7781e55b3c1ff242ad85727baa2bed205355
64,062
from io import StringIO
import csv


def make_csv(data):
    """Create a CSV string from an array of dictionaries"""
    # Already-serialized input is passed straight through.
    if type(data) is str:
        return data
    buf = StringIO()
    writer = csv.DictWriter(buf, data[0].keys())
    writer.writeheader()
    writer.writerows(data)
    contents = buf.getvalue()
    buf.close()
    return contents
45ab68deea201951600a2019ab73529247838569
64,063
def sample_at(field, x=0, y=0, point=None):
    """
    Sample from a 2D scalar field at a given coordinate; return 1 if given
    coordinate is out-of-bounds.

    Works with either a named point argument or x and y coordinates
    positionally following the field argument. In case all three are
    specified and point is not None, it will override the x and y arguments.

    :param field: field from which to sample
    :type field: numpy.ndarray
    :param x: x coordinate for sampling location
    :type x: int
    :param y: y coordinate for sampling location
    :type y: int
    :param point: full coordinate for sampling location.
    :type point: Point2d
    :return: scalar value at given coordinate if (x,y) are within bounds of
        the scalar field, 1 otherwise
    """
    if point is not None:
        x, y = point.x, point.y
    height, width = field.shape[0], field.shape[1]
    in_bounds = 0 <= x < width and 0 <= y < height
    return field[y, x] if in_bounds else 1
94a018d99be805064f9d363eaeb2c2fae63f101e
64,068
import torch


def q_learning_confidence(lambda_, t, q=torch.tensor(0.008), e=torch.tensor(0.001)):
    """
    :param q: the convergence rate of q-learning
    :param e: the error of q-learning
    :param lambda_: lambda_ > 0
    :return: confidence bound of q-learning
    """
    rate = q + e
    numerator = torch.tensor(2) * rate * (torch.tensor(1) - torch.pow(rate, t))
    denominator = torch.sqrt(lambda_) * (torch.tensor(1) - rate)
    return numerator / denominator
30659b86191a39838de6b637111cca7fe2eeea11
64,070
from typing import Union
import torch
from typing import Dict
from typing import Optional


def load_torch_model(
    saved_model: Union[torch.nn.Module, Dict],
    model_definition: Optional[torch.nn.Module] = None,
) -> torch.nn.Module:
    """Loads a PyTorch model from the provided ``saved_model``.

    If ``saved_model`` is a torch Module, then return it directly. If
    ``saved_model`` is a torch state dict, then load it in the
    ``model_definition`` and return the loaded model.
    """
    # A live module needs no further work.
    if isinstance(saved_model, torch.nn.Module):
        return saved_model
    if not isinstance(saved_model, dict):
        raise ValueError(
            f"Saved model is of type {type(saved_model)}. "
            f"The model saved in the checkpoint is expected "
            f"to be of type `torch.nn.Module`, or a model "
            f"state dict of type dict."
        )
    if not model_definition:
        raise ValueError(
            "Attempting to load torch model from a "
            "state_dict, but no `model_definition` was "
            "provided."
        )
    model_definition.load_state_dict(saved_model)
    return model_definition
53e7178d955cf7da0471e6a4ed663522598f113a
64,071
def homo(a):
    """Homogenize, or project back to the w=1 plane by scaling all values by w"""
    w = a[3]
    return [a[0] / w, a[1] / w, a[2] / w, 1]
6db1a1cf11aabf94e6a95753b6a3913683620649
64,075
import random
import string


def random_string(length=32):
    """
    Return a random string of ASCII letters and digits of length 'length'.
    """
    alphabet = string.ascii_letters + string.digits
    # ''.join avoids the quadratic cost of repeated string concatenation
    # and makes the same sequence of random.choice calls as before.
    return ''.join(random.choice(alphabet) for _ in range(length))
612cba9263e63e901d1c3234c35e025824eec422
64,077
def triangle_number(n):
    """
    Return the nth triangle number; i.e., the value of
    ``1+2+3+...+(n-1)+n``.
    """
    # n*(n+1) is always even, so floor division is exact and keeps the
    # result an int; true division (`/`) returned a float on Python 3.
    return n * (n + 1) // 2
84ecc4da3c3e70148471132783828583f2d117a0
64,078
def is_small_cave(cave: str) -> bool:
    """Check if a cave is small.

    Args:
        cave (str): a cave represented by a letter or label

    Returns:
        bool: True if cave is small; False otherwise
    """
    # Small caves are lower-case labels; this lexicographic range test is
    # exactly equivalent to 'a' <= cave <= 'z'.
    return not (cave < 'a' or cave > 'z')
f804521c73de28177c2bc62d344947fb35fa2a00
64,079
import re def _normalize_keyword( query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall, normspace=re.compile(r'\s{2,}').sub ): """ Breaks the string into a list based on spaces and double quotes :param query_string: String to be normalized :param findterms: Return all non-overlapping matches of regex pattern in string, as a list of strings :param normspace: Return the string obtained by replacing the leftmost non-overlapping occurrences of regex pattern in string by replacement :return: Normalized keywords in a list """ return [normspace('', (t[0] or t[1]).strip()) for t in findterms(query_string)]
18a674e694a6092222c072e074c40ab673256f63
64,080
def sublist_bigrams(pos_list):
    """
    bigrams at sublist level

    >>> sublist_bigrams(['V V', 'R , V O V D N', 'P O V'])  # doctest: +ELLIPSIS
    [('V', 'V'), ('R', ','), (',', 'V'), ('V', 'O'), ...]
    """
    bigrams = []
    for entry in pos_list:
        tags = entry.split()
        # Adjacent pairs within one sublist only; no pairs across sublists.
        bigrams.extend(zip(tags, tags[1:]))
    return bigrams
dd873b8e76d92a11d21a950785deffad49c68495
64,082
def list_response(response_list, cursor=None, more=False, total_count=None):
    """Creates response with list of items and also meta data used for
    pagination

    Args :
        response_list (list) : list of items to be in response
        cursor (Cursor, optional) : ndb query cursor
        more (bool, optional) : whether there's more items in terms of
            pagination
        total_count(int, optional): Total number of items

    Returns :
        (dict) : the response to be serialized and sent to client
    """
    next_cursor = cursor.urlsafe() if cursor else ''
    return {
        'list': response_list,
        'meta': {
            'nextCursor': next_cursor,
            'more': more,
            'totalCount': total_count,
        },
    }
7a90057b0ee53f07f4d208bd051610fc1fdcf4ec
64,083
from datetime import datetime


def str_to_time(ds):
    """
    Convert a date string to a datetime object

    Input:
        ds: date string including time, e.g., "2019-01-03 12:30:00"
    """
    return datetime.strptime(ds, "%Y-%m-%d %H:%M:%S")
3f7cbe81442c0bea9ce0a525b00bf87ed1953d52
64,085
def order_tuple(toOrder):
    """
    Given a tuple (a, b), returns (a, b) if a <= b, else (b, a).
    """
    first, second = toOrder
    if first <= second:
        return (first, second)
    return (second, first)
d055d6819b5a37da8a3486cac92bebac01f7bdf3
64,093
def nakshatra_pada(longitude):
    """Gives nakshatra (1..27) and paada (1..4) in which given longitude lies"""
    # 27 nakshatras span 360°
    one_star = 360 / 27  # = 13°20'
    # Each nakshatra has 4 padas, so 27 x 4 = 108 padas in 360°
    one_pada = 360 / 108  # = 3°20'
    star_index = int(longitude / one_star)
    within_star = longitude - star_index * one_star
    pada_index = int(within_star / one_pada)
    # convert 0..26 to 1..27 and 0..3 to 1..4
    return [star_index + 1, pada_index + 1]
81481e688bd3ebb9343b62ea8c31da20182b4189
64,098
import struct


def int_2_bytes(int_value, is_little_endian=False):
    """Convert an int into a 4-byte bytes string.

    :param int_value: signed 32-bit integer to pack
    :param is_little_endian: pack little-endian when True, big-endian otherwise
    :return: 4-byte packed representation
    """
    fmt = '<i' if is_little_endian else '>i'
    return struct.pack(fmt, int_value)
dd4dceb7f500d68db3a5621845475397c75817f6
64,099
def get_square(n: int) -> int:
    """Get `n`-th square number

    Args:
        n: Index of square number

    Examples:
        >>> print(get_square(100))
        10000
    """
    # The previous doctest wrongly claimed get_square(100) == 100.
    return n * n
a73f2579fa9c6329d535848c0b992d3db7c6091c
64,101
from pathlib import Path def _transform_path(path, search_str, replace_str): """ This function replaces a single directory in the given path by the given string and returns the new path. :param path (str || Path): The path that has to be transformed. :param search_str (str): The directory that has to be changed. :param replace_str (str): The directory that the subdirectory has to be changed to. Returns: (Path): The new path if the replacement was successful, else the original path. """ result = Path(path) subdirectories = list(path.parts) if search_str in subdirectories: subdirectories[subdirectories.index(search_str)] = replace_str result = Path(*subdirectories) return result
a5aafdeb388f9873ef8ccaaac77f19f1c7b8ebfd
64,111
def convert_public_keys_to_names(nodes_by_public_key, public_keys):
    """Convert a set/list of node public keys to a set of names"""
    return {nodes_by_public_key[key]['name'] for key in public_keys}
8747b7da6e0d0c3a2acf728d13e98e6afa0eb8e4
64,114
def local_regon_checksum(digits):
    """
    Calculates and returns a control digit for given list of digits basing on
    local REGON standard.
    """
    weights = [2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8]
    check_digit = sum(weight * digit for weight, digit in zip(weights, digits)) % 11
    # The REGON standard maps a remainder of 10 to control digit 0.
    return 0 if check_digit == 10 else check_digit
7a99fefaa8806b24572df07c1cde8c950ae62226
64,116
import torch


def construct_edge_list(sim_score_mat, thres):
    """
    Constructs edge lists for a PyG graph (COO representation) based on the
    pairwise similarity matrix of the molecule and the molecular features
    """
    print('Constructing COO edge list based on similarity matrix.')
    sources, targets = [], []
    size = len(sim_score_mat)
    for row in range(size):
        for col in range(size):
            # An edge exists wherever similarity exceeds the threshold.
            if sim_score_mat[row][col] > thres:
                sources.append(row)
                targets.append(col)
    edge_index = torch.tensor([sources, targets], dtype=torch.long)
    print('Done.')
    return edge_index
edbf97053327713db4a01e47a4312281e68d2f84
64,122
def _extract_at_points_packed( z_est, index_bbox_valid, slice_index_uv, y_lo, y_hi, x_lo, x_hi, w_ylo_xlo, w_ylo_xhi, w_yhi_xlo, w_yhi_xhi, ): """ Extract ground truth values z_gt for valid point indices and estimated values z_est using bilinear interpolation over top-left (y_lo, x_lo), top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right (y_hi, x_hi) values in z_est with corresponding weights: w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi. Use slice_index_uv to slice dim=1 in z_est """ z_est_sampled = ( z_est[index_bbox_valid, slice_index_uv, y_lo, x_lo] * w_ylo_xlo + z_est[index_bbox_valid, slice_index_uv, y_lo, x_hi] * w_ylo_xhi + z_est[index_bbox_valid, slice_index_uv, y_hi, x_lo] * w_yhi_xlo + z_est[index_bbox_valid, slice_index_uv, y_hi, x_hi] * w_yhi_xhi ) return z_est_sampled
0b25dc140d5672fec42844336342aedd24b77de0
64,131
import json


async def expected_startlist() -> dict:
    """Create a mock startlist object."""
    # Fixture lives alongside the tests; loaded fresh on every call.
    path = "tests/files/expected_startlist_individual_sprint.json"
    with open(path, "r") as file:
        return json.load(file)
8e5bd083f0272c6d02ba8ecbbfd30506b0178322
64,133
def evaluate_velocity_at(layer, depth, prop):
    """
    Evaluate material properties at some depth in a velocity layer.

    .. seealso:: :func:`evaluate_velocity_at_top`,
        :func:`evaluate_velocity_at_bottom`

    :param layer: The velocity layer to use for evaluation.
    :type layer: :class:`~numpy.ndarray`, dtype = :py:const:`.VelocityLayer`
    :param depth: The depth at which the material property should be
        evaluated. Must be within the bounds of the layer or results will be
        undefined.
    :type depth: float
    :param prop: The material property to evaluate. One of:

        * ``p``: Compressional (P) velocity (km/s)
        * ``s``: Shear (S) velocity (km/s)
        * ``r`` or ``d``: Density (g/cm^3)
    :type prop: str
    :returns: The value of the material property requested.
    :rtype: :class:`~numpy.ndarray` (dtype = :class:`float`, shape equivalent
        to ``layer``)
    """
    thickness = layer['bot_depth'] - layer['top_depth']
    prop = prop.lower()
    if prop == "p":
        field = 'p_velocity'
    elif prop == "s":
        field = 's_velocity'
    elif prop in "rd":
        field = 'density'
    else:
        raise ValueError("Unknown material property, use p, s, or d.")
    # Linear interpolation between the top and bottom values of the layer.
    top_value = layer['top_' + field]
    gradient = (layer['bot_' + field] - top_value) / thickness
    return gradient * (depth - layer['top_depth']) + top_value
c7c30022030acafcbf69d6d088c3d36644130eb5
64,135
def replicate(lst, n):
    """
    Replicate each of the elements of lst a given number of times.

    For example, for lst = [a, b, c] and n = 3, the returned list is
    [a, a, a, b, b, b, c, c, c].
    """
    # The previous implementation built a string per element and split it
    # back with list(), which shattered multi-character strings into
    # characters and failed outright for non-string elements. Repeating
    # each element directly works for any element type.
    return [element for element in lst for _ in range(n)]
d8c78f8c0737704ec1969d540e5d89b990ea8340
64,142
def ReserveHospRsrvEnd(t):
    """Hospitalization reserve: End of period.

    Placeholder projection item: currently always zero for every period.

    :param t: period index (unused)
    :return: 0
    """
    return 0
adc01e22aee1bc14a114fb37f27b40abd73810d7
64,145
def _capitalize_first(s): """ Capitalize only the first letter and leave the rest alone. Note that normal Python capitalize() will lowercase all other letters. This does not. """ return s[0].upper() + s[1:]
ff5a01d643f66b7ce0c84aea48f3588bf80ce629
64,148
def calc_scale_size(boundary, size, scale_up=True):
    """Returns the biggest possible size to fit into the boundary,
    respecting the aspect ratio.

    If `scale_up` is True the result can be larger than size.

    All sizes have to be > 0.
    """
    bound_w, bound_h = boundary
    img_w, img_h = size
    if min(bound_w, bound_h, img_w, img_h) <= 0:
        raise ValueError

    # Image already fits and up-scaling is disabled: keep it as-is.
    if not (img_w > bound_w or img_h > bound_h or scale_up):
        return img_w, img_h

    bound_ratio = float(bound_w) / bound_h
    img_ratio = float(img_w) / img_h
    if img_ratio > bound_ratio:
        # Width-limited: fill the boundary width.
        return bound_w, int(bound_w / img_ratio)
    # Height-limited: fill the boundary height.
    return int(bound_h * img_ratio), bound_h
15a28dbfb9ccbb093e9a0c8ea48144bd67e1f9aa
64,149
def get_head_block_args(n_classes: int, num_feature_maps: int,
                        avgpool_target_size=(1, 1), buffer_reduction=None):
    """
    Wrap the args for the head block into a dict. Check Config classes for
    arg doc string

    :param n_classes: number of classes
    :param num_feature_maps: number of feature maps of the conv output (i.e.
        the input feature map for head block)
    :param avgpool_target_size: target size for the average pooling later
    :param buffer_reduction: whether to use a buffer layer, e.g. fc-fc-
    :return: head block args dictionary
    """
    return {
        'n_classes': n_classes,
        'in_channels': num_feature_maps,
        'buffer_reduction': buffer_reduction,
        'avgpool_target_shape': avgpool_target_size,
    }
3edc5fbf271d529fd525c6a789cef757618cfb76
64,152
def load_event_map(event_map_filepath: str) -> dict[str, int]:
    """Loads event map from a specified filepath, and returns a dictionary
    containing the map (one stripped label per line, mapped to its line
    index)."""
    event_map = {}
    with open(event_map_filepath) as handle:
        for position, raw_label in enumerate(handle):
            event_map[raw_label.strip()] = position
    return event_map
37c2186189e320132ea7f95521f08a73dacccca4
64,153
def validate_string(string):
    """Check string is non-empty (truthy)."""
    # bool() expresses the truthiness test directly, replacing the
    # redundant `True if string else False` conditional.
    return bool(string)
eee186fdf80d22ce3579a35d8b83854dbef0fb5e
64,154
def fib(n):
    """nth fibonacci number (iterative)"""
    prev, curr = 0, 1
    count = 0
    while count < n:
        prev, curr = curr, prev + curr
        count += 1
    return prev
ea29c86093dbb1ec19fa926d030c7f665e5ea194
64,157
def getElectronState(radicalElectrons, spinMultiplicity):
    """
    Return the electron state corresponding to the given number of radical
    electrons `radicalElectrons` and spin multiplicity `spinMultiplicity`.
    Raise a :class:`ValueError` if the electron state cannot be determined.
    """
    # States that do not depend on the spin multiplicity.
    simple_states = {0: '0', 1: '1', 3: '3', 4: '4'}
    if radicalElectrons in simple_states:
        return simple_states[radicalElectrons]
    if radicalElectrons == 2:
        # A biradical is either singlet (2S) or triplet (2T).
        if spinMultiplicity == 1:
            return '2S'
        if spinMultiplicity == 3:
            return '2T'
    raise ValueError('Unable to determine electron state for {0:d} radical electrons with spin multiplicity of {1:d}.'.format(radicalElectrons, spinMultiplicity))
0a7d18b934e494baa5967596b5d47f58b67c1eba
64,159
def stoploss_from_absolute(stop_rate: float, current_rate: float) -> float:
    """
    Given current price and desired stop price, return a stop loss value that
    is relative to current price.

    The requested stop can be positive for a stop above the open price, or
    negative for a stop below the open price. The return value is always >= 0.
    Returns 0 if the resulting stop price would be above the current price.

    :param stop_rate: Stop loss price.
    :param current_rate: Current asset price.
    :return: Positive stop loss value relative to current price
    """
    # formula is undefined for current_rate 0, return maximum value
    if current_rate == 0:
        return 1
    relative = 1 - stop_rate / current_rate
    # A negative value means the requested stop sits above the current price.
    return relative if relative > 0 else 0.0
9005a45ab0cda284a04c8cc5c7079ad8f2eb2673
64,160
import random


def one_in(n: int) -> bool:
    """Return True with odds of 1 in n."""
    # randint(0, n) draws from n+1 equally likely values; zero is the hit.
    return random.randint(0, n) == 0
a019bbd1f9c7653404fd726c724261e56d8b4fd2
64,161
def executeSQLQmark(cursor, query, params=None):
    """
    Execute a python 2.5 (sqlite3) style query.

    @param cursor: A sqlite cursor
    @param query: The query to execute
    @param params: An optional list of parameters to the query
    """
    if params is not None:
        return cursor.execute(query, params)
    return cursor.execute(query)
554c4523cc390158563ebc158e03845ca8cd62f0
64,162
def tosize(seq, size):
    """Return a list of fixed length from sequence (which may be shorter or
    longer). Short inputs are right-padded with None; long ones truncated."""
    shortfall = size - len(seq)
    if shortfall <= 0:
        return seq[:size]
    return seq + [None] * shortfall
36a54818946106c52edcfc8d5be69ab362c9b98a
64,165
def make_rectangle(x1, x2, y1, y2):
    """Return the corners of a rectangle."""
    # Closed polygon: walk the corners and return to the start.
    corners = [(x1, y1), (x1, y2), (x2, y2), (x2, y1), (x1, y1)]
    xs = [corner[0] for corner in corners]
    ys = [corner[1] for corner in corners]
    return xs, ys
08e473af06e355ed454ff0368f38bbab69ebcfb5
64,175
import json


def get_min_cache(path: str) -> float:
    """Get the minimum caching time of all the entries in a config file.

    :param path: path of the config file to use
    :type path: str
    :return: float number representing the smallest caching time.
    """
    smallest = float('inf')
    with open(path, 'r') as config_file:
        entries = json.load(config_file)
    for entry in entries:
        if 'cache' in entry and entry['cache'] < smallest:
            smallest = entry['cache']
    return smallest
bc229835e2f279d4a6990429844fe9dcf4db5721
64,177
def get_numbers_of_blocks(connection):
    """
    Get the total number of rows in the Block table.

    :param connection: pymysql.Connection
    :return: number of blocks
    :rtype: int
    """
    cursor = connection.cursor()
    cursor.execute("SELECT COUNT(*) FROM Block ")
    # COUNT(*) always yields exactly one row; grab its single column.
    for record in cursor:
        count = record[0]
    cursor.close()
    return count
a0c484622ba536a77f6052ff5831dc6467434b7f
64,178
def is_vlan_in_primary_vea_addl_vlans(vlanid, primary_sea):
    """
    This API is used to find out if the supplied VLANID exists in the SEA's
    primary VEA's additional VLANIDs. A True is returned if the VLAN is found.

    :param vlanid: The vlan to search
    :param primary_sea: The SEA to search the VLANs.
    :returns: True or False.
    """
    if not primary_sea:
        return False
    additional_vlans = primary_sea.get_primary_vea().addl_vlan_ids
    return int(vlanid) in additional_vlans
58bf477e108e1e0a7fe0d9ab8b5d5b311caa1bd8
64,183
def unflatten_dict(flat_dict):
    """Convert a flattened dict to a nested dict. Inverse of flatten_config.

    Args:
        flat_dict: A dictionary to unflatten.

    Returns:
        A dictionary with all keys containing `.` split into nested dicts.
        {'a.b.c': 1} --> {'a': {'b': {'c': 1}}}
    """
    result = {}
    # dict.iteritems() was removed in Python 3; items() is the replacement.
    for key, val in flat_dict.items():
        parts = key.split('.')
        cur = result
        for part in parts[:-1]:
            # setdefault creates the intermediate dict on first visit.
            cur = cur.setdefault(part, {})
        cur[parts[-1]] = val
    return result
de01c934239eb29f33fe6ad8846cd828cabea61d
64,192
def add_note(html):
    """
    Adds a warning to the top of the html page

    :param html: the raw html
    :return: html with warning
    """
    warning = '<p>~!This is a generated file. Any modifications to it will be lost upon next update. Please use the documentation files in the project repository. !~</p>'
    return warning + html
1f02accc06f2907592face99eff666418071b7ab
64,199
def scale_to_control(x, axis_scale=350., min_v=-1.0, max_v=1.0):
    """
    Normalize raw HID readings to target range.

    Args:
        x (int): Raw reading from HID
        axis_scale (float): (Inverted) scaling factor for mapping raw input value
        min_v (float): Minimum limit after scaling
        max_v (float): Maximum limit after scaling

    Returns:
        float: Clipped, scaled input from HID
    """
    scaled = x / axis_scale
    # Clamp to the [min_v, max_v] window.
    if scaled < min_v:
        return min_v
    if scaled > max_v:
        return max_v
    return scaled
9c5df3adaad9c040d13bb2f12e7704328b6ec749
64,201
def get_displayed_page_number(i, num_pages, site):
    """Get page number to be displayed for entry `i`."""
    index = i or 0
    if site.config["INDEXES_STATIC"]:
        # Static indexes label entry 0 as the last page.
        return index if index > 0 else num_pages
    if site.config["INDEXES_PAGES_MAIN"]:
        return index + 1
    return index
07332420003e1bd4887917487b868dd3e6d22c40
64,202
def invLinear(rlz, **kwargs):
    """
    Inverse linear fitness method

    .. math::

        fitness = \\frac{1}{a * obj + b * penalty}

    @ In, rlz, xr.Dataset, containing the evaluation of a certain set of
        individuals (can be the initial population for the very first
        iteration, or a population of offsprings)
    @ In, kwargs, dict, dictionary of parameters for this fitness method:
        objVar, string, the name of the objective variable
        a, float, linear coefficient for the objective function (default = 1.0)
        penalty, float, measuring the severity of the constraint violation.
            (default = 0.0)
        b, float, linear coefficient for the penalty measure. (default = 1.0)
    @ Out, fitness, float, the fitness function of the given objective
        corresponding to a specific chromosome.
    """
    # .get() tolerates both a missing key and an explicit None value; the
    # previous kwargs['a'] == None raised KeyError when the key was absent
    # despite the documented defaults (and `== None` is an anti-idiom).
    a = kwargs.get('a')
    a = 1.0 if a is None else a
    b = kwargs.get('b')
    b = 1.0 if b is None else b
    penalty = kwargs.get('penalty')
    penalty = 0.0 if penalty is None else penalty
    objVar = kwargs['objVar']
    return 1.0 / (a * rlz[objVar] + b * penalty)
8c6803a36597f724e76b62422ed482f52abf28f4
64,205
def float_to_ratio(value: float) -> int:
    """Casts to an int, but rounds to the nearest value first."""
    percent = round(value * 100)
    return int(percent)
d16e1312bf13b9a83a41dd6914d5b4da831c3028
64,207
import re
import itertools


def possible_spack_module_names(python_mod_name):
    """Given a Python module name, return a list of all possible spack module
    names that could correspond to it."""
    # Strip a leading "num" that precedes a digit (numpy-family naming quirk).
    base = re.sub(r'^num(\d)', r'\1', python_mod_name)
    # Split while keeping the underscores so each can be substituted.
    pieces = re.split(r'(_)', base)
    choices = [['_', '-']] * base.count('_')
    names = []
    for combo in itertools.product(*choices):
        candidate = list(pieces)
        # Separators sit at the odd positions after the split.
        candidate[1::2] = combo
        names.append(''.join(candidate))
    return names
06450a60423726a098772cf8e45037861237e508
64,208
def has_method(method_name):
    """Validator that checks if a given class has method with the given name"""
    def _validator(kls):
        return hasattr(kls, method_name) and callable(getattr(kls, method_name))
    return _validator
28b741d05157c3ebfdfd9aba40194ff12aaa323a
64,209
def create_ipv4_filter_rule(container_ip, bridge, proto, container_port):
    """return a iptables v4 filter rule for forwarding a host port to a
    container IP:port"""
    template = (
        '-A FORWARD -d {container_ip} ! -i {bridge} -o {bridge}'
        ' -p {proto} -m {proto} --dport {container_port}'
        ' -j ACCEPT\n'
    )
    return template.format(container_ip=container_ip, bridge=bridge,
                           proto=proto, container_port=container_port)
bd8a187209f4daa51d0d303f156db71a5f02e194
64,212
import aiohttp
from typing import Tuple


async def get_html_contest(
    session: aiohttp.ClientSession,
    contest_id: int,
) -> Tuple[int, str]:
    """Get html of the contest main page.

    Args:
        session (aiohttp.ClientSession) : session
        contest_id (int) : contest id of the contest

    Returns:
        Tuple[int, str] : contest_id, html code of the contest.
    """
    url = "http://codeforces.com/contest/{contest_id}?locale=ru".format(
        contest_id=contest_id,
    )
    async with session.get(url) as response:
        body = await response.text()
    return contest_id, body
c1a2e864e6cad1ac24882ba534f35ffb604c2e68
64,214
def flatten(in_list):
    """given a list of values in_list, flatten returns the list obtained by
    flattening the top-level elements of in_list."""
    flattened = []
    for element in in_list:
        # Lists are spliced in; everything else is kept as a single item.
        if isinstance(element, list):
            flattened += element
        else:
            flattened += [element]
    return flattened
bc19c7fa396900c05380ed7a2cfae096e25fbce6
64,216
def code(msg: str):
    """Format to markdown code block"""
    return '```{}```'.format(msg)
a4cd668e427e78852f514a875f6498606668d4ae
64,222
def synchronized(func):
    """
    synchronized decorator for class object, the class must contain a member
    called lock.
    """
    def _synchronized(*args, **kwargs):
        # The first positional argument is the instance carrying the lock.
        owner = args[0]
        with owner.lock:
            return func(*args, **kwargs)
    return _synchronized
2c053edf83247fe4aad8e4e34e08e1d0e4f5349d
64,223
def parse_string_literal(string):
    """Evaluate a string with certain special values, or return the string.

    Any further parsing must be done outside this module, as this is as
    specialized as we're willing to be in assuming/interpreting what a
    string is supposed to mean.

    Parameters
    ----------
    string : string

    Returns
    -------
    val : bool, None, or str

    Examples
    --------
    >>> print(parse_string_literal('true'))
    True

    >>> print(parse_string_literal('False'))
    False

    >>> print(parse_string_literal('none'))
    None

    >>> print(parse_string_literal('something else'))
    something else
    """
    # Normalize once instead of re-computing for every comparison.
    normalized = string.strip().lower()
    specials = {'true': True, 'false': False, 'none': None}
    if normalized in specials:
        return specials[normalized]
    return string
49676321fb4a60b5e5fe5c6ceecf2a96dd1948df
64,226
def colomns(board: list) -> list:
    """
    Return list of the columns of the given board (assumed square).
    """
    transposed = []
    for col in range(len(board)):
        transposed.append("".join(row[col] for row in board))
    return transposed
28694a41c2bf1a848816e81934ea895371b15af4
64,229
def hill_func(x, a, b, c, d):
    """Hill function commonly used to fit MIC curves

    Args:
        x (numpy.array) Concentration vector (n, 1)
        a (float)
        b (float)
        c (float)
        d (float)
    Returns:
        y (float)
    """
    span = b - a
    return a + span / (1 + (x / c) ** d)
3fcd0d947445e0cf25206f9fa036fb9365bbf743
64,230
def _insert_rst_epilog(c): """Insert the rst_epilog variable into the configuration. This should be applied after other configurations so that the epilog can use other configuration variables. """ # Substitutions available on every page c[ "rst_epilog" ] = """ .. |eups-tag| replace:: {eups_tag} .. |eups-tag-mono| replace:: ``{eups_tag}`` .. |eups-tag-bold| replace:: **{eups_tag}** """.format( eups_tag=c["release_eups_tag"] ) return c
613b7b51c2399b3a1b23539b5227c3b23bfb817e
64,231
def get_entailments(synset):
    """Look up and return the entailments for a given verbal synset.

    Returns the sorted lemma names of every synset entailed by ``synset``.
    """
    entailed = synset.entailments()
    names = [lemma.name() for entailed_synset in entailed
             for lemma in entailed_synset.lemmas()]
    return sorted(names)
dbf589c6f55a4d3dedaeed81100314a7a2e7e1c5
64,232
import math


def normpdf(x, mu, sigma):
    """Probability density of the normal distribution N(mu, sigma) at x.

    Parameters
    ----------
    x : float
        Point at which to evaluate the density.
    mu : float
        Mean of the distribution.
    sigma : float
        Standard deviation.  A value of 0 is treated as infinite spread,
        which makes the density 0.0 everywhere (preserves the original
        behavior of the ``sigma == 0`` guard).

    Returns
    -------
    float
        Density value at ``x``.

    Raises
    ------
    ValueError
        If ``x``, ``mu`` or ``sigma`` cannot be converted to float.
        (The original printed a message and called ``exit()``, killing the
        whole process from a library function; raising is the fix.  It also
        only caught ValueError, so ``float(None)`` crashed with TypeError.)
    """
    try:
        x = float(x)
        mu = float(mu)
        sigma = float(sigma)
    except (TypeError, ValueError) as err:
        raise ValueError('x, mu or sigma are not all numeric') from err
    if sigma == 0:
        # Matches the original guard: zero sigma is mapped to infinity,
        # collapsing the density to 0.0.
        sigma = float('inf')
    denom = (2 * math.pi) ** .5 * sigma
    num = math.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
    return num / denom
ee24e2c882ebcd6b2027fe763184b907bb133836
64,234
def density_track_distance(nn, interpolated_track, n_neighbors=30):
    """Distance from each track point to its n_neighbors-th nearest neighbor.

    ``nn`` is a fitted nearest-neighbors model exposing the sklearn-style
    ``kneighbors`` API; ``interpolated_track`` holds the query points.
    """
    distances, _indices = nn.kneighbors(
        X=interpolated_track, n_neighbors=n_neighbors, return_distance=True
    )
    # Neighbor distances are sorted ascending, so the n'th one is the
    # last column.
    return distances[:, -1]
30917dc70decbf5cfa856c5192ea49710a6de6ab
64,235
def passes_language_test(t008, t041s):
    """
    Checks if data in 008 and 041$a fulfills Recap language test

    args:
        t008: str, value of 008 MARC tag
        t041s: list, list of language codes found in 041 $a
    returns:
        Boolean: True if applicable for Recap, False if not
    """
    # Collect every language code: positions 35-38 of the 008 field plus
    # all 041 $a codes (a missing 041 list is treated as empty).
    langs = set(t041s or [])
    langs.add(t008[35:38])
    # English material (including multilanguage sets that contain English)
    # is not to be sent to Recap.
    return "eng" not in langs
00d807f3c96bc85d1531eadf8f82781f04d04ff1
64,238
def colors_i_like(palette=None):
    """A list of colours I like to choose from.

    palette options:
        None
        Base
        IBM
        Wong
        Tol
        CB_pairs

    Unknown palette names return None (matching the original fall-through).
    """
    if palette is None:
        return [
            '#FA7268', '#F8A72A', '#DAF7A6', '#900C3F', '#6BADB0',
            '#DB869D', '#F6D973', 'mediumvioletred', 'skyblue', 'gold',
            'palegreen', 'coral',
        ]
    named_palettes = {
        'Base': ['#D81B60', '#1E88E5', '#FFC107', '#FE6100', '#004D40'],
        'IBM': ['#648FFF', '#785EF0', '#DC267F', '#FE6100', '#FFB000'],
        'Wong': ['#000000', '#E69F00', '#56B4E9', '#009E73', '#F0E442',
                 '#0072B2', '#D55E00', '#CC79A7'],
        'Tol': ['#332288', '#117733', '#44AA99', '#88CCEE', '#DDCC77',
                '#CC6677', '#AA4499', '#882255'],
        'CB_pairs': ['#FFC20A', '#0C7BDC', '#994F00', '#006CD1', '#E1BE6A',
                     '#40B0A6', '#E66100', '#5D3A9B', '#1AFF1A', '#4B0092',
                     '#FEFE62', '#D35FB7', '#005AB5', '#DC3220', '#1A85FF',
                     '#D41159'],
    }
    return named_palettes.get(palette)
335414c5048499faf6d706468d73d6644be024b9
64,242
def get_extension(extension: str) -> str:
    """
    Return the extension with a dot.

    :param extension: The extension to format.
    :type extension: str
    :return: Return the extension with a dot or empty if no extension.
    :rtype: str
    """
    if not extension:
        return ""
    return f".{extension}"
0d7524851036b48e06768fb65ec0c0f658e504c2
64,248
def add_coordinates(a, b):
    """Element-wise sum of two coordinate vectors, returned as a tuple.

    Pairs are formed with zip, so the result is truncated to the shorter
    of the two inputs.
    """
    return tuple(first + second for first, second in zip(a, b))
99219faca85d896ec7d63308c1e81fbdbe3f80af
64,249
def get_average_gross_value_per_movie(actors: dict) -> float:
    """
    Utility function to calculate the average gross value of actors in a
    given dictionary, per movie

    :param actors: Dictionary (Actor Name --> Actor Node)
    :return: Average Gross Value of the actors
    """
    # Each actor contributes gross_value spread over movie count + 1
    # (the +1 mirrors the original smoothing term).
    per_actor = (
        node.gross_value / (len(node.movies_starred_in) + 1)
        for node in actors.values()
    )
    return sum(per_actor) / len(actors)
93e0eaf0c7d33b9a97aeb5d5f5e3ca8f79daf63f
64,252
def terms_accept_url() -> str:
    """Return the route path at which users accept the terms of service."""
    return '/terms/accept'
44f3409491591eb5284d249d34da4cf06f150a3a
64,256
def get_device_type(devices):
    """Return the device string for the available device(s).

    The original docstring claimed the result was "cpu" or "gpu", but the
    code returns the torch-style string "cuda" — the docstring is fixed here.

    :param devices: sequence of device ids; -1 in the first slot means
        no accelerator is available.

    Returns:
        str: "cpu" if devices[0] == -1, otherwise "cuda"
    """
    return "cpu" if devices[0] == -1 else "cuda"
79222654d6c4023a2f634b819c54357fad8b19ae
64,260
def _setter_error_message(attribute_name): """ Use the same error message for attributes """ return "Set new `{}` using .set_{}".format(attribute_name, attribute_name)
1133583e0009a890fe73bc65bc804fa2a6dad8e9
64,264
import secrets


def random_file_name(filename: str) -> str:
    """
    Replace file name with a random string and preserving its extension.
    Result would look like <random_string>.<extension>

    :param filename: str
    :return: str
    """
    # Only the text after the final dot is kept (a name without a dot
    # is reused whole, exactly like the original split(".")[-1]).
    extension = filename.rsplit(".", 1)[-1]
    token = secrets.token_urlsafe(16)
    return f"{token}.{extension}"
78d12aa08d55f0fba71427844830a71710ac1bf0
64,265
def trajectory_importance_max_avg(states_importance):
    """
    computes the importance of the trajectory, according to max-avg approach:
    delta(max state, avg)
    """
    mean_importance = sum(states_importance) / len(states_importance)
    peak_importance = max(states_importance)
    return peak_importance - mean_importance
8fde9c62c661f8ea2041a159864d2c27e662988d
64,266
import re


def get_mentions(sentence, dict_configuration):
    """Get the pronouns and mentions from a sentence.

    ``sentence`` is an iterable of CoNLL-style token tuples where
    ``token[0]`` is the token index, ``token[2]`` the word form and
    ``token[-1]`` the MISC column; ``dict_configuration`` maps pronoun
    forms to their configuration.  Each hit is emitted as
    "<form>\\t<KIND>\\t<index>" with KIND in {PRON, MENTION}.
    """
    mentions = []
    for token in sentence:
        form, index, misc = token[2], token[0], token[-1]
        if form in dict_configuration:
            mentions.append(form + "\t" + "PRON\t" + index)
        if re.search("Mention=Yes", misc):
            mentions.append(form + "\t" + "MENTION\t" + index)
    return mentions
e0eaeed2db9af57e65606616e92d0521169847b9
64,275
import requests


def get_weather_date(city: str) -> dict:
    """The function gets weather date from 'http://api.openweathermap.org'

    :param city: (str),
    :return: a dictionary with data
    """
    base = "http://api.openweathermap.org/data/2.5/weather"
    url = (
        f"{base}?q="
        f"{city}&units=metric&appid=6d8309305eeab8655e9b0c4ed74f5b9e"
    )
    response = requests.get(url)
    return response.json()
8c1c4776fecd6ea8e0d3a579afdfdc68d24f5228
64,277
from typing import Any


def sort_internal_lists(data: Any) -> Any:
    """
    Recursively sort all lists & sets within a given data structure.

    Dicts are updated in place (their values recursed); lists and sets are
    replaced by sorted lists.  The original only sorted the *top level* of a
    list and never recursed into its elements, so nested lists/sets inside a
    list stayed unsorted — contradicting the documented contract; this
    version recurses into every element first.

    :param data: Data structure to internally sort
    :return: Data structure with sorted lists
    """
    if isinstance(data, dict):
        for key, value in data.items():
            data[key] = sort_internal_lists(value)
    elif isinstance(data, (set, list)):
        # Sort children first, then return a sorted copy of this level.
        return sorted(sort_internal_lists(item) for item in data)
    return data
bcbe7cbf1caaeb8bdf61f575a2d06c2090c82a63
64,281
def get_middle_indexes(lst):
    """Fetch indexes of the one or two middle elements of a list.

    Returns [None] for lists of 2 or fewer elements (quirk preserved from
    the original), [n // 2] for odd lengths, and the two central indexes
    for even lengths.

    Bug fixed: the even branch used true division (``n / 2 - 1``, ``n / 2``),
    producing floats that cannot be used as sequence indexes; floor division
    now yields ints with the same values.
    """
    n = len(lst)
    if n <= 2:
        return [None]
    if n % 2 == 0:
        return [n // 2 - 1, n // 2]
    else:
        return [n // 2]
06164efb2b575e582acefd590dffbff44869ef6c
64,288
import math


def distance3D(loc3d1=tuple(), loc3d2=tuple()):
    """
    get distance from (x1, y1, z1), (x2, y2, z2)

    loc3d1 : (x1, y1, z1)
    loc3d2 : (x2, y2, z2)
    return : Euclidean distance between the two points
    """
    squared = sum((loc3d1[axis] - loc3d2[axis]) ** 2 for axis in range(3))
    return math.sqrt(squared)
e780a5125a725f3d99a1f730d9ba4b2b9167e059
64,290
def paymo_bfs(graph, start, goal, diameter=float('inf')):
    """
    Description: Breadth-First Search for the connection level of the sender
        and receiver.  This function also has a diameter input to specify the
        maximum connection distance desired before False is returned.

    Input:  graph [defaultdict(set)] - Graph of the current state of the user network
            start [int] - userID who is sending the money using paymo
            goal [int] - userID who is receiving the money using paymo
            diameter [int or float] - maximum number of connections before False is returned

    Output: connection distance [int] OR
            False [Bool] - if the connection is greater than the diameter

    Performance fixes vs. the original: a collections.deque replaces
    list.pop(0) (which is O(n) per pop), and only the depth is tracked
    instead of copying a full path list for every enqueued node — BFS depth
    equals len(path) - 1, so results are unchanged.
    """
    from collections import deque

    queue = deque([(start, 0)])
    discovered = {start}
    while queue:
        vertex, depth = queue.popleft()
        if depth > diameter:
            # BFS dequeues in non-decreasing depth, so once a node exceeds
            # the diameter no shorter route to the goal can remain.
            return False
        if vertex == goal:
            return depth
        for neighbor in graph[vertex]:
            if neighbor not in discovered:
                discovered.add(neighbor)
                queue.append((neighbor, depth + 1))
    return False
b24aecec4c8139b47768ec9776754df7b94d3892
64,296
def escape(string: str) -> str:
    """
    Returns the SQL representation of the given string, by escaping all
    single-quotes with two single-quotes and surrounding the string with
    single-quotes.
    """
    doubled = string.replace("'", "''")
    return "'" + doubled + "'"
de83d47884d201fa3f9b3ec4f656d1b17f546163
64,297
import gzip


def gz_open(fname, omode):
    """
    Use :py:mod:`gzip` library to open compressed files ending with .gz.

    Parameters
    ----------
    fname : str
        Path to file to open.
    omode : str
        String indicating how the file is to be opened.

    Returns
    -------
    file
        File Object.
    """
    if not fname.endswith(".gz"):
        return open(fname, omode)
    return gzip.open(fname, omode)
1016ef102586d36e8949d4bad36d79ec13af1e14
64,300
import math


def bin_of_week(dt, bin_size_mins=30):
    """
    Compute bin of week based on bin size for a pandas Timestamp object
    or a Python datetime object.

    Based on .weekday() convention of 0=Monday.

    Parameters
    ----------
    dt : pandas Timestamp object or a Python datetime object.
    bin_size_mins : Size of bin in minutes; default 30 minutes.

    Returns
    -------
    0 to (n-1) where n is number of bins per week.

    Examples
    --------
    dt = datetime(2020, 2, 4, 1, 45)
    bin = bin_of_week(dt, 30)
    # bin = 51

    Cleanup: the commented-out dead code (an old ``dt is None`` default)
    has been removed; ``dt`` is required.
    """
    # Number of minutes from beginning of week (Monday is 0)
    minutes = (dt.weekday() * 1440) + (dt.hour * 60) + dt.minute

    # Convert minutes to bin
    time_bin = math.trunc(minutes / bin_size_mins)

    return time_bin
f49cd29e43e32bd3d331cdcca593ed00c6737a90
64,302
def _f_j(q_j, b_j_norm, a_1_j, a_2_j, m): """Compute the objective with respect to one of the coefficients i.e. (1/2m)||q_j||^2 + a_1||b_j|| + (a_2/2)||b_j||^2. """ return ( ((q_j @ q_j) / (2.0 * m)) + (a_1_j * b_j_norm) + ((a_2_j / 2.0) * (b_j_norm ** 2)) )
da18c80087ebb0dfa26985c2e0551d1887bade6f
64,305
def sanitize_text(text):
    """Replace every comma in the input with a space."""
    return text.replace(",", " ")
19a0e0585c3314634303f02adb527bd6f0230cd9
64,307
def second_to_day(seconds):
    """
    Convert seconds since the start of data collection to days.

    :param seconds: (int or numeric str) Time in seconds starting at 0 as
        start of data collection.
    :return: (float) Time in days starting at 0 as start of data collection.
        True division is used, so fractions of a day are preserved
        (e.g. 43200 -> 0.5); the original docstring incorrectly claimed an
        int return.
    """
    # 86400 seconds per day; int() also accepts numeric strings.
    return int(seconds) / 86400
396accdfdd5486ffb3dcafa00ba8db6020562334
64,308
def encode_classes(df):
    """Encodes the output classes as integers and returns a dictionary of
    the encodings.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with encoded column Type_ID.
    dict
        Dictionary mapping each integer code back to its class name.
    """
    df['Type_ID'] = df['Type'].astype('category').cat.codes
    # Build the code -> class-name mapping from the unique (Type, Type_ID)
    # pairs, ordered by code so the positional index equals the code.
    pairs = df[['Type', 'Type_ID']].drop_duplicates(subset=['Type', 'Type_ID'])
    pairs = pairs.sort_values(by='Type_ID').reset_index(drop=True)
    encoding_dict = pairs['Type'].to_dict()
    return df, encoding_dict
fb1f6384e04e86b033e4b079676468a4e7e4ff60
64,317
from io import StringIO


def figure_to_svg(fig):
    """Convert a figure to svg for inline display.

    The figure's face/edge colors are forced to white while rendering and
    restored afterwards, even if rendering raises.
    """
    original_face = fig.get_facecolor()
    original_edge = fig.get_edgecolor()
    fig.set_facecolor('white')
    fig.set_edgecolor('white')
    try:
        buffer = StringIO()
        fig.canvas.print_figure(buffer, format='svg')
        return buffer.getvalue()
    finally:
        fig.set_facecolor(original_face)
        fig.set_edgecolor(original_edge)
d3272210cc6d07821715cd248ed3e1c5ae03dc7c
64,324
def __get_mp_chunksize(dataset_size: int, num_processes: int) -> int: """ Returns the number of chunks to split the dataset into for multiprocessing. Args: dataset_size: size of the dataset num_processes: number of processes to use for multiprocessing Returns: Number of chunks to split the dataset into for multiprocessing """ chunksize = dataset_size // num_processes return chunksize if chunksize > 0 else 1
0702cdc7e7772a21d25cf8a7b6f288e7171b2f6c
64,328
def verified_blacklisted_tag(x, tag):
    """
    check for '<' + blacklisted_tag + ' ' or '>' as in:
    <head> or <head ...> (should not match <header if checking for <head)
    """
    opening = "<{0}".format(tag)
    # Compare exactly len(opening) + 1 characters so e.g. "<header"
    # cannot match a check for "head".
    prefix = x[0:len(opening) + 1]
    return prefix in (opening + " ", opening + ">")
34ef302ba97c38cbeea2a72bd258ab5631528832
64,341