content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import torch
from typing import Tuple


def get_model_size(model: torch.nn.Module) -> Tuple[int, int]:
    """Count the parameters of a model.

    Args:
        model: the module whose parameters are counted.

    Returns:
        Tuple[int, int]: (total parameter count, trainable parameter count),
        where "trainable" means ``requires_grad`` is True.
    """
    # Generator expressions avoid materializing intermediate lists.
    num_total_params = sum(p.numel() for p in model.parameters())
    num_train_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return num_total_params, num_train_params
33b86535b86d7b4205ccb20269d9862d5ad7371e
51,585
def count_bots_in_range(bots, bot):
    """Return how many bots in *bots* lie within the radius of *bot*."""
    return sum(1 for other in bots if bot.is_in_radius(*other.coordinates))
20035f2c589eb511ca7f8efbca03d8d8d21494ca
51,588
from typing import List
from typing import Dict
from functools import reduce


def combine_dictionaries(dicts: List[Dict]) -> Dict:
    """Flatten a list of dicts into one dict.

    List order matters: keys from later dicts overwrite earlier ones.

    Args:
        dicts (List[Dict]): dictionaries to merge, earliest first.

    Returns:
        Dict: the merged dictionary ({} for an empty input list).
    """
    merged = {}
    for entry in dicts:
        merged.update(entry)
    return merged
a4b9e288886c1753cee23b9816c19b471e2e2aea
51,591
def _parse_ssh_public_keys(file_content): """Parses ssh-keys, so that the returned output has the right format.""" file_content = file_content.replace("\r\n", "") ssh_keys = file_content.replace("ssh-rsa", "\nssh-rsa").strip() return ssh_keys.splitlines()
2865280ca4e15db9ad26bb2d3b6380c35acd801b
51,593
def get_field_by_data_key(schema, data_key):
    """Look up a schema field by its serialized (data_key) name.

    :param schema: Instantiated schema.
    :type schema: :class:`~marshmallow.schema.Schema`
    :param str data_key: Name of the field as it was serialized.
    :return: The schema field if found, None otherwise.
    :rtype: :class:`~marshmallow.fields.Field` or None
    """
    # Fast path: some schemas pre-compute a data_key -> field mapping.
    mapping = getattr(schema, "fields_by_data_key", None)
    if mapping is not None:
        return mapping.get(data_key)
    # Fallback: scan the declared fields, matching each field's data_key
    # (or its attribute name when no explicit data_key was set).
    for field_name, field in schema.fields.items():
        if (field.data_key or field_name) == data_key:
            return field
    return None
2ab2e0be9a7e8ada0cfce13af69e319112d89c09
51,596
def findTrueWidth(string):
    """Find the length of a byte string without trailing zeros.

    Useful for optimizing string matching functions.

    Parameters
    ----------
    string : a byte string as an array of int8
        A byte string as an array of int8 (e.g. from ``np.frombuffer``).

    Returns
    -------
    int
        Number of bytes in string, excluding trailing zero bytes.

    Examples
    --------
    >>> a = np.chararray(1, itemsize=5)
    >>> a[0] = b'abc'
    >>> findTrueWidth(np.frombuffer(a, dtype=np.int8))
    3
    """
    import numpy as np

    # Vectorized replacement for the original reversed Python loop:
    # the width is one past the index of the last non-zero byte.
    nonzero = np.flatnonzero(string)
    return int(nonzero[-1]) + 1 if nonzero.size else 0
740987ffee4b347fbc9563e7959c824bae7fbbd9
51,598
def preprocessing_tolist(preprocess):
    """Normalize a preprocessing spec into a list.

    A non-list value is wrapped in a list; any dict element is additionally
    wrapped in its own single-element list.

    Parameters
    ----------
    preprocess : category_encoders, ColumnTransformer, list, dict, optional
        The processing applied to the original data.

    Returns
    -------
    list
        A list containing all preprocessing steps.
    """
    if isinstance(preprocess, list):
        encodings = preprocess
    else:
        encodings = [preprocess]
    return [[step] if isinstance(step, dict) else step for step in encodings]
73cf9f225d1b1c6421c4045381b79bbab3be40b4
51,602
def group_dict_by_keys(condition, d: dict):
    """Split *d* into two dicts by whether each key satisfies *condition*.

    Returns a tuple ``(matching, non_matching)``.
    """
    matching, non_matching = {}, {}
    for key, value in d.items():
        if condition(key):
            matching[key] = value
        else:
            non_matching[key] = value
    return matching, non_matching
e9a1fd991e77f9dcbbe0c76270c50bf00d3328df
51,603
def process(
    records,
    parse_record=None,
    include_record=None,
    transform_record=None,
):
    """Build a lazy pipeline that optionally parses, filters, and transforms
    the given records. Return an iterable of records.

    records: Iterable<list<str>>
    parse_record: function(list<str>)->list<object>
        Converts the text fields of each record into usable values.
    include_record: function(list<object>)->bool
        Predicate deciding whether a record is kept. Applied after parsing.
    transform_record: function(list<object>)->list<object>
        Transforms a record. Applied after filtering.
    """
    # Stages run in order: parse -> filter -> transform; None stages are skipped.
    stages = (
        (parse_record, map),
        (include_record, filter),
        (transform_record, map),
    )
    for func, apply in stages:
        if func is not None:
            records = apply(func, records)
    return records
f05e1eef9be0400e9b97b5bfb3d0a1afc36c8438
51,606
def prettyTime(time: int) -> str:
    """Format a duration given in seconds for human-readable display."""
    minutes, seconds = divmod(time, 60)
    hours, minutes = divmod(minutes, 60)
    seconds = round(seconds, 2)
    if not hours:
        return "%s minute(s), %s second(s)" % (int(minutes), seconds)
    return "%s hour(s), %s minute(s), %s second(s)" % (int(hours), int(minutes), seconds)
a77c5ac0e6d3030a983a957955ff339ca2db47fa
51,613
def format_param_doc(doc: str) -> str:
    """Ensure a parameter documentation string ends with a period.

    :param doc: The parameter documentation to format.
    :return: The formatted string.
    """
    return doc if doc.endswith(".") else f"{doc}."
fc74ad0961143851b88460cd5473bde2d9999bb1
51,616
def next_direction(direction):
    """Return the pointing direction after a quarter turn to the right.

    Raises KeyError for an unknown direction symbol.
    """
    quarter_turn_right = {"d": "l", "r": "d", "u": "r", "l": "u"}
    return quarter_turn_right[direction]
435fa4c55033efe9ca8d1279ffa2474493d47de0
51,617
def set_attr_forbid_unknown(cls):
    """Override the __setattr__ method of a dataclass `cls` to forbid setting
    fields that do not exist in `cls`.

    Returns a ``__setattr__`` function intended to be assigned onto ``cls``;
    it rejects unknown attribute names with AttributeError and delegates
    known dataclass fields to the normal attribute machinery.
    """
    def __setattr__(self, key, value):
        # Only names declared as dataclass fields on the instance's class
        # (or a subclass) may be set.
        if key not in self.__class__.__dataclass_fields__:
            raise AttributeError(
                f"Class {self.__class__} has no attribute {key}. "
                f"Available fields: {', '.join(self.__class__.__dataclass_fields__.keys())}"
            )
        else:
            # super(cls, self) skips this override in the MRO so the default
            # object.__setattr__ (or a base-class override) performs the set.
            return super(cls, self).__setattr__(key, value)
    return __setattr__
02ac33feefe1be1c44713728c041f9c9208123d0
51,621
import re def _SubstituteDefaultProject(secret_version_ref, default_project_id, default_project_number): """Replaces the default project number in place of * or project ID. Args: secret_version_ref: Secret value reference. default_project_id: The project ID of the project to which the function is deployed. default_project_number: The project number of the project to which the function is deployed. Returns: Secret value reference with * or project ID replaced by the default project. """ return re.sub( r'projects/([*]|{project_id})/'.format(project_id=default_project_id), 'projects/{project_number}/'.format( project_number=default_project_number), secret_version_ref)
b86c2a6c1bc871d422dc69daeda598590ac951be
51,623
def unread(thread, user):
    """Return True if *user* has any unread messages in *thread*."""
    unread_entries = thread.userthread_set.filter(user=user, unread=True)
    return bool(unread_entries)
e5155b63830d7773b5ac6c1945cff3b9874c2613
51,634
from datetime import datetime


def generate_timestamp(is_year=True, is_month=True, is_day=True,
                       is_hour=True, is_minute=True, is_second=True,
                       is_year_digit_4=True, is_padding=True,
                       is_separate_date_and_time=True, separate_character="_"):
    """Build a timestamp string for the current local time, suitable for
    building file names.

    Keyword Arguments:
        is_year {bool} -- include the year component (default: True)
        is_month {bool} -- include the month component (default: True)
        is_day {bool} -- include the day component (default: True)
        is_hour {bool} -- include the hour component (default: True)
        is_minute {bool} -- include the minute component (default: True)
        is_second {bool} -- include the second component (default: True)
        is_year_digit_4 {bool} -- 4-digit year if True, else last 2 digits
        is_padding {bool} -- zero-pad components to 2 digits (default: True)
        is_separate_date_and_time {bool} -- insert separator between date/time
        separate_character {str} -- the separator character

    Returns:
        str: concatenated timestamp, e.g. "20240131_093005".
    """
    now = datetime.now()

    def component(value, enabled):
        # Render a single date/time component, honoring the padding switch.
        if not enabled:
            return ""
        text = str(value)
        return text.zfill(2) if is_padding else text

    year_text = ""
    if is_year:
        # The year is never zero-padded; optionally shortened to 2 digits.
        year_text = str(now.year) if is_year_digit_4 else str(now.year)[2:]

    pieces = (
        year_text,
        component(now.month, is_month),
        component(now.day, is_day),
        separate_character if is_separate_date_and_time else "",
        component(now.hour, is_hour),
        component(now.minute, is_minute),
        component(now.second, is_second),
    )
    return "".join(pieces)
762489d6c89e7cb3be3d2b2704ca4d5983064239
51,636
import math


def conv_deg_rad(deg):
    """Convert an angle from degrees to radians."""
    radians = deg * math.pi / 180
    return radians
d0c4851f8698f60b01d73c3c38310ac5be11006c
51,637
def get_inputdim(args):
    """Compute the full input dimension: feature dim times the context window
    (left context + current frame + right context)."""
    context_width = args.lctx + 1 + args.rctx
    return args.feats_dim * context_width
ace1b7b879f2949243985fdd0206c6ba6d904f84
51,638
def set_fragment_indicies(x):
    """Return the indices delimiting *x* without leading/trailing gaps.

    Parameters
    ----------
    x : str
        A sequence string where '-' marks a gap.

    Returns
    -------
    list
        ``[start, end]`` such that ``x[start:end]`` is the sequence with
        leading and trailing '-' characters removed. For an all-gap (or
        empty) string the slice is empty.
    """
    # Count leading gaps via lstrip, locate the last non-gap via rstrip;
    # this replaces the original index-walking loop in two O(n) passes.
    start = len(x) - len(x.lstrip('-'))
    end = len(x.rstrip('-'))
    return [start, end]
daaa1382991d3c9e88b3a354c8330c3685663e4c
51,640
def OffsetCurve(curve, i, n, dx=0.3, dy=0.3):
    """Adds noise to the pairs in a curve.

    i is the index of the curve
    n is the number of curves
    dx and dy control the amplitude of the offset in each dimension.

    Offsets are spread evenly from -dx/-dy to +dx/+dy across the n curves.
    Returns a new list of (x, y) pairs; the input curve is not modified.
    """
    if n > 1:
        xoff = -dx + 2 * dx * i / (n - 1)
        yoff = -dy + 2 * dy * i / (n - 1)
    else:
        # A single curve sits at the center; the original divided by (n-1)
        # and crashed with ZeroDivisionError for n == 1.
        xoff = yoff = 0.0
    return [(x + xoff, y + yoff) for x, y in curve]
dd0e2ca9404e3161469400b7dfccc07957b902bb
51,641
def is_complex_list(in_list):
    """Return True if at least one element in in_list is complex, else False."""
    # any() with a generator short-circuits on the first complex element,
    # instead of materializing a full list of booleans.
    return any(isinstance(x, complex) for x in in_list)
2dd3c1d77fcbd5c13953521852388bc4f9f7574d
51,644
def tail(*args, **kwargs):
    """Return a callable that yields the last n rows of a DataFrame."""
    def _apply(df):
        return df.tail(*args, **kwargs)
    return _apply
7c1dca9f3d8bc467e5008db543f5d1743bf9245e
51,645
def seq_a(n):
    """Return the n-th element of the sequence (7 + 1/m) / (3 - 1/m^2)
    where m = n + 1."""
    m = float(n) + 1.0
    return (7.0 + (1.0 / m)) / (3.0 - (1.0 / m ** 2))
083999824e3ef9d7675af73c4881649f7fc882b3
51,647
def to_frame_size(frame_size=None, shape=None, img=None):
    """Converts an image size representation to a (width, height) tuple.

    Pass *one* keyword argument to compute the frame size; when several are
    given, ``img`` wins over ``shape``, which wins over ``frame_size``.

    Args:
        frame_size: the (width, height) of the image
        shape: the (height, width, ...) of the image, e.g. from img.shape
        img: the image itself

    Returns:
        a (width, height) frame size tuple

    Raises:
        TypeError: if none of the keyword arguments were passed
    """
    if img is not None:
        shape = img.shape
    if shape is not None:
        height, width = shape[0], shape[1]
        return width, height
    if frame_size is not None:
        return tuple(frame_size)
    raise TypeError("A valid keyword argument must be provided")
4ab5417b7716ff97af78cee5cf7614fe91f51138
51,648
def instructions_from_file(file_path):
    """Return list of key instructions from file.

    Args:
        file_path (str): Text file containing key instructions, one per line.

    Returns:
        (list): One stripped instruction string per line of the file.
    """
    with open(file_path, 'r') as instruction_file:
        return [line.rstrip('\r\n') for line in instruction_file]
da2b74a109e36a9c011502abba7000969d8f4414
51,654
def get_master_port(args):
    """Get master port from `args.init_method`.

    :param argparse.ArgumentParser args: `args` should contain `init_method`.
    :return: The port the master uses to communicate with slaves, taken from
        the last ':'-separated component, or None when init_method is unset.
    :rtype: str or None
    """
    if args.init_method is None:
        return None
    return args.init_method.split(":")[-1]
e9932494b6b19c7bf3f84fb865a6cab086e526d2
51,660
import time


def slow_square(n):
    """Square *n* after a deliberate 5-second delay (simulates slow work)."""
    time.sleep(5.0)
    squared = n * n
    return squared
8e868bca57d58303761c8febbccaf75741562a13
51,664
def vector_to_parameters(vector, dims):
    """Second helper function for gradient checking: the inverse of
    parameters-to-vector. Rebuild parameters of the given shapes from a
    flattened column vector.

    arguments:
    - vector: a numpy array of shape (n, 1)
    - dims: a list of [rows, cols] shape pairs, one per parameter, e.g.
      dims = [[3, 4], [3, 1]] for a 3x4 weight matrix and a 3x1 bias.

    returns a list of arrays, each reshaped to its entry in dims.
    """
    parameters = []
    offset = 0
    # rows * cols covers both cases: for a bias with cols == 1 the element
    # count is rows * 1 == rows, so the original special-case branch for
    # column vectors was redundant.
    for rows, cols in dims:
        size = rows * cols
        parameters.append(vector[offset:offset + size].reshape((rows, cols)))
        offset += size
    return parameters
c32f179250e96fd71c3b3e87547eb49ad7373a3d
51,665
def numpy_counter_to_dict(counter):
    """Convert a counter keyed by numpy values to a plain dict with str keys.

    :param counter: The counter obj
    :return: A dict mapping str(key) -> count
    """
    return {str(key): count for key, count in counter.items()}
c212a845357672536a207fb19c3c6da319b9a3e2
51,669
def _unfloat(flt, precision=5): """Function to convert float to 'decimal point assumed' format >>> _unfloat(0) '00000-0' >>> _unfloat(3.4473e-4) '34473-3' >>> _unfloat(-6.0129e-05) '-60129-4' >>> _unfloat(4.5871e-05) '45871-4' """ if flt == 0.0: return "{}-0".format("0" * precision) num, _, exp = "{:.{}e}".format(flt, precision - 1).partition("e") exp = int(exp) num = num.replace(".", "") return "%s%d" % (num, exp + 1)
0e252d1c24a9c5e29d952ca7ff797f014158ec09
51,670
def compute_image_size(args):
    """Computes the output image size after a convolutional layer.

    args unpacks to (old_size, in_channels, out_channels, kernel, stride,
    padding, dilation); channel counts are unused by the size formula.
    Returns the new spatial size as an int.
    """
    old_size, _in_ch, _out_ch, kernel, stride, padding, dilation = args
    # Effective receptive size of a dilated kernel: d*(k-1) + 1.
    effective_kernel = dilation * (kernel - 1) + 1
    return int((old_size + 2 * padding - effective_kernel) // stride) + 1
537ba40ef4effe3f9944e4608b71a51f4925e748
51,671
def str2bytes(x):
    """Convert input argument to bytes.

    bytes pass through unchanged; str is encoded one code point per byte
    (raises ValueError for code points above 255); anything else is
    stringified first and converted recursively.
    """
    # isinstance is the idiomatic type check and also accepts subclasses,
    # unlike the original `type(x) is ...` comparison.
    if isinstance(x, bytes):
        return x
    if isinstance(x, str):
        return bytes(ord(ch) for ch in x)
    return str2bytes(str(x))
9a445af02bf9eb280dc28ad72b5c449e27c0512f
51,687
def start_text(**kwargs):
    """Return a start-of-generation message.

    Reads optional 'name' and 'parameters' keyword arguments, falling back
    to the literal "undefined" for either when absent.
    """
    name = kwargs.get("name", "undefined")
    parameters = kwargs.get("parameters", "undefined")
    return f"Start of generation: {name}\nParameters: {parameters}"
278c0cfc8295f544544adfa2174219c4c7a223e2
51,691
def clamp(n, smallest, largest):
    """Force n into the closed interval [smallest, largest]."""
    # Bound above first, then below, matching max(smallest, min(n, largest)).
    upper_bounded = min(n, largest)
    return max(smallest, upper_bounded)
75afadfc7c49d07f9a2c23074d666cb0b55b0e00
51,693
def Truncate(inputs, channels):
    """Slice the inputs down to `channels` channels if necessary.

    Raises ValueError when the input has fewer channels than requested;
    returns the input unchanged when counts already match.
    """
    input_channels = inputs.size()[1]
    if input_channels < channels:
        raise ValueError('input channel < output channels for truncate')
    if input_channels == channels:
        # Nothing to do — hand the tensor back untouched.
        return inputs
    # Truncation should only be necessary when channel division leaves some
    # vertices with one extra channel; project down to the minimum count.
    assert input_channels - channels == 1
    return inputs[:, :channels, :, :]
7013f7394ebc4f7097b23748b6afb744c1915b09
51,694
def _extract_message_size(data: bytes):
    """Read out the full length of a CoAP messsage represented by data.

    Returns None if data is too short to read the (full) length.

    The number returned is the number of bytes that has to be read into data
    to start reading the next message; it consists of a constant term, the
    token length and the extended length of options-plus-payload.
    """
    if not data:
        return None

    # High nibble of the first byte: length field (values >= 13 mean the real
    # length follows in extra bytes); low nibble: token length (TKL).
    l = data[0] >> 4
    tokenoffset = 2
    tkl = data[0] & 0x0f

    if l >= 13:
        # Extended length encoding (presumably per RFC 8323 CoAP-over-TCP
        # framing — confirm): 13 -> 1 extra byte biased by 13, 14 -> 2 bytes
        # biased by 269, 15 -> 4 bytes biased by 65805.
        if l == 13:
            extlen = 1
            offset = 13
        elif l == 14:
            extlen = 2
            offset = 269
        else:
            extlen = 4
            offset = 65805
        if len(data) < extlen + 1:
            # Not enough bytes yet to decode the extended length field.
            return None
        tokenoffset = 2 + extlen
        l = int.from_bytes(data[1:1 + extlen], "big") + offset
    return tokenoffset, tkl, l
93ec368e1f8dc6a2d425991e72614f94bfe8c197
51,695
def find_nodes_by_attr(subtree, attr, value):
    """Return all nodes in `subtree` whose attribute `attr` equals `value`.

    Performs a depth-first, pre-order traversal over the 'children' lists.
    """
    matches = []
    if subtree[attr] == value:
        matches.append(subtree)
    for child in subtree.get('children', []):
        matches.extend(find_nodes_by_attr(child, attr, value))
    return matches
d4640222d730caa88ecc3defcdcc0e8ee06a2645
51,702
from typing import Optional
from typing import Any


def extract_event_description(event) -> Optional[Any]:
    """Helper function to extract event description.

    Args:
        event (dict): Full event data returned from API.

    Returns:
        str: the first truthy value among 'Description', 'Phrase' and
        'Summery' (in that priority), else None.
    """
    for key in ('Description', 'Phrase', 'Summery'):
        value = event.get(key)
        if value:
            return value
    return None
970ecb360825d7c6873bab80a222f9d632157953
51,711
def transform_figlets(fig: str) -> list[list[int]]:
    """Convert a figlet string to a matrix: 1 for each '#', 0 for any other
    character, with one row per newline-separated line."""
    return [[1 if char == "#" else 0 for char in row] for row in fig.split("\n")]
153603c056a552a51a23c26413332817e123c07d
51,712
def pyroII(h, kr, rho, cp, r): """ Calculate the pyrolysis number Py II for a biomass particle. .. math:: Py^{II} = \\frac{h}{\\rho\\,C_p\\,R\\,K} Parameters ---------- h : float Convective heat transfer coefficient [W/m²K] kr : float Rate constant [1/s] rho : float Density of the biomass particle [kg/m³] cp : float Heat capacity of the biomass particle [J/kgK] r : float Radius or characteristic length of the biomass particle [m] Returns ------- pyII : float Pyrolysis number Py II [-] Example ------- >>> pyroII(h=862.6129, kr=1.38556, rho=540, cp=3092.871049, r=0.0001847) 2.018038 References ---------- D.L. Pyle and C.A. Zaror. Heat Transfer and Kinetics in the Low Temperature Pyrolysis of Solids. Chemical Engineering Science, vol. 39, no. 1, pg. 147-158, 1984. """ pyII = h / (kr * rho * cp * r) return pyII
50275cfe5e328d1ecb7be8dcb6246938abb45b5c
51,713
def soma_dobro(a, b):
    """Return the sum of two integers, doubled when the two are equal.

    soma_dobro(1, 2) -> 3
    soma_dobro(2, 2) -> 8
    """
    total = a + b
    return total * 2 if a == b else total
8920154364f85c688cdcde884a53ecc3f5ceff58
51,715
from typing import Tuple


def text_color(bg_color: Tuple[int, int, int]) -> Tuple[int, int, int]:
    """Pick black or white text for legibility on a background color.

    :param bg_color: The (r, g, b) color of the background.
    :return: The text color as an (r, g, b) tuple.
    """
    red, green, blue = bg_color
    # Perceived luminance weighting; bright backgrounds get black text.
    luminance = red * 0.299 + green * 0.587 + blue * 0.114
    return (0, 0, 0) if luminance > 186 else (255, 255, 255)
9fa849dd0928d846522f80e9a3d8afc2e961bafd
51,717
def unf_heat_capacity_oil_Wes_Wright_JkgC(gamma_oil, t_c):
    """Oil heat capacity in SI units via the Wes Wright correlation.

    :param gamma_oil: specific oil density (relative to water)
    :param t_c: temperature in °C
    :return: heat capacity [J/kg°C]

    ref1 https://www.petroskills.com/blog/entry/crude-oil-and-changing-temperature#.XQkEnogzaM8
    """
    # Terms kept in the original operation order so float results are
    # bit-identical to the published correlation as first coded.
    density_term = (2 * 10 ** (-3) * t_c - 1.429) * gamma_oil
    temperature_term = (2.67 * 10 ** (-3)) * t_c
    return (density_term + temperature_term + 3.049) * 1000
c74a2351e15353e4a556bf1754e50c3bf5e25699
51,721
from typing import Dict
from typing import List


def split_dict_to_subdicts(dct: Dict, prefixes: List, extra_key: str) -> Dict:
    """Split a dict into per-prefix sub-dicts.

    Keys that start with one of ``prefixes`` go into that prefix's sub-dict
    with the ``{prefix}_`` marker removed; all remaining keys are collected
    under ``extra_key`` (only when at least one such key exists).

    Examples:
        >>> dct = {"train_v1": 1, "train_v2": 2, "not_train": 3}
        >>> split_dict_to_subdicts(dct, prefixes=["train"], extra_key="_extra")
        >>> {"train": {"v1": 1, "v2": 2}, "_extra": {"not_train": 3}}

    Args:
        dct: dictionary with prefixed keys
        prefixes: prefixes of interest
        extra_key: key under which everything else is stored

    Returns:
        dictionary of sub-dictionaries
    """
    grouped = {}
    leftovers = {
        key: val
        for key, val in dct.items()
        if not any(key.startswith(prefix) for prefix in prefixes)
    }
    if leftovers:
        grouped[extra_key] = leftovers
    for prefix in prefixes:
        grouped[prefix] = {
            key.replace(f"{prefix}_", ""): val
            for key, val in dct.items()
            if key.startswith(prefix)
        }
    return grouped
4f46086834cc9d021e88f2ddaaaf5fb21fa0a3d4
51,725
def _get_line_number(vcf):
    """Return the number of lines in a vcf file, minus one.

    NOTE(review): the counter starts at -1, so the result is the line count
    minus 1 — presumably to exclude a header line of the VCF; confirm with
    callers before "fixing". Returns -1 for an empty file.

    :param vcf: path to the VCF file to count.
    """
    with open(vcf) as vcf_input_file:
        i = -1
        for line in vcf_input_file:
            i += 1
        return i
61904a6274e2b7344bff151fa3e571bc3bac7418
51,727
def most_frequent_kmer(seq, k): """ Function to return the k-mer that is most frequently occuring in the sequence Args: seq: the genetic sequence k : length of k-mer Return: k-mers that occur the most times in the sequence """ kmer_count = {} for i in range(len(seq) - k + 1): kmer = seq[i:i + k] if kmer in kmer_count.keys(): kmer_count[kmer] += 1 else: kmer_count[kmer] = 1 max_value = max(kmer_count.items(), key=lambda x: x[1])[1] list_of_kmers = [] for key, value in kmer_count.items(): if value == max_value: list_of_kmers.append(key) return list_of_kmers
58f9b2e42c9970469a4c6e8ff218264a09c7cf85
51,729
def make_callable(funcname):
    """Resolve a dotted name into a callable object.

    Automatically imports the required module; a bare name (no module path)
    is looked up among the builtins.

    :param funcname: name of the callable.
    :type funcname: str.
    :returns:
    :rtype: callable.

    Examples
    --------
    Loading a function from a library:

    >>> func = make_callable('itertools.chain')
    >>> list(func(range(3), range(4)))
    [0, 1, 2, 0, 1, 2, 3]

    Loading a builtin:

    >>> func = make_callable('map')
    >>> list(func(lambda x: x + 1, range(4)))
    [1, 2, 3, 4]
    """
    module_name, _, object_name = funcname.rpartition('.')
    if not module_name:
        module_name = 'builtins'
    module = __import__(module_name, fromlist=['*'])
    try:
        return getattr(module, object_name)
    except AttributeError:
        raise AttributeError('module {} has no attribute {}'.format(
            module.__name__, object_name,
        ))
68bd89d16985953c8dc0d668b0ec0bf0a2acc7e0
51,735
def ensure_overlap(lh, mor, expr):
    """Re-index likelihood, mode-of-regulation and expression frames so their
    labels agree.

    Args:
        lh (pandas DataFrame): likelihoods for regulators
        mor (pandas DataFrame): mode of regulation for transcription factors
        expr (pandas DataFrame): expression of shape [n_feats, n_samps]

    Returns:
        (expression, mor, lh): the three frames re-indexed so that the
        expression rows follow mor's columns, and mor/lh columns follow the
        expression index.
    """
    # Expression rows are aligned to the regulon targets (mor columns) first;
    # the weight frames are then aligned back to that row order.
    expression = expr.reindex(mor.columns)
    mor_aligned = mor.reindex(expression.index, axis=1)
    lh_aligned = lh.reindex(expression.index, axis=1)
    return expression, mor_aligned, lh_aligned
716e577a6748199ad9ef95e5b2fecacca0b24cf3
51,737
def isColorImage(np_image):
    """Return True if the image is colored (3-dimensional with 3 channels),
    False otherwise."""
    return len(np_image.shape) == 3 and np_image.shape[2] == 3
91d8749905727af1c02caa3a66484b31eb931a94
51,739
def run_episode(env, policy, **kwargs):
    """Run a single episode for up to T time steps.

    :param env: object
        Initialized OpenAI gym environment.
    :param policy: ndarray
        Policy followed by the agent (action indexed by state).
    :param kwargs:
        :T int default 1000 — maximum number of time steps
        :render boolean default False — display the game play or not
        :discount float default None — (gamma) discount factor; when falsy,
            rewards are summed undiscounted
    :return:
        total_reward: Total (possibly discounted) reward for this episode
    """
    T = kwargs.get('T', 1000)
    render = kwargs.get('render', False)
    discount = kwargs.get('discount', None)

    total_reward = 0
    state = env.reset()
    for t in range(T):
        if render:
            env.render()
        state, reward, done, _ = env.step(policy[state])
        # Truthiness check deliberately treats discount=0 as "no discount",
        # matching the original conditional expression.
        if discount:
            total_reward += discount ** t * reward
        else:
            total_reward += reward
        if done:
            break
    return total_reward
6d8b35a58597c4ca7b881136a6773cd34e43bff6
51,747
def _NormalizeResourceFormat(resource_format): """Translate Resource Format from gcloud values to config-connector values.""" if resource_format == 'terraform': return 'hcl' return resource_format
463b98ec9cbb8f41944d9a5217838bcef2f7bb0b
51,749
def index_restrict(i, arr):
    """Clamp index ``i`` into the valid range for ``arr``: 0 <= i < len(arr).

    :param i: candidate index
    :param arr: the sequence the index must address
    :return: the nearest valid index. For an empty ``arr`` there is no valid
        index; the result is meaningless in that case.
    """
    # Equivalent to the original if/elif clamping, as a single expression.
    return min(max(i, 0), len(arr) - 1)
a589d70f873079918d57107d60258d25601b4b0f
51,760
from typing import List


def helper_create_jwt(jwt_manager, roles: List[str] = None, username: str = 'test-user'):
    """Create a jwt bearer token with the correct keys, roles and username.

    Args:
        jwt_manager: object exposing ``create_jwt(claims, token_header)``.
        roles: realm roles to embed; defaults to none. (The original used a
            mutable ``[]`` default — replaced with the None sentinel.)
        username: value for the ``username`` claim.

    Returns:
        The token produced by ``jwt_manager.create_jwt``.
    """
    if roles is None:
        roles = []
    token_header = {
        'alg': 'RS256',
        'typ': 'JWT',
        'kid': 'flask-jwt-oidc-test-client'
    }
    claims = {
        'iss': 'https://example.localdomain/auth/realms/example',
        'sub': '43e6a245-0bf7-4ccf-9bd0-e7fb85fd18cc',
        'aud': 'example',
        'exp': 2539722391,
        'iat': 1539718791,
        'jti': 'flask-jwt-oidc-test-support',
        'typ': 'Bearer',
        'username': f'{username}',
        'realm_access': {
            # Copy so callers' role lists are never aliased into the claims.
            'roles': [] + roles
        }
    }
    return jwt_manager.create_jwt(claims, token_header)
6057b0357ca03058b4af5998b82ba1b1960d003a
51,762
def inverse_image(img):
    """Invert a thresholded image in place.

    Pixels brighter than mid-gray (> 127.5) become 0 (black); all others
    become 255 (white). The array is modified in place and also returned.

    :param img: numpy array of pixel intensities.
    :return: the same array, inverted.
    """
    # Vectorized boolean masking replaces the original per-pixel Python
    # loops; the mask is computed before either assignment so the two
    # writes cannot interfere.
    dark = img > 255 / 2.0
    img[dark] = 0
    img[~dark] = 255
    return img
ee1b7f13213d0baa0fcff82faab917f0353268e2
51,764
def binary_recursive(decimal: int) -> str:
    """Take a positive integer value and return its binary equivalent.
    >>> binary_recursive(1000)
    '1111101000'
    >>> binary_recursive("72")
    '1001000'
    >>> binary_recursive("number")
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: 'number'
    """
    value = int(decimal)
    # Base cases: a single bit stands for itself.
    if value == 0 or value == 1:
        return str(value)
    quotient, remainder = divmod(value, 2)
    return binary_recursive(quotient) + str(remainder)
7231dd529e1e308e83654a0d57cf0cb2113f4f2b
51,765
import pickle


def safe_unpickle(string):
    """Unpickle the string, or return ``None`` if that fails.

    :param string: bytes produced by ``pickle.dumps`` (possibly corrupt).
    :return: the unpickled object, or None on any decoding error.
    """
    try:
        return pickle.loads(string)
    # Malformed input raises a variety of errors (UnpicklingError, EOFError,
    # TypeError, ...); Exception covers them without swallowing
    # KeyboardInterrupt/SystemExit like the original bare except did.
    except Exception:
        return None
d48fab5b94f63d3d1eed98390130bdb69c20fd35
51,766
def trim_masic_suffix(container_name):
    """Trim any trailing multi-ASIC numeric suffix, i.e. swss0 -> swss.

    :param container_name: container name possibly ending in digits.
    :return: the name with all trailing decimal digits removed.
    """
    # str.rstrip with a digit set replaces the original manual pop loop.
    return container_name.rstrip("0123456789")
124f530bde6d35d518f441f3e2145210122ea959
51,771
from datetime import datetime


def timestamp_now() -> str:
    """Return the current local time as a filesystem-safe string.

    Colons and the fractional-second dot are replaced so the result can be
    embedded in file names on any platform.
    """
    stamp = datetime.now().isoformat(timespec="milliseconds")
    return stamp.replace(":", "-").replace(".", "_")
95982b5b36efffc7ab1ea5db0138359dc05bc54a
51,773
def _letter_map(word): """Creates a map of letter use in a word. Args: word: a string to create a letter map from Returns: a dictionary of {letter: integer count of letter in word} """ lmap = {} for letter in word: try: lmap[letter] += 1 except KeyError: lmap[letter] = 1 return lmap
7c5fd7221182f825dd51a71b594dbf0297ef6843
51,774
import json


def read_hyperparameters(hyperparameter_json):
    """Read a json file and return the hyperparameters as a dict.

    Args:
        hyperparameter_json (str): path to the json file containing the
            hyperparameters of the trained model.

    Returns:
        dict: Python dictionary of the hyperparameters.
    """
    with open(hyperparameter_json) as handle:
        hyperparameters = json.load(handle)
    return hyperparameters
afe83afb0dd26f46b2a90957fbd9c6ca69233ddc
51,775
import hashlib
import base64


def get_credential_from_wifi_password(wifi_password: str) -> str:
    """Derive the MQTT credential from a WiFi password.

    The credential is the base64 encoding of the SHA-512 digest of the
    UTF-8 encoded password.
    """
    digest = hashlib.sha512(wifi_password.encode("utf-8")).digest()
    return base64.b64encode(digest).decode("utf-8")
0e7a440c5a335b29c2e2d8affacc3b660bf28c09
51,778
from typing import Tuple


def compute_character_attack_speed(
        frames_per_animation: int,
        base_animation_length: int,
        speed_coefficient: float = 1.0,
        engine_tick_rate: int = 60) -> Tuple[float, float]:
    """Compute the attack-speed window for a frames-per-animation breakpoint.

    Args:
        frames_per_animation: breakpoint to calculate attack speed values for
        base_animation_length: animation length of ability
        speed_coefficient: speed-up scalar of ability
        engine_tick_rate: server tick rate

    Returns:
        Tuple[float, float]: min attack speed to reach the breakpoint and
        max attack speed before leaving it.
    """
    # Shared scaling factor between both bounds of the window.
    scale = (base_animation_length - 1) * engine_tick_rate / (speed_coefficient * base_animation_length)
    min_aps = scale / frames_per_animation
    max_aps = scale / (frames_per_animation - 1)
    return min_aps, max_aps
ab1ca81bd89ca5f4ff0e2068a4df51e09140274a
51,784
import time


def DDHHMMSS_format(seconds):
    """Convert seconds into "#days HH:MM:SS" format.

    Args:
        seconds (int): number of seconds to convert.

    Returns:
        str: "HH:MM:SS" for durations under a day, otherwise
        "N Day(s) HH:MM:SS".
    """
    seconds = int(seconds)
    if seconds < 86400:
        return time.strftime("%H:%M:%S", time.gmtime(seconds))
    # Floor division: true division produced a float day count such as
    # "1.0416666666666667 Days" and broke the singular/plural check.
    num_days = seconds // 86400
    return "{} {} {}".format(
        num_days, 'Day' if num_days == 1 else 'Days',
        time.strftime(
            "%H:%M:%S", time.gmtime(seconds % 86400)))
7b689e72a54c081a969597df39a8fbba467ac965
51,791
def find_pivot(matrix, col: int) -> int:
    """Given the matrix and a column index, find the row that should be
    swapped with the current pivot row (partial pivoting).

    Returns the index of the row at or below ``col`` whose entry in that
    column has the largest absolute value; ties go to the topmost such row.
    """
    candidate_rows = range(col, len(matrix))
    # max() returns the first maximal element, matching list.index semantics.
    return max(candidate_rows, key=lambda row: abs(matrix[row][col]))
37d7e68ccc36fcd3f653ad289616875fd08f8c48
51,795
from typing import Iterable
from typing import Any
from typing import List


def transpose(m: Iterable[Iterable[Any]]) -> List[List[Any]]:
    """Transpose the first 2 dimensions of an iterable with at least 2
    dimensions.

    NOTE: only works when sublists are of equal length (zip truncates to the
    shortest row otherwise).
    """
    return list(map(list, zip(*m)))
01a2b294c74cad9335f3fe0981832c7f3fb9da04
51,801
def get_races_from_message(message):
    """Parse a comma-separated race list from a Slack message.

    The message's "kwargs" dict may carry a "race" entry; its value is
    lower-cased, stripped of spaces, and split on commas. Missing keys
    yield [""].
    """
    kwargs = message.get("kwargs", {"race": ""})
    race_csv = kwargs.get("race", "").lower().replace(" ", "")
    return race_csv.split(",")
c4912c87766cd8c80e696d3f27914aec64fc5397
51,803
def _get_folder_info(folder): """Get names and descriptions for all parent folders except top level. """ folder_info = "" if folder and folder.parent: folder_info = _get_folder_info(folder.parent) folder_info += " %s %s" % ( folder.name.replace("Unnamed folder", ""), folder.description or "") return folder_info
a1cfd82b50ab273466adf26538c5861528e7eaee
51,805
def textToList(data):
    """Split newline-delimited csv text into a list of line strings."""
    return data.split('\n')
06bbd03b13190009ecfb1f7202d6e78836674eb1
51,811
def parse_new_patient(in_data):
    """Coerce the patient id and age fields from string to integer.

    :param in_data: dict expected to contain "patient_id" and
        "patient_age" keys.
    :return: the dict with both fields converted to int, or an error
        message string describing the first problem found.
    """
    # Exact type check (not isinstance) to mirror strict validation.
    if type(in_data) is not dict:
        return "No dictionary exists"
    if "patient_id" not in in_data:
        return "No such key: patient_id"
    try:
        in_data["patient_id"] = int(in_data["patient_id"])
    except ValueError:
        return "Patient ID is not a number or can't convert to integer"
    if "patient_age" not in in_data:
        return "No such key: patient_age"
    try:
        in_data["patient_age"] = int(in_data["patient_age"])
    except ValueError:
        return "Patient age is not a number or can't convert to integer"
    return in_data
1480aa4e9d08f22e1b84fc6a38b0e5522adddb62
51,817
def transitions(trj, nsteps=1, lag_time=1, separator='0'):
    """Return the temporal list of transitions observed in ``trj``.

    Parameters
    ----------
    trj : the symbolic trajectory.
    nsteps : number of steps per transition tuple.
    lag_time : step length.
    separator : special symbol marking sub-trajectory boundaries;
        windows containing it are skipped.
    """
    window = nsteps * lag_time
    found = []
    for start in range(len(trj) - window):
        segment = trj[start:start + window + 1]
        # A separator inside the window means the window straddles two
        # sub-trajectories, so no transition is recorded.
        if separator in segment:
            continue
        found.append(tuple(segment[::lag_time]))
    return found
3b98db1f79748fc0b00667f5235549ba2a129371
51,819
def groupms_byenergy(microstates, ticks):
    """Divide microstates into len(ticks) energy bands.

    Each value in ``ticks`` is the lower boundary of a band: after
    sorting the boundaries ascending, band i collects the microstates
    with bounds[i] <= ms.E < bounds[i+1].  Microstates below the lowest
    boundary are dropped.

    :param microstates: iterable of objects exposing an ``E`` attribute.
    :param ticks: list of band lower boundaries; NOT modified (the
        previous version sorted it in place and appended a sentinel,
        mutating the caller's list).
    :return: list of N lists of microstates, one per band.
    """
    bounds = sorted(ticks)          # work on a copy, leave caller's list intact
    bounds.append(1.0e100)          # sentinel upper boundary for the last band
    n_bands = len(bounds) - 1
    resulted_bands = [[] for _ in range(n_bands)]
    for ms in microstates:
        for iband in range(n_bands):
            if bounds[iband] <= ms.E < bounds[iband + 1]:
                resulted_bands[iband].append(ms)
                break
    return resulted_bands
39c84e02f36946d2cff77b2ecb66794205be28a7
51,824
def isIterable(usrData):
    """
    Returns True if the object can be iter'd over and is NOT a string

    >>> common.isIterable([5, 10])
    True
    >>> common.isIterable('sharp')
    False
    >>> common.isIterable((None, None))
    True
    >>> common.isIterable(stream.Stream())
    True
    """
    # str and bytes define __iter__ but are deliberately excluded.
    if isinstance(usrData, (str, bytes)):
        return False
    # On Python 3 the former six.PY3 branch was always taken, so the
    # Python 2 compatibility shim (six) is no longer needed.
    return hasattr(usrData, "__iter__")
a5616da053af4c55daeb8d934efeff7cb4b7ebf3
51,825
def pixel_color_checker(data, row_index, pixel_index, R, G, B):
    """Check whether a pixel has exactly the given RGB color.

    :param data: image data as an array of rows of RGB pixels
    :param row_index: index of the image row to check
    :param pixel_index: index of the pixel within the row
    :param R: expected red value (0-255)
    :param G: expected green value (0-255)
    :param B: expected blue value (0-255)
    :return: True iff the pixel matches (R, G, B) exactly
    """
    pixel = data[row_index][pixel_index]
    return pixel[0] == R and pixel[1] == G and pixel[2] == B
c188eca743b67dcf27456b3d7375507f6007f816
51,826
def is_error(status):
    """Determine if the response has an error status

    :param status: HTTP Status string to inspect (e.g. "404 Not Found")
    :return: True if the status code is 400 or greater, otherwise False
    """
    # The numeric code is everything before the first space.
    code, _, _ = status.partition(' ')
    return int(code) >= 400
2823925d4ddba01d0fb74bcec9de0da3eb22ac84
51,827
def intify(obj):
    """
    Recursively replace every float that is numerically an integer with
    the corresponding ``int``, descending through lists and dicts.

    Args:
        obj: a recursive composition of primitive types, lists and dicts.

    Returns:
        The converted equivalent of ``obj``.
    """
    if isinstance(obj, dict):
        return {key: intify(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [intify(item) for item in obj]
    if isinstance(obj, float) and obj.is_integer():
        return int(obj)
    return obj
c91ef3c6d6c06eb9ccfb5a2ebd9ecb409be56cc8
51,829
import torch


def one_hot_embedding(labels, num_classes):
    """Embed class labels into one-hot vectors.

    Args:
        labels: (LongTensor) class labels, sized [N,].
        num_classes: (int) number of classes.

    Returns:
        (tensor) one-hot encoded labels, sized [N, num_classes].
    """
    # Indexing the identity matrix with the label tensor selects one
    # one-hot row per label.
    identity = torch.eye(num_classes)
    return identity[labels]
9a3ed1d8b11d1b1f98ed8e354e879ee70a6e84e3
51,834
def get_dlp_results_sql(project_id, dataset_id, table_id, min_count=0) -> str: """ Generate sql to query the DLP results table: https://cloud.google.com/dlp/docs/querying-findings and counts the number of finding in each field,info-type, likelihood grouping :param project_id: Project Id :param dataset_id: Dataset :param table_id: Table :return:a sql query that generates a result set with the following columns [field_name, info_type_name, likelihood, count_total] """ return """SELECT locations.record_location.field_id.name AS field_name, info_type.name as info_type_name, likelihood as likelihood, COUNT(*) AS count_total FROM {}.{}.{}, UNNEST(location.content_locations) AS locations GROUP BY locations.record_location.field_id.name, info_type.name, likelihood HAVING count_total > {} """.format( project_id, dataset_id, table_id, min_count )
1590f366e2e2264e5ca14a9cdda395dc57ad9bbc
51,835
def to_coord_system(vertices, width, height):
    """Map 3D face vertices into the 2D image coordinate system.

    Parameters:
        vertices (ndarray): the 3D face vertices, shape (N, 3).
        width (int): the rendering width.
        height (int): the rendering height.

    Returns:
        ndarray: a new vertex array in image coordinates (input is not
        modified).
    """
    shifted = vertices.copy()
    # Recenter x/y on the image midpoint.
    shifted[:, 0] = shifted[:, 0] + width / 2
    shifted[:, 1] = shifted[:, 1] + height / 2
    # Flip y: image rows grow downward.
    shifted[:, 1] = height - shifted[:, 1] - 1
    return shifted
0e87d114c3af4d44f50b8b5b30c0e6912d208a1c
51,838
def solution(x, y):
    """
    The number in the downhill diagonal through (x, y) is the
    triangular number of x+y-1; subtracting y-1 from it lands on the
    requested cell.  The n-th triangular number is n(n+1)/2.
    """
    diagonal = x + y
    triangular = diagonal * (diagonal - 1) // 2
    return str(triangular - (y - 1))
5134c832ec77d84bc4c75f3eaedb737a8c1a6bc7
51,840
def _increment_name(name):
    """Returns a name similar to the inputted name

    If the inputted name ends with a hyphen followed by a number, the
    outputted name has that number incremented.  Otherwise the
    outputted name has a hyphen and the number '1' appended to it.
    """
    index = name.rfind('-')
    suffix = name[index + 1:]
    # Only increment when the text after the last hyphen is numeric;
    # previously names like "foo-bar" raised ValueError from int().
    if index != -1 and suffix.isdigit():
        return name[:index + 1] + str(int(suffix) + 1)
    return name + '-1'
8dbf69290a97ba7a926a1ff5286a0731a1214866
51,846
def find_intent(intent_name, intent_defs):
    """
    Find an intent by name in a list of intent definitions.

    :param intent_name: the name of the intent to look for.
    :param intent_defs: a list of intent definitions.
    :return: the intent definition with matching name, or None.
    """
    return next(
        (candidate for candidate in intent_defs
         if candidate.name == intent_name),
        None,
    )
d90163df3830b8c743fb812636ffbdba9d8f6e2e
51,853
def always_false(*args):
    """Predicate that ignores its arguments and always evaluates to
    False.

    Parameters
    ----------
    args: any
        Variable list of arguments (ignored).

    Returns
    -------
    bool
        Always ``False``.
    """
    return False
3e8dc2dfd585360aedf5ee7716eb9d0b67145cb2
51,854
def _get_scale_var_name(var_name):
    """Return the name of the scale variable paired with ``var_name``."""
    return "{}.scale".format(var_name)
cc802fc1c4e5ee46fde60aa7335e24be0d782b0f
51,858
def GetProQ3Option(query_para):
    """Build the proq3 command-line option list from the query
    parameters dict."""
    def flag(key):
        # Map a truthy parameter (absent counts as false) to "yes"/"no".
        return "yes" if query_para.get(key) else "no"

    proq3opt = [
        "-r", flag('isRepack'),
        "-deep", flag('isDeepLearning'),
        "-k", flag('isKeepFiles'),
        "-quality", query_para['method_quality'],
        "-output_pdbs", "yes",  # always output PDB file (with proq3 written at the B-factor column)
    ]
    if 'targetlength' in query_para:
        proq3opt.extend(["-t", str(query_para['targetlength'])])
    return proq3opt
059b0ba5ccda7955000af9b0e3c08dcd40c1b2e3
51,859
def test_keys(result):
    """Return only the test keys section of a specific result."""
    return result["test_keys"]
890e4e65590a1e83b0b0caaeed998f7cb9a34ee4
51,863
def sameday(dt1, dt2):
    """Do two datetimes reference the same calendar day? (times are
    ignored)"""
    return (dt1.year, dt1.month, dt1.day) == (dt2.year, dt2.month, dt2.day)
3e9bf767b03615e5af9810fe8dc9bd7297c714d3
51,866
def is_int_as_str(x):
    """
    Test if string x is a non-negative decimal integer.  Objects
    without an ``isdecimal`` method (i.e. non-strings) return False.
    """
    checker = getattr(x, "isdecimal", None)
    return checker() if checker is not None else False
72549b5767fa51d52d6bab9631b5d6ad6ede1d11
51,868
def are_n(n):
    """A bit of grammar.  Return the appropriate singular or plural
    present-indicative form of 'to be' together with n:
    'are no', 'is 1', or 'are <n>'."""
    forms = ['are no', 'is 1', f'are {n}']
    return forms[n] if n < 2 else forms[2]
d3e3ba29b96912c34f9adae6220ec3ede68a43cd
51,873
def _should_return_json(request):
    """
    Should the given request result in a JSON entity-body?
    """
    json_flag = request.args.get(b"json")
    return bool(json_flag)
45c0828472531fa42ad1247f0ae1cac3173b0a08
51,874
def bias_term(in_table, n_range):
    """Compute the bias term subtracted off the cross spectrum when
    computing the covariance spectrum.

    Equation in footnote 4 (section 2.1.3, page 12) of Uttley et al.
    2014.  Assumes the power spectra in ``in_table`` are raw: neither
    normalized nor Poisson-noise-subtracted.

    Parameters
    ----------
    in_table : table-like
        Must provide 'POWER_CI' and 'POWER_REF' columns and the meta
        entries 'RATE_CI', 'RATE_REF', 'DT' and 'SEGMENTS'.
    n_range : int or np.array of int
        Number of (old) frequency bins averaged per new bin; same as K
        in Section 2 of Uttley et al. 2014.

    Returns
    -------
    float or np.array of float
        The bias term n^2.
    """
    meta = in_table.meta
    # Poisson noise levels in absolute rms units.
    noise_ref = 2.0 * meta['RATE_REF']
    noise_ci = 2.0 * meta['RATE_CI']
    # Normalize the raw powers to absolute rms units (noise is NOT
    # subtracted here; it cancels inside the bias formula).
    scale = 2.0 * meta['DT'] / n_range
    abs_ci = in_table['POWER_CI'] * scale
    abs_ref = in_table['POWER_REF'] * scale
    numerator = ((abs_ref - noise_ref) * noise_ci
                 + (abs_ci - noise_ci) * noise_ref
                 + noise_ref * noise_ci)
    return numerator / (n_range * meta['SEGMENTS'])
99472728853b8900ac65b9df048bf4132d902a1f
51,875
def read_write(sin, sout, l=None, chunk_size=32) -> int:
    """Copy up to ``l`` bytes (or everything if ``l`` is None) from
    ``sin`` to ``sout``.

    :param sin: readable binary stream (must support read/readinto).
    :param sout: writable binary stream.
    :param l: maximum number of bytes to copy; None means copy to EOF.
    :param chunk_size: size of the intermediate buffer.
    :return: the number of bytes written.
    """
    written = 0
    buf = bytearray(chunk_size)
    while True:
        # Stop once the requested byte count is reached.  The previous
        # version tested truthiness (``if l``), so when ``l`` reached
        # exactly 0 after whole chunks (l a multiple of chunk_size, or
        # l == 0) it kept copying until EOF, overrunning the limit.
        if l is not None and l <= 0:
            return written
        if l is not None and l < chunk_size:
            part = sin.read(l)
            if not part:
                return written
            sout.write(part)
            written += len(part)
            l -= len(part)  # loop again in case of a short read
        else:
            n = sin.readinto(buf)
            if n == 0:
                return written
            written += n
            # Avoid copying the buffer when it is completely full.
            sout.write(buf if n == chunk_size else buf[:n])
            if l is not None:
                l -= n
428b54dc640822436aef944b4cf4562bc12be55a
51,876
from typing import Dict
from typing import Any


def pop_item_from_dict(d: Dict[str, Any], key: str, default: Any = None,
                       must_exists: bool = False):
    """
    Pop ``key`` from dict ``d`` if present, otherwise return ``default``.

    Args:
        d: (``Dict``) Dictionary the key is removed from.
        key: (``str``) Key name to remove from dictionary d.
        default: (``Any``) Value returned when the key is not found.
        must_exists: (``bool``) When True, raise if the key is missing.

    Returns:
        Popped value for key if found, ``default`` otherwise.
    """
    if must_exists and key not in d:
        raise KeyError(f"'{key}' not found in '{d}'.")
    # dict.pop's second argument covers the missing-key case directly.
    return d.pop(key, default)
bc72d49a191b41ee413cea6740890c4dc8442abf
51,879
def fsign(n, imply_pos=True):
    """
    Format SIGN prefix: '-' for negative n; otherwise '' when the
    positive sign is implied, or '+' when imply_pos is False.
    """
    if n < 0:
        return "-"
    return "" if imply_pos else "+"
1140100e3a606fc386488e06511636061274d4b3
51,886
def get_accuracy(loss_string, output, targets, batch_size, threshold=0.5):
    """
    Helper function to calculate the accuracy of a given batch

    :param loss_string: loss function currently used ("crossentropy"
        or "binarycrossentropy"); any other value raises ValueError
    :param output: the model prediction (torch tensor)
    :param targets: target output
    :param batch_size: batch size
    :param threshold: decision threshold for binary classification
    :return: the accuracy of current batch
    :raises ValueError: for an unsupported loss_string
    """
    if loss_string == "crossentropy":
        # Multi-class case: predicted class = argmax over dim 1
        # (assumes output is (batch, num_classes) — TODO confirm).
        max_index = output.max(dim=1)[1]
        correct = (max_index == targets).sum()
        accuracy = int(correct.data) / len(targets)
    elif loss_string == "binarycrossentropy":
        # Binary case: threshold the raw outputs into 0/1 predictions.
        binary_output = output > threshold
        # NOTE(review): the trailing [0] keeps only the first row of the
        # comparison array before summing — presumably output/targets are
        # shaped (1, batch) here; verify against the caller.
        correct = sum((binary_output.float() == targets).data.cpu().numpy()[0])
        accuracy = correct / batch_size
    else:
        raise ValueError("Accuracy metrics not supported to current network")
    return accuracy
93222a23f9275f0631572fe916c9b7baaabc21c8
51,888
def fatorial_3(number):
    """Compute number! (factorial) recursively."""
    # Base case 0! == 1; otherwise recurse on number - 1.
    return 1 if number == 0 else number * fatorial_3(number - 1)
db31419f00806093d2f5b99c0f5d6e08496ed56f
51,890
def error_func(x, indices, weights, target):
    """Total weighted squared error of positions ``x`` against the
    target offsets to each neighbour.

    For each neighbour column j of ``indices``, the residual
    x - x[indices[:, j]] - target[:, j] is squared, weighted and
    averaged; the per-neighbour means are summed.
    """
    total = 0.
    n_neighbors = indices.shape[1]
    for j in range(n_neighbors):
        residual = x - x[indices[:, j]] - target[:, j]
        total += (weights[:, j] * residual ** 2).mean()
    return total
6f180a90e69216c838113ab37285ff162ed8da45
51,891
import re


def split_text_in_lines(text, max_len, prefix="", min_indent=None):
    """
    Greedily wrap `text` into lines no longer than `max_len`, putting
    `prefix` on the first line and indenting continuation lines with
    the same width (widened to `min_indent` when given).
    """
    collapsed = re.sub(r"\s+", " ", text)
    indent = " " * len(prefix)
    if min_indent is not None:
        if len(indent) < len(min_indent):
            indent = min_indent
        if len(prefix) < len(min_indent):
            # Left-pad the prefix with spaces up to the minimum indent.
            prefix = prefix.rjust(len(min_indent))
    words = collapsed.split(" ")
    lines = []
    current = prefix + words[0]
    for word in words[1:]:
        # +1 accounts for the joining space.
        if len(current) + 1 + len(word) > max_len:
            lines.append(current)
            current = indent + word
        else:
            current = current + " " + word
    lines.append(current)
    return "\n".join(lines)
e8fe7e08d33d8a98347a183076f73d333414f37a
51,894
def non_basemap_layers(layers):
    """Retrieve all map layers which are not basemaps"""
    return list(filter(lambda layer: not layer.is_basemap, layers))
8a8bab454d97e8fe686be874c56421d2013a039f
51,901
def get_json(response):
    """
    Retrieve the 'JSON' body of a response, whether the framework
    exposes it as a plain dict attribute or as a callable.
    """
    body = response.json
    if isinstance(body, dict):
        return body
    return body()
fda80c7100cb442f177ba18bfedbe0111161d846
51,903
import xxhash


def reduce_thread_id(thread_id: int) -> str:
    """Make a shorter thread identifier by hashing the original."""
    raw = thread_id.to_bytes(8, "little")
    return xxhash.xxh32(raw).hexdigest()[:4]
66bb58a5df1e93e38fe5c8ad18986eaab4117a13
51,904