content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Any
from unittest.mock import patch


def patch_metrics(metrics: dict[str, Any]):
    """Return a patcher that makes Vallox.fetch_metrics yield *metrics*."""
    target = "homeassistant.components.vallox.Vallox.fetch_metrics"
    return patch(target, return_value=metrics)
a0502413a160dddcd0c7a2bead193b8b9aa61edb
21,417
import struct


def read_uint32(fp, pos):
    """Read 4 little-endian bytes at *pos* as an unsigned 32-bit integer.

    Returns (value, pos + 4).
    """
    fp.seek(pos)
    (value,) = struct.unpack("<I", fp.read(4))
    return value, pos + 4
2b2f4fa99bc480aae5eb8bc8602f1c11aa775031
21,418
def nice_fn_b(x):
    """Fifth-order Taylor polynomial of sin(x) about x = 0.

    Being a plain polynomial, it is well behaved everywhere.
    """
    cubic = (x ** 3) / 6.
    quintic = (x ** 5) / 120.
    return x - cubic + quintic
995f85d0ceeb1abf1c2906f3b555447f6d80fd5b
21,420
import collections


def process_types(mrsty_file):
    """Read the UMLS semantic types file MRSTY.2019.RRF.

    Lines are pipe-delimited: CUI|TUI|STN|STY|ATUI|CVF, e.g.
    'C0000005|T116|A1.4.1.2.1.7|Amino Acid, Peptide, or Protein|AT17648347|256|'
    See https://www.ncbi.nlm.nih.gov/books/NBK9685/ for column details.

    Returns a defaultdict mapping CUI -> set of semantic-type names
    (column 4); rows with an empty semantic type are skipped.
    """
    cui_to_entity_types = collections.defaultdict(set)
    with open(mrsty_file) as rf:
        for raw in rf:
            raw = raw.strip()
            if not raw:
                continue
            fields = raw.split("|")
            entity_id, entity_type = fields[0], fields[3].strip()
            # considering entities with entity types only
            if entity_type:
                cui_to_entity_types[entity_id].add(entity_type)
    return cui_to_entity_types
b20b26f5015abb2ee5ce718eaa83895d4cb4f5e2
21,422
import torch


def Quaternion2EulerAngles(quat):
    """
    Given a unit quaternion, returns the rotations along (x,y,z) axis.
    Angles in radian !
    """
    # quat appears to be indexed [w, x, y, z] (scalar-first) judging by the
    # formulas below -- TODO confirm against callers.
    # roll (rotation about x): atan2(2(wx + yz), w^2 - x^2 - y^2 + z^2)
    roll_num = 2*(quat[0]*quat[1]+quat[2]*quat[3])
    roll_denom = quat[0]*quat[0] - quat[1]*quat[1] - quat[2]*quat[2] + quat[3]*quat[3]
    roll = torch.atan2(roll_num,roll_denom)
    # pitch (rotation about y): asin(2(wy - zx)); asin's domain is [-1, 1]
    val = 2*(quat[0]*quat[2]-quat[3]*quat[1])
    if abs(val)>=1:
        # NOTE(review): returns 0 in the gimbal-lock region; the commented
        # copysign(pi/2, val) would be the conventional value -- confirm.
        pitch = torch.tensor(0) #copysign(np.pi/2,val) #90 degree if out of range
    else:
        pitch = torch.asin(val)
    # yaw (rotation about z): atan2(2(wz + xy), w^2 + x^2 - y^2 - z^2)
    yaw_num = 2*(quat[0]*quat[3]+quat[1]*quat[2])
    yaw_denom = quat[0]*quat[0] + quat[1]*quat[1] - quat[2]*quat[2] - quat[3]*quat[3] # 1-2*(quat[2]*quat[2]+quat[3]*quat[3])
    #yaw_num = 2*(quat[0]*quat[1]-quat[2]*quat[3])
    #yaw_denom = 1-2*(quat[1]*quat[1]+quat[3]*quat[3])
    yaw = torch.atan2(yaw_num,yaw_denom)
    return roll,pitch,yaw
d178cba30ffb17e081a8392c8e9f891894d8e319
21,423
import math


def euclidean_distance_similarity(terms_in_query, terms_in_result, terms_weights):
    """sqrt of the sum of squared per-term coordinate differences.

    When weights are supplied, each squared difference is divided by the
    term's weight (0.1 when the weight is falsy) and capped at 1.
    """
    if not terms_weights:
        total = sum((terms_in_result[t] - terms_in_query[t]) ** 2
                    for t in terms_in_result)
        return math.sqrt(total)
    total = 0
    for t in terms_in_result:
        diff_sq = (terms_in_result[t] - terms_in_query[t]) ** 2
        total += min(1, diff_sq / (float(terms_weights[t]) or 0.1))
    return math.sqrt(total)
2df95cb881a04b1198fe064c4c016fd8023a40de
21,424
def week_of_year(date):
    """Week number of the year; weeks start on Mondays.

    Uses strftime's %W: days before the year's first Monday fall in week 00.

    :param date: a datetime object
    :return: zero-padded week number string
    """
    return format(date, "%W")
6de546b23e5cc717b3b0fc357da3af6b4fa1305b
21,425
def generate_jobdf_index(old_df, jobid_mask, keys, values):
    """Generate an index for jobdf.

    With a falsy *jobid_mask*, produce a numeric range continuing from the
    existing frame; otherwise %-format the mask with each row of *values*
    zipped against *keys*.
    """
    if jobid_mask:
        return [jobid_mask % dict(zip(keys, row)) for row in values]
    nold = 0 if old_df is None else len(old_df.index)
    start = max(nold - 1, 0)
    return range(start, start + values.shape[0])
35b0c5307b0e364d152d06b3cc716b3fe9a04fba
21,426
def is_valid_input(curr_input):
    """Validate a LUFS/peak value entered as a platform standard.

    The value must be negative ('-' prefix) and numeric: either an integer
    ("-14") or a decimal ("-14.5", "-.5").

    Args:
        curr_input: the LUFS or peak input a user is trying to include
            as a platform standard

    Returns:
        is_valid: boolean, whether the input is valid
        error_msg: the error message to report when invalid ("" otherwise)
    """
    error_msg = ""
    is_valid = True
    if curr_input[0] == '-':
        if not curr_input[1:].isnumeric():
            split_value = curr_input[1:].split(".")
            if len(split_value) != 2:
                error_msg = "Enter a numeric value"
                is_valid = False
            # BUG FIX: the fractional part was never checked -- the original
            # tested split_value[0].isnumeric() twice, so "-14.x" passed.
            elif (not (split_value[0].isnumeric() or split_value[0] == "")
                    or not split_value[1].isnumeric()):
                error_msg = "Enter a numeric value"
                is_valid = False
    else:
        error_msg = "Must enter a negative value"
        is_valid = False
    return is_valid, error_msg
ce8e57073cce2203cab2814e50bf28320acc6b8a
21,427
def append_hostname(machine_name, num_list):
    """Build a comma-separated hostlist from a cluster name and node numbers.

    :param machine_name: The name of the cluster.
    :param num_list: Node numbers to append to the cluster name.
    :return: A hostlist string such as "node1,node2".
    """
    return ','.join(f"{machine_name}{n}" for n in num_list)
d2ff25273a21682ec31febc1e63fbc2f562017c3
21,430
import os
import glob


def get_yml_files(searchdir: str):
    """Glob for gitlab ci yml files directly inside *searchdir*.

    Returns a list of matching file paths.
    """
    return glob.glob(os.path.join(searchdir, "*.yml"))
ca05e0caab55185d325af85d38d9360af1e7cb43
21,431
def objsize(obj):
    """Size of a deeply nested dict/list/set, counting each leaf as 1.

    Nesting is detected from the first element only, so containers are
    assumed to be homogeneous in depth.
    """
    assert isinstance(obj, (dict, list, set)), obj
    if not obj:
        return 0
    values = obj.values() if isinstance(obj, dict) else obj
    first = next(iter(values))
    if not isinstance(first, (dict, list, set)):
        return len(values)
    return sum(objsize(item) for item in values)
66c792c4799df530cded59c5a006ae251032a2b9
21,433
def reduceDictionary(data: dict):
    """Collapse a single-entry dict to its value.

    Eg. {"key": value} -> value; any other dict is returned unchanged.
    """
    if len(data) != 1:
        return data
    (only_value,) = data.values()
    return only_value
9165a12bf04601d903fb1b6caed120a193279e30
21,435
from typing import Dict
from typing import Any


def merge_dict(dict1: Dict[str, Any],
               dict2: Dict[str, Any]) -> Dict[str, Any]:
    """Merge two dictionaries into a third dictionary.

    Args:
        dict1: First dictionary to be merged.
        dict2: Second dictionary to be merged.

    Returns:
        A new dictionary containing the keys of both inputs; on clashes the
        value from *dict2* wins.
    """
    return {**dict1, **dict2}
baa64359ab740fcf9689305d091a75fb7c7a0cbc
21,436
def normalise_error_asymptotic(absolute_error: float, scaling_factor: float) -> float:
    """Map an error in [0, +inf] onto [0, 1).

    The result approaches 1 asymptotically as absolute_error -> +inf and
    equals 0.5 exactly when absolute_error == scaling_factor, which is the
    knob for magnitude scaling.

    Raises:
        ValueError: if absolute_error is negative.
    """
    if absolute_error < 0:
        raise ValueError(f'Error to be normalised must be non-negative '
                         f': {absolute_error}')
    ratio = absolute_error / scaling_factor
    return ratio / (ratio + 1)
a3a2a99390acf65b334fc7b2b1d0792c042582fd
21,439
def is_id(numbers):
    """Return True when *numbers* parses as an int (i.e. looks like an ID)."""
    try:
        int(numbers)
    except (ValueError, TypeError):
        return False
    return True
02a11abded4e69de176fd9974e5ee51ddeb90d6e
21,440
import time


def rate_limited(hz: float):
    """Decorator factory limiting calls to the wrapped function to *hz* per second.

    Calls arriving faster than the limit skip the real function and receive
    the most recently cached return value instead.

    Pasted from https://github.com/WarriorOfWire/circuitpython-utilities/blob/master/functional/rate_limited.py
    """
    def decorator_rate_limit(decorated_fn):
        # monotonic timestamp (ns) of the last scheduled invocation; 0 = never
        last_invocation = 0
        nanos_per_invocation = 1000000000 / hz
        # cache served to callers that arrive inside the rate-limit window
        rate_limited_value = None
        def rate_limited_fn(*args, **kwargs):
            nonlocal last_invocation
            nonlocal rate_limited_value
            now = time.monotonic_ns()
            if now - last_invocation > nanos_per_invocation:
                # Normally we can schedule at the intended rate.
                last_invocation += nanos_per_invocation
                if last_invocation + nanos_per_invocation < now:
                    # If we're falling behind, fall back to "with fixed delay"
                    last_invocation = now
                rate_limited_value = decorated_fn(*args, **kwargs)
            return rate_limited_value
        return rate_limited_fn
    return decorator_rate_limit
a32e2d85ba3bc6d4e1de5f76e390fbf3b49a3280
21,441
def inverse_clamp(value, liveband):
    """Push *value* out of the dead zone (-liveband, liveband).

    Ensures the result is in (-inf, -liveband] U [liveband, inf) for nonzero
    input; zero is returned unchanged. liveband must be a positive value.
    """
    if value > 0:
        return max(value, liveband)
    if value < 0:
        return min(value, -liveband)
    return value
07b0b617e1658bf6534b92360087bd0b150ef731
21,443
def __getTitleRow():
    """CSV title ROW for the batch image-brightness report."""
    headers = ['Batch', 'Image Width', 'Image Height', 'Avg. Brightness']
    return headers
6c974f8c89f9db8baaa88e3ec381d91efccc9afe
21,444
def decorator(caller):
    """decorator(caller) converts a caller function into a decorator.

    The resulting decorator wraps a function f so that each call invokes
    caller(f, *args, **kwargs).
    """
    def make_decorated(fn):
        def delegate(*args, **kwargs):
            return caller(fn, *args, **kwargs)
        return delegate
    return make_decorated
c992c8a899560e4579cbe4bf54de32a4031a2bf9
21,445
import math


def normalize_value(x, exponent_min, exponent_max):
    """
    Normalize the input value.

    Note that this assumes that `allocation: lg2` is used for the color
    space in the OpenColorIO configuration.

    :param x: Input value
    :param exponent_min: Smallest exponent for the input values
    :param exponent_max: Largest exponent for the input values
    :return: Normalized value
    """
    span = abs(exponent_max - exponent_min)
    return (math.log2(x) - exponent_min) / span
808f70446a2e450fd7dfe1df480c696ed38163dd
21,447
def first_word(str):
    """Return the first whitespace-delimited word of the given text."""
    # NOTE: parameter name `str` shadows the builtin; kept for backward
    # compatibility with keyword callers.
    return str.split()[0]
73f1efc24c6c68e92b2af824358b0656cfbe278b
21,448
def _calculate_cluster_distance(end_iter):
    """Compute allowed distance for clustering based on end confidence intervals.

    Averages the widths (end - start) of both confidence intervals over all
    items, using integer division.

    Returns:
        (distance, items): mean interval width and the materialized list of
        items from *end_iter*. An empty iterator yields (0, []) instead of
        raising ZeroDivisionError (previous behavior).
    """
    out = []
    sizes = []
    for x in end_iter:
        out.append(x)
        sizes.append(x.end1 - x.start1)
        sizes.append(x.end2 - x.start2)
    if not sizes:
        # BUG FIX: guard the empty case, which previously divided by zero.
        return 0, out
    distance = sum(sizes) // len(sizes)
    return distance, out
5f3f8ae2a0e372d78dad06fe769da3ed45fbfb64
21,450
def _collatz_next_number(n):
    """Given a positive whole number:

    Return n // 2 if n is even.
    Return 3 * n + 1 otherwise.

    Raises:
        ValueError: if n <= 0 or n is not an integer. (The original
            docstring promised this but the check was missing.)
    """
    if not isinstance(n, int) or n <= 0:
        raise ValueError(f"n must be a positive integer, got {n!r}")
    if n % 2 == 0:
        return n // 2
    return 3 * n + 1
58272775f3d021eb36ea9311ae34664ceede59ba
21,452
import os


def ls(path):
    """Recursively list all files under *path*, relative to *path*."""
    prefix_len = len(path) + 1
    found = []
    for dirpath, _, files in os.walk(path):
        found.extend(os.path.join(dirpath, name)[prefix_len:] for name in files)
    return found
cd7d685966bd39bb5394fbb6ed364664d55025de
21,453
import os
from pathlib import Path
from shutil import copyfile


def get_working_directory() -> str:
    """This functions gets the working directory path (~/PoliCal).

    Side effects: creates the directory if missing and, on first run, seeds
    tasks.db from the copy shipped next to this module.

    Returns:
        working_directory (str): The directory where database and yaml are located.
    """
    db_filename = "tasks.db"
    userdir = os.path.expanduser("~")
    working_directory = os.path.join(userdir, "PoliCal")
    tasks_db = Path(os.path.join(working_directory, db_filename))
    if not os.path.exists(working_directory):
        os.makedirs(working_directory)
    if not tasks_db.is_file():
        # First run: copy the bundled seed database from the package dir.
        dir_path = os.path.dirname(__file__)
        tasks_src = os.path.join(dir_path, db_filename)
        tasks_dst = os.path.join(working_directory, db_filename)
        copyfile(tasks_src, tasks_dst)
    return working_directory
e02f76609c67f2e005586f5ac37f83022c704b16
21,454
import torch


def sinc(x: torch.Tensor):
    """
    Implementation of sinc, i.e. sin(x) / x, with sinc(0) == 1.

    __Warning__: the input is not multiplied by `pi`!
    """
    one = torch.tensor(1., device=x.device, dtype=x.dtype)
    return torch.where(x == 0, one, torch.sin(x) / x)
2b7fd194a0e5ef8449b88f711312fce9a2d0ba84
21,455
def coerced_input(prompt, type_ = float):
    """Keep prompting the user until the reply converts cleanly via *type_*.

    Returns the converted value.
    """
    while True:
        try:
            return type_(input(prompt))
        except ValueError:
            continue
1f2d931c60239c87e7bfe063537180119f5e2f3b
21,456
def get_id_from_errordetail(error_json):
    """Extract an id from an error payload's detail message.

    Finds the first https://ns.adobe.com URI in error_json['detail'], strips
    the scheme/host prefix, turns '/' into '.', and prefixes '_'. Returns ''
    when no such URI is present.

    :param error_json : json data
        {
            "type": "/placeholder/type/uri",
            "status": 400,
            "title": "BadRequestError",
            "detail": "Title must be unique. An object
                https://ns.adobe.com/... already exists with the same title."
        }
    """
    detail = error_json['detail']
    start = detail.find("https://ns.adobe.com")
    if start == -1:
        return ""
    uri = detail[start:]
    # same quirk as before: if no space follows, the last char is dropped
    uri = uri[:uri.find(" ")]
    return "_" + uri.replace("https://ns.adobe.com/", "").replace("/", ".")
1d28c4c27a747100f8d0cddbb46ed48bae0d4526
21,458
import sys
import os


def check_output_path(output_path):
    """
    :method: a sub-method to make sure that the outpath ends with a "/"
        and exists on disk.
    :param output_path: string; must be non-empty, else the process exits(2)
    :return: string path with a trailing "/"
    """
    if len(output_path) > 0:
        if output_path[-1] != "/":
            output_path = output_path + "/"
    else:
        print("wrong output_path")
        sys.exit(2)
    # BUG FIX: replaces os.system("mkdir " + path), which was shell-
    # injectable and silently failed for nested paths.
    os.makedirs(output_path, exist_ok=True)
    return output_path
a4d8801fd9de1b6a9f26ee5850fe7515836dfaf3
21,459
from typing import Optional
import sys


def _calc_async_timeout(timeout: Optional[int]) -> int:
    """Default the async poll timeout (ms) per platform.

    Windows gets 1s, everything else 5s; an explicit timeout wins.
    see https://github.com/samuelcolvin/watchfiles/issues/110
    """
    if timeout is not None:
        return timeout
    return 1_000 if sys.platform == 'win32' else 5_000
e45d64a78e25490166dc8a25bf2115388780b5df
21,460
def TankPressure(P_max, V, init_H2O, V_H2O, gamma=1.4):
    """
    Compute the air pressure inside the tank.

    p = P_max * ((V - init_H2O) / (V - V_H2O)) ** gamma, i.e.
    p = P_max * (rho / rho_max) ** gamma expressed through air volumes.
    """
    compression_ratio = (V - init_H2O) / (V - V_H2O)
    return P_max * compression_ratio ** gamma
590409e9d214d9879a4760e3023c441902af412a
21,461
def collisions(distance_list, distance_cutoff):
    """
    Determine if there are any collisions between non-bonded particles,
    where a "collision" is defined as a distance shorter than
    'distance_cutoff'.

    :param distance_list: A list of distances.
    :param distance_cutoff: The distance below which particles will be
        considered to have "collisions".

    :returns: collision (Logical) - whether the model contains particle
        collisions.
    """
    return any(d < distance_cutoff for d in distance_list)
cab45e8a2da21b9096656cce7eedce74fdae4d76
21,462
def _get_comparable_seq(seq_i, seq_j, start_phase, end_phase):
    """
    Return the residues of the exon/subexon that can be shared.

    A start/end phase of 1 or 2 means the first/last residue may differ
    between the two sequences, so it is trimmed from both.

    >>> _get_comparable_seq("ASMGSLTSSPSSL", "TSMGSLTSSPSSC", 1, 2)
    ('SMGSLTSSPSS', 'SMGSLTSSPSS')
    """
    lo = 1 if start_phase in (1, 2) else 0
    hi = -1 if end_phase in (1, 2) else None
    return seq_i[lo:hi], seq_j[lo:hi]
0d309f2c2081e84f7285485577b4edd84aba9635
21,463
from typing import Dict


def csv_pep_filter(p, **kwargs) -> Dict[str, str]:
    """
    CSV PEP filter, that returns Sample object representations

    This filter can save the CSVs to files, if kwargs include
    `sample_table_path` and/or `subsample_table_path`.

    :param peppy.Project p: a Project to run filter on
    """
    sample_path = kwargs.get("sample_table_path")
    subsample_path = kwargs.get("subsample_table_path")
    parts = []
    sample_repr = p.sample_table.to_csv(path_or_buf=sample_path)
    if sample_repr is not None:
        parts.append(sample_repr)
    if p.subsample_table is not None:
        subsample_repr = p.subsample_table.to_csv(path_or_buf=subsample_path)
        if subsample_repr is not None:
            parts.append(subsample_repr)
    return {"samples.csv": "".join(parts)}
8cadf3f1cf292e6b511bf7d2489ffaa0edf22191
21,464
import torch


def onehot_labels(labels: torch.Tensor, n_classes: int):
    """Convert loaded labels to one-hot form.

    :param labels: tensor of shape (batch_size x n_cells) with integers
        indicating class
    :param n_classes: number of classes
    :return: tensor of shape (batch_size x n_cells x n_classes) with
        one-hot encodings
    """
    shape = (*labels.shape[:2], n_classes)
    encoded = torch.zeros(shape, device=labels.device)
    return encoded.scatter_(-1, labels.unsqueeze(-1), 1.0)
3e27693b62eacf19d1bd996a79e18b82addc91e6
21,465
from pathlib import Path


def get_csv_folder_path():
    """Folder path of input data."""
    return Path("/tmp") / "data"
d478e7a219ec4ec71081d326a70a5b4530aad7fe
21,466
def true_labels(dataframe):
    """Add ground-truth cluster label columns derived from 'Worst_diagnosis'.

    'true_labels' groups diagnoses without separating cancer types;
    'true_labels2' keeps cancer types distinct. The same dataframe is
    returned with both columns added.
    """
    diagnosis_codes = [11, 20, 12, 13, 14, 15, 16, 17, 18, 31, 32, 33, 35, 41, 42, 43]
    grouped = [0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4]
    split_cancer = [0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 5, 6]
    dataframe['true_labels'] = dataframe['Worst_diagnosis'].replace(diagnosis_codes, grouped)
    dataframe['true_labels2'] = dataframe['Worst_diagnosis'].replace(diagnosis_codes, split_cancer)
    return dataframe
1f8b22315d9f2379adc761aa788fe60eb21c0e98
21,467
import torch


def decay_rewards(model_rewards, rewards, gamma):
    """
    Computes the final rewards by decaying a final reward with decay rate gamma.

    Example
    -------
    model_rewards = [0, 1, 2, 3, 4]
    rewards = 10
    gamma = 0.9
    result = [0.9(0.9(0.9(0.9*10))), 1 + 0.9(0.9(0.9*10)), 2 + 0.9(0.9*10),
              3 + 0.9*10, 4 + 10]

    Arguments
    ---------
    model_rewards : list(T)<torch.FloatTensor>
        A list containing the rewards for every timestep as
        torch.FloatTensor with shape [B x 1].
    rewards : torch.Tensor
        A byte tensor with shape [B x 1]

    Return
    ------
    rewards : torch.FloatTensor
        The final decayed rewards as a Tensor with shape [B x T]
    """
    if gamma == 0:
        # No decay: the terminal reward is simply added to the last step.
        # NOTE(review): this branch mutates the caller's model_rewards list
        # in place -- confirm callers do not reuse it.
        model_rewards[-1] += rewards.float()
        rewards = torch.cat(model_rewards, dim=1)
    else:
        # Walk backwards from the last timestep, multiplying the terminal
        # reward by gamma once per step moved toward the front.
        R = rewards.float()
        rewards = [model_rewards[-1] + R]
        for r in model_rewards[::-1][1:]:
            R = gamma * R
            rewards.insert(0, r + R)
        rewards = torch.cat(rewards, dim=1)
    return rewards
41791d82bd52ee2c83bae68652781018d67e4a61
21,468
def expanded_form(num):
    """
    Expands the form of a number, placing each digit at its place value.

    :param num: Integer
    :return: String of place values joined by " + ", e.g. 70304 ->
        "70000 + 300 + 4"
    :rtype: str
    """
    digits = str(num)
    width = len(digits)
    terms = [digit + "0" * (width - 1 - idx)
             for idx, digit in enumerate(digits)
             if digit != "0"]
    return " + ".join(terms)
dd2e403a933f8bf6b64c332f8e44912c5837d191
21,469
def list_encoder_factory(type_callable):
    """
    Creates a function encoder that applies *type_callable* to every
    element of a list.

    :param type_callable: type to apply to data
    :return: function that applies type_callable to a supplied list of data
    """
    def encode(items):
        return list(map(type_callable, items))
    return encode
6a892956d94e88e24ad738de6a19e2394aa34842
21,470
def hex_to_rgb(value):
    """Given a color in hex format (e.g. '#1a2b3c'), return it as [r, g, b]."""
    digits = value.lstrip('#')
    step = len(digits) // 3
    return [int(digits[i:i + step], 16) for i in range(0, len(digits), step)]
274725a7f6695e190d3590565c935fbe1f6c7676
21,471
import re


def get_album_art_url(html):
    """
    Extract the album art url from the page HTML so it can be downloaded
    and embedded into the music file later.
    """
    matches = re.findall('img src="(.*?)" width="500"', html)
    return matches[0]
e79fee6ed2058bb93865b902ffa8484637adad38
21,472
import re


def find_matching_sheets(wb, to_match, to_not_match):
    """Searches for the right worksheets and returns their names as a list.

    Args:
        wb: Name of excel workbook with election results
        to_match (str): phrase to match (case-insensitive)
        to_not_match (str): phrase to exclude (case-insensitive)
    """
    names = []
    for sheet in wb.sheets():
        wanted = re.search(to_match, sheet.name, flags=re.IGNORECASE)
        unwanted = re.search(to_not_match, sheet.name, flags=re.IGNORECASE)
        if wanted and not unwanted:
            names.append(wanted.string)
    return names
6d219a6e1df470a112456fbcd44da111cf22bcd4
21,473
def determine_input_arg(arg_val, default_arg_val):
    """Return arg_val when truthy, otherwise default_arg_val."""
    return arg_val or default_arg_val
c15500b7869a5b4c5a0c687228c926bfa4568f74
21,474
def check_col(col: str) -> str:
    """Validate that *col* can be joined with the list object and return it.

    Raises Exception when the name is not one of the supported columns.
    """
    columns_available = {"name", "year", "rating", "watched_date"}
    if col in columns_available:
        return col
    raise Exception(f"Mentioned column name is not usable. Please use one of {columns_available}")
f0a5d1ab8d710bb3277629c511a60077636bfbd8
21,476
from typing import List
import os


def get_available_images() -> List[str]:
    """Read files in the current directory and return the `xxx` part of
    every Dockerfile.xxx."""
    return [entry.split(".")[1]
            for entry in os.listdir(".")
            if entry.startswith("Dockerfile.")]
c38b3250cf3a07cfe70b43f40b7ef50c462311e7
21,477
def _dict_depth(d):
    """
    Get the nesting depth of a dictionary.

    For example:

    >>> _dict_depth(None)
    0
    >>> _dict_depth({})
    1
    >>> _dict_depth({"a": "b"})
    1
    >>> _dict_depth({"a": {"b": {}}})
    3

    Args:
        d (dict): dictionary

    Returns:
        int: depth
    """
    try:
        values = iter(d.values())
    except AttributeError:
        # d doesn't have attribute "values" -- not dict-like
        return 0
    try:
        first = next(values)
    except StopIteration:
        # empty dict
        return 1
    return 1 + _dict_depth(first)
0c6fd91ed64b25ff023edbfbf210c6b354cd3af8
21,478
import argparse


def _get_parser():
    """Parse input options for the 2d pdf plot tool."""
    parser = argparse.ArgumentParser(
        description=("Make 2d pdf plot."),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    options = [
        ("--infile", "-i", "all", "text file to be read from"),
        ("--outfile", "-o", "output", "mesh file to be written to"),
        ("--x_label", "-x", "Displacement (mm)", "set x label"),
        ("--y_label", "-y", "Load (kN)", "set y label"),
    ]
    for long_opt, short_opt, default, help_text in options:
        parser.add_argument(long_opt, short_opt, default=default, type=str,
                            help=help_text)
    return parser
ad48ff1a7b00a5de7fc799ec3cf299b7faa94039
21,479
import re


def minify_part(script):
    """
    Remove comments and unnecessary whitespace from javascript embedded in
    templates.

    NOTE(review): these regexes are purely textual -- string literals that
    happen to contain '//', spaces around operators, etc. will also be
    rewritten. Presumably acceptable for the templates involved; confirm.
    """
    # remove block comments
    script = re.sub(r'/\*.*\*/', '', script)
    # remove single-line comments
    script = re.sub(r'//.*\n', '\n', script)
    # remove whitespace at start of the line
    script = re.sub(r'^\s+', '', script)
    script = re.sub(r'\n\s+', '\n', script)
    # remove whitespace at end of the line
    script = re.sub(r'\s+\n', '\n', script)
    # remove newlines
    script = re.sub(r'\n', '', script)
    # remove whitespace around certain operators
    script = re.sub(r' = ', '=', script)
    script = re.sub(r' -= ', '-=', script)
    script = re.sub(r' \+= ', '+=', script)
    script = re.sub(r'\+ ', '+', script)
    script = re.sub(r' \+', '+', script)
    script = re.sub(r' \* ', '*', script)
    script = re.sub(r' :', ':', script)
    script = re.sub(r' == ', '==', script)
    script = re.sub(r' != ', '!=', script)
    script = re.sub(r' === ', '===', script)
    script = re.sub(r' !== ', '!==', script)
    script = re.sub(r' \+ ', '+', script)
    script = re.sub(r' - ', '-', script)
    script = re.sub(r' \? ', '?', script)
    script = re.sub(r' < ', '<', script)
    script = re.sub(r' > ', '>', script)
    script = re.sub(r' / ', '/', script)
    script = re.sub(r' && ', '&&', script)
    script = re.sub(r' \|\| ', '||', script)
    script = re.sub(r' >= ', '>=', script)
    script = re.sub(r' <= ', '<=', script)
    script = re.sub(r', ', ',', script)
    script = re.sub(r': ', ':', script)
    script = re.sub(r'; ', ';', script)
    # tighten braces and parentheses
    script = re.sub(r'\) {', '){', script)
    script = re.sub(r'{ ', '{', script)
    script = re.sub(r' \(', '(', script)
    script = re.sub(r'} else', '}else', script)
    script = re.sub(r'else {', 'else{', script)
    script = re.sub(r' }', '}', script)
    return script
f41a697ee2a726c5c84c3b8e66229690a5d3b355
21,480
def inherit_function_doc(parent):
    """Inherit a parent instance function's documentation.

    Parameters
    ----------
    parent : callable
        The parent class from which to inherit the documentation. If the
        parent class does not have the function name in its MRO, this will
        fail.

    Examples
    --------
    >>> class A(object):
    ...     def do_something(self):
    ...         '''Does something'''
    >>>
    >>> class B(A):
    ...     @inherit_function_doc(A)
    ...     def do_something(self):
    ...         pass
    >>>
    >>> print(B().do_something.__doc__)
    Does something
    """
    def doc_wrapper(method):
        name = method.__name__
        assert name in dir(parent), (
            '%s.%s is not a method! Cannot inherit documentation'
            % (parent.__name__, name))
        # Copy once, at class-definition time -- not on every call.
        method.__doc__ = getattr(parent, name).__doc__
        return method
    return doc_wrapper
0d22610e66118363fdeda6139eab0a8065e6c354
21,481
def humanize_size(size_in_bytes):
    """Convert a byte count to a human-readable string (B, kB, MB, GB).

    Divides by 1024 until the value is at most 99 (or the largest unit is
    reached) and appends the matching unit prefix.
    """
    threshold = 99
    divisor = 1024
    # BUG FIX: index 0 = no division performed. The old table started at
    # 'k', so every result was one unit too large (2048 -> "2.00MB").
    # The old docstring was also copied from a rate-conversion helper.
    prefixes = ['', 'k', 'M', 'G']
    i = 0
    while size_in_bytes > threshold and i < len(prefixes) - 1:
        size_in_bytes = size_in_bytes / divisor
        i += 1
    return "{:.2f}{}B".format(size_in_bytes, prefixes[i])
a672b8aeba215081aef8b26d93fba95784bf18cb
21,482
import re


def check_files(files, count_limit, size_limit):
    """
    Check if uploaded files conform to count, size and name requirements.

    Returns (ok, error_message); error_message is None on success.
    """
    if len(files) == 0:
        return False, "No file was uploaded"
    if len(files) > count_limit:
        return False, "Limit on amount of files exceeded"
    for upload in files:
        if upload.size > size_limit:
            return False, "Limit on file size exceeded"
        if re.search(r'[^a-zA-Z0-9_\-\.]', upload.name):
            return False, "File name contains invalid characters"
    return True, None
25ccb7641b9f4b8b3f71f25dbcbf5f73be8de6b0
21,484
def relevant_topics(freq: dict) -> list:
    """Return the list of subjects relevant to the original email text.

    A subject is relevant when its occurrence count is at least 80% of the
    highest count (this removes outliers). An empty or all-nonpositive
    frequency table yields [].

    (Fixes a stale comment that claimed a 0.3 threshold while the code uses
    0.8, and stops shadowing the builtin `max`.)
    """
    if not freq:
        return []
    peak = max(freq.values())
    if peak <= 0:
        return []
    threshold = peak * 0.8
    return [subject for subject, count in freq.items() if count >= threshold]
40cf0c8e293b709d8bcf26d7974174c1ce108cd6
21,486
import math


def distance_between_points(p0, p1):
    """
    Distance between two points (x0, y0), (x1, y1)

    Parameters
    ----------
    p0 : tuple
        Point 0
    p1 : tuple
        Point 1

    Returns
    -------
    distance : value
        Euclidean distance between the points
    """
    dx = p0[0] - p1[0]
    dy = p0[1] - p1[1]
    return math.sqrt(dy ** 2 + dx ** 2)
58ee3b4536c912846704c581fa42065cd1b2f0a1
21,487
def _stocknames_in_data_columns(names, df):
    """Returns True if at least one element of names was found as a
    substring of a column label in the dataframe df."""
    for label in df.columns:
        if any(name in label for name in names):
            return True
    return False
30eee497f7a21755eda7e1915bf84dc800a09abc
21,488
def mark_low_points(points):
    """
    Mark each point that is strictly lower than all of its 4-neighbours.

    Each cell is a mutable [height, flag, ...] record; the flag (index 1)
    is set to True for local minima. Ties with a neighbour disqualify a
    point.

    :param points: 2-D grid (list of rows) of such records
    :return: the same grid with flags updated
    """
    rows = len(points)
    for i in range(rows):
        cols = len(points[i])
        for j in range(cols):
            height = points[i][j][0]
            neighbours = []
            if i > 0:
                neighbours.append(points[i - 1][j][0])  # up
            if i < rows - 1:
                neighbours.append(points[i + 1][j][0])  # down
            if j > 0:
                neighbours.append(points[i][j - 1][0])  # left
            if j < cols - 1:
                neighbours.append(points[i][j + 1][0])  # right
            if all(height < n for n in neighbours):
                # this is the lowest point in its neighbourhood
                points[i][j][1] = True
    return points
36aab1507c51e4961adbc5cd036444ab1d64ed8c
21,489
def to_bytes(value, encoding='utf-8'):
    """
    Makes sure the value is encoded as a byte string.

    :param value: The Python string value to encode.
    :param encoding: The encoding to use.
    :return: The byte string that was encoded.
    """
    # `six` was only used for six.binary_type, which is `bytes` on
    # Python 3 -- the third-party dependency is unnecessary.
    if isinstance(value, bytes):
        return value
    return value.encode(encoding)
c4f0e24d39ee565135326550006a3d5311adbb40
21,491
import os
import json


def load_monrovia_data():
    """Get all companion plant references and the plants they correspond to.

    Reads every .json file in ./data/monrovia (relative to the cwd) and
    keeps only records containing both 'url' and 'name'.
    """
    records = []
    folder = os.getcwd() + '/data/monrovia'
    for filename in os.listdir(folder):
        if not filename.endswith('.json'):
            continue
        with open('data/monrovia/' + filename, 'r') as fh:
            record = json.loads(fh.read())
        if 'url' in record and 'name' in record:
            records.append(record)
    return records
fe2d8e0a8443359fc518623f0f64b327e5147f88
21,492
import numpy
from typing import Tuple


def reconcile_overlap(
    previous_values: numpy.ndarray,
    current_values: numpy.ndarray,
    tile: numpy.ndarray,
) -> Tuple[numpy.ndarray, list, list]:
    """ Resolve label values between tiles

    This function takes a row/column from the previous tile and a
    row/column from the current tile and finds labels that likely match.
    If labels in the current tile should be replaced with labels from the
    previous tile, the pixels in the current tile are removed from ``tile``
    and the label value and pixel coordinates of the label are stored in
    ``labels`` and ``indices`` respectively.

    Args:
        previous_values: Previous tile edge values
        current_values: Current tile edge values
        tile: Current tile pixel values, flattened

    Returns:
        The modified tile with overlapping labels removed, a list of new
        labels, and a list of indices associated with the new labels.
    """
    # Get a list of unique values in the previous and current tiles;
    # numpy.unique returns sorted values, so 0 (background) is first if present
    previous_labels = numpy.unique(previous_values)
    if previous_labels[0] == 0:
        previous_labels = previous_labels[1:]
    current_labels = numpy.unique(current_values)
    if current_labels[0] == 0:
        current_labels = current_labels[1:]
    # Initialize outputs
    labels, indices = list(), list()
    if previous_labels.size != 0 and current_labels.size != 0:
        # Find overlapping indices
        for label in current_labels:
            # previous-edge values seen at the positions this label occupies
            new_labels, counts = numpy.unique(previous_values[current_values == label], return_counts=True)
            if new_labels.size == 0:
                continue
            if new_labels[0] == 0:
                # drop background from the overlap tally
                new_labels = new_labels[1:]
                counts = counts[1:]
            if new_labels.size == 0:
                continue
            # Get the most frequently occurring overlapping label
            labels.append(new_labels[numpy.argmax(counts)])
            # Add indices to output, remove pixel values from the tile
            # (tile is flattened, so argwhere yields flat offsets)
            indices.append(numpy.argwhere(tile == label))
            tile[indices[-1]] = 0
    return tile, labels, indices
ef7a5ef98b54209dbae5b055b80215a3e1da5a71
21,493
import requests


def get_team(team_id, gw):
    """
    Get the players in a team

    Fetches the FPL picks endpoint for (team_id, gw); when the Free Hit
    chip was active that week, re-fetches the previous GW to get the
    underlying squad.

    Args:
        team_id (int): Team id to get the data from
        gw (int): GW in which the team is taken

    Returns:
        (tuple): List of integers (player element ids), Remaining budget
    """
    res = requests.get(
        'https://fantasy.premierleague.com/api/entry/' +
        f'{team_id}/event/{gw}/picks/').json()
    # Scrape GW before FH to get the team from GW prior
    if res['active_chip'] == 'freehit':
        res = requests.get(
            'https://fantasy.premierleague.com/api/entry/' +
            f'{team_id}/event/{gw-1}/picks/').json()
    # Adjust the player id with fplreview indices
    return [i['element'] for i in res['picks']], res['entry_history']['bank']
295c46628c14ad64715ee4a06537598878a8ef2e
21,495
from typing import Dict
import re


def basic_definitions(filepath: str) -> Dict[str, str]:
    """Builds a dictionary of definitions from a file.

    Lines of the form `name = expression` are stored verbatim; definitions
    are not reduced to lambdas.
    """
    pattern = re.compile(r"^([a-zA-Z_]\w*) *= *(.+)$")
    definitions: Dict[str, str] = {}
    with open(filepath) as handle:
        for line in handle:
            match = pattern.search(line.strip())
            if match is not None:
                definitions[match[1]] = match[2]
    return definitions
12028ec99dccc0c2881e6d10aa855168f91d5929
21,496
def get_indexes(start_index, chunk_size, nth):
    """
    Creates [start, stop] positions of the nth chunk.

    Args:
        start_index (int): reference position. NOTE(review): this argument
            is immediately overwritten below and has no effect on the
            result -- confirm whether that is intended.
        chunk_size (int): Chunk size
        nth (int): The nth number

    Returns:
        list: First and last position of indexes
    """
    first = nth * chunk_size
    return [first, first + chunk_size]
a8dba6fef788b542b502e1e0ce52ce57f2372485
21,498
def _all_traverse(self, result):
    """Version of Node.traverse() that doesn't need a condition.

    Appends self and every descendant (pre-order) to *result* and returns
    the list.
    """
    result.append(self)
    for node in self.children:
        node._all_traverse(result)
    return result
77cab09267e0b4f8fb45b76724202ff990705dba
21,500
import importlib
import subprocess
import sys


def try_import(module_name, second_try=False):
    """Import ``module_name``, pip-installing it and retrying once on failure.

    The imported module is also published into this module's globals under
    its own name, then returned.
    """
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        if second_try:
            # Already retried once after installing; give up.
            raise
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", module_name])
        return try_import(module_name, True)
    globals()[module_name] = module
    return module
30057f61b160853fa246d09666e2349d92812d18
21,501
def _get_request_wait_time(attempts):
    """Backoff delay for rate-limited requests, following Fibonacci growth.

    Successive attempts yield 3, 5, 8, 13, ... seconds.
    """
    current, previous = 1, 0
    for _ in range(attempts + 3):
        current, previous = current + previous, current
    return current
91e4c165fee5b821e8654468e954786f89ad1e0b
21,502
def get_title(soup):
    """Return the title of a post.

    Looks up the ``<h3 class="blogTitle">`` element, strips any literal
    '</h3>' artifacts and replaces newlines with spaces. Returns '' when
    no title element is found.
    """
    title_elem = soup.find('h3', attrs={'class': 'blogTitle'})
    if not title_elem:
        return ''
    title = title_elem.text.replace('</h3>', '')
    # BUG FIX: str.replace returns a new string; the original code discarded
    # the result of the newline replacement, so newlines were never removed.
    return title.replace('\n', ' ')
eb16747dd9926b8f6338888ad21fd4ac8a93a14d
21,505
def GetProblemIndexFromKey(problems, problem_key):
    """Get a problem's index given its key and a problem list.

    Args:
        problems: Iterable of problems in the current contest.
        problem_key: String with the problem key that must be searched.

    Returns:
        The index of the first problem whose 'key' matches problem_key,
        or None if no problem matches.
    """
    return next(
        (index for index, problem in enumerate(problems)
         if problem['key'] == problem_key),
        None,
    )
08e84afe65695c0d0aedb391975b37c7b42f22af
21,508
from typing import List
from pathlib import Path


def get_core_paths(root: str) -> List[str]:
    """Return all files/directories that are part of the core package.

    Entries under the ``envs`` directory are expanded to their files only
    (subdirectories of ``envs`` are skipped); every other entry of ``root``
    is returned as-is.
    """
    collected: List[str] = []
    for entry in Path(root).iterdir():
        if entry.stem != "envs":
            collected.append(str(entry))
            continue
        collected.extend(
            str(env_entry)
            for env_entry in entry.iterdir()
            if env_entry.is_file()
        )
    return collected
96b9e6391fa8aabbec947a88c4f142b8007eaddc
21,509
import re


def get_symbol_name(module, symbol_id):
    """Return a pretty printed name for the symbol_id if available.

    The pretty printed name is created from OpName decorations for the
    symbol_id if available, otherwise a numerical ID (e.g. '%23') is
    returned.

    Names are not unique in SPIR-V, so it is possible that several IDs have
    the same name in their OpName decoration. This function will return
    that name for the first ID found, and the rest will get a numerical
    name.
    """
    # Fast path: name already resolved and cached on the module.
    if symbol_id in module.id_to_symbol_name:
        return module.id_to_symbol_name[symbol_id]
    for inst in module.global_instructions.name_insts:
        if inst.op_name == 'OpName' and inst.operands[0] == symbol_id:
            name = inst.operands[1]
            # glslang tend to add type information to function names.
            # E.g. "foo(vec4)" get the symbol name "foo(vf4;"
            # Truncate such names to fit our IL: keep only the leading
            # identifier-shaped prefix.
            regex = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*')
            match = regex.match(name)
            if match is None:
                # Name has no identifier prefix at all; fall back to the
                # numerical form. NOTE: this early return bypasses the
                # caching below, so the lookup repeats on the next call.
                return '%' + str(symbol_id.value)
            new_name = match.group(0)
            symbol_name = '%' + new_name
            if symbol_name in module.symbol_name_to_id:
                # This name is already used for another ID (which is
                # possible, as the decorations we are using for symbol
                # names are not guaranteed to be unique). Let the first
                # ID use this name, and use a numerical name for this ID.
                symbol_name = '%' + str(symbol_id.value)
            break
    else:
        # No OpName decoration found for this ID: use the numerical form.
        symbol_name = '%' + str(symbol_id.value)
    # Cache the mapping in both directions for future lookups.
    module.id_to_symbol_name[symbol_id] = symbol_name
    module.symbol_name_to_id[symbol_name] = symbol_id
    return symbol_name
b82ec2e06e289a58d1a4dc6ebd858a74a4c07ca6
21,510
import struct


def unpack_uint32(byte_stream, endian="<"):
    """Decode a bytes object into a sequence of 32-bit unsigned integers.

    Each group of 4 input bytes decodes to one uint32.

    Args:
        byte_stream (bytes): length is a multiple of 4
        endian (str, optional): "<" for little-endian unpacking,
            ">" for big-endian unpacking

    Returns:
        tuple: unpacked uint32 numbers (struct.unpack returns a tuple)
    """
    count = len(byte_stream) // 4
    # "<3I" is equivalent to "<III": a repeat count replaces repetition.
    return struct.unpack(f"{endian}{count}I", byte_stream)
87f7bd12607e9be193098a407b51cbc01bdbe3c4
21,511
def make_decision(idx, df):
    """Create final result based on all analysis.

    This function consists of a series of if statements. Each if statement
    can be seen as a 'rule' with requirements that need to be met for the
    final SNAPPy result to be created. Rules are ordered from strongest
    (all methods agree) to weakest (fallbacks for missing data), so order
    matters.

    Args:
        idx (str): Internal SNAPPy id.
        df (dataframe): Tabular like file with the outputs from all the
            analysis performed.

    Returns:
        List with two items: rule used and final SNAPPy output.
    """
    # Row layout, inferred from the rule comments below — confirm upstream:
    # [0] recombination-test subtype, [1] all-refs tree subtype, [2] its support,
    # [3] pure-subtype tree subtype, [4] its support, [5] recombinant tree
    # subtype, [6] its support, [7] closest-reference subtype,
    # [8] recombination class (0 = none, 1 = simple, 2 = complex).
    to_process = list(df.loc[idx])
    # all methods agree
    # rule_p1: no recomb, tree all equal tree pure, recomb equal tree all, tree all equal closser
    if ((to_process[8] == 0) & (str(to_process[1]) != 'nan') & (to_process[1] == to_process[3]) & (to_process[0] == to_process[1]) & (to_process[1] == to_process[7])):
        return ['rule_p1', to_process[1]]
    # rule_c1: all trees and recomb trees and closser ref agree plus recomb is simple
    elif ((str(to_process[1]) != 'nan') & (to_process[1] == to_process[5]) & (to_process[2] >= 0.7) & (to_process[6] >= 0.7) & (to_process[8] == 1) & (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
        return ['rule_c1', to_process[1]]
    # both trees plus 1 method agree
    # rule_p2: tree pure agrees with tree all and recomb
    elif ((str(to_process[3]) != 'nan') & (to_process[3] == to_process[1]) & (to_process[4] >=0.7) & (to_process[2] >=0.7) & (to_process[3] == to_process[0])):
        return ['rule_p2', to_process[3]]
    # rule_p3: tree pure agrees with tree all and closser
    elif ((str(to_process[3]) != 'nan') & (to_process[3] == to_process[1]) & (to_process[4] >=0.7) & (to_process[2] >=0.7) & (to_process[3] == to_process[7])):
        return ['rule_p3', to_process[3]]
    # rule_c2: tree recomb agrees with tree all and closser and there is recomb
    elif ((str(to_process[5]) != 'nan') & (to_process[5] == to_process[1]) & (to_process[6] >=0.7) & (to_process[2] >=0.7) & (to_process[5] == to_process[7])):
        return ['rule_c2', to_process[5]]
    # one tree plus recomb and closser
    # rule_p4: tree pure agrees with recomb and closser
    elif ((str(to_process[3]) != 'nan') & (to_process[4] >=0.9) & (to_process[3] == to_process[0]) & (to_process[3] == to_process[7])):
        return ['rule_p4', to_process[3]]
    # rule_c3: tree recomb agrees with closser and recomb is simple
    elif ((str(to_process[5]) != 'nan') & (to_process[6] >=0.9) & (to_process[8] == 1) & (to_process[5] == to_process[7])):
        return ['rule_c3', to_process[5]]
    # rule_b1: tree all agrees with recomb and closser
    elif ((str(to_process[1]) != 'nan') & (to_process[2] >=0.9) & (to_process[1] == to_process[0]) & (to_process[1] == to_process[7])):
        return ['rule_b1', to_process[1]]
    # recomb gives complex
    # rules_c4: tree all agrees tree recomb, and their result is a crf
    elif ((to_process[8] == 2)):
        if ((to_process[1] == to_process[5]) & (to_process[2] >= 0.7) & (to_process[6] >= 0.7) & ('_' in str(to_process[1])) & (str(to_process[1]) != 'nan')):
            return ['rule_c4', to_process[1]]
        # rules_p5: tree all agrees tree pure, and closser, great support for 1 tree
        elif ((to_process[1] == to_process[3]) & (to_process[1] == to_process[7]) & ((to_process[2] >= 0.9) | (to_process[4] >=0.9)) & (str(to_process[1]) != 'nan')):
            return ['rule_p5', to_process[1]]
        # rules_c5: tree all agrees tree recomb, and closser, and trees give crf
        elif ((to_process[1] == to_process[5]) & ('_' in str(to_process[1])) & (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
            return ['rule_c5', to_process[1]]
        # rules_p6: tree all agrees tree pure, and closser
        elif ((to_process[1] == to_process[3]) & (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
            return ['rule_p6', to_process[1]]
        # rules_u1: remaining cases are a complex URF
        else:
            return ['rule_u1', 'URF_CPX']
    # recomb gives simple
    # rules_c6: tree all agrees tree recomb, and their result is a crf
    elif ((to_process[8] == 1)):
        if ((to_process[1] == to_process[5]) & (to_process[2] >= 0.7) & (to_process[6] >= 0.7) & ('_' in str(to_process[1])) & (str(to_process[1]) != 'nan')):
            return ['rule_c6', to_process[1]]
        # rules_p7: tree all agrees tree pure, and closser, great support for 1 tree
        elif ((to_process[1] == to_process[3]) & (to_process[1] == to_process[7]) & ((to_process[2] >= 0.9) | (to_process[4] >=0.9)) & (str(to_process[1]) != 'nan')):
            return ['rule_p7', to_process[1]]
        # rules_c7: tree all agrees tree recomb, and closser, and trees give crf
        elif ((to_process[1] == to_process[5]) & ('_' in str(to_process[1])) & (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
            return ['rule_c7', to_process[1]]
        # rules_p8: tree all agrees tree pure, and closser
        elif ((to_process[1] == to_process[3]) & (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
            return ['rule_p8', to_process[1]]
        # rules_u2: remaining cases are an URF named after the recomb components
        else:
            return ['rule_u2', f'URF_{"".join([str(x)[:2] for x in sorted(str(to_process[0]).split("/"))])}']
    # no evidence of recomb
    # rule_p9: pure and all trees agree
    elif ((to_process[1] == to_process[3]) & (to_process[4] >=0.7) & (to_process[2] >=0.7) & (str(to_process[1]) != 'nan')):
        return ['rule_p9', to_process[1]]
    # final, deal with problems of missing data
    # rule_f1: if recomb res missing output and closser res not missing give closser result
    elif (((str(to_process[0]) == '') | (str(to_process[0]) == 'nan')) & ((str(to_process[7]) != 'nan') | (str(to_process[7]) != ''))):
        return ['rule_f1', to_process[7]]
    # rule_f2: if recomb res and closser outputs missing and trees agree give trees result
    elif (((str(to_process[0]) == '') | (str(to_process[0]) == 'nan')) & ((str(to_process[7]) == 'nan') | (str(to_process[7]) == ''))):
        if ((to_process[1] == to_process[3]) & (str(to_process[1]) != 'nan')):
            return ['rule_f2', to_process[1]]
        # rule_f3: if recomb res and closser outputs missing and trees agree give trees result
        elif ((to_process[1] == to_process[5]) & (str(to_process[1]) != 'nan')):
            return ['rule_f3', to_process[1]]
        # rule_f4: else return impossible to determine
        else:
            return ['rule_f4', 'impossible_to_determine']
    # rule_f5: give what is outputed by recomb test, there is no recomb
    else:
        return ['rule_f5', to_process[0]]
f170cf16b2dd5844ee900ccf3253fbc4592a01c3
21,513
import math


def process_mcc(
    intersection0_tab, intersection1_tab, erreur_pred1_tab, erreur_pred0_tab
):
    """Process MCC (Matthews correlation coefficient) over a confusion matrix.

    :param intersection0_tab: TN counts
    :type intersection0_tab: list of int
    :param intersection1_tab: TP counts
    :type intersection1_tab: list of int
    :param erreur_pred1_tab: FP counts
    :type erreur_pred1_tab: list of int
    :param erreur_pred0_tab: FN counts
    :type erreur_pred0_tab: list of int
    :return: the MCC, or -2 as a sentinel when the MCC is undefined
        (a zero row or column in the confusion matrix)
    :rtype: float
    """
    tp, tn = sum(intersection1_tab), sum(intersection0_tab)
    fp, fn = sum(erreur_pred1_tab), sum(erreur_pred0_tab)
    numerator = tp * tn - fp * fn
    root = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return -2 if root == 0 else numerator / root
9625750d62547cbb5a59e59332082824f2bff76e
21,515
import queue
import threading


def streaming_rpc_handler(cls, method_name):
    """Un-inverts the flow of control between the runner and the sdk harness.

    Returns an instance of a ``cls`` subclass whose gRPC stream method named
    ``method_name`` is backed by two queues, so the caller can imperatively
    ``push`` outgoing items and ``pull`` incoming ones instead of driving the
    bidirectional stream callback-style.
    """

    class StreamingRpcHandler(cls):
        # Sentinel pushed onto the outgoing queue to terminate the stream.
        _DONE = object()

        def __init__(self):
            # Items to send to the client (yielded from run()).
            self._push_queue = queue.Queue()
            # Items received from the client (filled by _read()).
            self._pull_queue = queue.Queue()
            # Install run() under the requested RPC method name.
            setattr(self, method_name, self.run)
            self._read_thread = threading.Thread(target=self._read)

        def run(self, iterator, context):
            """gRPC stream handler: drains the push queue to the client while
            a background thread copies client input into the pull queue."""
            self._inputs = iterator
            # Note: We only support one client for now.
            self._read_thread.start()
            while True:
                to_push = self._push_queue.get()
                if to_push is self._DONE:
                    return
                yield to_push

        def _read(self):
            # Runs on _read_thread: forward every incoming item to pull().
            for data in self._inputs:
                self._pull_queue.put(data)

        def push(self, item):
            """Queue an item to be sent to the client."""
            self._push_queue.put(item)

        def pull(self, timeout=None):
            """Block for the next item received from the client."""
            return self._pull_queue.get(timeout=timeout)

        def empty(self):
            """True when no client items are waiting to be pulled."""
            return self._pull_queue.empty()

        def done(self):
            """Terminate the stream and wait for the reader thread."""
            self.push(self._DONE)
            self._read_thread.join()

    return StreamingRpcHandler()
67589d0d5de7fcd4ef23782d157183f8dacef4e4
21,516
def _compare_rules(left, right):
    """Compare two firewall rules to see if they are the same.

    Port-based rules are matched on startport/endport, ICMP rules on
    icmptype/icmpcode; a rule missing both forms of a field never matches.

    @param left: rule to be compared (uses the 'cidrlist' key).
    @param right: rule to compare with (uses the 'cidr' key).
    @return: Boolean
    """
    # Compare both unconditional fields up front (as strings, since the two
    # sides may carry ints vs strings).
    protocol_match = str(left['protocol']) == str(right['protocol'])
    cidr_match = str(left['cidrlist']) == str(right['cidr'])

    if 'startport' in left and 'startport' in right:
        start_match = str(left['startport']) == str(right['startport'])
    elif 'icmptype' in left and 'icmptype' in right:
        start_match = str(left['icmptype']) == str(right['icmptype'])
    else:
        start_match = False

    if 'endport' in left and 'endport' in right:
        end_match = str(left['endport']) == str(right['endport'])
    elif 'icmpcode' in left and 'icmpcode' in right:
        end_match = str(left['icmpcode']) == str(right['icmpcode'])
    else:
        end_match = False

    return protocol_match and cidr_match and start_match and end_match
a40029639bf22419ff83fcaf83c7ea316d23ee43
21,517
def bias_to_0(w):
    """Return a copy of the weight vector with the bias entry zeroed.

    Entry 0 corresponds to the constant column Xn = 1; the input is left
    unmodified.
    """
    zeroed = w.copy()
    zeroed[0] = 0
    return zeroed
2441e05e648a5f1cda9114cb18691ad855995fb8
21,518
def tag_to_rtsip(tag):
    """Convert a 'type:sense:id:part' tag to (type, sense, int(id), part).

    Raises ValueError when the tag does not have exactly four
    colon-separated fields.
    """
    rtype, sense, rid, part = tag.split(":")
    return rtype, sense, int(rid), part
38237836cd16d34a594296c5077eee062600d899
21,519
def _clean_name(s: str) -> str:
    """Normalise a label: strip, lowercase, and replace spaces with underscores.

    >>> _clean_name("Residual x ")
    'residual_x'
    """
    lowered = str(s).strip().lower()
    return lowered.replace(" ", "_")
15bce66826a7caa76f4f25810369047de451c276
21,520
def parse_table_widths(s):
    """Split a comma-separated width list (from a matched table_widths regex
    group) into a list of stripped tokens."""
    return [token.strip() for token in s.split(",")]
5a65f0c49fdc33ef36549976aeb8b8b1ccacf15d
21,521
import os


def create_directory(
        parent_path: str,
        dir_name: str,
        verbosity: bool = False) -> bool:
    """Create ``dir_name`` inside ``parent_path``, tolerating pre-existing dirs.

    Args:
        parent_path (str): Path to the directory where the new directory
            should be created.
        dir_name (str): Name of the new directory.
        verbosity (bool, optional): Print a message when the parent is
            missing. Defaults to False.

    Returns:
        bool: True if the desired directory now exists (created or already
        present), False when the parent directory does not exist.
    """
    target = os.path.join(parent_path, dir_name)
    try:
        os.mkdir(target, mode=0o771)
    except FileExistsError:
        # Already there: treated as success.
        pass
    except FileNotFoundError:
        # Parent is missing; nothing to create into.
        if verbosity:
            print("Target directory does not exist")
        return False
    return True
aa85c58bbfcf92deea759ba772681e3690506db0
21,523
def distance_between_pieces_heuristic(state):
    """Heuristic: sum of Manhattan distances between pieces of the same colour.

    For each piece, the distance to its nearest same-colour neighbour is
    taken (capped at 7); these minimums are summed per colour.

    :param state: the state to compute the heuristic of, as
        (white_pieces, black_pieces) where each is a list of (x, y) tuples
    :return: the heuristic value of the given state, where bigger values are
        better for the maximizing player (black spread minus white spread)
    """
    white_pieces = state[0]
    black_pieces = state[1]

    def distance_pieces(pieces):
        distance = 0
        for (x, y) in pieces:
            min_dist = 7
            for (x2, y2) in pieces:
                # BUG FIX: the original used `x != x2 and y != y2`, which
                # skipped ANY piece sharing a row or column — not just the
                # piece itself — so same-row/column neighbours were ignored.
                if (x, y) != (x2, y2):
                    min_dist = min(min_dist, abs(x - x2) + abs(y - y2))
            distance += min_dist
        return distance

    return distance_pieces(black_pieces) - distance_pieces(white_pieces)
b02a963ef6709afa87eecb12f429a7331417373d
21,524
def committee_to_json(req_data):
    """Convert the request object into JSON without filtering the data.

    Returns None for a missing or empty input; otherwise a list of each
    item's ``json_data()`` result.
    """
    if req_data is None or len(req_data) == 0:
        return None
    return [item.json_data() for item in req_data]
3e6a09b2945ba8be6895ada9aa10d490593533ea
21,525
def calc_area(l, w):
    """Return the area of an l-by-w rectangle.

    Params:
        l, w: real, strictly positive numbers representing the length and
            width of a rectangle.

    Raises:
        ValueError: if either dimension is zero or negative. (The original
            raised a bare ValueError with no message; a message is added for
            easier debugging — the exception type is unchanged.)
    """
    if l <= 0 or w <= 0:
        raise ValueError(
            f"rectangle dimensions must be positive, got l={l!r}, w={w!r}")
    return l * w
6dd805d83223b7817fd5c6efe2653af806421ff7
21,526
def vogal(str):
    """Return True if the given single character is a vowel, False otherwise.

    Comparison is case-insensitive. (The parameter name shadows the builtin
    ``str``; it is kept for backward compatibility with keyword callers.)

    >>> vogal('a')
    True
    >>> vogal('b')
    False
    >>> vogal('E')
    True
    """
    return str.lower() in {'a', 'e', 'i', 'o', 'u'}
821824ec9f01e3b06075b15257d3da48d8782141
21,527
import torch


def sample_points(rays_o, rays_d, near, far, num_samples, perturb=False):
    """Sample points along each ray.

    Args:
        rays_o (num_rays, 3): ray origins
        rays_d (num_rays, 3): ray directions
        near (float): near plane
        far (float): far plane
        num_samples (int): number of points to sample along each ray
        perturb (bool): if True, use randomized stratified sampling

    Returns:
        t_vals (num_rays, num_samples): sampled t values
        coords (num_rays, num_samples, 3): coordinates of the sampled points
    """
    num_rays = rays_o.shape[0]
    # Evenly spaced depths in [near, far], broadcast to every ray.
    depths = torch.linspace(near, far, num_samples, device=rays_o.device)
    t_vals = depths.expand(num_rays, num_samples)
    if perturb:
        # Stratified jitter: shift each sample by up to one bin width.
        jitter = torch.rand_like(t_vals) * (far - near) / num_samples
        t_vals = t_vals + jitter
    # coords[r, s] = origin[r] + t[r, s] * direction[r]
    coords = rays_o.unsqueeze(dim=-2) + t_vals.unsqueeze(dim=-1) * rays_d.unsqueeze(dim=-2)
    return t_vals, coords
28adc68aa8a6d8cf0fd54fa62d23bc2e7a1dc296
21,529
def html(html_data):
    """Build a raw HTML element.

    Provides a way to directly display some HTML.

    Args:
        html_data: The HTML to display.

    Returns:
        A dictionary with the metadata specifying that it is to be rendered
        directly as HTML.
    """
    return {'Type': 'HTML', 'Data': html_data}
898100fe5eef22fb7852b5991b1927ac4b3bf8f6
21,530
def distance_sqr_2d(pt0, pt1):
    """Return the squared Euclidean distance between 2d points pt0 and pt1."""
    dx = pt0[0] - pt1[0]
    dy = pt0[1] - pt1[1]
    return dx**2 + dy**2
23820a4e5f396ddda9bea0a0701c26d416e6567b
21,532
def wlv_datetime(value):
    """Default format for printing datetimes.

    Returns 'DD/MM/YYYY HH:MM:SS'; falsy input yields an empty string.
    """
    if not value:
        return ""
    return value.strftime("%d/%m/%Y %H:%M:%S")
c8bf50d239c7bad195e65f6036c7bcf522d38feb
21,533
import sys


def bins_are_neighbours(bin1, bin2, interact_mtrx):
    """Check if bin1 and bin2 contain interacting atoms, or have atoms in common.

    Args:
        bin1, bin2: iterables of atom indices.
        interact_mtrx: square boolean matrix; interact_mtrx[i, j] is truthy
            when atoms i and j interact.

    Returns:
        True as soon as any (atom in bin1, atom in bin2) pair interacts,
        False otherwise. Terminates the process via sys.exit on an
        out-of-range atom index.
    """
    # Highest valid atom index, derived from the matrix's row count.
    max_atom_idx = len(interact_mtrx[:, 0]) - 1
    for atom_idx1 in bin1:
        if atom_idx1 > max_atom_idx:
            # BUG FIX: the original had an unreachable `continue` after this
            # sys.exit (dead code); it has been removed.
            sys.exit("FATAL ERROR: atom_idx out of range!")
        for atom_idx2 in bin2:
            if atom_idx2 > max_atom_idx:
                sys.exit("FATAL ERROR: atom_idx out of range!")
            if interact_mtrx[atom_idx1, atom_idx2]:
                return True
    return False
27af4fff93a8f072dba3bb6fc4a8eb3b5e1fe80c
21,534
def get_diff_sq(a, b):
    """Compute squared pairwise distances between the rows of a and b.

    Uses the expansion ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i.b_j + ||b_j||^2.

    :param a: tensor a, shape (n, d)
    :param b: tensor b, shape (m, d)
    :return: (n, m) tensor of squared pairwise differences
    """
    cross = a.matmul(b.t())
    sq_norms_a = a.matmul(a.t()).diag().unsqueeze(1)
    sq_norms_b = b.matmul(b.t()).diag().unsqueeze(0)
    return -2 * cross + sq_norms_a + sq_norms_b
fcaec3db6316c8872c24a89ee56e1b6e6b83787b
21,535
def reverse(head):
    """Reverse the order of a singly linked list.

    Args:
        head (SinglyLinkedListNode): The head of the linked list to reverse,
            or None for an empty list.

    Returns:
        (SinglyLinkedListNode): The head of the reversed linked list
        (None when the input list is empty).
    """
    # Standard iterative reversal. The original three-pointer variant crashed
    # with AttributeError on an empty list (head is None); this form handles
    # empty, single-node and longer lists uniformly.
    prev = None
    current = head
    while current is not None:
        nxt = current.next
        current.next = prev
        prev = current
        current = nxt
    return prev
ea18553307f8138ae9d6f0f9fbb32c7c2cd9ff0f
21,536
from typing import List


def D0_dd(phi: List[float], s: List[float]) -> float:
    """Central-difference approximation of the second-order derivative of phi
    along one dimension, as in [3].

    :param phi: the 3 upwind-ordered phi values [phi_{i-1}, phi_i, phi_{i+1}].
    :param s: the 2 distances along the dimension between the upwind-ordered
        phi values [s_{i-1}, s_{i+1}].
    :return: D^0_dd phi_i.
    """
    forward = (phi[2] - phi[1]) / s[1]
    backward = (phi[1] - phi[0]) / s[0]
    return 2.0 / sum(s[:2]) * (forward - backward)
20c012098827e929b08770fe159d87cf29eb15c2
21,537
def separate_categories(data):
    """Separate rows for "script-identifiable edits" from "other edits".

    In each category, for each line, sums the levenshtein distances across
    all edits for that line. A line (e.g. line 6) may have an entry in both
    the 'other' and 'script-identifiable' frames.

    Returns:
        Two DataFrames (other, script), each with integer columns, sorted
        by line.
    """
    # NOTE: the deprecated (and default) `axis=0` kwarg was dropped from the
    # groupby call; behavior is unchanged on all pandas versions.
    summed = data.groupby(["category", "line"]).sum()
    other = (summed.loc["other", :].reset_index()
             .astype(int).sort_values(by=["line"]))
    script = (summed.loc["script-identifiable", :].reset_index()
              .astype(int).sort_values(by=["line"]))
    return other, script
6d7b844bd69402fc32c2deb4c0ee3df131fdf6aa
21,538
import math


def calculate_distance(coord1, coord2, box_length=None):
    """Calculate the distance between two 3D coordinates.

    Parameters
    ----------
    coord1, coord2 : list
        The atomic coordinates [x, y, z].
    box_length : float, optional
        The box length. This function assumes the box is a cube; when given,
        the minimum-image convention is applied per dimension.

    Returns
    -------
    distance : float
        The distance between the two atoms.
    """
    sum_square_diff = 0.0
    for x1, x2 in zip(coord1, coord2):
        diff = x1 - x2
        if box_length is not None:
            # Minimum-image convention: fold the separation into
            # [-box_length/2, box_length/2]. This single formula is
            # equivalent (in squared value) to the original three-branch
            # correction (d <= L/2, L/2 < d <= L, d > L).
            diff = diff - box_length * round(diff / box_length)
        sum_square_diff += diff ** 2
    return math.sqrt(sum_square_diff)
954bdf6dc66ca4edf58b1b96aa63343930e1a604
21,539
import os
import json


def _json_to_bed(fname, header):
    """Convert JSON output into a BED file in preparation for annotation.

    Each input line must be a JSON object; the output rows contain the
    chrom/start/end fields followed by the requested ``header`` fields
    (missing fields become empty strings).
    """
    out_file = "%s.bed" % os.path.splitext(fname)[0]
    columns = ["chrom", "start", "end"] + header
    with open(fname) as in_handle:
        with open(out_file, "w") as out_handle:
            for line in in_handle:
                record = json.loads(line)
                row = "\t".join(str(record.get(col, "")) for col in columns)
                out_handle.write(row + "\n")
    return out_file
c5dbcb3b994cb9167efdb1b6f2f93e177632e8a1
21,540
import mpmath


def Xrange_to_mpfc(arr):
    """Convert a size-1 Xrange array to an mpmath mpf (real) or mpc (complex)."""
    if not arr.is_complex:
        # Rebuild the value from its mantissa / binary-exponent representation.
        return mpmath.ldexp(float(arr._mantissa), int(arr._exp))
    # Recurse on the real and imaginary parts and recombine.
    return Xrange_to_mpfc(arr.real) + 1.j * Xrange_to_mpfc(arr.imag)
0ea875ed5874aa87e922d61aa7b762a354a77061
21,541
def practice_problem2a(sequence, delta):
    """Return a new list with ``delta`` added to each number in ``sequence``.

    What comes in:
      -- A sequence of integers, e.g. [2, 10, 5, -20, 8]
      -- A number delta
    What goes out:
      -- A new list with each element shifted by delta.
    Side effects: None.

    Example:
      [2, 10, 5, -20, 8] with delta 6 returns [8, 16, 11, -14, 14].

    Type hints:
      :type sequence: [int]
      :type delta: int
    """
    # The original accumulated via `seq = seq + [item]`, which copies the
    # whole list on every iteration (quadratic); a comprehension is the
    # idiomatic linear-time equivalent.
    return [item + delta for item in sequence]
31cc3f1272f5563db0f2edcb5c1f5f7052b10751
21,542
import torch


def compute_tv_norm(values, losstype = "l2"):
    """Total-variation penalty over the two spatial dims of ``values``.

    Source: regnerf/internal/math.py

    Args:
        values: [batch, H, W, *]. 3 or more dimensional tensor.
        losstype: "l2" or "l1".

    Returns:
        loss: [batch, H-1, W-1, *] tensor of per-pixel TV terms.

    Raises:
        ValueError: for any other losstype.
    """
    anchor = values[:, :-1, :-1]
    right = values[:, :-1, 1:]
    below = values[:, 1:, :-1]
    if losstype == "l2":
        return ((anchor - right)**2) + ((anchor - below)**2)
    if losstype == "l1":
        return torch.abs(anchor - right) + torch.abs(anchor - below)
    raise ValueError(f"Unsupported TV losstype {losstype}.")
34088d342cac0f1d4ee0ec4946f73aec9cc92639
21,543
from typing import Union
import torch
from typing import Optional
from typing import cast


def _tuple_range_reader(
    input_range: Union[torch.Tensor, float, tuple],
    target_size: int,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    """Given target_size, generate the corresponding (target_size, 2) range
    tensor for element-wise params.

    Accepted forms of ``input_range``:
      * a single non-negative number x            -> each row is (-x, x)
      * a (lo, hi) pair                            -> each row is (lo, hi)
      * target_size numbers x_i                    -> row i is (-x_i, x_i)
      * target_size (lo, hi) pairs                 -> used as-is
    and the tensor equivalents of each of the above.

    Example:
        >>> degree = torch.tensor([0.2, 0.3])
        >>> _tuple_range_reader(degree, 3)  # read degree for yaw, pitch and roll.
        tensor([[0.2000, 0.3000],
                [0.2000, 0.3000],
                [0.2000, 0.3000]])

    Raises:
        ValueError: for a negative scalar or a tensor of the wrong shape.
        TypeError: for a non-tensor input of an unsupported form.
    """
    target_shape = torch.Size([target_size, 2])
    if not torch.is_tensor(input_range):
        # Scalar: symmetric range (-x, x) replicated for every target row.
        if isinstance(input_range, (float, int)):
            if input_range < 0:
                raise ValueError(f"If input_range is only one number it must be a positive number. Got{input_range}")
            input_range_tmp = torch.tensor([-input_range, input_range], device=device, dtype=dtype).repeat(
                target_shape[0], 1
            )
        # (lo, hi) pair: one explicit range replicated for every row.
        elif (
            isinstance(input_range, (tuple, list))
            and len(input_range) == 2
            and isinstance(input_range[0], (float, int))
            and isinstance(input_range[1], (float, int))
        ):
            input_range_tmp = torch.tensor(input_range, device=device, dtype=dtype).repeat(target_shape[0], 1)
        # target_size scalars: independent symmetric range (-s, s) per row.
        elif (
            isinstance(input_range, (tuple, list))
            and len(input_range) == target_shape[0]
            and all(isinstance(x, (float, int)) for x in input_range)
        ):
            input_range_tmp = torch.tensor([(-s, s) for s in input_range], device=device, dtype=dtype)
        # target_size (lo, hi) pairs: fully explicit per-row ranges.
        elif (
            isinstance(input_range, (tuple, list))
            and len(input_range) == target_shape[0]
            and all(isinstance(x, (tuple, list)) for x in input_range)
        ):
            input_range_tmp = torch.tensor(input_range, device=device, dtype=dtype)
        else:
            raise TypeError(
                "If not pass a tensor, it must be float, (float, float) for isotropic operation or a tuple of "
                f"{target_size} floats or {target_size} (float, float) for independent operation. Got {input_range}."
            )
    else:
        # https://mypy.readthedocs.io/en/latest/casts.html cast to please mypy gods
        input_range = cast(torch.Tensor, input_range)
        # 0-d or single-element tensor: symmetric range replicated per row.
        if (len(input_range.shape) == 0) or (len(input_range.shape) == 1 and len(input_range) == 1):
            if input_range < 0:
                raise ValueError(f"If input_range is only one number it must be a positive number. Got{input_range}")
            input_range_tmp = input_range.repeat(2) * torch.tensor(
                [-1.0, 1.0], device=input_range.device, dtype=input_range.dtype
            )
            input_range_tmp = input_range_tmp.repeat(target_shape[0], 1)
        # Length-2 tensor: one (lo, hi) range replicated per row.
        elif len(input_range.shape) == 1 and len(input_range) == 2:
            input_range_tmp = input_range.repeat(target_shape[0], 1)
        # Length-target_size tensor: per-row symmetric ranges.
        elif len(input_range.shape) == 1 and len(input_range) == target_shape[0]:
            input_range_tmp = input_range.unsqueeze(1).repeat(1, 2) * torch.tensor(
                [-1, 1], device=input_range.device, dtype=input_range.dtype
            )
        # Already in (target_size, 2) form: passed through unchanged.
        elif input_range.shape == target_shape:
            input_range_tmp = input_range
        else:
            raise ValueError(
                f"Degrees must be a {list(target_shape)} tensor for the degree range for independent operation."
                f"Got {input_range}"
            )
    return input_range_tmp
f7b02a8c6f5e9d92de7897e988426aadf3847db1
21,544
import re


def is_sale(word):
    """Test if the tokenised line is a record of a cattle sale.

    A line qualifies when at least one token mentions cattle (bull, steer,
    cow, heifer, calf, pair, hc, sc — case-insensitive) and at least one
    token contains price-like characters (digits/commas).
    """
    cattle_clue = r'(bulls?|steers?|strs?|cows?|heifers?|hfrs?|calf|calves|pairs?|hc|sc)'
    price_clue = r'[0-9,]+'
    has_cattle = any(
        re.search(cattle_clue, token, re.IGNORECASE) for token in word)
    has_price = any(re.search(price_clue, token) for token in word)
    return has_cattle and has_price
f4b53e28b18f7f7fdffeebf8884076224660ae48
21,545