content: string (lengths 35 to 416k)
sha1: string (lengths 40 to 40)
id: int64 (0 to 710k)
import os


def generate_unique_logpath(logdir, raw_run_name):
    """Generate a run directory path that does not already exist.

    Args:
        logdir (str): path to the log dir
        raw_run_name (str): base name of the run

    Returns:
        str: path to the output directory
    """
    i = 0
    while True:
        run_name = raw_run_name + "_" + str(i)
        log_path = os.path.join(logdir, run_name)
        if not os.path.isdir(log_path):
            return log_path
        i = i + 1
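A quick usage sketch (hypothetical paths; assumes ./logs/run_0 does not exist yet):

    logdir = generate_unique_logpath("./logs", "run")  # -> "./logs/run_0"
    # if ./logs/run_0 already existed, the next free suffix is used:
    # "./logs/run_1", "./logs/run_2", ...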
0e29d4fea450adb0e6eb1fa699b1d97513b3ec1d
19,108
def linear_search(item, my_list):
    """Searching position by position.

    :param item: the number to look for
    :param my_list: a list of integers
    :return: True if the item is in the list, False otherwise.
    """
    found = False
    for i in range(len(my_list)):
        if item == my_list[i]:
            found = True
    return found
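For illustration, a couple of example calls:

    linear_search(43, [0, 434, 43, 6436, 5])  # True
    linear_search(7, [0, 434, 43, 6436, 5])   # False

Note the loop always scans the whole list; returning True on the first match would be the usual optimization.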
463c23c85626be396c06f56d913fca9b5972fc0e
19,110
from typing import Optional


def build_cfn(
    ret: str,
    name: str,
    *,
    params: Optional[list[str]] = None,
    body: Optional[list[str]] = None,
    vret: Optional[str] = None,
) -> str:
    """Builds a C++ function definition as a string."""
    if body:
        body = ["    " + line for line in body]
    if ret == "string":
        ret = "std::string"
    return "\n".join(
        [
            # returntype functionName(type varname, ... for all params)
            f'{ret} {name}({", ".join(params or [])})',
            "{",
            # function body
            (";\n".join(body) + ";" if body else ""),
            # return varname;
            (f"    return {vret};" if ret != "void" else ""),
            # };
            "};\n",
        ]
    )
5ab97857c14aac2f7e3d2fc3093339dde4f89917
19,111
import random
import string


def random_schema_name() -> str:
    """Generate a random PostgreSQL schema name for testing."""
    return 'temp_{name}'.format(
        name=''.join(
            random.choice(string.ascii_lowercase) for _ in range(10)  # noqa:S311
        ),
    )
350ea729e34d1f8262f39f4a1985ab75b421cf26
19,112
from typing import Iterable


def row2string(row: Iterable[float], sep: str = ', ') -> str:
    """Converts a one-dimensional iterable of floats to string.

    Parameters
    ----------
    row : list or tuple or 1-D ndarray
    sep : str
        string separator between elements (default: ', ')

    Returns
    -------
    str
        string representation of a row
    """
    return sep.join("{0}".format(item) for item in row)
afdd62d390e336e774e5ef80244adac1751202f9
19,113
def images_to_channels(X_batch):
    """This utility converts (Batch Size, TimeSteps, H, W, C)
    => (Batch Size, H, W, C, TimeSteps)
    => (Batch Size, H, W, C * TimeSteps).

    Refer to the input of the Multi Scale Architecture!
    """
    input_data = X_batch.transpose(0, 2, 3, 4, 1)
    input_data = input_data.reshape(list(input_data.shape[:-2]) + [-1])
    return input_data
261c9cb1944df5eb924f761dacda188d33dde1dd
19,114
def estFgPcorrect(edistdeg):
    """Use Newman et al. (1998) distance-based correction for dip-slip
    earthquakes.

    Returns estFgP2.
    """
    # polynomial spline fit to a number of real dip-slip EQs
    a0 = 1.17
    a1 = -7.27e-3
    a2 = 6.01e-5
    estFgP2 = a0 + a1 * edistdeg + a2 * edistdeg ** 2
    return estFgP2
764d59d5166f839b4dfb345f608d2e973447a4ca
19,115
import asyncio


def send_message(connection, *messages, **kwargs):
    """Sends a message to a connected player.

    :param connection: The connection to send the message to.
    :param messages: The message(s) to send.
    :return: A Future for the message(s) being sent.
    """
    return asyncio.ensure_future(connection.send_message(*messages, **kwargs))
779ad884112dbf98e9eb78705edc0bb9dd0f93c7
19,116
import json


def serialize_json(obj) -> bytes:
    """An iPhone sends JSON like this:

        {"characteristics":[{"iid":15,"aid":2,"ev":true}]}

    Some devices (Tado Internet Bridge) depend on this some of the time.
    """
    return json.dumps(obj, separators=(",", ":")).encode("utf-8")
08db1e3f33b3d260c907c70f8263ba43763ec662
19,117
import os


def generate_ssh_key_pair(keys_prefix, keys_dir="/tmp/keys"):
    """Generates a new SSH key pair in the specified location.

    The key generation is conducted using the OS's ssh-keygen command.
    """
    os.system("mkdir -p " + keys_dir)
    os.system("ssh-keygen -f " + keys_dir + "/" + keys_prefix +
              " -N '' -C 'keys@deployer' -b 2048")
    return {'public': keys_dir + "/" + keys_prefix + ".pub",
            'private': keys_dir + "/" + keys_prefix}
993d44e4ab998f262e12862d3d6c18edcde72196
19,118
def minindex(li):
    """Returns the minimum and its position.

    @param      li      list
    @return             tuple (minimum, position)

    .. exref::
        :tag: Base
        :title: minimum with position

        The function `min <https://docs.python.org/3/library/functions.html#min>`_
        returns the minimum of a list but not its position. The first reflex
        is then to re-code a loop over the list while keeping track of the
        position of the minimum.

        .. runpython::
            :showcode:

            li = [0, 434, 43, 6436, 5]
            m = 0
            for i in range(0, len(li)):
                if li[i] < li[m]:
                    m = i
            print(m)

        But there is a trick to obtain the position without reprogramming it.

        .. runpython::
            :showcode:

            li = [0, 434, 43, 6436, 5]
            k = [(v, i) for i, v in enumerate(li)]
            m = min(k)
            print(m)

        The function ``min`` picks the minimum element of a list whose
        elements are couples (element of the first list, its position).
        The minimum is chosen by comparing the elements, and the position
        breaks the ties.
    """
    return min((v, i) for i, v in enumerate(li))
ba03a99131f53154df604a903df4949a50255204
19,119
def calculate_city_state_qty_delta(df):
    """Creates the specific market growth (city + state observation) rate
    using quantity by doing the following:

    1. Creates the city_state_qty_delta_pop feature out of the quantity_of_mortgages_pop
    2. Creates the city_state_qty_delta_nc feature out of the quantity_of_mortgages_nc
    3. Returns the df with the new features
    """
    # create city_state_qty_delta_pop
    df["city_state_qty_delta_pop"] = (
        df.sort_values(["year"])
        .groupby(["city", "state"])[["quantity_of_mortgages_pop"]]
        .pct_change()
    )
    # create city_state_qty_delta_nc
    df["city_state_qty_delta_nc"] = (
        df.sort_values(["year"])
        .groupby(["city", "state"])[["quantity_of_mortgages_nc"]]
        .pct_change()
    )
    return df
1603e51ff64ad6f232dc483af9b947378a70b886
19,120
def falsy_to_none_callback(ctx, param, value):  # noqa: U100
    """Convert falsy object to ``None``.

    Some click arguments accept multiple inputs and instead of ``None`` as a
    default if no information is passed, they return empty lists or tuples.

    Since pytask uses ``None`` as a placeholder value for skippable inputs,
    convert the values.

    Examples
    --------
    >>> falsy_to_none_callback(None, None, ()) is None
    True
    >>> falsy_to_none_callback(None, None, []) is None
    True
    >>> falsy_to_none_callback(None, None, 1)
    1
    """
    return value if value else None
9168083987696f7749ca8444356d32f08bde7e91
19,122
import os
import pickle
from typing import List

import torch


def load_torch_algo(algo_class, algo_config_filename, saved_model_filename,
                    additional_params_filename, eval=True):
    """
    :param algo_class: Class of the Algorithm to be instantiated
    :param algo_config_filename: path to the file containing the initialization parameters of the algorithm
    :param saved_model_filename: path (or list of paths) to the trained model(s)
    :param additional_params_filename: path to the file containing the trained parameters of the algorithm, eg mean, var
    :param eval: boolean to determine if model is to be put in evaluation mode
    :return: object of algo_class with a trained model
    """
    with open(os.path.join(algo_config_filename), "rb") as file:
        init_params = pickle.load(file)
    with open(additional_params_filename, "rb") as file:
        additional_params = pickle.load(file)

    # init params must contain only arguments of algo_class's constructor
    algo = algo_class(**init_params)
    device = algo.device

    if additional_params is not None:
        setattr(algo, "additional_params", additional_params)

    if isinstance(saved_model_filename, List):
        algo.model = [torch.load(path, map_location=device)
                      for path in saved_model_filename]
        if eval:
            [model.eval() for model in algo.model]
    else:
        algo.model = torch.load(saved_model_filename, map_location=device)
        if eval:
            algo.model.eval()
    return algo
16a1948d918de0fa81a2556b2837e041299a85dc
19,123
def create_voting_dict(strlist):
    """
    :param strlist: list of strings representing voting records. Each line is
        space delimited: {last_name} {party} {state} {votes...}
    :return: dictionary mapping last name to list of integer votes:
        -1, 0, 1 (no, abstain, yes)

    >>> strlist = ['Lesko D MD 1 0 -1 0 1', 'Klein R MA 0 1 1 -1 0']
    >>> voting_dict = create_voting_dict(strlist)
    >>> voting_dict['Lesko'][2] == -1
    True
    >>> voting_dict['Klein'][4] == 0
    True
    """
    voting_dict = dict()
    for line in strlist:
        elements = line.split(' ')
        voting_dict[elements[0]] = [int(e) for e in elements[3:]]
    return voting_dict
7b7add11aec2aea7e6929491cee8e5e44baeb6e2
19,127
def pattern_match(s, p):
    """Pattern match used in the 'include' and 'exclude' options."""
    i, j, star_match_pos, last_star_pos = 0, 0, 0, -1
    while i < len(s):
        if j < len(p) and p[j] in (s[i], '?'):
            i, j = i + 1, j + 1
        elif j < len(p) and p[j] == '*':
            star_match_pos, last_star_pos = i, j
            j += 1
        elif last_star_pos > -1:
            i, star_match_pos = star_match_pos + 1, star_match_pos + 1
            j = last_star_pos + 1
        else:
            return False
    while j < len(p) and p[j] == '*':
        j += 1
    return j == len(p)
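The semantics are glob-like: '?' matches exactly one character and '*' matches any (possibly empty) run. A few illustrative calls:

    pattern_match("foo.txt", "*.txt")  # True
    pattern_match("foo.txt", "f?o.*")  # True
    pattern_match("foo.txt", "*.md")   # False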
4b1f16079ca1081a66ad6cdc3ffe18cd53bfc5b1
19,128
def create_constant_tensor(shape, value):
    """Creates tensor with `shape`, filled with `value`."""
    if len(shape) == 1:
        return [value] * shape[0]
    return [create_constant_tensor(shape[1:], value) for _ in range(shape[0])]
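The "tensor" here is a plain nested list, built recursively one dimension at a time. For example:

    create_constant_tensor([2, 3], 0)  # [[0, 0, 0], [0, 0, 0]]
    create_constant_tensor([2], 1.5)   # [1.5, 1.5]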
eacb2f8c5937d0fcfe4eaea4c5ed9bd0e5e4875d
19,129
import re
import string


def strip_lower_remove_punctuation(input_string):
    """
    :param input_string: Input string as is
    :return: string without leading, trailing or repeated white space,
        lower case and no punctuation, ascii characters only
    """
    cleaned_string = re.sub(
        r'\s+', ' ',
        input_string.encode('ascii', 'ignore')
        .decode('ascii')
        .strip()
        .lower()
        .translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
    )
    return cleaned_string
f9e23db9a86b03c0c56f0cc098150d5cccd02d56
19,130
def gen_cat(df):
    """Generate a data frame only including categorical variables.

    Parameters
    ----------
    df : pandas data frame
        whole data frame.

    Returns
    -------
    df_new : pandas data frame
        new data frame only including categorical variables.
    """
    feat_cat = ['derived_msa_md', 'county_code', 'conforming_loan_limit',
                'derived_race', 'derived_sex', 'hoepa_status',
                'interest_only_payment', 'balloon_payment', 'occupancy_type',
                'total_units', 'applicant_race_1', 'applicant_sex',
                'applicant_age_above_62', 'co_applicant_age_above_62',
                'derived_loan_product_type', 'lien_status',
                'open_end_line_of_credit', 'business_or_commercial_purpose']
    df_new = df[feat_cat]
    return df_new
479fde5dd0f25edaed67483eed1bada5b0e17d7e
19,131
def the_box(box, width=3, height=3):
    """Return all coordinates of the fields of the given box number.

    Args:
        box (int): The number of the box.
        width (int): The width of the sudoku.
        height (int): The height of the sudoku.

    Returns:
        list: The coordinates of the box with the given number.

    Raises:
        ValueError: If the box number is invalid.

    Example::

        >>> the_box(0, width=3, height=3)
        [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]
    """
    if not 0 <= box < width * height:
        raise ValueError(
            "box must be greater or equal to 0 and less than %d" % (width * height))
    x = (box % height) * width
    y = (box // height) * height
    return [(y + i, x + j) for i in range(height) for j in range(width)]
e099b13a55310488809149d00df9719998a99191
19,134
def mass2_from_m1_q(mass1, q):
    """Return the secondary mass given samples for mass1 and mass ratio."""
    return mass1 * q
1c902c8564fbeca7b96c6e73384b4b690b2a348d
19,135
import enum
from typing import Type, get_origin


def is_enum_type(type_: Type) -> bool:
    """Return True if the input is an :class:`enum.Enum`."""
    return get_origin(type_) is None and issubclass(type_, enum.Enum)
857e7a0ec5e1c4051551a6322293a71ef659010f
19,136
def average(entry):
    """Provides the average time for an individual test."""
    return entry['total time (s)'] / float(entry['correct answers'] + entry['wrong answers'])
3e833ea6211a4541099a57cb928bcf1859e9f19f
19,137
def use_func_quantize(x, lim, n_levels='128', return_varname='qx'):
    """Quantize x with the given number of levels.

    Parameters:
        1. x: the name of a vector or a string of an R vector
        2. lim: the name of a vector of lower and upper limits or an R vector
        3. n_levels: the name of the variable holding the number of levels, or a string number
        4. return_varname: the return argument name in the R script
    """
    rscript = '%s <- quantize(%s, lim=%s, n.levels=%s)\n' % (return_varname, x, lim, n_levels)
    return rscript
da944161ad8a0771422c55e8ec67150be6de5579
19,138
def clean_line(line):
    """Cleans a single line."""
    ret = line.replace('-', ' ').replace('.', '. ').strip()
    return ret
833580237ffcbf607bd2ac619b721f9d260d3aae
19,139
def upilab5_1_9():
    r"""5.1.9. Exercise UpyLaB 5.4 - Green-blue-red path

    Write a function distance_points() that receives as parameters two
    two-component tuples representing the coordinates of two points, and
    returns the Euclidean distance between those two points.

    As a reminder, the Euclidean distance between the points (x_1, y_1)
    and (x_2, y_2) is computed with the formula:

        dist = \sqrt{(x_1 - x_2)^2 + (y_1 - y_2)^2}

    where, if a denotes a positive number, \sqrt{a} denotes the square root
    of a and corresponds to a^{\frac{1}{2}}.

    Example 1: the following call of the function:
        distance_points((1.0, 1.0), (2.0, 1.0))
    must return: 1.0

    Example 2: the following call of the function:
        distance_points((-1.0, 0.5), (2.0, 1.0))
    must return (approximately): 3.0413812651491097
    """
    def distance_points(p1, p2):
        """Compute the distance between two points in the plane z = 0."""
        x_1, y_1 = p1
        x_2, y_2 = p2
        return ((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2) ** 0.5
7aa9a9f5fa58eb05a3f41b02532d204d606c1d67
19,140
def parse_line(line):
    """Parse a composition line by stripping whitespace and separating the
    isotope and atomic density.

    Parameters
    ----------
    line : str
        line of isotope and composition

    Returns
    -------
    tuple : (str, float)
        (isotope, atomic density)
    """
    # split() strips leading whitespace and tolerates repeated spaces,
    # unlike lstrip() followed by split(" ")
    isotope, atom_density = line.split()
    return (isotope, float(atom_density))
bf695c505e5bb56edfd64f44b6958227737b8c1b
19,141
def peat_soilprofiles():
    """Properties of typical peat profiles."""
    peatp = {
        'sphagnum': {
            'soil_id': 1.0,
            'z': [-0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1., -1.5, -2.0],
            'pF': {  # vanGenuchten water retention parameters
                'ThetaS': [0.945, 0.918, 0.918, 0.918, 0.918, 0.918, 0.918, 0.918, 0.918, 0.918, 0.918, 0.918],
                'ThetaR': [0.098, 0.098, 0.098, 0.098, 0.098, 0.098, 0.098, 0.098, 0.098, 0.098, 0.098, 0.098],
                'alpha': [0.338, 0.072, 0.072, 0.072, 0.072, 0.072, 0.072, 0.072, 0.072, 0.072, 0.072, 0.072],
                'n': [1.402, 1.371, 1.371, 1.371, 1.371, 1.371, 1.371, 1.371, 1.371, 1.371, 1.371, 1.371]},
            'saturated_conductivity': [30*8.99E-05, 20*2.98E-05, 10*9.86E-06, 3.27E-06,
                                       1.08E-06, 3.58E-07, 1.19E-07, 1.16E-07,
                                       1.16E-07, 1.16E-07, 1.16E-07, 1.16E-07],
        },
        'carex': {
            'soil_id': 2.0,
            'z': [-0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1., -1.5, -2.0],
            'pF': {  # vanGenuchten water retention parameters
                'ThetaS': [0.943, 0.874, 0.874, 0.874, 0.874, 0.874, 0.874, 0.874, 0.874, 0.874, 0.874, 0.874],
                'ThetaR': [0.002, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198],
                'alpha': [0.202, 0.030, 0.030, 0.030, 0.030, 0.030, 0.030, 0.030, 0.030, 0.030, 0.030, 0.030],
                'n': [1.349, 1.491, 1.491, 1.491, 1.491, 1.491, 1.491, 1.491, 1.491, 1.491, 1.491, 1.491]},
            'saturated_conductivity': [30*4.97E-05, 20*3.21E-05, 10*2.07E-05, 1.34E-05,
                                       8.63E-06, 5.57E-06, 3.60E-06, 2.32E-06,
                                       1.50E-06, 9.68E-07, 2.61E-07, 1.16E-07],
        },
    }
    return peatp
93fc2a0be9124d67e399571bd4281b7a6652a124
19,142
def getRepositoriesURI(response):
    """Get repositories URIs.

    :param response: dictionary
    :return: list
    """
    git_uris = []
    for page in response.keys():
        for value in response[page]['values']:
            git_uris.append(value['links']['clone'][0]['href'])
    return sorted(git_uris)
fb98d2f755a0bb46dc48e5ec95a827705f84720b
19,144
from typing import Any, Dict, Optional


def init_manifest(
    package_name: str, version: str, manifest_version: Optional[str] = "ethpm/3"
) -> Dict[str, Any]:
    """
    Returns an initial dict with the minimal required fields for a valid manifest.
    Should only be used as the first fn to be piped into a `build()` pipeline.
    """
    return {
        "name": package_name,
        "version": version,
        "manifest": manifest_version,
    }
dfdf014e1b6a9f0e9c8ba7ea6fed064809580f4e
19,145
def format_line(data, linestyle):
    """Formats a list of elements using the given line style."""
    return linestyle.begin + linestyle.sep.join(data) + linestyle.end
6a49a80f876ffe8a8f38e6e987051a0247858c6c
19,148
def extract(string, start='(', stop=')'):
    """Extract the string that is contained between the start and stop strings.

    :param string: str, string to process
    :param start: str, start string
    :param stop: str, stop string
    :return: str, extracted string
    """
    try:
        return string[string.index(start) + 1:string.index(stop)]
    except Exception:
        return string
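A few examples of the behaviour, including the fallback when the delimiters are absent:

    extract("f(x)")                       # "x"
    extract("a[b]", start='[', stop=']')  # "b"
    extract("no parens")                  # "no parens" (returned unchanged)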
75ac7aa9291b63a18c2bea2632099983137e5b7a
19,149
def get_energy_flows(fl):
    """Subsets the flow list for all energy flows.

    :param fl: df in standard flowlist format
    :return: df in standard flowlist format
    """
    list_of_flows = ['Uranium', 'Biomass', 'Hardwood', 'Softwood', 'Wood']
    flows = fl[(fl["Unit"] == "MJ") | (fl['Flowable'].isin(list_of_flows))]
    # Peat is captured in USGS_mineral_resource_flows so exclude here
    flows = flows[flows['Flowable'] != 'Peat']
    flows = flows[flows["Context"].str.startswith("resource")]
    return flows
ee0f7ed5d5b843386630901f6cd418844e9d2438
19,150
def split_kwargs(kwargs, prefixes=None):
    """Helper to separate kwargs into multiple groups.

    Parameters
    ----------
    prefixes : list of strs
        Each entry sets a prefix which puts an entry whose key starts with it
        into a separate group. Group '' corresponds to 'leftovers'.

    :Output: dictionary with keys == `prefixes`
    """
    prefixes = list(prefixes) if prefixes else []  # avoid a mutable default argument
    if '' not in prefixes:
        prefixes = prefixes + ['']
    result = [[] for i in prefixes]
    for k, v in kwargs.items():  # .iteritems() was Python 2 only
        for i, p in enumerate(prefixes):
            if k.startswith(p):
                result[i].append((k.replace(p, '', 1), v))
                break
    resultd = dict((p, dict(x)) for p, x in zip(prefixes, result))
    return resultd
f2bb12565d8bc5b958d3f357f957debb68a8b24f
19,152
def degrees_to_meters(degrees):
    """111195 = (Earth mean radius) * PI / 180
    (supposedly 'maximum error using this method is ~ 0.1%')

    :see: https://stackoverflow.com/questions/12204834/get-distance-in-meters-instead-of-degrees-in-spatialite
    """
    ret_val = 111195 * degrees
    return ret_val
d28bdaa0ac98392bf483d5444eced0dbb9032fcf
19,154
from typing import List

import torch


def _broadcast_tensors_for_cat(
    tensors: List[torch.Tensor], dim: int
) -> List[torch.Tensor]:
    """
    Broadcast all tensors so that they could be concatenated along the specific dim.
    The tensor shapes have to be broadcastable (after the concatenation dim is
    taken out).

    Example:
        Input tensors of shapes [(10,3,5), (1,3,3)] (dim=2) would get broadcasted
        to [(10,3,5), (10,3,3)], so that they could be concatenated along the
        last dim.
    """
    if dim >= 0:
        dims = [dim] * len(tensors)
    else:
        dims = [t.ndim + dim for t in tensors]
    shapes = [list(t.shape) for t in tensors]
    for s, d in zip(shapes, dims):
        s.pop(d)
    shapes_except_cat_dim = [tuple(s) for s in shapes]
    broadcast_shape = torch.broadcast_shapes(*shapes_except_cat_dim)
    final_shapes = [list(broadcast_shape) for t in tensors]
    for s, t, d in zip(final_shapes, tensors, dims):
        s.insert(d, t.shape[dim])
    final_shapes = [tuple(s) for s in final_shapes]
    return [t.expand(s) for t, s in zip(tensors, final_shapes)]
76633eb64ec17ff8777e9f518acdbffb3919376d
19,155
def levenshtein(s1, s2, D=2):
    """Returns True iff the edit distance between the two strings s1 and s2
    is less than or equal to D.
    """
    if D == -1:
        return False
    if len(s1) < len(s2):
        # keep the allowed distance when swapping; the original dropped D here
        return levenshtein(s2, s1, D)
    if len(s2) == 0:
        return len(s1) <= D
    return (levenshtein(s1[1:], s2[1:], D - 1)   # substitution
            or levenshtein(s1, s2[1:], D - 1)    # insertion
            or levenshtein(s1[1:], s2, D - 1)    # deletion
            or (                                 # character match
                (s1[0] == s2[0])
                and levenshtein(s1[1:], s2[1:], D)
            ))
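With the default budget D=2, for example:

    levenshtein("cat", "cut")  # True  (1 substitution)
    levenshtein("cat", "dog")  # False (3 substitutions needed)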
c1cf65c9a5d34f0f85a7db60ec3adce9d1ffde61
19,156
def seconds_to_milliseconds(seconds: float) -> int:
    """Converts from seconds to milliseconds.

    :param seconds: time in seconds
    :return: converted time rounded to nearest millisecond
    """
    return round(seconds * 1000)  # int() would truncate rather than round
b47c0e7d57fea9103d826cd7d2cab51d3ded124a
19,159
import os
import pickle


def get_df_jobs_data(
    exclude_wsl_paths=True,
    drop_cols=False,
):
    """
    from methods import get_df_jobs_data
    df_jobs_data = get_df_jobs_data()
    df_jobs_data
    """
    #| - get_df_jobs_data
    # #########################################################
    path_i = os.path.join(
        os.environ["PROJ_irox_oer"],
        "dft_workflow/job_processing",
        "out_data/df_jobs_data.pickle")
    with open(path_i, "rb") as fle:
        df_jobs_data = pickle.load(fle)
    # #########################################################

    if exclude_wsl_paths:
        df_jobs_data = df_jobs_data[df_jobs_data.compenv != "wsl"]

    #| - Drop columns
    if drop_cols:
        drop_cols_list = [
            "incar_params",
            "submitted",
            "isif",
            "ediff_conv_reached_dict",
            "true_false_ratio",
            "num_nonconv_scf",
            "num_conv_scf",
        ]
        df_jobs_data = df_jobs_data.drop(columns=drop_cols_list)
    #__|

    return(df_jobs_data)
    #__|
d7aa4afe9092313296e2dac8e9bd46ea387137f3
19,160
def get_upsets_by_elo(player):
    """
    Returns a list of SmashSet objects for a certain player where that player
    won and it was considered an "upset", considering a player with more Elo
    for that game to be the normally better player.
    """
    sets = [_set for _set in player.get_sets() if _set.won_by_player(player)]

    def __is_upset(_set):
        opponent_elo = _set.loser.get_elo(_set.game)
        if opponent_elo is None:
            return False
        return player.get_elo(_set.game) < opponent_elo

    return list(filter(__is_upset, sets))
57c924a4d6c3f2054202322d12cee60072be98b3
19,161
def quantities_summary(M_dict):
    """Pulls out the materials quantities data and creates a general summary
    with keys: story, member type, material type, material name.

    display(pd.DataFrame.from_dict(quantities_summary(MEMBERS_dict), orient='index'))
    """
    dlist = []
    [[dlist.append(((k[1], k[0], data.get('MEMTYPE'), datum.mat_type, datum.material), datum))
      for datum in data.get('Memb_Agg_Props', [])]
     for k, data in M_dict.items()]

    #levels = set([k[0] for k, _ in dlist])
    #mem_types = set([str(k[1])[0] for k, _ in dlist])
    #mem_class = set([k[2] for k, _ in dlist])
    #mats = set([k[3] for k, _ in dlist])
    #mat_names = set([k[4] for k, _ in dlist])
    #display(f'levels, {levels}, mem_types, {mem_types}, ' + \
    #        f'mem_class, {mem_class}, mats, {mats}, mat_names, {mat_names}')
    #print()
    #print('D_LIST')
    #print(dlist[:20], '\n')

    sum_dict = dict()
    # Carry out the summations
    for i, (k, v) in enumerate(dlist):
        #if i < 1: print(f'k: {k}, v: {v}')
        key = k[0], k[2], k[3], k[4]
        # start from zeros, then accumulate every datum (the original skipped
        # the first datum for each key, so totals undercounted)
        rdict = sum_dict.get(key, {'length': 0, 'area': 0, 'volume': 0, 'weight': 0})
        for d, w in zip(['length', 'area', 'volume', 'weight'],
                        [v.length, v.area, v.volume, v.weight]):
            rdict[d] = rdict[d] + w
        sum_dict[key] = rdict
    return sum_dict
538625500f32a66906e5b7887d52451ca27c1665
19,163
def python_exporter(op_def, context):
    """Export the python operators."""
    return None, None
2f100e0d7da9def958a83dd05901a773f80582b6
19,167
import os


def find_command(command):
    """Searches the system's PATH for a specific command that is executable
    by the user. Returns the first occurrence of an executable binary in the
    PATH, or None if the command is not available.
    """
    if 'PATH' not in os.environ:
        return None
    for path in os.environ['PATH'].split(os.pathsep):
        command_file = os.path.join(path, command)
        if os.path.isfile(command_file) and os.access(command_file, os.X_OK):
            return command_file
    return None
a8f059f79574b77f1cc4ac88482566d641e0c313
19,168
def overrides(method):
    """Decorator to indicate that the decorated method overrides a method in
    a superclass.

    The decorator code is executed while loading the class. Using this method
    should have minimal runtime performance implications.

    This is based on my idea about how to do this and fwc:s highly improved
    algorithm for the implementation.

    fwc:s algorithm : http://stackoverflow.com/a/14631397/308189
    my answer : http://stackoverflow.com/a/8313042/308189

    How to use:

        from overrides import overrides

        class SuperClass(object):
            def method(self):
                return 2

        class SubClass(SuperClass):
            @overrides
            def method(self):
                return 1

    :raises AssertionError if no match in super classes for the method name
    :return method with possibly added (if the method doesn't have one)
        docstring from super class
    """
    # nop for now due to py3 compatibility
    return method
    # for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
    #     if hasattr(super_class, method.__name__):
    #         if not method.__doc__:
    #             method.__doc__ = getattr(super_class, method.__name__).__doc__
    #         return method
    # raise AssertionError('No super class method found for "%s"' % method.__name__)
99b5a8b953d02c57aa28e929415e6df6e50a9b93
19,169
def prepare_response(message, status=None, data=None):
    """Returns a response dictionary needed to apply to wrappers."""
    if status and data:
        return {'message': message, 'status': status, 'data': data}
    elif status:
        return {'message': message, 'status': status}
    else:
        return {'message': message}
f34ba9b334456455dd2451cc581da74b705ab01f
19,174
def fit_range(x, inmin, inmax, outmin, outmax):
    """Maps a value from an interval to another.

    Args:
        x (int or float): the input value
        inmin (int or float): The minimum of the input range
        inmax (int or float): The maximum of the input range
        outmin (int or float): The minimum of the desired range
        outmax (int or float): The maximum of the desired range

    Returns:
        int or float: the computed value
    """
    return (x - inmin) * (outmax - outmin) / (inmax - inmin) + outmin
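For instance, mapping 5 from [0, 10] onto [0, 100]:

    fit_range(5, 0, 10, 0, 100)    # 50.0
    fit_range(0.5, 0, 1, 32, 212)  # 122.0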
b37a88b1dc0e9e6b4c83d74232a63bbbdcf13243
19,176
def device_id_generate_doc_template_values(url_root):
    """Show documentation about deviceIdGenerate."""
    required_query_parameter_list = [
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    optional_query_parameter_list = [
    ]

    try_now_link_variables_dict = {
    }

    api_response = '{\n' \
                   '  "voter_device_id": string (88 characters long),\n' \
                   '}'

    template_values = {
        'api_name': 'deviceIdGenerate',
        'api_slug': 'deviceIdGenerate',
        'api_introduction':
            "Generate a transient unique identifier (voter_device_id - stored on client) "
            "which ties the device to a persistent voter_id (mapped together and stored on the server). "
            "Note: This call does not create a voter account -- that must be done in voterCreate.",
        'try_now_link': 'apis_v1:deviceIdGenerateView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
    }
    return template_values
ed0648038f26207682ff25dac76cf1bd1c0b30b9
19,177
def factory(mac1, mac2):
    """Generate a mediatek factory partition."""
    header = [0x20, 0x76, 0x03, 0x01]
    return ''.join(map(chr, header + mac1 + [0xff] * 30 + mac2))
b83dc7ed9410a25570edf30080560803f246cbbd
19,178
from datetime import datetime


def timeInRange(start: datetime, end: datetime, x: datetime):
    """Return True if x falls inside the range [start, end], wrapping around
    when start > end.

    :param start: datetime
    :param end: datetime
    :param x: datetime
    :return: bool
    """
    if start <= end:
        return start <= x <= end
    else:
        return start <= x or x <= end
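Because the comparison wraps when start > end, ranges that cross midnight work too; a sketch (times expressed as datetimes on the same day):

    start = datetime(2021, 1, 1, 23, 0)  # 23:00
    end = datetime(2021, 1, 1, 1, 0)     # 01:00
    timeInRange(start, end, datetime(2021, 1, 1, 23, 30))  # True
    timeInRange(start, end, datetime(2021, 1, 1, 12, 0))   # False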
7dbe9f2918cd7a3cee68d8fd4e09fb7222bda39f
19,179
def downsize_image(image, n):
    """Downsizes an image by selecting every n-th pixel. It might be possible
    to resize to an arbitrary resolution, but I didn't want to deal with
    interpolation and strange behaviour with semi-transparent pixels.
    """
    return image[::n, ::n]
102fb43400a739ea6b353187eb3ecd6a1607d345
19,180
def data_loss(df_clean, df_raw):
    """Returns the data loss in percent."""
    # the loss is the share of rows dropped, not the share retained
    return f"{round((1 - df_clean.shape[0] / df_raw.shape[0]) * 100, 3)}% data loss"
0da1e75643c18b57f1952ec0b94aa996a0d1707f
19,182
def getDockerRelease(gpu=False):
    """Get the most recent docker release."""
    r = "quay.io/comparative-genomics-toolkit/cactus:v1.2.3"
    if gpu:
        r += "-gpu"
    return r
f23afe2b4eaef37a70351489a59e9468f0528341
19,183
def GetRoleName(effective_ids, project):
    """Determines the name of the role a member has for a given project.

    Args:
        effective_ids: set of user IDs to get the role name for.
        project: Project PB containing the different member lists.

    Returns:
        The name of the role.
    """
    if not effective_ids.isdisjoint(project.owner_ids):
        return 'Owner'
    if not effective_ids.isdisjoint(project.committer_ids):
        return 'Committer'
    if not effective_ids.isdisjoint(project.contributor_ids):
        return 'Contributor'
    return None
bc0440b12779ea1caaccb868493ed05516fb2738
19,184
def create_obj(conn, new_object):
    """Create a new object in the allObjects table.

    Parameters
        new_object (tuple): a tuple containing (name, )

    Returns
        obj_id (int): id of the newly created object in the database
    """
    sql = ''' INSERT INTO allObjects(name) VALUES(?) '''
    cur = conn.cursor()
    cur.execute(sql, new_object)
    conn.commit()
    return cur.lastrowid
d4995345ba6d37b5d45a9ab424e9fd212bd712b8
19,185
import os


def include_path_to_flags(include_path_json: dict, parent_target: dict) -> list:
    """Convert the list of include flags returned by the server to a list of flags.

    include_path_json is the json list returned by the server in a file group object.
    """
    result = list()
    for include in include_path_json:
        if include.get('isSystem', False):
            result.append("-isystem")
        else:
            result.append("-I")
        # According to cmake-server(7) the path is either absolute or relative
        # to the source directory.
        if os.path.isabs(include['path']):
            result.append(include['path'])
        else:
            result.append(os.path.join(parent_target['source_dir'], include['path']))
    return result
f383d5c4ed98628d2398216d5ae913eb123b0325
19,186
def to_fully_staffed_matrix_3(d):
    """
    Parameters
    ----------
    d : dict<object, object>

    Returns
    -------
    dict<object, object>
    """
    # iterate over a snapshot: inserting into d while iterating d.items()
    # directly raises "dictionary changed size during iteration"
    for key, val in list(d.items()):
        d[val] = key
    return d
d694b975c3e5930fc7a508d8d8acef4fff49a620
19,189
def five_by_five_shape(n):
    """Determines the shape of a five by five view, allowing for fewer than
    25 observations.

    Parameters:
        n: length of subject list to display

    Returns:
        Dimensions of grid/subplots as (nrows, ncols)
    """
    if n // 5 == 0:
        return (1, n % 5)
    elif n % 5 > 0:
        return ((n // 5) + 1, 5)
    else:
        return (n // 5, 5)
d42bfff12064ecfb733c3068fffb87e75fcfee3c
19,191
def collect_psi4_options(options):
    """Looks through the dictionary of psi4 options being passed in and picks
    out the basis set and the QM method used (Calcname), which are appended
    to the list of psi4 options.
    """
    keywords = {}
    for opt in options['PSI4']:
        keywords[opt] = options['PSI4'][opt]
    basis = keywords['BASIS']
    del keywords['BASIS']
    QM_method = keywords['CALCNAME']
    del keywords['CALCNAME']
    return QM_method, basis, keywords
aa5729cbf06fb5f75fb535b97e2f2f043a3c9c23
19,192
def update_aliases(df, new_aliases=None, inplace=False):
    """For each object:
    1. Add new aliases if any
    2. Make sure unique pipe-separated list with OBJECT first in list

    Args:
        df (pd.DataFrame): dataframe with an ALIASES column
        new_aliases (Series, array, list): aliases to append for each object.
            If Series, make sure indexes match the dataframe
        inplace (bool): If true, edit df directly, otherwise return a copy
    """
    if not inplace:
        df = df.copy()

    # Make sure all strings
    df.ALIASES = df.ALIASES.fillna('')

    # Add new aliases
    if new_aliases is not None:
        new_aliases = new_aliases.fillna('')
        df.ALIASES = df.ALIASES.str.cat(new_aliases, sep='|')

    # Make sure 2MASS and Gaia included
    aliases = df.ALIASES.copy()
    mask_gaia = df.GAIADR2ID.notnull()
    aliases[mask_gaia] = aliases[mask_gaia].str.cat(
        'Gaia DR2 ' + df.GAIADR2ID[mask_gaia]
    )
    mask_2mass = df['TWOMASSID'].notnull()
    aliases[mask_2mass] = aliases[mask_2mass].str.cat(
        '2MASS J' + df['TWOMASSID'][mask_2mass]
    )

    # Make sure object is not in the current list, and that the list is unique
    objs = df.OBJECT.tolist()
    aliases = df.ALIASES.apply(
        # Remove object name and empty strings
        lambda row: [val for val in row.split('|')
                     if val not in objs + ['']]
    )
    aliases = aliases.apply(set).str.join('|')  # Unique and join with pipe

    # Add object name as first alias
    al_mask = aliases.str.len() > 0
    df.loc[al_mask, 'ALIASES'] = df.OBJECT.str.cat(aliases, sep='|')
    df.loc[~al_mask, 'ALIASES'] = df.OBJECT.copy()

    return None if inplace else df
70d6629027b66fb4eedd31bf054b0e3eaea68a89
19,193
import re


def find_wildcards(pattern):
    """
    >>> find_wildcards('{foo}/{bar}')
    ['bar', 'foo']
    """
    re_wildcard = re.compile(r'\{(\w+)\}')
    found = [mo.group(1) for mo in re_wildcard.finditer(pattern)]
    return list(sorted(found))
00376bb421d6a26811dabff7d4ea8d1a116d4bd5
19,194
import os


def check_cache_path_writable(cache_path):
    """If the defined cache path is not writable, change it to a folder named
    cext_cache under the current directory where the script runs.
    """
    try:
        check_file = os.path.join(cache_path, "check.txt")
        with open(check_file, "w") as f:
            f.write("check")
        os.remove(check_file)
        return cache_path
    except (OSError, IOError):
        new_path = os.path.realpath("cext_cache")
        print(
            "The cache directory {old_path} is not writable. Changing to directory {new_path}.".format(
                old_path=cache_path, new_path=new_path
            )
        )
        return new_path
ccf9e1ce48428f8b3e27d109f98476f16e5e30b2
19,195
import os


def pipe():
    """Create a one-way pipe for sending data to child processes.

    Returns:
        A read/write pair of file descriptors (which are just integers) on
        posix or system file handles on Windows. The pipe may be used either
        by this process or subprocesses of this process but not globally.
    """
    read, write = os.pipe()

    # The default behaviour of pipes is that they are process specific,
    # i.e. they can only be used for this process to talk to itself. Setting
    # these means that child processes may also use these pipes.
    os.set_inheritable(read, True)
    os.set_inheritable(write, True)

    # On Windows, file descriptors are not shareable. They need to be converted
    # to system file handles here then converted back with open_osfhandle() by
    # the child.
    if os.name == "nt":
        from msvcrt import get_osfhandle  # import missing from the original
        read, write = get_osfhandle(read), get_osfhandle(write)

    return read, write
05585cb1c03a0395f5e20ae47a6f78243828644a
19,196
def _get_BD_range(x):
    """Getting the BD range from a fraction ID (e.g., "1.710-1.716").

    Parameters
    ----------
    x : str
        fraction ID

    Returns
    -------
    tuple -- BD start, middle, end
    """
    if x.startswith('-'):
        [start, _, end] = x.rpartition('-')
    else:
        [start, _, end] = x.partition('-')

    if start.startswith('-inf'):
        end = round(float(end), 3) - 0.001
        end = round(end, 3)
        mid = end
    elif end.endswith('inf'):
        start = round(float(start), 3) + 0.001
        start = round(start, 3)
        mid = start
    else:
        start = round(float(start), 3)
        end = round(float(end), 3)
        mid = round((end - start) / 2 + start, 3)
    return start, mid, end
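Example outputs for a regular fraction and an open-ended one:

    _get_BD_range("1.710-1.716")  # (1.71, 1.713, 1.716)
    _get_BD_range("-inf-1.710")   # ('-inf', 1.709, 1.709)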
5a54452446696e5ae2230981683b4e52de7c7ae0
19,197
import torch


def rot90(x, k=1):
    """Rotate a batch of images by 90 degrees k times."""
    return torch.rot90(x, k, (2, 3))
081a495f66e06b816461754b809407af7c7a00bf
19,198
def _chi2_ls(f):
    """Sum of the squares of the residuals.

    Assumes that f returns residuals.

    Minimizing this will maximize the likelihood for a
    data model with gaussian deviates.
    """
    return 0.5 * (f ** 2).sum(0)
af223f48bc0beffa9a99fba872c769a94fbe3235
19,200
def create_gas_command_enable(packer, gas_amount):
    """Creates a CAN message for the Honda DBC GAS_COMMAND."""
    enable = gas_amount > 0.001

    values = {}  # initialize the values dict empty initially
    if enable:
        x_gas_enable = 0xCC05
    else:
        x_gas_enable = 0x0000

    print("kiacan.py x_gas_enable")
    print(x_gas_enable)

    if enable:
        values["THROTTLE_ENABLE_magic"] = x_gas_enable
        print("kiacan.py gas command enable")
        print(values)

    return packer.make_can_msg("THROTTLE_ENABLE", 0, values)
dc0d07bfa8c21b05054d848e08e374b85e5979f2
19,201
import os


def expected_path(metadata_path, test_path):
    """Path to the expectation data file for a given test path.

    This is defined as metadata_path + relative_test_path + .ini

    :param metadata_path: Path to the root of the metadata directory
    :param test_path: Relative path to the test file from the test root
    """
    args = list(test_path.split("/"))
    args[-1] += ".ini"
    return os.path.join(metadata_path, *args)
cc4f4a7b0664f9c08a510a506319ba29b98916b5
19,202
def build_scheduler(scheduler_class, warmup_steps, *args, **kwargs):
    """Scheduler wrapper to enable warmup; args and kwargs are passed to
    initialize the scheduler class.
    """
    class Scheduler(scheduler_class):
        def __init__(self, warmup_steps, *args, **kwargs):
            self.warmup_steps = warmup_steps
            super().__init__(*args, **kwargs)

        def get_lr(self):
            if self.last_epoch < self.warmup_steps:
                return [base_lr * self.last_epoch / self.warmup_steps
                        for base_lr in self.base_lrs]
            return super().get_lr()

    return Scheduler(warmup_steps, *args, **kwargs)
6eaa33748dba3261ddbf1a72af00eab6e6b4d9d8
19,203
from typing import Callable, Collection


def set_str(l: Collection[int], *, use_hex: bool = False,
            minlen: int = 4, maxlen: int = 8) -> str:
    """Compact representation of a sorted set of numbers."""
    assert minlen >= 3
    assert maxlen >= minlen
    tostr: Callable[[int], str] = hex if use_hex else str  # type: ignore
    l = sorted(set(l))
    start = l[0]
    end = l[-1]
    step = l[1] - l[0]
    if len(l) <= minlen:
        return f"{{{', '.join(tostr(i) for i in l)}}}"
    if l == list(range(start, end + 1, step)):
        fst = tostr(start)
        snd = tostr(start + step)
        lst = tostr(end)
        return f"{{{fst}, {snd}, ..., {lst}}}"
    if len(l) <= maxlen:
        return f"{{{', '.join(tostr(i) for i in l)}}}"
    return f"{{{tostr(l[0])},...(irregular)}}"
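A few examples of the three output forms:

    set_str([3, 1, 2])                         # '{1, 2, 3}'           (small set)
    set_str(range(1, 11))                      # '{1, 2, ..., 10}'     (arithmetic progression)
    set_str([1, 2, 5, 7, 11, 13, 17, 19, 23])  # '{1,...(irregular)}'  (long irregular set)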
edac37387e6310cc75039ceb3140b344e5e61aad
19,204
def get_bond_score(bond, nn_bonds_by_site, cache):
    """Get a score for a single bond.

    :param bond: The bond (el1, el2, length)
    :param nn_bonds_by_site: All nearest neighbor bonds in the compound
    :param cache: OptimizerCache object
    :return: bond score. Lower is better. For bonds that are ordered
        differently than ideal, the score will be appended with the difference
        between the bond lengths (some notion of how "wrong" it is).
    """
    score = 0.0
    ideal_bond_length = cache.get_ideal_bond_length(*bond.get_normalized_element_names())
    for other_bond in nn_bonds_by_site.values():
        other_ideal_bond_length = cache.get_ideal_bond_length(*other_bond.get_normalized_element_names())

        # Get the ideal and actual ordering for the bond lengths
        ideal_comp = ideal_bond_length < other_ideal_bond_length
        actual_comp = bond < other_bond

        # If the orderings are different, or the two bonds contain the same pair of elements
        if ideal_comp != actual_comp or ideal_bond_length == other_ideal_bond_length:
            # Difference between the two actual bond lengths
            diff = abs(bond.length - other_bond.length)
            ideal_diff = abs(ideal_bond_length - other_ideal_bond_length)
            if ideal_bond_length == other_ideal_bond_length:
                bond_score = (pow(max(diff - 0.1, 0.0), 2) + pow(max(ideal_diff - 0.1, 0.0), 2)) / 2
            else:
                bond_score = (pow(diff, 2) + pow(ideal_diff, 2)) / 2
            score += bond_score
    return score
ebd3ab4e20908a05006dbfd930581de495ed0677
19,205
def healpix_ijs2ijd(istar, jstar, nside):
    """Converts from healpix i and j star to i and j dash, which is useful
    for finding neighbours.

    Parameters
    ----------
    istar : array
        Healpix integer i star index.
    jstar : array
        Healpix integer j star index.

    Returns
    -------
    idash : int
        Alternate pixel index along each ring. This is for pixel
        transformations as this maps exactly to healpix y without a factor.
    jdash : int
        Alternate pixel index along each ring. This is for pixel
        transformations as this maps exactly to healpix x without a factor.
    """
    istar = istar.astype('float') + 0.5
    jstar = jstar.astype('float') + 0.5
    jdash = (istar + jstar - nside) / 2
    idash = (jstar - istar) / 2
    return idash, jdash
10de3528bfb8437d25c6d572a62f0c036fcb454e
19,206
import shutil
import tempfile


def temp_directory(fn):
    """Decorator for tests needing a temporary directory."""
    def wrapper(*a, **kw):
        tempdir = tempfile.mkdtemp()
        kw['tempdir'] = tempdir
        try:
            fn(*a, **kw)
        finally:
            shutil.rmtree(tempdir)
    return wrapper
dd052fbd9312034947674dcfdb2c4c2faa022f00
19,207
def num_words(tokens):
    """Given a list of words, return the number of words (int)."""
    return len(tokens)
89388c467380803e834d2ef287d33d17b882d666
19,208
def is_in_group(user, group_name):
    """Take a user and a group name, and return `True` if the user is in
    that group.
    """
    return user.groups.filter(name=group_name).exists()
797853cd5000cb1404545e3f20d38703c7a058dd
19,209
def Q(p0, p1, v0, v1, t0, t1, t):
    """Basic Hermite curve."""
    s = (t - t0) / (t1 - t0)
    h0 = (2 * s + 1) * (s - 1) * (s - 1)
    h1 = (-2 * s + 3) * s * s
    h2 = (1 - s) * (1 - s) * s * (t1 - t0)
    h3 = (s - 1) * s * s * (t1 - t0)
    return h0 * p0 + h1 * p1 + h2 * v0 + h3 * v1
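The basis functions satisfy h0 + h1 = 1 with h0(0) = 1 and h1(1) = 1, so the curve interpolates the endpoints; for example:

    Q(0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0)  # 0.0 (= p0)
    Q(0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0)  # 1.0 (= p1)
    Q(0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.5)  # 0.5 (midpoint, with zero end velocities)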
98974163fe98ec73151ed2f4927ec69754a7ff7d
19,210
def _propagate_pair(pair, go_dict):
    """Propagates a pair of annotations.

    For a given pair of annotation terms, the GO annotations will be replaced
    by a set of their (recursive) child terms (including itself). Other types
    of annotations are left untouched, but converted to a 1-member set.

    Parameters
    ----------
    pair : tuple
        A sorted tuple of annotation terms, one for the host and one for the
        pathogen, e.g. ('h@GO:0030133', 'p@IPR009304')
    go_dict : dict
        A dictionary containing the GO hierarchy. Constructed via the
        obo_tools.importOBO() function in the goscripts package.

    Returns
    -------
    tuple
        A sorted tuple of annotation term sets, one for the host and one for
        the pathogen. Each element in the tuple consists of a set of terms,
        e.g. the GO term itself and all of its descendants.
    """
    # create empty list to store the propagated (child) annotations for the
    # two parent annotations in the pair (in same order as original pair)
    propagated_pair = []
    # for both annotations in the pair, propagate through GO hierarchy
    for term in pair:
        # only for GO terms, not IPR
        if 'GO' in term:
            prefix = term[:2]
            go_object = go_dict.get(term[2:])
            # append original annotation if it can't be found in GO dict
            if not go_object:
                propagated_pair.append([term])
            else:
                # store all child terms of parent term in a list
                child_terms = [prefix + i for i in go_object.recursive_children]
                # add parent term itself and remove duplicates
                propagation_set = set(child_terms) | set([term])
                # add propagated annotations to storage list
                propagated_pair.append(propagation_set)
        else:
            # store original term if it's not a GO term
            propagated_pair.append({term})
    # convert the length-2 list of annotation lists (1 for each parent
    # annotation) to a tuple
    #
    # e.g. ('h@GO:0060384', 'p@GO:0016787') ->
    # ({'h@GO:0098546', 'h@GO:0030553', 'h@GO:0035438', 'h@GO:0030552',
    #   'h@GO:0061507'},
    #  {'h@GO:0098546', 'h@GO:0030553', 'h@GO:0035438', 'h@GO:0030552',
    #   'h@GO:0061507'})
    return tuple(propagated_pair)
b4745e2a01075c92c862ad23defe17a674b0317e
19,211
def find_or_create(find, create):
    """Given a find and a create function, create a resource iff it doesn't exist."""
    result = find()
    if result:
        return result
    else:
        return create()
6cfbbad5efd05eb89c01220f3d0d6bd258c01382
19,212
def hash128(str):
    """Return 7-bit hash of string."""
    hash = 0
    for char in str:
        hash = (31 * hash + ord(char)) & 0xFFFFFFFF
        hash = ((hash + 0x80000000) & 0xFFFFFFFF) - 0x80000000
    # EQUELLA reduces hashes to values 0 - 127
    hash = hash & 127
    return hash
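For example (results always land in 0-127):

    hash128("")    # 0
    hash128("a")   # 97
    hash128("ab")  # 33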
4baf17e582a608f7907cbf63a14d9c82d0414c34
19,213
def accuracy(predictions, targets):
    """Computes raw accuracy (True Predictions) / (All Predictions).

    Args:
        predictions (list): a list of predicted labels
        targets (list): a list of gold labels

    Returns:
        float: the raw accuracy between the predictions and the gold labels
    """
    assert len(predictions) == len(targets)
    count_pos = 0
    for predic, gold in zip(predictions, targets):
        if predic == gold:
            count_pos += 1
    return float(count_pos) / len(targets)
0547e2f5fba2c858dbcd85720f18541b9e2a7f98
19,214
from fractions import Fraction
from functools import reduce
from operator import mul


def numCombs(n, k):
    """n choose k algorithm."""
    return int(reduce(mul, (Fraction(n - i, i + 1) for i in range(k)), 1))
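Using exact Fractions avoids intermediate rounding; for example:

    numCombs(5, 2)   # 10
    numCombs(52, 5)  # 2598960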
bee91e3afc5b20e6f93d6e299f33bc23654318ec
19,215
def hasrepression(graph, cycle):
    """Return whether the cycle (list of node IDs) in the graph (NetworkX
    DiGraph) includes any repressions.
    """
    return any(graph.edges[cycle[i], cycle[(i + 1) % len(cycle)]]['repress']
               for i in range(len(cycle)))
1eaa9638512ed5b283290fedf9bc9ba499c13748
19,216
def expiration_date_filter(all_options, expirationDate):
    """Takes a list of ALL_OPTIONS, and returns a list of only those with the
    argument EXPIRATIONDATE.
    """
    return [option for option in all_options
            if option['expiration_date'] == expirationDate]
4a05dbdeae84639db624c673046bdd1783adf88d
19,217
def re(inputStr):
    """Reverse the string. (e.g. re('hello') -> olleh)"""
    inputList = list(str(inputStr))
    inputList.reverse()
    return "".join(inputList)
e0187ca6b3d76dc72e8ad56cad80140b0382b9bd
19,219
def obtained_all_dist_params_dict():
    """Dictionary with all continuous distributions and their default parameters."""
    obtained_dict = {
        'alpha': {'a': '3.57', 'loc': '0.00', 'scale': '1.00'},
        'anglit': {'loc': '0.00', 'scale': '1.00'},
        'arcsine': {'loc': '0.00', 'scale': '1.00'},
        'argus': {'chi': '1.00', 'loc': '0.00', 'scale': '1.00'},
        'beta': {'a': '2.31', 'b': '0.63', 'loc': '0.00', 'scale': '1.00'},
        'betaprime': {'a': '5.00', 'b': '6.00', 'loc': '0.00', 'scale': '1.00'},
        'bradford': {'c': '0.30', 'loc': '0.00', 'scale': '1.00'},
        'burr': {'c': '10.50', 'd': '4.30', 'loc': '0.00', 'scale': '1.00'},
        'burr12': {'c': '10.00', 'd': '4.00', 'loc': '0.00', 'scale': '1.00'},
        'cauchy': {'loc': '0.00', 'scale': '1.00'},
        'chi': {'df': '78.00', 'loc': '0.00', 'scale': '1.00'},
        'chi2': {'df': '55.00', 'loc': '0.00', 'scale': '1.00'},
        'cosine': {'loc': '0.00', 'scale': '1.00'},
        'crystalball': {'beta': '2.00', 'm': '3.00', 'loc': '0.00', 'scale': '1.00'},
        'dgamma': {'a': '1.10', 'loc': '0.00', 'scale': '1.00'},
        'dweibull': {'c': '2.07', 'loc': '0.00', 'scale': '1.00'},
        'erlang': {'a': '10.00', 'loc': '0.00', 'scale': '1.00'},
        'expon': {'loc': '0.00', 'scale': '1.00'},
        'exponnorm': {'K': '1.50', 'loc': '0.00', 'scale': '1.00'},
        'exponpow': {'b': '2.70', 'loc': '0.00', 'scale': '1.00'},
        'exponweib': {'a': '2.89', 'c': '1.95', 'loc': '0.00', 'scale': '1.00'},
        'f': {'dfn': '29.00', 'dfd': '18.00', 'loc': '0.00', 'scale': '1.00'},
        'fatiguelife': {'c': '29.00', 'loc': '0.00', 'scale': '1.00'},
        'fisk': {'c': '3.09', 'loc': '0.00', 'scale': '1.00'},
        'foldcauchy': {'c': '4.72', 'loc': '0.00', 'scale': '1.00'},
        'foldnorm': {'c': '1.95', 'loc': '0.00', 'scale': '1.00'},
        'gamma': {'a': '1.99', 'loc': '0.00', 'scale': '1.00'},
        'gausshyper': {'a': '13.76', 'b': '3.12', 'c': '2.51', 'z': '5.18', 'loc': '0.00', 'scale': '1.00'},
        'genexpon': {'a': '9.13', 'b': '16.23', 'c': '3.28', 'loc': '0.00', 'scale': '1.00'},
        'genextreme': {'c': '-0.10', 'loc': '0.00', 'scale': '1.00'},
        'gengamma': {'a': '4.42', 'c': '3.12', 'loc': '0.00', 'scale': '1.00'},
        'genhalflogistic': {'c': '0.77', 'loc': '0.00', 'scale': '1.00'},
        'geninvgauss': {'p': '2.30', 'b': '1.50', 'loc': '0.00', 'scale': '1.00'},
        'genlogistic': {'c': '0.41', 'loc': '0.00', 'scale': '1.00'},
        'gennorm': {'beta': '1.30', 'loc': '0.00', 'scale': '1.00'},
        'genpareto': {'c': '0.10', 'loc': '0.00', 'scale': '1.00'},
        'gilbrat': {'loc': '0.00', 'scale': '1.00'},
        'gompertz': {'c': '0.95', 'loc': '0.00', 'scale': '1.00'},
        'gumbel_l': {'loc': '0.00', 'scale': '1.00'},
        'gumbel_r': {'loc': '0.00', 'scale': '1.00'},
        'halfcauchy': {'loc': '0.00', 'scale': '1.00'},
        'halfgennorm': {'beta': '0.67', 'loc': '0.00', 'scale': '1.00'},
        'halflogistic': {'loc': '0.00', 'scale': '1.00'},
        'halfnorm': {'loc': '0.00', 'scale': '1.00'},
        'hypsecant': {'loc': '0.00', 'scale': '1.00'},
        'invgamma': {'a': '4.07', 'loc': '0.00', 'scale': '1.00'},
        'invgauss': {'mu': '0.15', 'loc': '0.00', 'scale': '1.00'},
        'invweibull': {'c': '10.58', 'loc': '0.00', 'scale': '1.00'},
        'johnsonsb': {'a': '4.32', 'b': '3.18', 'loc': '0.00', 'scale': '1.00'},
        'johnsonsu': {'a': '2.55', 'b': '2.25', 'loc': '0.00', 'scale': '1.00'},
        'kappa3': {'a': '1.00', 'loc': '0.00', 'scale': '1.00'},
        'kappa4': {'h': '0.10', 'k': '0.00', 'loc': '0.00', 'scale': '1.00'},
        'ksone': {'n': '1000.00', 'loc': '0.00', 'scale': '1.00'},
        'kstwo': {'n': '10.00', 'loc': '0.00', 'scale': '1.00'},
        'kstwobign': {'loc': '0.00', 'scale': '1.00'},
        'laplace': {'loc': '0.00', 'scale': '1.00'},
        'laplace_asymmetric': {'kappa': '2.00', 'loc': '0.00', 'scale': '1.00'},
        'levy': {'loc': '0.00', 'scale': '1.00'},
        'levy_l': {'loc': '0.00', 'scale': '1.00'},
        'levy_stable': {'alpha': '1.80', 'beta': '-0.50', 'loc': '0.00', 'scale': '1.00'},
        'loggamma': {'c': '0.41', 'loc': '0.00', 'scale': '1.00'},
        'logistic': {'loc': '0.00', 'scale': '1.00'},
        'loglaplace': {'c': '3.25', 'loc': '0.00', 'scale': '1.00'},
        'lognorm': {'s': '0.95', 'loc': '0.00', 'scale': '1.00'},
        'loguniform': {'a': '0.01', 'b': '1.00', 'loc': '0.00', 'scale': '1.00'},
        'lomax': {'c': '1.88', 'loc': '0.00', 'scale': '1.00'},
        'maxwell': {'loc': '0.00', 'scale': '1.00'},
        'mielke': {'k': '10.40', 's': '4.60', 'loc': '0.00', 'scale': '1.00'},
        'moyal': {'loc': '0.00', 'scale': '1.00'},
        'nakagami': {'nu': '4.97', 'loc': '0.00', 'scale': '1.00'},
        'ncf': {'dfn': '27.00', 'dfd': '27.00', 'nc': '0.42', 'loc': '0.00', 'scale': '1.00'},
        'nct': {'df': '14.00', 'nc': '0.24', 'loc': '0.00', 'scale': '1.00'},
        'ncx2': {'df': '21.00', 'nc': '1.06', 'loc': '0.00', 'scale': '1.00'},
        'norm': {'loc': '0.00', 'scale': '1.00'},
        'norminvgauss': {'a': '1.00', 'b': '0.50', 'loc': '0.00', 'scale': '1.00'},
        'pareto': {'b': '2.62', 'loc': '0.00', 'scale': '1.00'},
        'pearson3': {'skew': '0.10', 'loc': '0.00', 'scale': '1.00'},
        'powerlaw': {'a': '1.66', 'loc': '0.00', 'scale': '1.00'},
        'powerlognorm': {'c': '2.14', 's': '0.45', 'loc': '0.00', 'scale': '1.00'},
        'powernorm': {'c': '4.45', 'loc': '0.00', 'scale': '1.00'},
        'rayleigh': {'loc': '0.00', 'scale': '1.00'},
        'rdist': {'c': '1.60', 'loc': '0.00', 'scale': '1.00'},
        'recipinvgauss': {'mu': '0.63', 'loc': '0.00', 'scale': '1.00'},
        'reciprocal': {'a': '0.01', 'b': '1.00', 'loc': '0.00', 'scale': '1.00'},
        'rice': {'b': '0.77', 'loc': '0.00', 'scale': '1.00'},
        'semicircular': {'loc': '0.00', 'scale': '1.00'},
        'skewnorm': {'a': '4.00', 'loc': '0.00', 'scale': '1.00'},
        't': {'df': '2.74', 'loc': '0.00', 'scale': '1.00'},
        'trapezoid': {'c': '0.20', 'd': '0.80', 'loc': '0.00', 'scale': '1.00'},
        'triang': {'c': '0.16', 'loc': '0.00', 'scale': '1.00'},
        'truncexpon': {'b': '4.69', 'loc': '0.00', 'scale': '1.00'},
        'truncnorm': {'a': '0.10', 'b': '2.00', 'loc': '0.00', 'scale': '1.00'},
        'tukeylambda': {'lam': '3.13', 'loc': '0.00', 'scale': '1.00'},
        'uniform': {'loc': '0.00', 'scale': '1.00'},
        'vonmises': {'kappa': '3.99', 'loc': '0.00', 'scale': '1.00'},
        'vonmises_line': {'kappa': '3.99', 'loc': '0.00', 'scale': '1.00'},
        'wald': {'loc': '0.00', 'scale': '1.00'},
        'weibull_max': {'c': '2.87', 'loc': '0.00', 'scale': '1.00'},
        'weibull_min': {'c': '1.79', 'loc': '0.00', 'scale': '1.00'},
        'wrapcauchy': {'c': '0.03', 'loc': '0.00', 'scale': '1.00'},
    }
    return obtained_dict
1d4149e03f65403b4a36365d9f77aeefbd9d4ce7
19,221
import numpy


def line_intersections(Xa, ra, Xb, rb):
    """Compute the intersection point of two lines.

    Arguments:
        Xa: a point on line a
        ra: direction along line a
        Xb: a point on line b
        rb: direction along line b

    Returns:
        ta: scale factor for ra (Xi = Xa + ta * ra)
        tb: scale factor for rb (Xi = Xb + tb * rb)
    """
    assert isinstance(Xa, numpy.ndarray), "Xa must be numpy array"
    assert isinstance(ra, numpy.ndarray), "ra must be numpy array"
    assert isinstance(Xb, numpy.ndarray), "Xb must be numpy array"
    assert isinstance(rb, numpy.ndarray), "rb must be numpy array"
    assert Xa.shape == (3,), "Xa must be (3,)"
    assert ra.shape == (3,), "ra must be (3,)"
    assert Xb.shape == (3,), "Xb must be (3,)"
    assert rb.shape == (3,), "rb must be (3,)"

    normal = numpy.cross(ra, rb)
    if numpy.linalg.norm(normal) < 1.0e-4:
        ta = numpy.inf
        tb = numpy.inf
        return (ta, tb)

    delta_X = Xb - Xa
    if numpy.linalg.norm(delta_X) < 1.0e-4:
        ta = 0.0
        tb = 0.0
        return (ta, tb)

    ta = numpy.cross(delta_X, rb)[2] / normal[2]
    tb = numpy.cross(delta_X, ra)[2] / normal[2]
    return (ta, tb)
a203303f722f33df823d5afe952c40b4d07afe87
19,222
def _penalty_augmentation(alpha, L, P, type):
    # ===========================================================================================
    """LSQ residual and Jacobian augmentation
    =======================================

    Augments the residual and the Jacobian of a LSQ problem to include the
    regularization penalty. The residual and Jacobian contributions of the
    specific regularization methods are analytically introduced.
    """
    # Compute the regularization penalty augmentation for the residual and the Jacobian
    resreg = L @ P
    Jreg = L

    # Include regularization parameter
    resreg = alpha * resreg
    Jreg = alpha * Jreg

    if type == 'residual':
        return resreg
    if type == 'Jacobian':
        return Jreg
7783654710d9d153dd2a825c12bd754c3d26240e
19,223
import time


def break_condition(i, max, total_length, time_val):
    """Returns True if i >= max, and prints the percentage of i with respect
    to total_length.

    :param i:
    :param max:
    :param total_length:
    :param time_val:
    """
    if i >= max:
        print('time to do ' + str(float(i) * 100. / float(total_length)) +
              '% is ' + str(time.time() - time_val))
        return True
5e93907bf941540c857659a3b9dac3382bb35943
19,224
def instance(module):
    """Returns an instance of Routemaps based on name, action and sequence number."""
    name = module.attributes['name']
    action = module.attributes['action']
    seqno = int(module.attributes['seqno'])
    _instance = dict(name=name, action=action, seqno=seqno, state='absent')
    try:
        result = module.api('routemaps').get(name)[action][seqno]
    except Exception:  # narrowed from a bare except
        result = None
    if result:
        _instance['state'] = 'present'
        _instance['seqno'] = str(seqno)
        _instance['set'] = ','.join(result['set'])
        desc = result['description']
        _instance['description'] = desc if desc else ''
        _instance['match'] = ','.join(result['match'])
        cont = result['continue']
        _instance['continue'] = str(cont) if cont else ''
    return _instance
152b546ecb0b9c9a07faae0075221edf48aea329
19,225
def onko_pituus_oikein(hetu):
    """Checks that the personal identity code has the expected length of 11 characters.

    Args:
        hetu (string): personal identity code

    Returns:
        boolean: True: length correct, False: length incorrect
    """
    # Compute the length of the identity code
    pituus = len(hetu)

    # way 1
    if pituus == 11:
        pituus_ok = True
    else:
        pituus_ok = False

    # way 2
    pituus_ok = (pituus == 11)

    return pituus_ok
ddea3dcba79f40c1711623fcc7b6ef0e9066e831
19,226
def ReadAllPoints(fcsvFilePath):
    """Read a list of tuples from a Slicer FCSV file. Tuple order is
    (name, description, x, y, z).
    """
    lst = []
    with open(fcsvFilePath, 'r') as f:
        for line in f:
            line = line.strip()
            # skip blank lines as well as comments; indexing line[0] on an
            # empty line would raise IndexError
            if not line or line.startswith('#'):
                continue
            tokens = line.split(',')
            if tokens[0].startswith('vtkMRMLMarkupsFiducialNode'):
                lst.append((tokens[11], tokens[12], tokens[1], tokens[2], tokens[3]))
    return lst
28abd8526b06c1459299bcb2cfaeeb2217dd1336
19,227
import colorsys


def hsv_to_rgb(h, s, v):
    """Converts a (hue, saturation, value) tuple to a (red, green, blue) tuple.

    Args:
        h, s, v: the HSV values

    Returns:
        an R, G, B tuple
    """
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return (int(255 * r), int(255 * g), int(255 * b))
0d348dfad8972b84688e11e68c9ac24dcf86dcce
19,228
def user_detail_response():
    """Data representing a response from GET /workspaces/<id>/users/self."""
    data = {
        "avatar": "https://lh3.googleusercontent.com/a-/BOh14LjZkR7iuACWXfkCrZX3nixJCdRUc_3PYP9wu7CA=s96-c",
        "created_date": "2021-12-15T03:18:23.865839",
        "email": "foobar@fooey.com",
        "first_name": "John",
        "id": "705c088f-1211-4c0e-a520-1d5f76b6940e",
        "last_name": "Jingleheimerschmidt"
    }
    return data
13360f4c4ae02368116f804f9d7e3e17e0c9f47b
19,229
def validUTF8(data):
    """0. UTF-8 Validation

    Return: True if data is a valid UTF-8 encoding, else return False
    - A character in UTF-8 can be 1 to 4 bytes long
    - The data set can contain multiple characters
    - The data will be represented by a list of integers
    - Each integer represents 1 byte of data, therefore you only need to
      handle the 8 least significant bits of each integer
    """
    successive_10 = 0
    for b in data:
        # b = bin(b).replace('0b', '').rjust(8, '0')[-8:]
        b = format(b, '#010b')[-8:]

        if successive_10 != 0:
            successive_10 -= 1
            if not b.startswith('10'):
                return False
        elif b[0] == '1':
            successive_10 = len(b.split('0')[0])
            if successive_10 == 1 or successive_10 > 4:
                return False
            successive_10 -= 1
    return True if successive_10 == 0 else False
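A few examples using common one-byte and multi-byte test vectors:

    validUTF8([65])           # True  (single ASCII byte)
    validUTF8([197, 130, 1])  # True  (valid 2-byte character followed by ASCII)
    validUTF8([235, 140, 4])  # False (3-byte lead with only one continuation byte)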
c229465c4c5b2b8096c0fe8e94db1025e129ded4
19,230
def get_name() -> str:
    """Get the user's name for further use in the game.

    Returns:
        str: The name the player has entered.
    """
    while True:
        name = str(input("""
    What is your name: """))
        if name and not name.isspace():
            return name
        print("""
    Name cannot be a space.""")
e57904913e2db503d63ec3570ccfe458ef96b006
19,231
import copy
from typing import Tuple


def test_handler_wrong_order(monkeypatch, lambda_module, context, order):
    """Test handler() with an incorrect order."""
    async def validate_true(order: dict) -> Tuple[bool, str]:
        return (True, "")

    def store_order(order: dict) -> None:
        pass

    monkeypatch.setattr(lambda_module, "validate_delivery", validate_true)
    monkeypatch.setattr(lambda_module, "validate_payment", validate_true)
    monkeypatch.setattr(lambda_module, "validate_products", validate_true)
    monkeypatch.setattr(lambda_module, "store_order", store_order)

    user_id = order["userId"]
    order = copy.deepcopy(order)
    del order["userId"]
    del order["paymentToken"]

    response = lambda_module.handler({
        "order": order,
        "userId": user_id
    }, context)

    print(response)
    assert response["success"] == False
    assert len(response.get("errors", [])) > 0
0090d256f4693ebfd3f7f5accd7f0e7f26c3fbdf
19,234
def absolute_difference_distance(x: float, y: float) -> float:
    """Calculate distance for `get_anomalies_density` function by taking the
    absolute value of the difference.

    Parameters
    ----------
    x:
        first value
    y:
        second value

    Returns
    -------
    result: float
        absolute difference between values
    """
    return abs(x - y)
45f6e55dad7dac292d450122d33b82d16cc32e23
19,235
import csv
import os


def load_imageset(path, set_name):
    """Returns the image set `set_name` present at `path` as a list.

    Keyword arguments:
    path -- path to data folder
    set_name -- image set name - labeled or unlabeled.
    """
    reader = csv.reader(open(os.path.join(path, set_name + '.csv'), 'rt'))
    reader = [r[0] for r in reader]
    return reader
dc628f01256d5121b62090f01cb7c352aa597b33
19,236
def match_class(target):
    """Get html content by class."""
    def do_match(tag):
        classes = tag.get('class', [])
        return all(c in classes for c in target)
    return do_match
f5646cf8a5cc63d1a534402c96f8529493887d9f
19,237
def txt_to_json(file):
    """Takes a list of words in a text file.
    Returns a valid JSON string with the words in an array.
    """
    # accept only files that end in .txt
    if file[-4:] != '.txt':
        raise Exception('File must be a .txt')

    # convert the text list of words into a python list of words;
    # use a context manager so the file handle is closed
    with open(file, 'r') as f:
        words = f.read().split('\n')

    # get rid of any empty entries
    words = [word for word in words if len(word) != 0]

    # write the JSON string
    result = '{ "words": ['
    i = 0
    for word in words:
        result += '"' + word + '"'
        if i != len(words) - 1:
            result += ','
        i += 1
    result += "]}"
    return result
29abb40699e0fea885ed81c093165763431d2db1
19,238
def create_parameter_string(command_string):
    """Create the parameter string. The parameter string is everything after
    the first space character. All other space characters are removed.
    """
    parameter_string = "".join(command_string.split(" ")[1:])
    return parameter_string
80631d706cbe199d055eaac747912308be350581
19,239