content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
import random


def generate_nationalities_and_passports(num_users, num_existing_users, countries):
    """Generates a list of nationalities and/or passports to be inserted
    into the database.

    Keyword arguments:
    num_users -- number of users being created by the script
    num_existing_users -- number of existing users in the evolutions
    countries -- dictionary with values consisting of a tuple of the
                 country name and ISO numeric code

    Returns:
    values -- a list of strings to be added to the SQL script (empty when
              num_users is 0)
    """
    values = []
    template = "({0}, {1}),\n"
    country_tuples = list(countries.values())
    for i in range(num_users):
        user_id = i + num_existing_users + 1  # +1 because SQL ids start from 1
        # Choose a random country and select its id
        country_id = random.choice(country_tuples)[0]
        values.append(template.format(user_id, country_id))
    # Make the last comma a semi-colon instead, with a newline for formatting.
    # BUG FIX: guard against num_users == 0, which previously raised
    # IndexError on values[-1].
    if values:
        values[-1] = values[-1][:-2] + ";\n"
    return values
dcbe58fbbaae802b579e790fb76f589442d484c3
411,667
def transpose_list(input):
    """
    INPUT:

    - ``input`` -- a list of lists, each list of the same length

    OUTPUT:

    - ``output`` -- a list of lists such that output[i][j] = input[j][i]

    EXAMPLES::

        sage: from sage.schemes.hyperelliptic_curves.monsky_washnitzer import transpose_list
        sage: L = [[1, 2], [3, 4], [5, 6]]
        sage: transpose_list(L)
        [[1, 3, 5], [2, 4, 6]]
    """
    height = len(input)
    width = len(input[0])
    # Build the transpose column by column of the original matrix.
    return [[input[row][col] for row in range(height)] for col in range(width)]
47d1cf21e221ec5e5960cc90015ae3b80bcb93f2
162,933
def get_subjectaltname(certificate):
    """Return subjectAltName associated with certificate.

    BUG FIX: the original hard-coded ``get_extension(6)``, which silently
    returns the wrong extension (or raises) whenever the certificate's
    extension order differs.  Scan for the extension by its short name
    instead.

    :param certificate: a pyOpenSSL ``X509`` certificate object
    :return: the subjectAltName rendered as a string
    :raises ValueError: if the certificate carries no subjectAltName
    """
    for index in range(certificate.get_extension_count()):
        extension = certificate.get_extension(index)
        if extension.get_short_name() == b"subjectAltName":
            # str() on an X509Extension renders the same text the private
            # _subjectAltNameString() helper produced before.
            return str(extension)
    raise ValueError("certificate has no subjectAltName extension")
683dd5e3dd31a622ff70ba036701968d551bbf0c
68,807
def sideplr(p, p1, p2):
    """
    Calculates the side of point p to the vector p1p2.

    Input
      p: the point
      p1, p2: the start and end points of the line

    Output
      -1: p is on the left side of p1p2
       0: p is on the line of p1p2
       1: p is on the right side of p1p2
    """
    # Sign of the 2D cross product of (p - p1) and (p2 - p1).
    cross = (p.x - p1.x) * (p2.y - p1.y) - (p2.x - p1.x) * (p.y - p1.y)
    return int(cross)
2f6450133ffd8158f70e97cf31402fd95adb993f
241,092
def to_square_feet(square_metres):
    """Convert metres^2 to ft^2."""
    SQUARE_FEET_PER_SQUARE_METRE = 10.7639
    return square_metres * SQUARE_FEET_PER_SQUARE_METRE
50510aad230efcb47662936237a232662fef5596
1,738
def ent_dims_from_state_dict(state_dict: dict) -> tuple[int, int]:
    """
    From a state dict, return the appropriate values of `ent_hidden_size` and
    `ent_intermediate_size` which can be used for instantiating the model.

    BUG FIX: the original returned the raw ``.shape`` object (e.g.
    ``torch.Size``) rather than the annotated ``tuple[int, int]``; convert
    explicitly to a plain tuple of ints.
    """
    # We assume that all transformer blocks have same dim.
    low_dim_key = "encoder.0.ent_output.dense.weight"
    key = low_dim_key if low_dim_key in state_dict else "encoder.0.output.dense.weight"
    # The entity output maps from hidden to intermediate so gives us exactly
    # the shape we need.
    return tuple(int(dim) for dim in state_dict[key].shape)
7ed009d9683df7ec98b3c99ab289b3bce438692d
601,701
def identify_galaxy_entries(misp_data, galaxy_type, initial):
    """
    Generate a dictionary of the entries in the specified galaxy within the data

    misp_data - The events and attributes loaded from the MISP server
    galaxy_type - The type of the galaxy to look at
    initial - The initial dictionary of entries (updated in place and returned)
    """
    entries = initial
    for event in misp_data["events"]:
        if "GalaxyCluster" not in event:
            continue
        for cluster in event["GalaxyCluster"]:
            # Only clusters that carry a Galaxy and match the requested type.
            if "Galaxy" in cluster and cluster["type"] == galaxy_type:
                entries[cluster["value"]] = True
    return entries
c862bbb63c2c8f4de185d6074a2df4e593d6895d
334,409
def underscore(s: str) -> str:
    """Appends an underscore (_) to s."""
    return s + '_'
8094e8b72993d3fabee5ea88196f91b1cbcaa9ae
491,782
def example_function_keyword(a, b, c=1):
    """Example with keyword and default value; returns a * b * c."""
    product = a * b
    return product * c
5b51ffaf95f8677b888fc6f48af085813ae57020
485,165
def _read_text_file_lines(file_name): """ Read text file lines. Open a text file and read it, return a list of lines. Parameters ---------- file_name: string Path to the file to be read. Returns ------- list of strings A list of lines. """ with open(file_name, 'r') as fh: lines = [line.rstrip('\n') for line in fh] return lines
e5c12777b35289196ef13aced84709004a4968a3
585,955
def _get_rest_endpoint_base_url(rest_service_base_url: str) -> str: """ Return the base URL of the endpoints of BAM's REST service. :param rest_service_base_url: Base URL of BAM's REST service. :type rest_service_base_url: str :return: The base URL of the endpoints of BAM's REST service. :rtype: str """ return ( rest_service_base_url + "v1" )
d947d242a63203f0007433be383ed74cb4289ff4
696,703
def replace_with(repl_str):
    """
    Helper method for common parse actions that simply return
    a literal value.  Especially useful when used with
    :class:`transform_string<ParserElement.transform_string>` ().

    Example::

        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
        term = na | num

        term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
    """
    def _replace_action(s, l, t):
        # Standard (string, location, tokens) parse-action signature;
        # the inputs are ignored and the literal replacement is returned.
        return [repl_str]

    return _replace_action
1fa00581b5bd2d77886187ada1c0bd1d46f089a9
534,397
import random


def split_train_val_test(data, split=None, shuffle=False):
    """Split sequence of data into train, validation and test.

    Parameters
    ----------
    data : [N,] list
        Input data.
    split : [3,] list, default=[0.6, 0.1, 0.3]
        Train, validation, test fractions (normalised to sum to 1).
    shuffle : bool, default=False
        Randomly shuffle input data (with seed for reproducibility)

    Returns
    ----------
    train : [split[0]*N,] list
        Train split.
    val : [split[1]*N,] list
        Validation split.
    test : [split[2]*N,] list
        Test split.
    """
    # Avoid a mutable default argument; the documented default now matches
    # the actual one (the old docstring claimed [0.6, 0.2, 0.2]).
    if split is None:
        split = [0.6, 0.1, 0.3]
    N = len(data)

    # Ensure split is normalised
    split = [s / sum(split) for s in split]

    # Randomly shuffle input data (with seed for reproducibility)
    if shuffle:
        random.seed(0)
        data = random.sample(data, N)

    # Do train/val/test split
    train, val, test = [], [], []
    for i, d in enumerate(data):
        if i < split[0] * N:
            train.append(d)
        elif i < sum(split[:2]) * N:
            val.append(d)
        elif i < sum(split) * N:
            test.append(d)

    return train, val, test
d7f3eb3ac403afd01a168a608eed24a1cb2b2ff8
294,244
import asyncio


async def gather_dict(dct):
    """Utility function to do asyncio.gather for dictionaries

    Do asyncio.gather on the dictionary's values, and return the dictionary
    with the futures' results replacing the futures.  Entries whose result
    is None are dropped from the returned dictionary.

    >>> promise = Future()
    >>> promise.set_result("value")
    >>> await gather_dict({"key": promise})
    {"key": "value"}
    """
    keys = list(dct.keys())
    results = await asyncio.gather(*dct.values())
    return {key: result for key, result in zip(keys, results) if result is not None}
c2799dcdee218e3d47af180f8121624d11cc42a2
312,658
def getPMUtiming(lines):
    """
    Function to get the timing for the PMU recording.

    Parameters
    ----------
    lines : list of str
        list with PMU file lines
        To improve speed, don't pass the first line, which contains the raw data.

    Returns
    -------
    MPCUTime : list of two int
        MARS timestamp (in ms, since the previous midnight) for the start and
        finish of the signal logging, respectively
    MDHTime : list of two int
        Mdh timestamp (in ms, since the previous midnight) for the start and
        finish of the signal logging, respectively
    """
    MPCUTime = [0, 0]
    MDHTime = [0, 0]
    # Each relevant line names a clock ('MPCUTime'/'MDHTime') together with
    # LogStart/LogStop; the timestamp is the second whitespace token.
    for line in lines:
        for clock_name, target in (('MPCUTime', MPCUTime), ('MDHTime', MDHTime)):
            if clock_name in line:
                if 'LogStart' in line:
                    target[0] = int(line.split()[1])
                elif 'LogStop' in line:
                    target[1] = int(line.split()[1])
    return MPCUTime, MDHTime
d4f07fe08ea38cf8f5f6dd6b806a42d2a9a0121b
369,862
from typing import List


def parseRanges(rangeStr: str, inlcudeLast: bool = True, unique: bool = True) -> List[int]:
    """
    Parses range strings in the form of 1,2,4:8,9 to a list 1,2,4,5,6,7,8,9

    Args:
        rangeStr (): a range sting separated by commas and :
        inlcudeLast (): If true ranges 1:3 => 1,2,3 if false 1:3 => 1,2

    Returns:
        A list of integers (order unspecified when unique=True)
    """
    end_offset = 1 if inlcudeLast else 0
    result: List[int] = []
    for part in (piece.strip() for piece in rangeStr.split(',')):
        bounds = part.split(':')
        if len(bounds) == 1:
            # single number
            result.append(int(bounds[0]))
        else:
            # range in form of a:b
            result.extend(range(int(bounds[0]), int(bounds[1]) + end_offset))
    return list(set(result)) if unique else result
36d4cb444c5789c6b6f9d7e503aebda9b531c8cc
464,066
def is_int32(t):
    """
    Return True if value is an instance of an int32 type
    (i.e. its typecode attribute is "i").
    """
    return "i" == t.typecode
075c87757c0942042a267f3a2acf280209894400
584,112
import textwrap


def indent_text(text, indent, trailing_nl):
    """Reindent text by `indent` characters.

    When `trailing_nl` is falsy, a single trailing newline (if present)
    is stripped from the result.
    """
    indented = textwrap.indent(text, indent)
    if trailing_nl or not indented.endswith('\n'):
        return indented
    # remove trailing newline if so desired
    return indented[:-1]
66b6de41e37720000242a8455bbee5476e69eb31
528,250
def _num_two_factors(x): """return number of times x is divideable for 2""" if x <= 0: return 0 num_twos = 0 while x % 2 == 0: num_twos += 1 x //= 2 return num_twos
d122f5a084c38e9b6a8de251f2a0658957f23b63
675,025
import csv


def read_csv_to_dictionary(csvfile, delimiter=';'):
    """Reads a csv file and returns a dictionary with the respective keys
    specified in the first row of the csv file.

    Parameters
    ----------
    csvfile : str
        Path of the csv file to read.
    delimiter : str, default=';'
        Field delimiter used in the file.

    Returns
    -------
    dict
        Maps each first-row header to the list of its column values.
    """
    # FIX: dropped the redundant infile.close() that sat inside the `with`
    # block, and the unused enumerate() index.
    with open(csvfile, mode='r') as infile:
        rows = list(csv.reader(infile, delimiter=delimiter))
    # Transpose rows to columns; each column starts with its header key.
    # NOTE: like zip(), ragged rows are truncated to the shortest row.
    return {column[0]: list(column[1:]) for column in zip(*rows)}
947e4bc04f89f96cef4b4f2b50da8797d2abe1f3
141,782
def _checksum_valid(line): """ Calculate the checksum of the GPS output and check if it is equal to the provided checksum in output; therefore - there was no transmission errors. The checksum is XOR of all bytes between $ and * characters (excluding themselves). :param line: line of output to check :type line: str :return: True if the output is valid; False otherwise :rtype: bool """ l = line.index('$') r = line.rindex('*') chksum = 0 for char in line[l + 1: r]: chksum ^= ord(char) if line[r + 1: r + 3] == '%.2X' % chksum: return True return False
3affdede2ea61ca0beb13748da4cd425bd825aa2
534,272
def flatten(seq):
    """Recursively flattens a sequence (nested lists/tuples at any depth)."""
    flat = []
    for element in seq:
        if not isinstance(element, (list, tuple)):
            flat.append(element)
        else:
            flat.extend(flatten(element))
    return flat
449f6f9d428c58c9a46def0ceedf4f792051bc86
403,021
from typing import Counter


def bag_of_character_ngrams(msg, n):
    """
    Extract a bag of character ngrams from a message (including whitespace),
    with fixed n

    :param msg: input string
    :param n: size of ngram (must be a positive integer)
    :return: bag of features, as a Counter keyed by ('char', ngram)
    :raises ValueError: if n is not positive
    """
    if n <= 0:
        # BUG FIX: the original only rejected n == 0; a negative n slipped
        # through to the slicing branch and produced nonsense features.
        raise ValueError('n must be a positive integer')
    if n > len(msg):
        return Counter()
    return Counter(('char', msg[i:i + n]) for i in range(len(msg) - n + 1))
af0e414582b76a32ffa9b7268aee6cf1c85c8d99
210,612
def fix_sanitizer_crash_type(crash_type):
    """Ensure that Sanitizer crashes use generic formats."""
    # General normalization: lower-case, dashes for underscores, then a
    # single leading capital.
    normalized = crash_type.lower().replace('_', '-').capitalize()
    # Use more generic types for certain Sanitizer ones.
    return normalized.replace('Int-divide-by-zero', 'Divide-by-zero')
a2971f209e380f7b50d4234876e7421249737d44
83,133
def find_zeros(list_of_lists, height, width):
    """
    Finds the indices of zeros in a height by width list of lists.
    Returns [row, col] pairs in row-major order.
    """
    return [[r, c]
            for r in range(height)
            for c in range(width)
            if list_of_lists[r][c] == 0]
d72528c4dae587ec36f985a76288059b6832251b
304,796
def deg_to_arcmin(x):
    """Convert *x* from [deg] to [arcmin] (1 degree = 60 arcminutes)."""
    ARCMIN_PER_DEG = 60
    return x * ARCMIN_PER_DEG
2444eeef478c2bcc50518483791915861d9ac7cb
249,108
def pp_peername(peername):
    """Prettily format a peername tuple (host, port) as "host:port"."""
    host, port = peername[0], peername[1]
    return f"{host}:{port}"
7b165931939932ae571e0bfb1e0df4642309a069
285,625
def get_n_last_liked(sp, n):
    """Get n most recently liked songs by user.

    :param sp: client exposing current_user_saved_tracks() (Spotipy-style)
    :param n: number of saved tracks to request
    :return: the 'items' list from the API response
    """
    response = sp.current_user_saved_tracks(n)
    return response['items']
e3d60e6c3a4bf4b7c46f66d8731c633d8f455885
500,723
def transform_seed_objects(objects):
    """Map seed objects to state format, keyed by instance_id."""
    state = {}
    for obj in objects:
        state[obj['instance_id']] = {
            'initial_player_number': obj['player_number'],
            'initial_object_id': obj['object_id'],
            'initial_class_id': obj['class_id'],
            'created': 0,
            'created_x': obj['x'],
            'created_y': obj['y'],
            'destroyed': None,
            'destroyed_by_instance_id': None,
            'destroyed_building_percent': None,
            'deleted': False,
            'destroyed_x': None,
            'destroyed_y': None,
            'building_started': None,
            'building_completed': None,
            'total_idle_time': None
        }
    return state
685c0c5b22fdff108311338354bb42fb53fd07f6
7,894
def get_mu(area):
    """
    Get the mean infiltration rate.  The relationship is simple, so the
    values are hard-coded by rule here instead of being read from the
    database.

    :param area: int, zone number (1-6)
    :return: float, mean infiltration rate (0.0 for unknown zones)
    """
    rates = {1: 2.0, 2: 3.0, 3: 4.0, 4: 3.0, 5: 5.0, 6: 5.0}
    return rates.get(area, 0.0)
55a2f281ad4d6049aceafa084948b8619adb4053
251,772
from typing import List
from typing import Dict
from typing import Optional


def aggregate_statuses(statuses: List[Dict], dc_voltage=False) -> Optional[Dict]:
    """Aggregates inverter statuses for use for PVOutput.org uploads.

    Does some rounding and integer conversion.

    Args:
        statuses: List of inverter statuses as returned by Inverter.status().
        dc_voltage: If True, aggregates DC voltage instead of AC voltage.

    Returns:
        Dictionary of keyword arguments for add_status() or None if no
        inverter has operation mode normal.
    """
    def mean(items):
        """Arithmetic mean of an iterable."""
        collected = list(items)
        return sum(collected) / len(collected)

    def pick_voltage(status):
        """Select DC, three-phase average, or single-phase AC voltage."""
        if dc_voltage:
            # Takes average of PV1 and PV2 voltage
            return mean([status['pv1_voltage'], status['pv2_voltage']])
        if 'grid_voltage_r_phase' in status:
            # For three-phase inverters, take average voltage of all phases
            return mean([status['grid_voltage_r_phase'],
                         status['grid_voltage_s_phase'],
                         status['grid_voltage_t_phase']])
        # For one phase inverter, pick the grid voltage
        return status['grid_voltage']

    # Per-inverter values; inverters not in normal operating mode are skipped.
    values = [{
        'energy_gen': int(s['energy_today'] * 1000),
        'power_gen': int(s['output_power']),
        'temp': s['internal_temperature'],
        'voltage': pick_voltage(s),
    } for s in statuses if s['operation_mode'] == "Normal"]

    # Aggregate values of all inverters
    if not values:
        return None
    return {
        'energy_gen': sum(v['energy_gen'] for v in values),
        'power_gen': sum(v['power_gen'] for v in values),
        'temp': round(mean(v['temp'] for v in values), 1),
        'voltage': round(mean(v['voltage'] for v in values), 1),
    }
8432ec1f14c3e96360934df456e655eb08553f37
40,401
def fits_file_name(rerun, run, camcol, field, band):
    """
    SDSS FITS files are named, e.g., 'frame-g-001000-1-0027.fits.bz2'.
    We will uncompress this and save it as 'frame-g-001000-1-0027.fits'.
    This function returns 'frame-g-001000-1-0027.fits' in this case (without
    the '.bz2' extension).  Note: `rerun` is accepted for interface
    compatibility but does not appear in the file name.
    """
    return f"frame-{band}-{run:06d}-{camcol}-{field:04d}.fits"
5d2b0f4c08ef1a8b25838d0fdc959dbbe0c153bf
452,595
def fit_and_predict(model, X_train, X_test, y):
    """
    Fitting of the model and predicting

    :param model: Keras model
    :param X_train: Training samples
    :param X_test: Test samples
    :param y: Labels (reshaped to a column vector before fitting)
    :return: Predicted values
    """
    column_labels = y.reshape((-1, 1))
    model.fit(X_train, column_labels, epochs=5)
    print("done fitting")
    predictions = model.predict(X_test)
    print("done predicting")
    return predictions
6e9c1f155ee39b2061a2d3aae1b72d76dcbc0dae
346,479
from typing import List
from typing import Any
from functools import reduce


def prod(list_: List[Any]) -> Any:
    """
    Multiply all elements in a list

    :param list_: list of elements to be multiplied
    :return: the product of the elements in the input list; 1 (the empty
        product) when the list is empty
    """
    if not list_:
        # FIX: reduce() with no initial value raises TypeError on an empty
        # sequence; return the conventional empty product instead.
        return 1
    return reduce((lambda x, y: x * y), list_)
601876d9a90a586c4782f1ff21775e704326f877
283,800
def merge(a, b):
    """
    Given two clocks, return a new clock with all values greater or equal
    to those of the merged clocks (element-wise maximum).
    """
    return tuple(max(x, y) for x, y in zip(a, b))
3cdf31c9f32e846fcf8b65bfc2d6a7359d68b317
602,391
def determine_duration_in_seconds(duration_string):
    """
    takes a timestamp of the form '10:10:10' (hours, minutes, seconds)
    and returns an integer number of seconds.
    """
    hours, minutes, seconds = (int(part) for part in duration_string.split(":"))
    return hours * 3600 + minutes * 60 + seconds
cc5c55bee1fb04be7f4c3bad9978f230a25c31e5
237,932
import random
import hashlib


def md5(*args):
    """Generates MD5 based on the provided string or a random int.

    With no argument (or an empty string) a random 128-bit integer is
    hashed, so that case is non-deterministic.
    """
    value = args[0] if args else ""
    if value == "":
        value = str(random.getrandbits(128))
    return hashlib.md5(value.encode('utf-8')).hexdigest()
0d0becac0fad0aca2dcc79923f580ee7e6f06d16
616,407
def bb_intersection_over_union(box_a, box_b):
    """
    Find out how much 2 boxes intersect.

    Boxes are (x1, y1, x2, y2); the +1 terms treat coordinates as
    inclusive pixel indices.

    :param box_a:
    :param box_b:
    :return: IOU overlap
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    left = max(box_a[0], box_b[0])
    top = max(box_a[1], box_b[1])
    right = min(box_a[2], box_b[2])
    bottom = min(box_a[3], box_b[3])

    # compute the area of intersection rectangle (zero when disjoint)
    overlap = max(0, right - left + 1) * max(0, bottom - top + 1)

    # compute the area of both the prediction and ground-truth rectangles
    area_a = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    area_b = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)

    # IoU = intersection area / union area
    return overlap / float(area_a + area_b - overlap)
bd420cb1b991c350e1554e77666dfaff9c0472ef
595,109
from pathlib import Path


def find_feats(directory, utt_id, typ="out_duration", ext="-feats.npy"):
    """Find features for a given utterance.

    Args:
        directory (str): directory to search
        utt_id (str): utterance id
        typ (str, optional): type of the feature. Default: "out_duration"
        ext (str, optional): extension of the feature. Default: "-feats.npy"

    Returns:
        str: path to the first matching feature file (in sorted order)
    """
    root = Path(directory) if isinstance(directory, str) else directory
    matches = sorted(root.rglob(f"**/{typ}/{utt_id}{ext}"))
    # Raises IndexError when no feature file exists, like the original.
    return matches[0]
ca12f9ac333c8a794361374b54ad089bebef0cee
287,811
import struct def _GoToIffChunk(file_object, iff_chunk_id): """Jump to a named chunk in a (R)IFF file. Args: file_object: file object. iff_chunk_id: 4 chars ID of the chunk. Returns: length of the chunk in bytes. -1 if the chunk has not been found. If the chunk is found, file_object is positioned at the beginning of the chunk. Otherwise, it is positioned at the end of the file. """ while True: chunk_id = file_object.read(4) if len(chunk_id) < 4: return -1 chunk_size = file_object.read(4) if len(chunk_size) < 4: return -1 chunk_size = struct.unpack('<L', chunk_size) if iff_chunk_id == chunk_id: return chunk_size[0] else: file_object.seek(chunk_size, 1)
be8864b79777cea1b27f14342e042b89813d7ec5
512,799
def twoNumberSumBetter(array, targetSum):
    """
    This function takes an array of numbers and checks whether any two
    numbers sum up to a target number.  This implementation has O(nlogn)
    time complexity and O(1) space complexity.  Note: the input array is
    sorted in place.

    args
    ---------
    array: an array of numbers
    targetSum: a target number

    output
    ---------
    a list of the two numbers which sum up to the target number, or
    False if no such pair exists in the input array
    """
    array.sort()
    lo, hi = 0, len(array) - 1
    while lo < hi:
        pair_sum = array[lo] + array[hi]
        if pair_sum == targetSum:
            return [array[lo], array[hi]]
        if pair_sum < targetSum:
            lo += 1
        else:
            hi -= 1
    return False
e31a73c8c17973766867335368fb2820f345072c
321,183
def sort_top_editors_per_country(editors, editcount_index, top_n):
    """
    Takes a list of lists of editors with editcounts, the index of the
    editcount column, and a top editor cutoff int; returns the list sorted
    by editcount (descending) truncated to top_n entries.

    Note: the input list is sorted in place.
    """
    # BUG FIX: the original ignored editcount_index and always sorted on
    # column 2; honour the caller-supplied column instead.
    editors.sort(key=lambda row: int(row[editcount_index]), reverse=True)
    if len(editors) > top_n:
        editors = editors[:top_n]
    return editors
97505fccc411758247c2d39b167d87d19cc33454
246,371
def read_key_value_pairs(filepath):
    """Reads key value pairs from a file.

    Format: [key]:value

    :param filepath: file to read from
    :type filepath: str
    :return: The key value pairs stored in a dictionary and the leftover
        string before the first pair.
    :rtype: tuple(dict[str,str],str)
    """
    pre_string = ''
    d = dict()
    # FIX: open read-only ('r') — the file is never written, so 'r+' only
    # added a needless writability requirement.
    with open(filepath, 'r', encoding='utf-8') as f:
        pairs = f.read().split('\n[')
    # FIX: guard against an empty first segment (empty file, or a file
    # starting with "\n[") which previously raised IndexError on pairs[0][0].
    if pairs and pairs[0]:
        if pairs[0][0] != '[':
            pre_string = pairs[0]
            pairs.pop(0)
        else:
            pairs[0] = pairs[0][1:]
    for p in pairs:
        parts = p.split(']:')
        if len(parts) != 2:
            # Skip malformed segments (no "]: " or more than one).
            continue
        d[parts[0].strip()] = parts[1]
    return d, pre_string
1a733ea773977d950ae8963514867a40ae921732
523,995
def flatten_dict(d, tld="") -> dict:
    """Flatten the given dict recursively while prepending the upper level key
    to all the lower level keys separated by a dot (.)"""
    flat = {}
    for key, value in d.items():
        if not isinstance(value, dict):
            flat[tld + key] = value
        else:
            # Recurse with the accumulated dotted prefix.
            flat.update(flatten_dict(value, tld=f"{tld}{key}."))
    return flat
ed44559d4a3083c51f85a55a07b820d058272412
32,213
import inspect
import typing


def get_params(signature: inspect.Signature) -> typing.List[str]:
    """
    Given a function signature, return a list of parameter strings
    to use in documentation.

    Eg. test(a, b=None, **kwargs) -> ['a', 'b=None', '**kwargs']

    BUG FIX: the original emitted the positional-only separator "/" BEFORE
    the first positional-only parameter; per Python signature syntax (and
    CPython's inspect.Signature rendering) it belongs AFTER the last one,
    e.g. test(a, /, b) -> ['a', '/', 'b'].
    """
    params = []
    render_pos_only_separator = False
    render_kw_only_separator = True

    for parameter in signature.parameters.values():
        value = parameter.name
        if parameter.default is not parameter.empty:
            value = f"{value}={parameter.default!r}"

        if parameter.kind is parameter.POSITIONAL_ONLY:
            # Defer the "/" until we leave the positional-only group.
            render_pos_only_separator = True
        elif render_pos_only_separator:
            params.append("/")
            render_pos_only_separator = False

        if parameter.kind is parameter.VAR_POSITIONAL:
            # *args also acts as the keyword-only separator.
            render_kw_only_separator = False
            value = f"*{value}"
        elif parameter.kind is parameter.VAR_KEYWORD:
            value = f"**{value}"
        elif parameter.kind is parameter.KEYWORD_ONLY:
            if render_kw_only_separator:
                render_kw_only_separator = False
                params.append("*")

        params.append(value)

    # Signature ended while still inside the positional-only group.
    if render_pos_only_separator:
        params.append("/")
    return params
c4c6583c8e2ed53f67255209e6a1ec99d3da8d96
369,121
import torch


def get_binary_kernel2d(window_size):
    """
    Creates a binary kernel to extract the patches.
    If the window size is HxW will create a (H*W)xHxW kernel.
    """
    h, w = window_size[0], window_size[1]
    # Identity matrix: filter i selects exactly one position of the window.
    eye = torch.eye(h * w)
    return eye.view(h * w, 1, h, w)
b06c4b5875c6b6b2f9b785ac03ee04778209a7e8
411,987
def multAll(lst):
    """Multiplies a list of variables together.

    Note: an empty list yields 0, not the conventional empty product of 1 —
    preserved because callers may rely on it.
    """
    if not lst:
        return 0
    result = lst[0]
    for value in lst[1:]:
        result *= value
    return result
6d985d8c69b25f35b14b2bc94cc4e45be8f826e0
635,467
def rollout(rnn, inputs, init_h):
    """
    rnn partial rollout for k steps given init_h

    Arguments:
        rnn: RNN network, called as rnn(x_t, h_t) -> (q_t, h_{t+1})
        inputs: (B,T,D)
        init_h: (B,D)

    Returns:
        hs: list of hidden states, [(B,H)]*(T+1), starting with init_h
        qs: list of outputs, [(B,O)]*T or [dict (B,O)]*T
    """
    hidden_states = [init_h]
    outputs = []
    num_steps = inputs.shape[1]
    for step in range(num_steps):
        output, next_hidden = rnn(inputs[:, step], hidden_states[-1])
        hidden_states.append(next_hidden)
        outputs.append(output)
    return hidden_states, outputs
0af0e2152400faa4a1225771a053a77f020c34fb
565,633
def qD_func(g, a):
    """
    Parameter
    ---------
    g : float
        ratio between exchange and dipolar interaction (supposed g << 1)
    a : float
        lattice constant of the cubic unit cell

    Return
    ------
    qD : float
        dipolar wavevector of the system, sqrt(g) / a
    """
    sqrt_g = g ** 0.5
    return sqrt_g / a
524549964aa8130584a80017ce4462bdb5bd0a77
474,202
import functools


def empty_energy_result(energy_result):
    """
    Return False if the energy_result list contains only negative numbers,
    True otherwise (i.e. True when any entry is >= 0; an empty list gives
    False).
    """
    return any(x >= 0 for x in energy_result)
980cd49f687ee975eac773139855eec2a4fd59d2
635,104
import math


def calculate_dist(coord, a, b, c):
    """Calculate the distance d from a point (x0, y0) to the line
    ax + by + c = 0, defined as the shortest distance between a fixed point
    and any point on the line. It is the length of the line segment that is
    perpendicular to the line and passes through the point.

    Args:
        coord (Tuple[float, float]): the latitude and longitude to match
        a (float): a in ax + by + c = 0
        b (float): b in ax + by + c = 0
        c (float): c in ax + by + c = 0

    Returns:
        d (float): distance (math.inf when a == b == 0, i.e. no valid line)
    """
    x0, y0 = coord[0], coord[1]
    denominator = math.sqrt(a * a + b * b)
    if denominator == 0:
        # Degenerate "line": keep the original infinite-distance convention.
        return math.inf
    return abs(a * x0 + b * y0 + c) / denominator
6610b2901bd2ccd006f9e70ba26f1cf9b300b5e7
392,894
def make_gateway_name(gateway_type, volume_name, host):
    """Generate a name for a gateway, as "<volume>-<type>-<host>"."""
    return f"{volume_name}-{gateway_type}-{host}"
1918efea9a190432d551919d3772d21612d94b4f
279,194
import csv def _decode_csv(input_str): """ Decodes CSVs into a list of strings. """ reader = csv.reader([input_str], quotechar='"', delimiter=',', escapechar="\\", doublequote=False, quoting=csv.QUOTE_ALL, skipinitialspace=True); result = [] for row in reader: result += row return result
6ab4e99ea17e502d498c5a79b23bbc37094e4743
582,965
def get_file_location(data_dir, filename):
    """Concat data_dir and filename.

    Plain string concatenation: data_dir is expected to already end with a
    path separator.
    """
    return f"{data_dir}{filename}"
4142b6558a019d6600c9993aacfd8118a43cc6ba
552,494
def get_positions_dict(board_size):
    """Returns a dictionary with the positions of each tile in the puzzle.

    Tiles are numbered 1..board_size**2 - 1 in row-major order; the final
    position is keyed 0, representing the blank tile.
    """
    last_tile = board_size ** 2
    positions = {}
    tile = 1
    for y in range(board_size):
        for x in range(board_size):
            positions[tile] = (x, y)
            tile += 1
            if tile == last_tile:
                tile = 0  # blank tile
    return positions
ff27a91cb1620a49b40382da4debebad171b1029
141,682
def filter_data(data, options):
    """Filters the data by the given options.

    An item is kept when it contains every option key with an equal value;
    falsy options return data unchanged.
    """
    if not options:
        return data

    def matches(item):
        return all(key in item and item[key] == options[key] for key in options)

    return [item for item in data if matches(item)]
1d765e58efe615595537f7190f6afd8b4df30115
486,500
import requests


def get_html(url):
    """
    Gets the HTML for a page.

    Performs a blocking GET request and returns the response body text.
    """
    print("Attempting to grab html...\n")
    response = requests.get(url)
    return response.text
1d74b5a67fcfc05190fb0ae24b6de344f6d52e38
138,957
def config_file_toml(tmpdir):
    """Mock config.toml: writes gcp prod/test/dev sections and a paths
    section into tmpdir, returning the created file path."""
    config_toml = """[gcp]

[gcp.prod]
project_id = "mock-project-dl"
bucket = "mock-project-dl"
location = "EU"

[gcp.test]
project_id = "mock-project-test"
bucket = "mock-project-test"
location = "EU"

[gcp.dev]
project_id = "mock-project-dev"
bucket = "mock-project-dev"
location = "EU"

[paths]
cbs = "cbs"
"""
    file = tmpdir.join("config.toml")
    with open(file, "w") as f:
        f.write(config_toml)
    return file
28e648ed3fdb327439bed15717e6965df2353ac3
530,914
import re


def is_georgian(txt: str) -> bool:
    """Detects if a text is in Georgian by its UTF8 char range.

    NOTE(review): the range U+10D0-U+10F1 covers the common Mkhedruli
    letters but not the full Georgian block (U+10A0-U+10FF) — confirm this
    narrower range is intentional before widening it.
    """
    match = re.search(r"[\u10D0-\u10F1]+", txt)
    return match is not None
0f232b05994db258210dd431427ff98837f049c2
480,223
from pathlib import Path


def find_config_file(src: Path) -> Path:
    """Look for sniptly.toml config file

    Args:
        src (Path): File or folder where to look for sniptly snippets.

    Raises:
        FileNotFoundError: when no sniptly.toml exists in src or any parent.

    Returns:
        Path: Path to the config file.
    """
    ancestors = src.resolve().parents
    # For a directory, also consider the directory itself.
    paths_to_search_in = [src, *ancestors] if src.is_dir() else ancestors
    for folder in paths_to_search_in:
        candidate = folder / "sniptly.toml"
        if candidate.exists():
            return candidate
    raise FileNotFoundError(
        f"Config file was not found. Looked in {str(paths_to_search_in)}"
    )
82d0bdafce311087eed2b15c260e11d737df2d54
66,637
def is_good_read(a_read, min_q, min_pct):
    """Check whether a read has >= min_pct bases of Q >= min_q.

    a_read must expose letter_annotations["phred_quality"] (e.g. a
    Biopython SeqRecord).
    """
    qualities = a_read.letter_annotations["phred_quality"]
    n_good = len(list(filter(lambda q: q >= min_q, qualities)))
    pct_good = round(100 * n_good / len(qualities))
    return pct_good >= min_pct
056e441d20503f1a78d936c0e0a9695e62a428b9
437,180
def find_home_listing_urls(soup):
    """
    Finds all the relative individual house data links on a landing page.

    :param soup: a bs4 Soup object
    :return: list of strings. Each string is a relative URL representing
        one house listing.
    """
    anchors = soup.find_all("a", class_="cover-all")
    return [anchor['href'] for anchor in anchors]
9b4072e2f9167bc1c59eae7618a07826d001ef42
68,987
def mpii_to_openpose(mpii):
    """
    Converts keypoints in MPII order to OpenPose order.

    Parameters
    ----------
    mpii : numpy.ndarray
        Keypoints in MPII order; axis 1 is the joint axis (indices up to 15
        are selected, and 15 joints are kept).

    Returns
    -------
    kpss : numpy.ndarray
        Keypoints in OpenPose order.
    """
    # OpenPose joint index -> MPII joint index
    OPENPOSE_FROM_MPII = [9, 8, 13, 14, 15, 12, 11, 10, 6, 3, 4, 5, 2, 1, 0]
    return mpii[:, OPENPOSE_FROM_MPII, :]
6aaa165f006f5d3350043c375d7e82e278f3d98c
196,220
import collections


def multi_dict(pairs):
    """
    Given a set of key value pairs, create a dictionary.
    If a key occurs multiple times, stack the values into an array.

    Can be called like the regular dict(pairs) constructor

    Parameters
    ------------
    pairs: (n, 2) array of key, value pairs

    Returns
    ----------
    result: dict, with all values stored (rather than last with regular dict)
    """
    stacked = collections.defaultdict(list)
    for key, value in pairs:
        stacked[key].append(value)
    return stacked
ad3ad2f1e6dd591b77a0238e5199c24a36ccc49c
298,659
def KToF(temp):
    """
    Converts temperature from Kelvin to Fahrenheit

    :param temp: temperature in Kelvin
    :return: the temperature in Fahrenheit, rounded to 0 decimal places
    """
    # BUG FIX: use the exact Kelvin offset 273.15 (the original used 273,
    # skewing every conversion by 0.27 degrees F).
    C = temp - 273.15
    F = (C * 9 / 5) + 32
    return round(F, 0)
4551d1d7a6b170bc3fae5dc4e92f7f6bc3557079
124,326
def convert_contract(contract):
    """
    Map a contract from the DB format to what we want to send back
    (_id kept, temp_type -> title, template -> body).
    """
    return dict(
        _id=contract["_id"],
        title=contract["temp_type"],
        body=contract["template"],
    )
d04e4fbf9ffe2db7e432da27858333b062c39272
321,478
def expand_value(value, params):
    """Expand str / list value with parameters by using str.format().

    Falsy values are returned unchanged; list values are expanded
    element-wise.
    """
    if not value:
        return value
    if isinstance(value, list):
        return [element.format(**params) for element in value]
    return value.format(**params)
f8fe0633beb4073d16aae2a56fca70e8701778e5
599,138
def get_player_scoring_dict(method='nfl.com'):
    """Return a dict mapping individual player stats to scoring functions.

    All stats in the returned dict are part of the nflgame API except for
    the custom ``defense_two_pt_return``.

    :param method: scoring scheme: ``'nfl.com'`` (default) or
        ``'fantasydata.com'`` (stats match the order and scoring at
        https://fantasydata.com/developers/fantasy-scoring-system/nfl
        for individual players).
    :return: dict of ``stat name -> callable(stat value) -> fantasy points``
    :raises ValueError: if *method* is not one of the two known schemes

    Modified from https://github.com/BurntSushi/nflgame/wiki/Cookbook
    #calculate-the-fantasy-score-of-all-players-for-a-week
    """
    ppr = True  # point-per-reception league

    def times(factor):
        # Linear scoring rule: points = stat * factor.
        return lambda stat: stat * factor

    def field_goal(yds):
        # 5 points for 50+ yard field goals, 3 for any shorter make.
        return 5 if yds >= 50 else (3 if yds > 0 else 0)

    reception = times(1) if ppr else times(0.5)

    if method == 'nfl.com':
        return {
            # OFFENSE
            'passing_yds': times(.04),
            'passing_tds': times(4),
            'passing_ints': times(-2),
            'rushing_yds': times(.1),
            'rushing_tds': times(6),
            'receiving_rec': reception,
            'receiving_yds': times(.1),
            'receiving_tds': times(6),
            'fumbles_rec_tds': times(6),
            'fumbles_lost': times(-2),
            # 2-point conversions
            'passing_twoptm': times(2),
            'rushing_twoptm': times(2),
            'receiving_twoptm': times(2),
            # KICKING
            'kicking_xpmade': times(1),
            'kicking_fgm_yds': field_goal,
            # INDIVIDUAL DEFENSIVE PLAYERS
            'defense_puntblk': times(1),
            'defense_fgblk': times(1),
            'defense_xpblk': times(1),
            'defense_safe': times(2),
            'defense_two_pt_return': times(2),  # custom stat
        }
    if method == 'fantasydata.com':
        return {
            # OFFENSIVE PLAYERS
            'passing_yds': times(.04),
            'passing_tds': times(4),
            'passing_ints': times(-2),
            'rushing_yds': times(.1),
            'rushing_tds': times(6),
            'receiving_rec': reception,
            'receiving_yds': times(.1),
            'receiving_tds': times(6),
            'passing_twoptm': times(2),
            'rushing_twoptm': times(2),
            'receiving_twoptm': times(2),
            'kickret_tds': times(6),
            'fumbles_lost': times(-2),
            'fumbles_rec_tds': times(6),
            # INDIVIDUAL DEFENSIVE PLAYERS
            'defense_tkl': times(1),
            'defense_ast': times(0.5),
            'defense_sk': times(2),
            'defense_sk_yds': times(.1),
            'defense_tkl_loss': times(1),
            'defense_qbhit': times(1),
            'defense_pass_def': times(1),
            'defense_int': times(3),
            'defense_ffum': times(3),
            'defense_frec': times(3),
            'defense_tds': times(6),
            'defense_two_pt_return': times(2),  # custom stat
            # KICKING
            'kicking_xpmade': times(1),
            'kicking_fgm_yds': field_goal,
        }
    raise ValueError("{} is not a valid value for `method`!".format(method))
3bfed4ebce6bea65fb4977eed9adcca382e5adcd
194,722
from typing import List
from typing import Tuple
from typing import Union
from pathlib import Path


def files_size(files: List[Tuple[Union[str, Path], int]]) -> int:
    """Return the total size of the supplied (path, size) pairs.

    :param files: list of tuples whose second element is a size in bytes
    :return: sum of all the size entries
    """
    return sum(entry[1] for entry in files)
9077c53f8efbcbe553d5d5610f2ac313f2cb1e55
391,272
def _clip(value, lower, upper): """ Helper function to clip a given value based on a lower/upper bound. """ return lower if value < lower else upper if value > upper else value
a84395414ca4fd6739ecebc6c2e0c1ee4716f506
637,639
def choose_color_by_layertype(layertype):
    """Return the node color for a layer type in the network graph.

    Unknown layer types fall back to the default blue.
    """
    palette = {
        'Conv': '#FF5050',
        'Embedding': '#FF9900',
        'FullConnect': '#CC33FF',
        'MaxPooling': '#66CC66',
        'AvgPooling': '#66CC66',
        'DynamicPooling': '#66CC66',
        'Lstm': '#B5E61D',
        'Gru': '#B5E61D',
    }
    return palette.get(layertype, '#6495ED')
7cf32790a0c4b51c3d24cd77ae2d1d0a52661517
88,182
import math


def coords_to_kavrayskiy(coords):
    """Convert geographical (lat, lng) degrees to Kavrayskiy VII coordinates.

    A Kavrayskiy VII map is defined with the following dimensions:
    - Height: pi units
    - Width: sqrt(3) * pi units
    """
    lat, lng = (deg * math.pi / 180 for deg in coords)  # degrees -> radians
    # Kavrayskiy VII projection: x scales longitude by a latitude-dependent
    # factor, y is simply the latitude in radians.
    x = (3 * lng / 2) * math.sqrt((1 / 3.) - (lat / math.pi) ** 2)
    return (x, lat)
963269e5f4bae78a536d8917c88d5c493b3d5b7f
46,515
import torch


def cuda_network(network):
    """Move *network* to the GPU when CUDA is available.

    Leaves the network untouched otherwise; always returns the network.
    """
    if not torch.cuda.is_available():
        return network
    print('CUDA available: converting network to CUDA')
    network.cuda()
    return network
78729d2e924e216ee976653cebf59b60ce30dff4
484,082
def EI(sections, normal=None): # {{{
    """Calculate the bending stiffness of a cross-section.

    The cross-section is composed out of rectangular nonoverlapping sections
    that can have different Young's moduli.
    Each section is represented by a 4-tuple (width, height, offset, E).
    The offset is the distance from the top of the section to the top of the
    highest section. This should always be a positive value.
    E is the Young's modulus of the material of this section.

    Arguments:
        sections: Iterable of section properties.
        normal: The Young's modulus to which the total cross-section will be
            normalized. (Not used anymore, retained for compatibility.)

    Returns:
        Tuple of EI, top and bottom. Top and bottom are with respect to the
        neutral line.

    Examples:
        >>> E = 210000
        >>> B = 100
        >>> H = 20
        >>> sections = ((B, H, 0, E),)
        >>> EI(sections)
        (14000000000.0, 10.0, -10.0)
        >>> B = 100
        >>> h = 18
        >>> t = 1
        >>> H = h + 2 * t
        >>> E = 210000
        >>> sections = ((B, t, 0, E), (B, t, h+t, E))
        >>> EI(sections)
        (3794000000.0, 10.0, -10.0)
        >>> E1, E2 = 200000, 71000
        >>> t1, t2 = 1.5, 2.5
        >>> H = 31
        >>> B = 100
        >>> sections = ((B, t1, 0, E1), (B, t2, H-t2, E2))
        >>> EI(sections)
        (9393560891.143106, 11.530104712041885, -19.469895287958117)
    """
    # The *normal* parameter is ignored: the first section's Young's modulus
    # is always used as the normalization modulus.
    normal = sections[0][-1]
    # Scale each width by E/normal so the composite section can be treated
    # as if it were homogeneous (transformed-section method).
    normalized = tuple((w * E / normal, h, offs) for w, h, offs, E in sections)
    A = sum(w * h for w, h, _ in normalized)  # total transformed area
    # First moment of area about the top edge; offs + h/2 is each
    # rectangle's centroid depth.
    S = sum(w * h * (offs + h / 2) for w, h, offs in normalized)
    yn = S / A  # depth of the neutral line measured from the top
    # Find any geometry that straddles yn.
    to_split = tuple(g for g in sections if g[2] < yn and g[1] + g[2] > yn)
    geom = tuple(g for g in sections if g not in to_split)
    # split that geometry.
    # The new tuples have the format (width, height, top, bottom, E), where
    # top and bottom are distances from the neutral line (positive above).
    new_geom = []
    for w, h, offs, E in to_split:
        h1 = yn - offs  # part of the rectangle above the neutral line
        h2 = h - h1     # part below the neutral line
        new_geom.append((w, h1, h1, 0, E))
        new_geom.append((w, h2, 0, -h2, E))
    # Convert the remaining geometry to reference yn.
    for w, h, offs, E in geom:
        new_geom.append((w, h, yn - offs, yn - offs - h, E))
    # Integrate E*y^2 dA per rectangle: E * w * (top^3 - bot^3) / 3.
    EI = sum(E * w * (top ** 3 - bot ** 3) / 3 for w, h, top, bot, E in new_geom)
    top = max(g[-3] for g in new_geom)  # extreme fibre above the neutral line
    bot = min(g[-2] for g in new_geom)  # extreme fibre below (negative)
    return EI, top, bot
24b5ca79f0a3f041586e2f9d7fe8d7953cd96780
699,859
def gen_ticks(bound: int):
    """Generate tick positions for a reversed axis, stepping down by 7.

    The tick for *bound* itself maps to 0 on the reversed axis; further
    ticks are placed every 7 units below it while they stay positive.

    Parameters
    ----------
    bound : `int`
        The highest value; it is always the first tick.

    Returns
    -------
    `List[int]`
        Tick positions in decreasing order.
    """
    ticks = [bound]
    position = bound - 7
    while position > 0:
        ticks.append(position)
        position -= 7
    return ticks
1e43327c637fe52462437975b61406a895c69d1f
283,134
def pattern_sort(lst, pattern, key=None, reverse=False):
    """Sort *lst* (in place) with *key*/*reverse*, then reorder by *pattern*.

    After the initial sort, each sorted element is paired with the pattern
    value at the same position, and the result is ordered by ascending
    pattern value: the element paired with the smallest pattern number
    comes first.

    lst: the list to sort (NOTE: sorted in place as a side effect) \\
    pattern: list of numbers, one per element, giving the target ordering \\
    key: sorting key applied in the initial sort (function) \\
    reverse: whether the initial sort is descending (bool)
    """
    lst.sort(key=key, reverse=reverse)
    paired = sorted(zip(lst, pattern), key=lambda pair: pair[1])
    return [element for element, _rank in paired]
142dfaa760ca68b49696270b6026b4c489b13938
490,455
from bs4 import BeautifulSoup


def extract_post_text(html):
    """Return the text of every forum post contained in *html*."""
    soup = BeautifulSoup(html, 'html.parser')
    post_divs = soup.find_all('div', 'showforumtopic-message-contents-text')
    return [div.text for div in post_divs]
7a053e9bf578429b495957aed237f69f8a3595eb
192,019
def elem2dict(node):
    """Convert an lxml.etree node tree into a dict.

    Children with the same tag overwrite each other; a child with no text
    is converted recursively. Namespace prefixes (``{uri}tag``) are
    stripped from tag names.
    """
    result = {}
    for child in node.iterchildren():
        tag = child.tag
        if '}' in tag:
            tag = tag.split('}')[1]
        result[tag] = child.text if child.text else elem2dict(child)
    return result
3b754af9c9a518a9e88d94c3868fc4d2f2eed151
204,468
def list_size_reducer(reduction_factor, your_list):
    """Reduce a list to every ``reduction_factor``-th element.

    Optional helper to shrink the long inspiral output lists (realtimes,
    omega, i_phase and i_amp) to conserve storage space; if you reduce one
    of those lists you should reduce all of them so they stay aligned.
    The typical reduction factor used in our research is 100.

    Parameters
    ----------
    reduction_factor: int
        The factor you want to reduce the list length by.
    your_list: list
        The list you want to reduce.

    Returns
    -------
    reduced_list: list
        Every reduction_factor-th element of your_list, starting with the
        first. An empty input yields an empty list (the original append
        loop raised IndexError on empty input).
    """
    # input type checking
    assert type(reduction_factor) == int, 'reduction_factor should be an int.'
    assert type(your_list) == list, ('The thing to be reduced needs to be a '
                                     'list.')
    # Stride slicing picks indices 0, rf, 2*rf, ... -- identical to the
    # original append loop for non-empty input, and safe for empty lists.
    return your_list[::reduction_factor]
377aa817fd172fad24ea1caf55f81a14e2fe3785
113,686
def uVal(thick):
    """Compute the wall's U-value from the insulation layer thickness.

    Parameters
    ----------
    thick : float
        insulation thickness in m

    Returns
    -------
    float
        U-value (reciprocal of the total thermal resistance).
    """
    surface_resistance = .17
    # (thickness [m], conductivity) per layer; the second layer is the
    # variable insulation.
    layers = (
        (.025, .33),
        (thick, .047),
        (.2, .51),
        (.013, .4),
    )
    resistance = surface_resistance
    for thickness, conductivity in layers:
        resistance += thickness / conductivity
    return 1 / resistance
2bdb74ad673feddc10f7e83613df577b198d0a38
521,710
import re from datetime import datetime def _get_iso_date(date_string: str) -> str: """ convert date from the form 1/22/2021 13:28:27 to iso format """ regex = r'\d{1,2}/\d{1,2}/\d{4} \d{1,2}:\d{1,2}:\d{1,2}' found_list = re.findall(regex, date_string) if found_list: date_value = datetime.strptime(date_string, '%m/%d/%Y %H:%M:%S') return date_value.isoformat() return date_string
fa9d6c733014ad4d738d63f2399085f08949c108
683,851
import struct


def upstream_forward(sock, data):
    """Forward a DNS request to an upstream server using TLS.

    Params:
        sock - socket object connected to the upstream server
        data - wireformat DNS request packet to forward

    Returns:
        A wireformat DNS response packet.

    Notes:
        Uses the DNS over TLS framing from RFC 7858: each message is
        preceded by a two-byte big-endian length. The response's length
        prefix is stripped before returning.
    """
    length_prefix = struct.pack('!H', len(data))
    sock.send(length_prefix + data)
    response = sock.recv(4096)
    return response[2:]
7447786ae51833ff486b2924f5a85639eb760fbf
606,791
def app_config(app_config):
    """Application config fixture: set the ref-resolver entries in place."""
    overrides = {
        "RECORDS_REFRESOLVER_CLS": "invenio_records.resolver.InvenioRefResolver",
        "RECORDS_REFRESOLVER_STORE": "invenio_jsonschemas.proxies.current_refresolver_store",
        # Variable not used. We set it to silent warnings
        "JSONSCHEMAS_HOST": "not-used",
    }
    app_config.update(overrides)
    return app_config
5ed3278a648382a2f164e3b29a0605b43dfe228d
478,622
def unpack_point_msg(msg, stamped=False):
    """Return the (x, y, z) coordinates from a Point(Stamped) message.

    When *stamped* is true, the point is read from ``msg.point``.
    """
    point = msg.point if stamped else msg
    return (point.x, point.y, point.z)
9c73105be73cbcdc55cef0f268851567dcbcd822
651,445
def remove_toffoli_from_line(local_qasm_line, qubit_1, qubit_2, target_qubit):
    """Remove a specific Toffoli gate call from a line of qasm.

    Args:
        local_qasm_line: The line of qasm
        qubit_1: The first control qubit of the Toffoli gate
        qubit_2: The second control qubit
        target_qubit: The target qubit

    Returns:
        The same line of qasm without the Toffoli gate call.
    """
    gate_call = "Toffoli q[{}],q[{}],q[{}]".format(qubit_1, qubit_2, target_qubit)
    # Strip the call in every syntactic context it can occur in: followed
    # by a parallel bar, preceded by one, alone inside parallel braces,
    # or bare.
    for pattern in (gate_call + " | ",
                    " | " + gate_call,
                    "{" + gate_call + "}",
                    gate_call):
        local_qasm_line = local_qasm_line.replace(pattern, "")
    return local_qasm_line
3bd95d5514af966dddb1f203b43749b51def1646
111,423
def dict2list_param(param):
    """Convert a dict of dicts to a list, storing each key as ``"name"``.

    Non-dict inputs pass through unchanged. NOTE: the inner dicts are
    mutated in place (a ``"name"`` entry is added), matching the original
    behavior.
    """
    if not isinstance(param, dict):
        return param
    items = []
    for name, entry in param.items():
        entry["name"] = name
        items.append(entry)
    return items
86fb42be91598987c06c8f6f229b9948b3ffc59a
580,360
def _get_named_code(error) -> int: """Gets the error code as an integer (e.g. 404) from the error string.""" return int(str(error)[:3])
28aec57eccf2793c1efc084f0d86bb22eed973d9
118,803
def clamp_value(value, max_value=1.0, min_value=0.0):
    """Clamp *value* between *min_value* and *max_value*.

    Args:
        value (float): value to clamp

    Kwargs:
        max_value (float): upper bound
        min_value (float): lower bound

    Returns:
        float
    """
    capped = min(value, max_value)   # apply the upper bound first,
    return max(capped, min_value)    # then the lower bound wins ties
8dbc949361a0c7aff5501bf2a9e3f3faa1821c98
96,246
def is_auto_primary_key(primary_key: bool, autoincrement: bool) -> bool:
    """Check whether a field is an autoincrement primary key (-> optional).

    :param primary_key: flag if field is a pk field
    :type primary_key: bool
    :param autoincrement: flag if field should be autoincrement
    :type autoincrement: bool
    :return: result of the check
    :rtype: bool
    """
    if primary_key:
        return autoincrement
    return primary_key
3c3c06b9b7e3453cb5078e9c4e5d2355724d1dad
685,844
def transform_symbol_entrez_map(json_data,
                                id_field='hugoGeneSymbol',
                                values_field='entrezGeneId'):
    """Group `values_field` entries by upper-cased `id_field` key.

    Transforms a list of homogeneous dicts into a dict of lists, keyed by
    the (upper-cased) `id_field` value, each mapping to the list of the
    corresponding `values_field` entries in input order.

    >>> transform_symbol_entrez_map(
    ...     [{"hugoGeneSymbol": "A1BG", "entrezGeneId": 1},
    ...      {"hugoGeneSymbol": "A2M", "entrezGeneId": 2}]
    ... ) == {'A1BG': [1], 'A2M': [2]}
    True
    >>> transform_symbol_entrez_map(
    ...     [{"gene_alias": "A1B", "entrezGeneId": 1},
    ...      {"gene_alias": "ANG3", "entrezGeneId": 738},
    ...      {"gene_alias": "ANG3", "entrezGeneId": 9068}],
    ...     id_field="gene_alias"
    ... ) == {'A1B': [1], 'ANG3': [738, 9068]}
    True
    """
    grouped = {}
    for record in json_data:
        symbol = record[id_field].upper()
        grouped.setdefault(symbol, []).append(record[values_field])
    return grouped
cf2c531d2d36ea0d74b5a443d26d686a0dce55a1
419,535
def parallel_anims(anims):
    """Combine *anims* into a single animation that plays them all at once.

    The returned callable invokes each animation with the same context and
    time, in the order given.
    """
    def combined(ctx, time):
        for animation in anims:
            animation(ctx, time)
    return combined
1393e60836a6a839fd195fd0b5f3464ae3f4b437
587,191
def check_empty(values_cookies, values_no_cookies, negative_values,
                method=lambda x, y: x in y, convert=lambda x: x):
    """Check if exactly one of the two runs produced only negative results.

    Returns True when one mode yields *only* negative values and the other
    yields *no* negative values (with two values per run).

    values_cookies: Series, results of run with cookies
    values_no_cookies: Series, results of run without cookies
    negative_values: list, results that count as negative
    method: function(val, negative_values) deciding negativity, default: in
    convert: function(val) applied to each value first, default: identity
    """
    def count_negatives(series):
        # Each `method` result is a bool; summing counts the negatives.
        return sum(method(convert(item), negative_values)
                   for item in series.values)

    negatives_with = count_negatives(values_cookies)
    negatives_without = count_negatives(values_no_cookies)
    # Two negatives in total that are not split evenly can only mean one
    # run had both negatives and the other had none.
    return (negatives_with + negatives_without == 2
            and negatives_with != negatives_without)
c220684279526cfaf0b95e500398076d450fd23c
220,617
def is_digit_or_single_char(token):
    """Return True for tokens that can form acronyms or dates.

    Acronyms are built from single-character tokens (i.e. I-B-M); dates
    are built from purely numeric tokens of any length (i.e. 2020-10-10).
    """
    return len(token) < 2 or token.isdigit()
5152a2628341e28f7e74eca94bbd7e60497ca45d
410,991
def get_last_byte(name):
    """Return the final byte of the file *name* as a one-byte ``bytes``.

    Bug fix: the original opened the file in text mode ('r'); in Python 3
    a relative seek from the end (``seek(-1, 2)``) on a text-mode file
    raises ``io.UnsupportedOperation``, so the function could never work.
    Opening in binary mode makes the end-relative seek legal.

    :param name: path of the file to read
    :return: the last byte of the file (``bytes`` of length 1)
    :raises OSError: if the file is empty (seek before the start)
    """
    with open(name, 'rb') as infp:
        infp.seek(-1, 2)  # whence=2 is os.SEEK_END: one byte back from EOF
        return infp.read(1)
2a19bd4bb9816fc2fb36aa2b518bfc6f4aa4fba7
318,932
import importlib


def get_module(name):
    """Import a sub-command module by name.

    :param name: module name to import (relative to
        ``api_builder.subcommands``)
    :return: the imported module object
    """
    return importlib.import_module(f"api_builder.subcommands.{name}")
63f362054452034a0712db72a6c1a201553664d2
306,050
def dedupe_tweets(tweet_list):
    """
    Return de-duped list of tweets by throwing out retweets.

    Sorts the input in place by creation time (oldest first), drops any
    tweet whose own id -- or whose retweeted source's id -- was already
    seen, then returns the survivors newest-first.

    NOTE(review): the sort key uses subscript access (``x['created_at']``)
    while the loop body uses attribute access (``tweet.id_str``); these
    only coexist on objects supporting both styles -- confirm against the
    actual tweet type used by callers.
    """
    tweets = []
    id_set = set()  # ids already seen (both tweet ids and retweet-source ids)
    # Oldest first, so the earliest occurrence of a duplicate is the one kept.
    tweet_list.sort(key=lambda x: x['created_at'])
    for tweet in tweet_list:
        if tweet.id_str in id_set:
            continue
        id_set.add(tweet.id_str)
        try:
            # If this is a retweet, also dedupe on the original tweet's id;
            # a retweet of something already seen is skipped entirely.
            rt_id_str = tweet.retweeted_status.id_str
            if rt_id_str in id_set:
                continue
            id_set.add(rt_id_str)
        except AttributeError:
            # Not a retweet: no ``retweeted_status`` attribute present.
            pass
        tweets.append(tweet)
    # Restore reverse-chronological order (newest first) for the result.
    return list(reversed(tweets))
425d718a43121adeb557866eef305fa6525a7512
225,760
def MatchScorer(match, mismatch):
    """Build a pairwise score function from match/mismatch values.

    match and mismatch should both be numbers; typically match is positive
    and mismatch negative. The returned function has signature
    f(x, y) -> number.
    """
    def scorer(x, y):
        return match if x == y else mismatch
    return scorer
fe3829efc64cb4d9785e52b8af6949c147481902
1,636
def rotate180_augment(is_training=True, **kwargs):
    """Return the 180-degree rotation augmentation spec.

    Active only during training; extra keyword arguments are accepted for
    interface compatibility and ignored.
    """
    del kwargs  # unused, accepted for a uniform augment signature
    return [('rotate180', {})] if is_training else []
4973181fed90fbe4639de7f2eae8b2fe3b5f2448
620,048
def bk_category_initialized(bk_category):
    """Return the bk_category object with its grid elements initialized.

    Same object as from the ``bk_category`` fixture, after calling its
    ``initialize_grid_elements`` method.
    """
    bk_category.initialize_grid_elements()
    return bk_category
27344a88a1b1a5beb329167725fd803981304a5d
322,104
def export(varname):
    """Return a shell line ``export varname="<escaped value>"\\n``.

    The value is looked up in this module's globals, stripped of
    surrounding quotes, and has double quotes and literal ``\\n`` sequences
    escaped for the shell.
    """
    raw = str(globals()[varname])
    escaped = raw.strip("'\"").replace("\"", "\\\"").replace(r"\n", r"\\n")
    return f"export {varname}=\"{escaped}\"\n"
0c0e0d09cfbc8784fc4ce7e265b5be4e534dc4ff
45,254