content: string (lengths 39 to 14.9k)
sha1: string (length 40)
id: int64 (0 to 710k)
def no_augmentation(img):
    """Returns an iterable to be compatible with cropping augmentations."""
    return (img, )
1536ec21d69996c4ff849a602c647873e3bc8617
64,346
from datetime import datetime

def ts_dt(ts_epoch):
    """datetime representation of a timestamp."""
    return datetime.utcfromtimestamp(ts_epoch / 1000)
a160eb0443bd1a07b3d6bff858d060e494a01190
64,348
def _identity(ev, state_args=None, **kwargs):
    """An identity mapper callable."""
    return ev
e31b1c90d4f4da9282cab5413a1f8ab6899f511b
64,353
import math

def boxed_message(message):
    """Draw a box around a message.

    Parameters:
    -----------
    message : str
        Message to be boxed

    Returns:
    --------
    str
        Boxed message

    References:
    -----------
    - https://github.com/quapka/expecto-anything/blob/master/boxed_msg.py
    """
    def format_line(line, max_length):
        half_diff = (max_length - len(line)) / 2
        return "{}{}{}{}{}".format(
            "| ",
            " " * math.ceil(half_diff),
            line,
            " " * math.floor(half_diff),
            " |\n"
        )

    lines = message.split("\n")
    max_length = max(len(l) for l in lines)
    horizontal = "{}{}{}".format("+", "-" * (max_length + 2), "+\n")
    result = horizontal
    for l in lines:
        result += format_line(l, max_length)
    result += horizontal
    return result.strip()
d90b4fbf82f6317e0f6b9f5e5c17fd430ab6ae82
64,355
def is_past_due(date1, date2):
    """
    Checks whether date2 is later than date1.

    :param date1: First date
    :param date2: Second date
    :return: True if date2 is later than date1. False if not, or if either
        of the two arguments is None.
    """
    if date1 is None or date2 is None:
        return False
    return date2 > date1
93e941b1a73ffb6392e3eb7113401e87e01b9bdb
64,356
def _permute(c, p):
    """Returns the permutation of the given 32-bit or 64-bit code with
    the specified permutation table."""
    # NOTE: only difference between 32 & 64 bit permutations
    # is that len(p)==8 for 32 bit, and len(p)==16 for 64 bit.
    out = 0
    for r in p:
        out |= r[c & 0xf]
        c >>= 4
    return out
fb249f96ae44dbfda72e32863b4c0efa334e2217
64,357
import re

def sentences_github(text):
    """
    Returns a list of sentences in some text that contain a mention of GitHub.

    The sentences might not necessarily contain a properly formatted repository URL.
    For example, this function can be used to extract sentences that *may* contain a
    repository URL, because the regex to subsequently identify properly formatted
    repository URLs is less efficient.

    Args:
        text: A string containing some text

    Returns:
        List of sentences that contain a mention of GitHub
    """
    if text is None:
        return []
    formatted = re.sub(r'[\s\n\r]+', ' ', re.sub(r'-\n', '-', re.sub(r'/\n', '/', text)))
    sentences = re.split(r'[.?!]\s+', formatted)
    return list(filter(re.compile('[gG]it[hH]ub').search, sentences))
ed39d68bff3a466483819549c4349710d4e8d060
64,359
def pad_sequence(sequences, batch_first=False, padding_value=0, len_factor=1):
    """
    Arguments:
        sequences (list[Tensor]): list of variable length sequences.
        batch_first (bool, optional): output will be in ``B x T x *`` if True,
            or in ``T x B x *`` otherwise
        padding_value (float, optional): value for padded elements. Default: 0.
        len_factor (int, optional): value for a factor of the length to be. Default: 1.

    Returns:
        Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``.
        Tensor of size ``B x T x *`` otherwise
    """
    max_size = sequences[0].size()
    trailing_dims = max_size[1:]
    max_len = max([s.size(0) for s in sequences])
    rem = max_len % len_factor
    if rem != 0:
        max_len += len_factor - rem
    if batch_first:
        out_dims = (len(sequences), max_len) + trailing_dims
    else:
        out_dims = (max_len, len(sequences)) + trailing_dims

    out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
    for i, tensor in enumerate(sequences):
        length = tensor.size(0)
        # use index notation to prevent duplicate references to the tensor
        if batch_first:
            out_tensor[i, :length, ...] = tensor
        else:
            out_tensor[:length, i, ...] = tensor
    return out_tensor
24b755b58747a6cd918aaea5729736107141fe99
64,366
def flowers_dict(filename):
    """
    This function opens flowers.txt, reads every line in it, and saves it as a dictionary.
    """
    flowers = {}  # initialize the flowers dictionary
    with open(filename) as f:  # open the text file as f
        for line in f:  # read every line in the text file
            letter = line.split(": ")[0].lower()  # split the line by ":" and take the first part, i.e. the letter, in lower case
            flower = line.split(": ")[1]  # split the line by ":" and take the second part, i.e. the flower
            # print(flower)
            flowers[letter] = flower  # enter the letter and flower as a key/value pair in the flowers dictionary
    return flowers
7edc3ee0a3120549df7a02c5ec0723d72367b071
64,369
def calc_n_virtual_pages(n_sheets: int) -> int:
    """
    Converts #sheets to #pages, but rounding up to a multiple of 4.
    """
    if n_sheets % 2 == 0:
        return n_sheets * 2
    else:
        return (n_sheets + 1) * 2
38f4b5b9ff92ac3c0a5c17d2e38113b2ce4b0ba8
64,372
def jd2gpst(jd):
    """
    Julian date to GPS time (GPS week, GPS day of week, GPS seconds of week)
    """
    mjd = jd - 2400000.5
    gps_start_mjd = 44243
    gps_week = (int(mjd) - gps_start_mjd - 1) // 7
    if mjd - gps_start_mjd - 1 < 0:
        gps_week -= 1
    gps_sow = (mjd - (gps_start_mjd + gps_week * 7 + 1)) * 86400
    if mjd - gps_start_mjd < 0 and gps_sow >= 604800:
        gps_sow = gps_sow - 604800
    gps_dow = int(gps_sow // 86400)
    return gps_week, gps_dow, gps_sow
81a0373d3eb4d020afa66663bb0cfda2e4db735e
64,375
def sidebar(app):
    """The sidebar menu"""
    return app.sidebar.objects[0]
b82a12d6766537e53b0c6c4ceb4f510ac6cf0e1a
64,379
def _binary_op(result_name, func_name, arg1_name, arg2_name):
    """
    Generates a binary operator indicated by func_name in infix notation with
    arguments arg1_name and arg2_name, storing the result in result_name.
    Supported func_names are add, sub, mul, and div.
    """
    funcs = {'add': '+', 'sub': '-', 'mul': '*', 'div': '/'}
    return f"{result_name} = {arg1_name} {funcs[func_name]} {arg2_name}"
5a8cb925aefa4850f182c87595fee9c1409809c6
64,380
def get_image_id(image_dict):
    """
    Turns the image_id dictionary found in GPV data into a single image_id
    """
    return f'coco/{image_dict["subset"]}/COCO_{image_dict["subset"]}_{str(image_dict["image_id"]).zfill(12)}.jpg'
9350c5db453d0a20caa284764c263e4df808f79f
64,381
def get_airdrop_details(cur, airdrop_id):
    """
    Return all the details about an airdrop from the database

    :param airdrop_id:
    :return: a dictionary with all the airdrop details
    """
    airdrop_details = {}
    airdrop_transactions = []
    cur.execute("SELECT hash, status, date, id FROM airdrops WHERE id = ?", (airdrop_id, ))
    airdrop = cur.fetchone()
    airdrop_details['airdrop_id'] = airdrop[0]
    airdrop_details['status'] = airdrop[1]
    airdrop_details['date'] = airdrop[2]
    cur.execute("SELECT hash, name, status, date FROM transactions WHERE airdrop_id = ?", (airdrop[3], ))
    transactions = cur.fetchall()
    for trans in transactions:
        airdrop_transaction = {}
        airdrop_transaction['transaction_hash'] = trans[0]
        airdrop_transaction['transaction_name'] = trans[1]
        airdrop_transaction['transaction_status'] = trans[2]
        airdrop_transaction['transaction_data'] = trans[3]
        airdrop_transactions.append(airdrop_transaction)
    airdrop_details['transactions'] = airdrop_transactions
    return airdrop_details
32a5dde6ffdbd5e5c35a2814a46668bfa7ba2011
64,385
def create_steering_control(packer, apply_steer, lkas_active, idx):
    """Creates a CAN message for the Honda DBC STEERING_CONTROL."""
    values = {
        "STEER_TORQUE": apply_steer if lkas_active else 0,
        "STEER_TORQUE_REQUEST": lkas_active,
    }
    # Set bus 2 for accord and new crv.
    # bus = 2 if car_fingerprint in HONDA_BOSCH else 0
    bus = 0
    return packer.make_can_msg("STEERING_CONTROL", bus, values, idx)
1728afbd4df2ff9049124b37629e15a66e79bd59
64,386
def as_list(c):
    """Ensures that the result is a list.

    If input is a list/tuple/set, return it.
    If it's None, return empty list.
    Else, return a list with input as the only element.
    """
    if isinstance(c, (list, tuple, set)):
        return c
    elif c is None:
        return []
    else:
        return [c]
c44b0d781abf8d8cc5ae702805ecd69422b2b0ec
64,387
import typing as _typing

def sort_lists_by_list(lists: _typing.List[_typing.List],
                       key_list_index: int = 0,
                       desc: bool = False) -> _typing.Iterator[_typing.Tuple]:
    """Sort multiple lists by one list.

    The key_list gets sorted. The other lists get sorted in the same order.

    Reference: https://stackoverflow.com/a/15611016/8116031

    :param lists: list of lists to be sorted
    :param key_list_index: index of list in lists to sort by
    :param desc: False: ascending order
    :return: sorted lists
    """
    # note: two-liner equivalent:
    # line1: convert (zip(a,b,c,...)) lists into list of tuples, and
    # sort elements of this list by key_list elements, i.e. key_list-th
    # element in each tuple.
    # line2: unpack tuple list back into original, sorted, lists using
    # nested list comprehensions and tuple unpacking.
    # sorted_lists = sorted(zip(*lists), reverse=desc, key=lambda x: x[key_list])
    # return [[x[i] for x in sorted_lists] for i in range(len(lists))]
    return zip(*sorted(zip(*lists), reverse=desc, key=lambda x: x[key_list_index]))
f6125ab98ff98d38324c7242b8b2db2646d5b3f8
64,388
def _get_colocation(op):
    """Get colocation symbol from op, if any."""
    try:
        return op.get_attr("_class")
    except ValueError:
        return None
e3b4d9a1d493f6ce6e4dea6a363c17d9a18a35d8
64,391
def power_amplifier(s, thresh, pwr=2):
    """
    Amplify elements of a positive-valued signal. Rescale the signal so that
    elements above thresh are equal to or greater than 1, and elements below
    thresh are less than one. Then take a power of the signal, which will
    suppress values less than 1, and amplify values that are greater than one.
    """
    # normalize the signal
    s /= s.max()
    # shift the signal so elements at the threshold are set to 1
    s += 1.0 - thresh
    # raise the envelope to a power, amplifying values that are above 1
    s = s**pwr
    # re-normalize
    s -= (1.0 - thresh)**pwr
    s /= s.max()
    return s
9d99b1b42c4d47d86d58fc2aaadf53db61fe4422
64,396
from typing import Union

def repeated_value(content: Union[str, list, tuple]):
    """Finds the most frequently occurring value.

    Parameters
    ----------
    content : Union[str, list, tuple]
        Contains the list/string/tuple's values.

    Returns
    -------
    repeated_value : str
        Returns the repeated value.
    """
    if type(content) == str:
        return max(set(list(content)), key=content.count)
    repeated_value = max(set(content), key=content.count)
    return repeated_value
e06383758d7ec28ae79c1e624f2a838fe871d1c5
64,397
def get_steering_angle(image_file):
    """
    Get steering angle from the filename
    """
    return float(image_file.split('_')[5])
636d0be350644ea9d4ae6a69c39bb0ed499d1d40
64,402
def _andparam(params):
    """
    string join parameter list with &
    """
    return "&".join(params)
ca7cedf4dd1169dd83f83a97f707f2e43eeaa8f7
64,408
def add_custom_variables(reco_event, truth_event):
    """
    Example of adding custom variables to the csv - this is a dummy and returns
    the value 1.0 and the eventNumber from both trees for every event.
    The objects passed to this function are the two root2csv TTrees.
    This function MUST return an array
    """
    return [1.0, reco_event.eventNumber, truth_event.eventNumber]
cebce4865314703c41e2cd346d693c518e29781e
64,409
def postprocess_phrase(phrase):
    """
    Some words might be 'cut' in an exam, as the text might span several lines.
    If we find that the last element of a sentence is '-', we remove it, and the
    word will continue on the next line. Otherwise, we are sure it is the end of
    a word.

    Args:
        phrase (string)

    Warnings:
        This implementation is specific to Spanish
    """
    if len(phrase) == 0:
        return phrase
    if phrase[-1] == "-":
        return phrase[:-1]
    else:
        return phrase + " "
85f385cb1bbd435e48806d8d2ee7dce0222890e9
64,410
def get(text=""): """ overpython.get(text="") Get user input from console. """ return input(text)
e2fe7f117e9fe3e720cfa37ee0bcb80220fdccd9
64,411
def is_leap_year(year):
    """
    Returns Boolean indicating if given year is a leap year.

    It is a leap year if the year is:
      * divisible by 4, but not divisible by 100
      OR
      * divisible by 400

    (Example of a "predicate function")

    Doctests:

    >>> is_leap_year(2001)
    False
    >>> is_leap_year(2020)
    True
    >>> is_leap_year(2000)
    True
    >>> is_leap_year(1900)
    False
    """
    return ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0)
307ff157c6af29d1ab6d544dbee857165d5ea45b
64,417
def gen_jaguar_output(mae, jin=None, charge=0):
    """
    Generates strings for a Jaguar .in file.

    Returns
    -------
    list of strings
    """
    # Read the lines from the .in file if it was provided.
    if jin:
        new_lines = [line.rstrip() for line in jin.lines]
    # If there's no Jaguar .in file provided, then put in some basic
    # ESP commands and the coordinates from the .mae file.
    else:
        # Header information.
        filename = '{}.mae'.format(mae.filename.split('.')[0])
        new_lines = [
            'MAEFILE: {}'.format(filename),
            '&gen',
            'icfit=1',
            'incdip=1']
        if charge != 0:
            new_lines.append('molchg={}'.format(charge))
        new_lines.append('&')
        # Coordinates.
        new_lines.append('&zmat')
        new_lines.extend(mae.structures[0].format_coords(format='jaguar'))
        new_lines.append('&')
    return new_lines
7c2982633f8c22e03bdd10c5b3a1537a12b722bc
64,425
def overlaps(bounds_A, bounds_B):
    """Returns True if bounds_A and bounds_B partially overlap."""
    x1_min, y1_min, x1_max, y1_max = bounds_A
    x2_min, y2_min, x2_max, y2_max = bounds_B
    x_overlaps = (x1_min <= x2_min <= x1_max) or (x2_min <= x1_min <= x2_max)
    y_overlaps = (y1_min <= y2_min <= y1_max) or (y2_min <= y1_min <= y2_max)
    return x_overlaps and y_overlaps
29eddecca05cd87efedd4b3130c6b10168dec66e
64,426
def sparse_poly_to_integer(degrees, coeffs, order):
    """
    Converts a polynomial to its decimal representation.

    Parameters
    ----------
    degrees : array_like
        List of degrees of non-zero coefficients.
    coeffs : array_like
        List of non-zero coefficients.
    order : int
        The coefficient's field order.

    Returns
    -------
    int
        The decimal representation.
    """
    assert len(degrees) == len(coeffs)
    order = int(order)
    decimal = 0
    for d, c in zip(degrees, coeffs):
        decimal += int(c) * order**int(d)
    return decimal
410f4722639c7d18a3f285aba36bf50b5a9db58c
64,427
def somalinha(line_0: list, line_1: list, line_2: list, line_3: list,
              line_4: list, line_5: list, line_6: list, linha: int):
    """Returns the sum of the elements in the row given as a parameter.

    :param line_0: First row of the 7x6 matrix.
    :param line_1: Second row of the 7x6 matrix.
    :param line_2: Third row of the 7x6 matrix.
    :param line_3: Fourth row of the 7x6 matrix.
    :param line_4: Fifth row of the 7x6 matrix.
    :param line_5: Sixth row of the 7x6 matrix.
    :param line_6: Seventh row of the 7x6 matrix.
    :param linha: Row whose elements will be summed.
    :return: False if a row does not contain six numbers, otherwise the sum of
        the elements of the row.
    """
    matriz = [line_0, line_1, line_2, line_3, line_4, line_5, line_6]
    # Check that the matrix is 7x6:
    for c in range(7):
        if len(matriz[c]) != 6:
            return False
    # Sum row `linha` of the matrix:
    soma = 0
    for m in range(6):
        soma += matriz[linha][m]
    return soma
16a61c2024df66c601da79131e12dc1cc108aa80
64,428
def CallParams(method_name, proto):
    """Returns the parameters for calling a gRPC method

    Arguments:
        method_name: Name of the remote method to be called
        proto: The protobuf to be passed to the RPC method

    Returns:
        The parameters that can be passed to grpc_cli call to execute the RPC.
    """
    return ["--protofiles=keyval.proto", "--noremotedb", "call",
            "localhost:50050", "KeyValue." + method_name, proto]
4faa908c97889fc98b7bb7af18fe59e74485dd55
64,430
def optimum_projected_function_spd(optimum_function, low_dimensional_spd_manifold, projection_matrix):
    """
    This function returns the global minimum (x, f(x)) of a test function defined on a projection
    of a SPD manifold S^D_++ in a lower dimensional SPD manifold S^d_++.
    Note that, as the inverse of the projection function is not available, the location of the
    optimum x on the low-dimensional manifold is returned.

    Note: test functions and their global minimum are defined in test_function_spd.py.

    Parameters
    ----------
    :param optimum_function: function returning the global minimum (x, f(x)) of the test function on S^d_++
    :param low_dimensional_spd_manifold: d-dimensional SPD manifold (pymanopt manifold)
    :param projection_matrix: element of the Grassmann manifold (D x d)

    Returns
    -------
    :return opt_x: location of the global minimum of the test function on the low-dimensional SPD manifold
    :return opt_y: value of the global minimum of the test function on the SPD manifold
    """
    # Global minimum on nested SPD
    nested_opt_x, opt_y = optimum_function(low_dimensional_spd_manifold)

    return nested_opt_x, opt_y
a273a7e69ca0fd95e23bf17e5c807003db4c3da1
64,432
import math

def LLR_alt(pdf, s0, s1):
    """
    This function computes the approximate generalized log likelihood ratio
    (divided by N) for s=s1 versus s=s0, where pdf is an empirical distribution
    and s is the expectation value of the true distribution.
    pdf is a list of pairs (value, probability).
    See http://hardy.uhasselt.be/Toga/computeLLR.pdf
    """
    r0, r1 = [sum([prob * (value - s)**2 for value, prob in pdf]) for s in (s0, s1)]
    return 1/2 * math.log(r0 / r1)
4140f8c1d9d1d883b003eb3a805be5d752db5919
64,436
import torch

def kullback_leibler(mean, sigma, reduction='mean'):
    """
    Kullback-Leibler divergence between a Gaussian posterior distr. with
    parameters (mean, sigma) and a fixed Gaussian prior with mean = 0 and sigma = 1
    """
    kl = -0.5 * (1 + 2.0 * torch.log(sigma) - mean * mean - sigma * sigma)  # [B, D]
    skl = torch.sum(kl, dim=1)
    if reduction == 'mean':
        return torch.mean(skl)
    elif reduction == 'sum':
        return torch.sum(skl)
    else:
        return skl
de3fd1174904211d78ea936b1bc044a99e16fb58
64,439
def opt_write_testdata(request):
    """Returns the value of the flag --write-testdata."""
    return request.config.getoption("--write-testdata")
d799fd12e324a4f11433a792c9048f9e4db3aec0
64,452
def akh(*args, **kwargs):
    """
    akh: args/kwargs helper

    Return tuple (a, k) from f(*a, **k)
    """
    return args, kwargs
17c68f17b817f9a508a3bc6e2df64a86843ce768
64,453
import socket

def fetch_host_ip(host: str) -> str:
    """
    Fetch ip by host
    """
    try:
        ip = socket.gethostbyname(host)
    except socket.gaierror:
        return ''
    return ip
f7372c8600d9f4044a6ef2c81569e4cafa900e46
64,458
def to_i3_vec(array, i3_vec_type):
    """convert a list/array to an I3Vec"""
    i3_vec = i3_vec_type()
    i3_vec.extend(array)
    return i3_vec
680040298feb7684061802ad023af4aad9339988
64,462
import networkx as nx
import uuid

def generate_nx_graph_from_dfg(dfg, start_activities, end_activities, activities_count):
    """
    Generate a NetworkX graph for reachability-checking purposes out of the DFG

    Parameters
    --------------
    dfg
        DFG
    start_activities
        Start activities
    end_activities
        End activities
    activities_count
        Activities of the DFG along with their count

    Returns
    --------------
    G
        NetworkX digraph
    start_node
        Identifier of the start node (connected to all the start activities)
    end_node
        Identifier of the end node (connected to all the end activities)
    """
    start_node = str(uuid.uuid4())
    end_node = str(uuid.uuid4())
    G = nx.DiGraph()
    G.add_node(start_node)
    G.add_node(end_node)
    for act in activities_count:
        G.add_node(act)
    for edge in dfg:
        G.add_edge(edge[0], edge[1])
    for act in start_activities:
        G.add_edge(start_node, act)
    for act in end_activities:
        G.add_edge(act, end_node)
    return G, start_node, end_node
d35783b1ddeb731594d234ccfa2767f85992d883
64,468
def _flatten(list_of_lists):
    """Transform a list of lists into a single list, preserving relative order."""
    return [item for sublist in list_of_lists for item in sublist]
6f91e0b7708dedc44564d823eb3cdb87377c9f21
64,471
def ProbBigger(pmf1, pmf2):
    """Returns the probability that a value from one pmf exceeds another."""
    total = 0.0
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            if v1 > v2:
                total += p1 * p2
    return total
07f3be7bc05423c90adbc47baf142835850dae28
64,473
def binomial_coeff(n: int, k: int) -> int:
    """Compute the binomial coefficient 'n choose k'.

    n: number of trials
    k: number of successes
    """
    return 1 if k == 0 else 0 if n == 0 else (binomial_coeff(n - 1, k) + binomial_coeff(n - 1, k - 1))
9862fa7d31b2fc508f806cec1dea6bfe5175aa02
64,474
def check_answer(guess, followers_a, followers_b):
    """Takes the user's guess and follower counts and returns whether the user
    got it right (returns a Boolean)."""
    if followers_a > followers_b:
        return guess == "a"
    else:
        return guess == "b"
3a46154ee16908e65b788b32fb084701634b6a76
64,476
def create_dicts_w_info(df, x_label, column_label):
    """
    This function is used to create a dictionary that can be easily converted to a
    graphical representation based on the values for a particular dataframe

    Parameters
    ----------
    df (dataframe): dataframe that contains the information to be converted

    x_label (string): the column of the dataframe whose rows will then be converted
        to the keys of a dictionary

    column_label (string): the column that contains the data quality metric being investigated

    Returns
    -------
    data_qual_info (dictionary): has the following structure
        keys: the column for a particular dataframe that represents the elements whose
            data quality is being compared (e.g. HPOs, different measurement/unit combinations)

        values: the data quality metric being compared
    """
    rows = df[x_label].unique().tolist()

    data_qual_info = {}

    for row in rows:
        sample_df = df.loc[df[x_label] == row]
        data = sample_df.iloc[0][column_label]
        data_qual_info[row] = data

    return data_qual_info
5cab1b3bbd16b5f15a6c0207fc0af0d8d8adde37
64,477
def calc_nhd(tdiff):
    """Return the number of positive elements of an xarray along the time dim.
    """
    nhd_yr = tdiff.where(tdiff > 0).count(dim='time')
    return nhd_yr
0237f5686f6b49a465cd4af536c81cdab53f8674
64,478
def opt_mod(num, div):
    """returns nonnegative or negative modulo residue depending on whichever one
    has a lower absolute value (if both equal, returns nonnegative)"""
    res = num % div
    return res if res <= (div / 2) else res - div
582e233f10a8db057ffee583f8b7974b34f15b55
64,480
import re

def convert_to_snake_case(value: str) -> str:
    """Convert any camel case attribute name to snake case

    Args:
        value: The key to be converted to snake case

    Returns:
        The converted key
    """
    return re.compile(r"(.)([A-Z][a-z]+)").sub(r"\1_\2", value).lower()
b294065364ce6944d77b4df77911f29cb7528481
64,483
def get_program_dir(parser_name):
    """Return a directory name given a parser name.

    In at least one case (GAMESS-UK) the directory is named differently.
    """
    if parser_name == "GAMESSUK":
        return "GAMESS-UK"
    return parser_name
7303d76cc315f883eb63d47ac4be6966bc714bf6
64,484
def average(iterable):
    """Computes the arithmetic mean of a list of numbers.

    >>> average([20, 30, 70])
    40.0
    >>> average([1, 2, 3])
    2.0
    """
    return sum(iterable, 0.0) / len(iterable)
dae08682da55fa00818f39d42cbe4010cc6f02eb
64,486
def cohort_aggregator(results):
    """
    Aggregate the number of individuals included in the cohort definition.
    """
    count = {}
    for result in results:
        count[result[0]] = result[1]
    return count
c5e85e19bc241015f1c602e77442f893d0be41df
64,487
def cell_start(position, cell_width, wall_width):
    """Compute <row, col> indices of top-left pixel of cell at given position"""
    row, col = position
    row_start = wall_width + row * (cell_width + wall_width)
    col_start = wall_width + col * (cell_width + wall_width)
    return (row_start, col_start)
2db9e002a981ffb2f414772f54e4f321255dc7ed
64,491
def load_file(filename: str) -> list:
    """Load the Intcode program from a file

    :param filename: Location of the input file
    :return: Parsed Intcode program
    """
    with open(filename) as f:
        raw_program = f.readline()[:-1].split(',')
    program = [int(el) for el in raw_program]
    return program
25427e7bad9ea1ca55846c5fb953e484bcc4aec3
64,492
def is_error_of_type(exc, ref_type):
    """
    Helper function to determine if some exception is of some type, by also
    looking at its declared __cause__

    :param exc:
    :param ref_type:
    :return:
    """
    if isinstance(exc, ref_type):
        return True
    elif hasattr(exc, '__cause__') and exc.__cause__ is not None:
        return is_error_of_type(exc.__cause__, ref_type)
4728fa82f0ddb78e5b5b28715f40c5207add634d
64,494
def build_http_response(data, content_type='text/html', response_code='200 OK'):
    """
    Base HTTP response maker.
    ----
    data (str | bytes) : Data to be packed into an HTTP response
    content_type (str) : Mimetype of data
    response_code (str) : HTTP response code
    ----
    Returns (bytes) of the packaged HTTP response.
    """
    http_header = f'HTTP/1.1 {response_code}\r\nContent-Type: {content_type}\r\nContent-Length: {len(data)}\r\n\r\n'
    if content_type.startswith('text') and type(data) != bytes:
        return bytes(http_header + data, 'utf-8')
    else:
        return bytes(http_header, 'utf-8') + data
19c74d55aa0cb8b0a91ebe8210cec3432c42300a
64,495
import torch

def per_face_normals(mesh: torch.Tensor):
    """Compute normals per face.

    Args:
        mesh (torch.Tensor): #F, 3, 3 array of vertices
    """
    vec_a = mesh[:, 0] - mesh[:, 1]
    vec_b = mesh[:, 1] - mesh[:, 2]
    normals = torch.cross(vec_a, vec_b)
    return normals
0f6c51125ec33dc5aa8faa913b5fefba03bdd1f5
64,497
def isInList(node, alist: list):
    """
    Determine if a node is within a list
    """
    for temp in alist:
        if temp.position == node.position:
            return True
    return False
979b58647b88142d29a93a339e520416adfd78f9
64,498
def parse_headers(fp):
    """Parses only RFC2822 headers from a file pointer.
    """
    headers = {}
    while True:
        line = fp.readline()
        if line in (b'\r\n', b'\n', b''):
            break
        line = line.decode('iso-8859-1')
        name, value = line.strip().split(':', 1)
        headers[name.lower().strip()] = value.lower().strip()
    return headers
2fea2c8fa8a997f97ae9d4181e631ae60de39957
64,507
def is_variable_font(tt) -> bool:
    """Tests for the presence of a fvar table to confirm that a file is a variable font."""
    return "fvar" in tt
4bbd47d2fd7970ce45e2902bba87b595d8a8a88d
64,510
def intersect(object1, object2, radius=15):
    """
    Takes two Turtle objects and an optional radius.
    Returns True if the objects are less than radius apart.
    """
    xbounds = (object1.xcor() - radius, object1.xcor() + radius)
    ybounds = (object1.ycor() - radius, object1.ycor() + radius)
    x, y = object2.pos()
    check_x = x > min(xbounds) and x < max(xbounds)
    check_y = y > min(ybounds) and y < max(ybounds)
    if (check_x and check_y):
        return True
    else:
        return False
3b5490d1491cdace69ecee907499aab038c4ac3c
64,514
def _mesh_to_material(mesh, metallic=.02, rough=.1):
    """
    Create a simple GLTF material for a mesh using the most
    commonly occurring color in that mesh.

    Parameters
    ------------
    mesh: Trimesh object

    Returns
    ------------
    material: dict, in GLTF material format
    """
    # just get the most commonly occurring color
    color = mesh.visual.main_color
    # convert uint color to 0-1.0 float color
    color = color.astype(float) / (2 ** (8 * color.dtype.itemsize))

    material = {'pbrMetallicRoughness':
                {'baseColorFactor': color.tolist(),
                 'metallicFactor': metallic,
                 'roughnessFactor': rough}}
    return material
a67d940d7e50b75b6048fb83e620729fea70c4c7
64,517
import types
from typing import List
from typing import Set

def _is_code_for_function(
    code: types.CodeType, parents: List[types.CodeType], func_codes: Set[types.CodeType]
):
    """
    Check if this is the code object for a function or inside a function

    Args:
        code: The code object to check
        parents: List of parents for this code object
        func_codes: Set of code objects that are directly for functions
    """
    return code in func_codes or any(p in func_codes for p in parents)
152817f50b986264b88074b089f9435a85680eda
64,527
def is_alias_assignment(expr):
    """
    Examples:
        ['=', 'column_name', 1]                                       Constant assignment
        ['=', 'column_name', 'other_column']                          Basic aliasing
        ['=', 'column_name', ['sin', 'column_name']]
        ['=', 'column_name', ['+', 'column_name', 'other_column']]   Complex calculations
    """
    return type(expr) is list and len(expr) == 3 and expr[0] == '='
19082dddc11982b1a78048e1a88f42aff24b19aa
64,528
import io

def iostr_to_iobytes(fs):
    """
    Converts an io.StringIO object to io.BytesIO

    Parameters
    ----------
    fs : io.StringIO

    Returns
    -------
    fb : io.BytesIO
    """
    fs.flush()
    fs.seek(0)
    fb = io.BytesIO(fs.read().encode())
    return fb
b5d3d828f1a790cc61c90fba466987092f662e04
64,529
def concat(*items):
    """
    Turn each item to a string and concatenate the strings together
    """
    sep = ""
    if len(items) == 1 and (
        isinstance(items[0], (list, tuple, set)) or hasattr(items[0], "as_list")
    ):
        items = items[0]
        sep = ", "
    return sep.join(map(str, items))
d5dc95898bc6120c136406bc891ab458dca215c7
64,532
def attack(encrypt_oracle, known_prefix, padding_byte):
    """
    Recovers a secret using the CRIME attack (CTR version).

    :param encrypt_oracle: the encryption oracle
    :param known_prefix: a known prefix of the secret to recover
    :param padding_byte: a byte which is never used in the plaintext
    :return: the secret
    """
    known_prefix = bytearray(known_prefix)
    padding_bytes = bytes([padding_byte])
    while True:
        for i in range(256):
            # Don't try the padding byte.
            if i == padding_byte:
                continue

            l1 = len(encrypt_oracle(padding_bytes + known_prefix + bytes([i]) + padding_bytes + padding_bytes))
            l2 = len(encrypt_oracle(padding_bytes + known_prefix + padding_bytes + bytes([i]) + padding_bytes))
            if l1 < l2:
                known_prefix.append(i)
                break
        else:
            return known_prefix
3a8fe97cf2ca4d743854ef90160b0ca1322ef9bc
64,535
def read_file(source):
    """
    Gets content of a text file.

    source: file name
    returns LIST
    """
    with open(source, 'r') as myfile:
        data = myfile.read().replace('\n', ' ')
    return data.split(' ')
b38518d837b577e84c71930346fd8381e091bfb1
64,540
def formatDateTime(raw: str) -> str:
    """
    Helper function. Converts Image exif datetime into Python datetime

    Args:
        raw (str): exif datetime string

    Returns:
        str - python compatible datetime string
    """
    datePieces = raw.split(" ")
    if len(datePieces) == 2:
        return datePieces[0].replace(":", "-") + " " + datePieces[1].replace("-", ":")
    else:
        return ""
d9c0ec43e42e1d24223eb8f640d8790ec48b82fc
64,543
import re

def is_hash256(s):
    """
    Returns True if the considered string is a valid SHA256 hash.

    :param str s: the considered string
    :return: Returns True if the considered string is a valid SHA256 hash.
    :rtype: bool
    """
    if not s or not isinstance(s, str):
        return False

    return bool(re.match('^[0-9A-F]{64}$', s.strip(), re.IGNORECASE))
4fee492e2d1e10d75660af5f0ace77f61f43df83
64,544
def rounded_up(value, granularity):
    """Returns `value` rounded up to nearest multiple of `granularity`.

    >>> rounded_up (3, 5)
    5
    >>> rounded_up (8, 5)
    10
    >>> rounded_up (-3, 5)
    0
    >>> rounded_up (-8, 5)
    -5
    >>> rounded_up (0.024 / 0.005, 1.0)
    5.0
    """
    if 0 < granularity < 1:
        scale = 1.0 / granularity
        result = rounded_up(value * scale, 1.0) / scale
    else:
        result = value + ((granularity - value) % granularity)
    return result
9fe2e022a8e155e928c277eaa029f68131574665
64,547
import yaml

def parse_summary(p_summary):
    """
    Helper function for parsing summary.yaml

    Args:
        p_summary (string): summary.yaml
    """
    try:
        with open(p_summary, 'r') as in_file:
            return yaml.safe_load(in_file.read())
    except IOError:
        exit('File ' + p_summary + ' was not found!')
3e21a1568260c3ac263ebd9ef4cb2bd4aae4134e
64,548
def _compile(statements):
    """
    Compiles the statements

    :param str statements:
    :return str/None: None if no error
    """
    error = None
    try:
        compile(statements, "string", "exec")
    except SyntaxError as err:
        error = str(err)
    return error
1995e205ef19c58e1d7994704446757d63dd6de2
64,553
from typing import Callable

def firstDerivative(f: Callable, x: float, h: float = 1e-7) -> float:
    """Function to numerically approximate the first derivative about a point `x`,
    given a function `f(x)` which takes a single float as its argument.

    This function uses the central finite difference method, computing the slope of
    a nearby secant line passing through the points `(x - h)` and `(x + h)`.

    Arguments:
        f {Callable} -- Objective function whose derivative is to be computed.
        x {float} -- Point about which the derivative is computed.

    Keyword Arguments:
        h {float} -- Step size (default: {1e-7}).

    Returns:
        float -- Approximation of the first derivative of `f` about point `x`.
    """
    return (f(x + h) - f(x - h)) / (2 * h)
b6ef9862ea62b0a31fd67c438742340b3f9942e1
64,555
def calculatedistance(vector1, vector2):
    """
    Calculate the distance between two vectors (atoms)

    Returns the distance squared or None if vectors not aligned.

    :param vector1: First atom coordinate (x,y,z)
    :type vector1: list
    :param vector2: Second atom coordinate (x,y,z)
    :type vector2: list
    :return: squared distance between two atoms
    """
    d = float(0)
    if len(vector1) == len(vector2):
        for i in range(len(vector1)):
            d = d + (vector1[i] - vector2[i])**2
        return d
    else:
        return None
e83c0db251ee71bd26d3f15abaf00a14df96078b
64,557
def get_point(win, b_left, b_right, b_down, b_up):
    """Get a click and return it as a point object.
    If the click is in the button area, return None instead of the point object.

    Return values:
    click_p - click point object"""
    click_p = win.getMouse()
    if b_left < click_p.getX() < b_right and b_down < click_p.getY() < b_up:
        return None
    return click_p
c43a87e608f8118d60e1dd2a7d80b3dd3cbc3d00
64,558
def readlines(fn):
    """
    Read all lines from a file into a list

    :param fn: filename
    :return: list of lines from the file
    """
    with open(fn) as f:
        content = f.readlines()
    return [x.strip() for x in content]
3dce2037890d8d788987fa92be8095f5ccdb993a
64,561
def RemoveQuots(string):
    """
    Remove quotes at both ends of a string

    :param str string: string
    :return: string(str) - string
    """
    if string[:1] == '"' or string[:1] == "'":
        string = string[1:]
    if string[-1] == '"' or string[-1] == "'":
        string = string[:-1]
    return string
21472432c087d95bd02175e983ddbfb86e584d6d
64,564
def get_dict_item(data: dict, Dict_map: list):
    """
    Get the element embedded in layered dictionaries and lists based on a list
    of indexes and keys.
    """
    for index in Dict_map:
        try:
            data = data[index]
        except (IndexError, KeyError):
            return
    return data
54a829aa69b7ca8db12d64a3b48b420bd048131f
64,565
import re

def regex_first_claim(fullclaim, maxlen):
    """Attempts to extract the first patent claim from the full set of claims.

    Because patent claims have predictable structure, we can attempt to extract
    the first claim based on a few rules. If none of these work, we return all
    characters up to `maxlen`.

    Args:
        fullclaim: A string containing the full text of a patent's claims.
        maxlen: An upper limit on the size of the result. This limit is only used
            if all previous extraction methods fail.

    Returns:
        A string containing the best estimate of the text of the first claim.
    """
    # First try the simplest - split on the text '2.' or '2 .'.
    split_on_2 = re.split(r'.\s+[2]\s*.', fullclaim)
    if len(split_on_2) > 1:
        return split_on_2[0]

    # Next split on the first reference to 'claim 1'.
    if 'claim 1' in fullclaim.lower():
        return fullclaim.split('claim 1')[0]

    # If none of the above worked, split on The (case sensitive). This word
    # should only appear in dependent claims by convention.
    if ' The ' in fullclaim:
        return fullclaim.split(' The ')[0]

    # Finally, just keep the first N chars based on the maxlen input.
    return fullclaim[:maxlen]
40b19d30dcc67b1e0da27cae62069d3c3daa1230
64,566
from typing import Optional
from typing import Dict
from typing import Any

def ensure_resampling_config_is_set(
    resampling_config: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """Set default downsampling_method and upsampling_method if these are missing.

    The default downsampling method is "mean". The default upsampling method is "pad".
    """
    if resampling_config is None:
        resampling_config = {}
    if resampling_config.get("downsampling_method", None) is None:
        resampling_config["downsampling_method"] = "mean"
    if resampling_config.get("upsampling_method", None) is None:
        resampling_config["upsampling_method"] = "pad"
    return resampling_config
68f2ecb58f5105cdb49080567b059dbe3192bf47
64,571
def date2str(dt, format_str=None):
    """
    Convert list of datetime objects to legible strings

    Parameters
    ----------
    dt : datetime.datetime
        Single or list of datetime object(s)
    format_str : string
        Timestamp string formatting, default: '%Y-%m-%d %H:%M:%S.%f'.
        See datetime.strftime documentation for timestamp string formatting

    Returns
    -------
    time : string
        Converted timestamps
    """
    if format_str is None:
        format_str = '%Y-%m-%d %H:%M:%S.%f'

    if not isinstance(dt, list):
        dt = [dt]

    return [t.strftime(format_str) for t in dt]
aee6ed28d3a23c02ae05885543170de0854a7e4a
64,572
def remove_items(garbages):
    """
    Removes the items/garbages that are no longer visible on the screen.

    Args:
        garbages(list): A list containing the garbage rects

    Returns:
        garbages(list): A list containing the garbage rects
    """
    # Loop over a copy of the garbage rects so removals don't skip elements
    for garbage_rect in garbages[:]:
        if garbage_rect.centerx <= -50:  # Checks if the center is out of the screen
            garbages.remove(garbage_rect)  # Remove the garbage item

    return garbages
750f084e14dcb08d2308618736a62d0bed36dcd1
64,573
def _GetFieldPathElementIndex(api_error, field):
    """Retrieve the index of a given field in the api_error's fieldPathElements.

    Args:
        api_error: a dict containing a partialFailureError returned from the AdWords API.
        field: a str field for which this determines the index in the api_error's
            fieldPathElements.

    Returns:
        An int index of the field path element, or None if the specified field can't
        be found in the api_error.
    """
    field_path_elements = api_error['fieldPathElements']

    if field_path_elements:
        found_index = [field_path_element['index']
                       for field_path_element in field_path_elements
                       if field_path_element['field'] == field]
        if found_index:
            return found_index

    return None
4718eff13d707e342df3b3988c5a35f75f9a48d4
64,582
import re

def sanitizePrefix(prefix):
    """strips any leading '/' and substitutes non alphanumeric characters by '_'
    """
    prefix = prefix.lstrip('/')
    return re.sub('[^0-9a-zA-Z]+', '_', prefix)
adf9177af52b77876471f6ff6272da167965a23d
64,584
def generate_method_deprecation_message(to_be_removed_in_version, old_method_name, method_name=None, module_name=None):
    """Generate a message to be used when warning about the use of deprecated methods.

    :param to_be_removed_in_version: Version of this module the deprecated method will be removed in.
    :type to_be_removed_in_version: str
    :param old_method_name: Deprecated method name.
    :type old_method_name: str
    :param method_name: Method intended to replace the deprecated method indicated. This method's docstrings are
        included in the decorated method's docstring.
    :type method_name: str
    :param module_name: Name of the module containing the new method to use.
    :type module_name: str
    :return: Full deprecation warning message for the indicated method.
    :rtype: str
    """
    message = "Call to deprecated function '{old_method_name}'. This method will be removed in version '{version}'".format(
        old_method_name=old_method_name,
        version=to_be_removed_in_version,
    )
    if method_name is not None and module_name is not None:
        message += " Please use the '{method_name}' method on the '{module_name}' class moving forward.".format(
            method_name=method_name,
            module_name=module_name,
        )

    return message
0d5470dbdf7fc01204d67662b44ca7b3246ca1d8
64,586
import inspect

def get_calling_file(file_path=None, result='name'):
    """
    Retrieve file_name or file_path of calling Python script
    """
    # Get full path of calling python script
    if file_path is None:
        path = inspect.stack()[1][1]
    else:
        path = file_path

    name = path.split('/')[-1].split('.')[0]

    if result == 'name':
        return name
    elif result == 'path':
        return path
    else:
        return path, name
7e900c9a781fb3dec0a791eaa1f51ba724ba51ea
64,589
def restrict_dates(feed, dates):
    """
    Given a Feed and a date (YYYYMMDD string) or list of dates,
    coerce the date/dates into a list and drop the dates not in
    ``feed.get_dates()``, preserving the original order of ``dates``.
    Intended as a helper function.
    """
    # Coerce string to list
    if isinstance(dates, str):
        dates = [dates]

    # Restrict
    return [d for d in dates if d in feed.get_dates()]
084aacf803233ec24bcaca91102196f28712867d
64,595
def gen_file_name(output_path, title):
    """Generates the name of the PDF-file from the "doc title" in the json file.

    Args:
        output_path (string): relative output path
        title (string): title of the file according to content.json

    Returns:
        string: file_name of the pdf-file
    """
    file_name = output_path + title + ".pdf"
    return file_name
031e5fd78e0958c3de711c33e0357f1034a5e02d
64,598
import glob

def get_file(pattern):
    """Get filename from a shell expression.

    pattern : str, unix-type pattern for the file to search.

    Returns a single file or None if not found.
    If more than one file found, prints a warning, and returns first file.
    """
    files = glob.glob(pattern)
    if len(files) == 0:
        return None
    elif len(files) == 1:
        return files[0]
    else:
        print("WARNING: more than 1 file names matched the pattern. Return first")
        print("All matches: ", files)
        return files[0]
0944b5b9f784ef5d4ff6e99aa7105340bb575e7f
64,603
def swap_sign(sign):
    """Given a sign (+ or -), return opposite"""
    assert sign in ["+", "-"]
    if sign == "+":
        return "-"
    if sign == "-":
        return "+"
f047e3099a8e9f0945628021d1fe83980fbc4159
64,608
def verify_test_aaa_cmd(device, servergrp, username, password, path):
    """To verify radius connectivity with test aaa command

    Args:
        device (`obj`): Device object
        servergrp (`str`): Radius server group name
        username (`str`): username
        password (`str`): password
        path (`str`): legacy/new-code

    Return:
        None

    Raise:
        SubCommandFailure: Failed configuring
    """
    output = device.execute(
        "test aaa group {servergrp} {username} {password} {path}".format(
            servergrp=servergrp, username=username, password=password, path=path
        )
    )
    return output
9f02d665c4438411ee93c918163dd17f899f0db4
64,611
from bs4 import BeautifulSoup

def scrape(html_file):
    """
    Scrapes an ESPN URL for schedule data

    Returns an array of dictionaries containing game details
    """
    soup = BeautifulSoup(open(html_file), 'lxml')
    tables = soup.find_all('table')
    games = []

    for table in tables:
        for row in table.find_all('tr'):
            # Convert generator object to list and remove ticket details
            game_details = [str(item) for item in row.stripped_strings]

            if 'matchup' not in game_details:  # Skip header rows
                # Normalize team info to RANK, TEAM
                if '#' not in game_details[0]:
                    game_details.insert(0, '')
                if '#' not in game_details[3]:
                    game_details.insert(3, '')

                # Just in case the abbreviation is missing
                # ESPN is weird that way
                while len(game_details) < 6:
                    game_details.append('')

                game_dict = {}
                game_dict['away_rank'] = game_details[0]
                game_dict['away_team'] = game_details[1].replace('\'', '')
                game_dict['away_abbr'] = game_details[2]
                game_dict['home_rank'] = game_details[3]
                game_dict['home_team'] = game_details[4].replace('\'', '')
                game_dict['home_abbr'] = game_details[5]
                games.append(game_dict)

    return games
f5d8d5d7b8fa6bc97d864e04f878b8debc570622
64,612
import math

def angle_line(point, angle, length):
    """
    Generates a line at point `point` with an angle equal to `angle` and a
    length of 2*length. Returns the start and end points of the line.

    :param (tuple) point: point where the line is generated
    :param (number) angle: angle of the line (with respect to the horizontal)
    :param (int) length: length of each arm of the line
    :return (tuple, tuple): the start and end points of the line
    """
    x, y = point

    # Define the (absolute) offset of the line's endpoints
    # with respect to the origin point
    delta_y = length * math.sin(math.radians(angle))
    delta_x = length * math.cos(math.radians(angle))

    # Return a line with length 2*length
    return (x - delta_x, y - delta_y), (x + delta_x, y + delta_y)
381b15828bf80aff403e7db1ae4b529c78d0a2f9
64,623
from bs4 import BeautifulSoup
import requests

def get_content(url: str) -> BeautifulSoup:
    """Attempts to make a GET request to the url, raises HTTPError if it fails.
    Returns the contents of the page.

    Args:
        url (str): Valid url, example: https://www.rescale.com.

    Returns:
        BeautifulSoup: The contents of the html page to parse.
    """
    resp = requests.get(url)
    resp.raise_for_status()
    return BeautifulSoup(resp.content, "html.parser")
dd7ff1a50964ac6a4c51ec0511ba18a3f9035306
64,624
from datetime import datetime

def date(s):
    """Parse datetime"""
    return datetime.strptime(s, "%Y-%m-%d")
3216e6c6065b9ad11868af18a0dde02a997f8618
64,625
import re

def _remove_emoticons(tweet):
    """finds all emoticons, removes them from the tweet, and then returns the tweet
    with emoticons removed as well as a list of emoticons

    Parameters:
    -----------
    tweet: str
        contents of a tweet

    Returns
    -------
    tweet_no_emoticons: string of tweet with emoticons removed
    emoticons: list of emoticons
    """
    emoticons_re = r'(?:[:;])(?:[-<])?(?:[()/\\|<>])'
    emoticons = re.findall(emoticons_re, tweet)
    tweet = re.sub(emoticons_re, '', tweet)
    return tweet.strip(), emoticons
bbe7e1abed0228ccfd4aef6f191662a9a674a6ce
64,629
def serialize(root_node) -> str:
    """
    Serializes the tree into a string of the form Node [LeftTree] [RightTree]

    :param root_node: The root of the tree
    :return: A string representing the serialized version of the tree
    """
    if root_node is None:
        return ""
    elif root_node.left is None and root_node.right is None:
        return f"{root_node.val} [] []"
    else:
        return (
            f"{root_node.val} "
            f"[{serialize(root_node.left)}] "
            f"[{serialize(root_node.right)}]"
        )
7e05470e1d11d0740dd6d2a0d7e2e75e22ff6a0a
64,636
def collect_matchable_successors(start_state):
    """
    Collect the matchable successor states.

    NOTE: The start state is also evaluated as a successor!

    :param start_state: the start state of the searching
    :return: the set of matchable successors
    """
    processable_states = {start_state}
    matchable_states = set()
    visited_states = set()
    while processable_states:
        state = processable_states.pop()
        if state not in visited_states:
            if state.node.is_matchable():
                matchable_states.add(state)
            elif not state.node.is_default():
                processable_states.update(state.find_successor_states())
            visited_states.add(state)
    return matchable_states
950f069a710b64d6eb1d39fc43604504b0cd3dd7
64,637
def __create_send_data(username, icon_emoji, message):
    """Create sending data

    Arguments:
        username {string} -- Slack display user name
        icon_emoji {string} -- Slack display emoji-icon (e.g. :hogehoge:)
        message {string} -- Message contents

    Returns:
        [dict] -- Sending data for JSON payload
    """
    msg = {}
    if username:
        msg['username'] = username
    if icon_emoji:
        msg['icon_emoji'] = icon_emoji
    if message:
        msg['text'] = message
    return msg
fc50b81ce1b30bfcff3a471ee8cb9a5d16046a06
64,643
import json

def get_stim_params_from_config_for_physiology_factsheet(prot_path, protocol_key):
    """Get step amplitude, delay and duration for the physiology factsheet.

    Args:
        prot_path (str or Path): path to the json file containing the protocols
        protocol_key (str): name of the protocol used for physiology features extraction

    Returns:
        a tuple containing

        - current_amplitude (int or float): the amplitude current of the step protocol (mA)
        - stim_start (int or float): the start of the stimulus (ms)
        - stim_duration (int or float): the duration of the stimulus (ms)

    Raises:
        Exception: If a step protocol with multiple steps has been provided
    """
    with open(prot_path, "r", encoding="utf-8") as protocol_file:
        protocol_definitions = json.load(protocol_file)

    prot = protocol_definitions[protocol_key]
    step_stim = prot["stimuli"]["step"]
    if isinstance(step_stim, list):
        exception_message = (
            "ME-type factsheet expects only one step stimulus "
            + "for protocol {key} at {filepath}"
        )
        raise Exception(exception_message.format(key=protocol_key, filepath=prot_path))

    # get parameters from protocol
    current_amplitude = step_stim["amp"]
    stim_start = step_stim["delay"]
    stim_duration = step_stim["duration"]

    return current_amplitude, stim_start, stim_duration
e4fd0974d2da0e48c786b24cf8f1d12695bd2b98
64,647