content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def fields_for(context, form):
    """Expose *form* to the template context under ``form_for_fields``.

    Returns the (mutated) context so it can be used as a template helper.
    """
    context["form_for_fields"] = form
    return context
90aca9639f66ce971960e292e041ce11ffd8d929
109,506
def get_accept(environ):
    """get_accept(environ) -> accept header

    Return the Accept header from the request, or */* if it is not present.

    environ is the WSGI environment variable, from which the Accept header
    is read.
    """
    return environ.get('HTTP_ACCEPT', '*/*')
c58e94a185e475c48fa2f6dd58a0719297db730a
109,509
def dup(elem):
    """Generates a pair containing the same element twice."""
    pair = elem, elem
    return pair
8ef4237958bf831904006d355af9211864264193
109,510
import re


def alphanum(string):
    """Remove non-alphanumeric characters.

    Deletes all characters that are not a letter or number from a given
    string and capitalizes the letter following said character. For example,
    "my_subnet_in_eu-west-1a!" becomes "mySubnetInEuWest1a". Main use could
    be Logical IDs in CloudFormation.

    Args:
        string: string to transform
    Returns:
        string

    The original implementation removed one separator per pass with a fresh
    regex search each time (quadratic in the worst case); two re.sub calls
    do the same work in linear time with identical results.
    """
    # Drop leading/trailing separator runs WITHOUT capitalizing — matches
    # the original's one-char trimming at the string edges.
    string = re.sub(r"^[\W_]+|[\W_]+$", "", string)
    # Collapse each interior separator run and upper-case the character
    # that follows it.
    return re.sub(r"[\W_]+(\w)", lambda m: m.group(1).upper(), string)
692d5790f79880beeae707f8fcb762fc5843a964
109,514
def ffs(n):
    """find first set bit in a 32-bit number; -1 when none of the low 32 bits is set"""
    for bit in range(32):
        if (n >> bit) & 1:
            return bit
    return -1
f401f708058508401b56d013898bced14428ad3f
109,518
def hex_to_rgb(txt):
    """Turn a hex color code string into an RGB tuple.

    RGB tuples are floats on the interval [-1.0, 1.0].

    Args:
        txt (str): The hex string (6 chars, no #)
    Returns:
        A 3-tuple of R, G, and B floats.
    """
    channels = (int(txt[i:i + 2], base=16) for i in (0, 2, 4))
    return tuple(2 * value / 255 - 1.0 for value in channels)
fde6e88c55eb941dbb6980c5965983fce93502a6
109,524
def hex_to_rgb(hex_color: str, alpha: float) -> str:
    """Convert color in hex to rgb and add alpha channel"""
    code = hex_color.lstrip('#')
    red, green, blue = (int(code[i:i + 2], 16) for i in (0, 2, 4))
    return f"rgba({red}, {green}, {blue}, {alpha:.2f})"
92b12ac60550f8b957c63a8a656030fa04f7dc3d
109,525
import typing


def row_to_line(row: typing.Sequence[str]) -> str:
    """
    Row to line.

    Given a row (list of string) return a tsv encoded string.

    :param row: list of cells
    :returns: string representing the row
    """
    return "\t".join(row) + "\n"
a80420b50bd33b6b0a9740b5d78b65c9b44fd241
109,531
def clean_species_name(common_name):
    """
    Converts various forms of "human" to the token "human", and various
    forms of "empty" to the token "empty"
    """
    # Sets written directly in lowercase (the original lowercased mixed-case
    # literals at call time).
    people_tags = {
        'bicycle', 'calibration photos', 'camera trapper', 'camera trappper',
        'camera trappe', 'homo sapien', 'homo sapiens', 'human, non staff',
        'human, non-staff', 'human non-staff', 'setup pickup', 'vehicle',
    }
    no_animal_tags = {
        'no animal', 'time lapse', 'camera misfire', 'false trigger', 'blank',
    }

    common_name = common_name.lower().strip()
    if common_name in people_tags:
        return 'human'
    if common_name in no_animal_tags:
        return 'empty'
    return common_name
96364758f81754cf4ee7de6a436a1cd82960e514
109,532
def get_file_map(inventory, version):
    """Get a map of files in state to files on disk for version in inventory.

    Returns a dictionary: file_in_state -> set(content_files)

    The set of content_files may include references to duplicate files in
    later versions than the version being described.
    """
    state = inventory['versions'][version]['state']
    manifest = inventory['manifest']
    return {
        logical_file: set(manifest[digest])
        for digest, logical_files in state.items()
        if digest in manifest
        for logical_file in logical_files
    }
1e126cdfbbd206cd19a8621d023ec1973dfb329b
109,533
from datetime import datetime


def check_orb_expiry(token_expires):
    """Return True when *token_expires* is within 60 seconds of now.

    Bug fix: ``timedelta.seconds`` only exposes the seconds component of the
    delta (0-86399) and ignores the days part, so a token expiring days in
    the future (or past) could be reported as expiring now. Use
    ``total_seconds()`` for the full magnitude.

    :param token_expires: naive UTC datetime of token expiry.
    :return: bool
    """
    delta = abs((token_expires - datetime.utcnow()).total_seconds())
    return bool(delta < 60)
a290b2bb2308a77358ab74e957fab993bcdc4c5a
109,541
def shared_argument(arg):
    """Convenience marker: denotes this argument is used in multiple places.

    Returns the argument unchanged.
    """
    return arg
6fec355dce36b82223a3cd6f628ca06e88da58e4
109,545
def fill_out_dict(out_dict, eval_dict):
    """Appends the computed metric score per run to the main output dictionary.

    All metrics are initialized in init_out_dict().

    Args:
        out_dict (dict): main output dictionary.
        eval_dict (dict): dictionary containing scores per simulation.
    Returns:
        dict: dictionary with collected scores for each simulation
    """
    for metric in out_dict:
        out_dict[metric].append(eval_dict[metric])
    return out_dict
17c72fb404673aadecb8b8442ccee150fab2a0a3
109,550
def get_box_size(box):
    """Calculate the bounding-box area from an (N, 4) array of (x0, y0, x1, y1) rows."""
    widths = box[:, 2] - box[:, 0]
    heights = box[:, 3] - box[:, 1]
    return widths * heights
54d8353f687300b02c14baee8d186bc3baaf6167
109,552
def get_temporal_feature_names(osn_name):
    """
    Returns a set of the names of the temporal engineered features.

    :param osn_name: The name of the dataset (i.e. reddit, slashdot,
        barrapunto); currently unused — the same names apply to all.
    :return: names: The set of feature names.
    """
    return {
        "temporal_first_half_mean_time",
        "temporal_last_half_mean_time",
        "temporal_std_time",
        "temporal_timestamp_range",
    }
4d86225db1664005963722e93ca10fba16bcdf58
109,556
def get_attr_chain(obj, attr_chain):
    """
    Attempt to retrieve object attributes when uncertain about the existence
    of the attribute or a different attribute in a given attribute chain.
    If the retrieval fails, None is returned.

    Can retrieve a direct attribute or a chain of attributes, e.g.
    get_attr_chain(object, "attr.sub_attr_a.sub_attr_b") returns
    "sub_attr_b" without raising even when "attr" or "sub_attr_a" do not
    exist — in those cases None is returned.

    Args:
        obj: An object
        attr_chain (str): One attribute or dot-separated sub-attributes
            (i.e. - "attr.sub_attr_a.sub_attr_b")
    Returns:
        The requested attribute if found, otherwise None
    """
    current = obj
    for attr in attr_chain.split("."):
        current = getattr(current, attr, None)
    return current
634e2cbe636aa79bbb377514bc217867b471cb78
109,557
def avg_list(items):
    """Return the truncated integer average of a list of numbers (0 if empty)."""
    if not items:
        return 0
    return int(sum(items) / len(items))
ef962801092b922594dd1f7c6706803ddd50603e
109,558
def interpolatePrf(regPrfArray, col, row, imagePos):
    """
    Interpolate between 4 images to find the best PRF at the specified
    column and row.

    This is a simple (bilinear) linear interpolation.

    Inputs
    -------
    regPrfArray
        13x13x4 prf image array of the four nearby locations.

    col and row
        (float) the location to interpolate to.

    imagePos
        (list) 4 floating point (col, row) locations

    Returns
    ----
    Single interpolated PRF image.
    """
    # The four corner images: p11=(c0,r0), p21=(c1,r0), p12=(c0,r1), p22=(c1,r1)
    # — assumes regPrfArray is ordered that way (TODO confirm against callers).
    p11, p21, p12, p22 = regPrfArray
    c0 = imagePos[0][0]
    c1 = imagePos[1][0]
    r0 = imagePos[0][1]
    r1 = imagePos[2][1]
    # Degenerate cells would divide by zero below.
    assert c0 != c1
    assert r0 != r1

    # Fractional position of (col, row) inside the bounding cell.
    dCol = (col-c0) / (c1-c0)
    dRow = (row-r0) / (r1 - r0)

    # Interpolate across the rows
    tmp1 = p11 + (p21 - p11) * dCol
    tmp2 = p12 + (p22 - p12) * dCol

    # Interpolate across the columns
    out = tmp1 + (tmp2-tmp1) * dRow
    return out
c5b77e35a390d92b6ce292a427f31d8e1816dcc7
109,562
from pathlib import Path
from typing import List


def list_joltage_steps(file: Path) -> List[int]:
    """
    List joltage steps from the given file.

    :param file: file containing one integer joltage value per line
    :return: list of differences between consecutive joltages, including the
        implicit 0-jolt outlet at the start and the device (rated 3 jolts
        above the highest adapter) at the end
    """
    # Context manager closes the handle deterministically; the original
    # left the file object to the garbage collector.
    with open(file) as handle:
        adapter_output_joltages = sorted(int(v) for v in handle)
    device_input_joltage = 3 + max(adapter_output_joltages)
    joltages = [0] + adapter_output_joltages + [device_input_joltage]
    return [b - a for a, b in zip(joltages[:-1], joltages[1:])]
b88c13dbb26f8228b38830d3a3ca408fbd54bc3f
109,563
def show_table(name, table, i, total):
    """
    Format table info for display.

    name is tablename, table is table object, i is current (0-based) index,
    total is total of tables.
    """
    progress = '%d/%d' % (i + 1, total)
    return '[%s, %s] %s' % (progress, table.__appname__, name)
1310d2eb5f6bad6edf29c7f453cd5107f1c15852
109,564
def toLatin1String(text):
    """Convert string to latin1-safe text.

    Characters outside latin1 are dropped (``encode('latin1', 'ignore')``
    yields no bytes); latin1 characters whose bytes are not valid UTF-8
    fail the ``decode()`` and come back as "?".

    Performance fix: characters are collected in a list and joined once —
    the original built the result with repeated string concatenation,
    which is quadratic in the length of *text*.
    """
    chars = []
    for ch in text:
        try:
            chars.append(ch.encode("latin1", "ignore").decode())
        except UnicodeDecodeError:
            chars.append("?")
    return "".join(chars)
2f6c6991dcc66dffb6bd31376a9b58232da008af
109,568
def service_urls(service_data):
    """
    Args:
        service_data (Dict): the loaded service data

    Returns:
        List[str]: list of urls of a service, or None when the 'urls' key
        is absent from service_data.
    """
    return service_data.get('urls')
7bbcacbd868b5076ce46cea11c06d671176a496a
109,570
def del_constant_start_stop(x):
    """Remove elements at indexes 2..5 from *x* in place and return it.

    >>> l = [1,2,3,4]
    >>> del_constant_start_stop(l)
    [1, 2]
    >>> l = [1,2,3,4,5,6,7]
    >>> del_constant_start_stop(l)
    [1, 2, 7]
    """
    x[2:6] = []
    return x
f6b98c3d1e082588db962f6887a4035307756a0d
109,584
def clean_raw_telnet(telnet_output):
    """Clean raw telnet output from a brstest session

    :param telnet_output: List of raw (decoded) telnet lines
    :return: List of cleaned output lines, with empty lines removed

    Performance fix: a single filtering pass replaces the original
    ``while count() > 0: remove("")`` loop, which rescanned the whole list
    for every empty line (O(n^2)).
    """
    return [line for line in "".join(telnet_output).split("\r\n") if line]
7eab2a483f5dae28a3d8676e62e58a23fda04b46
109,585
import itertools


def unsqueeze_list(listA, val=-1):
    """Unpacks a list into a list of lists.

    Splits the input list into sub-lists on every element equal to ``val``;
    empty groups (leading, trailing or consecutive separators) are
    discarded.

    Source: https://stackoverflow.com/questions/4322705/split-a-list-into-nested-lists-on-a-value

    Parameters
    ----------
    listA : list
        A list.
    val : int/float, optional
        Value to separate the lists.

    Returns
    -------
    list
        A list of lists.

    Examples
    --------
    >>> from dbcollection.utils.pad import unsqueeze_list
    >>> unsqueeze_list([1, 2, -1, 3, -1, 4, 5, 6], -1)
    [[1, 2], [3], [4, 5, 6]]
    """
    grouped = itertools.groupby(listA, lambda item: item in (val,))
    return [list(group) for is_separator, group in grouped if not is_separator]
58452502092d80c9769eccd46113768c5ec6d600
109,586
import csv


def read_csv(file):
    """Read data from a semicolon-delimited .csv file.

    :param file: path of the file to read
    :return: list of rows, each row a list of cell strings

    Bug fix: the original passed an open file object straight to csv.reader
    and never closed it; the context manager guarantees the handle is
    released.
    """
    with open(file, encoding='utf-8') as handle:
        return list(csv.reader(handle, delimiter=';'))
ffa5b35e766910f4ce2b7e1fdb56629e6c313d68
109,591
def extract_package_name(line):
    """Return package name in import statement."""
    # The caller must pre-filter continuation/compound lines.
    assert '\\' not in line
    assert '(' not in line
    assert ')' not in line
    assert ';' not in line

    if not line.lstrip().startswith(('import', 'from')):
        # Ignore doctests.
        return None
    word = line.split()[1]
    package = word.split('.')[0]
    assert ' ' not in package
    return package
daef1941b6c159ff220892a376d661b6a8a7bad4
109,593
def _parse_cli_list(items): """Process a string of comma separated items into a list""" if items == "": return None else: return items.split(",")
668e65bd7185901cea232ba8d84d42b38938a6f4
109,600
import re


def checkName(name: str) -> bool:
    """
    Checks if a string is a valid name.
    Only spaces and alphabets allowed.

    Parameters
    ----------
    name: str
        The name to be tested

    Returns
    -------
    bool
        True/False according to the validity of the name
    """
    # isinstance (rather than type ==) also accepts str subclasses;
    # fullmatch makes the whole-string requirement explicit (the original
    # relied on re.match plus a trailing $).
    return isinstance(name, str) and re.fullmatch(r'[a-zA-Z\s]+', name) is not None
915f58a7a3842f93c3332bb941f0f1b8bb6b9a98
109,606
def relevantIndexes(matrix, row):
    """Gets the relevant indexes of a vector: column indexes where matrix[row, col] == 1."""
    return [int(col) for col in range(matrix.shape[1]) if matrix[row, col] == 1]
77034043cb59dbf73bdfee744f4ca7213c8bb08c
109,609
def incremental_str_maker(str_format="{:03d}"):
    """Make a function that will produce a (incrementally) new string at every call.

    :param str_format: format string applied to the 1-based counter.

    Bug fix: the previous default format "{:03.f}" is invalid (a ``.`` with
    no precision digit) and raised ValueError on the first call; "{:03d}"
    zero-pads the counter to three digits as intended.
    """
    i = 0

    def mk_next_str():
        nonlocal i
        i += 1
        return str_format.format(i)

    return mk_next_str
2c085f5a717591a5da5f2de2569f136d4e0c5a19
109,614
def preprocess(line):
    """
    Preprocess a line of Jane Austen text.

    * insert spaces around double dashes --
    :param line: raw text line
    :return: line with " -- " in place of every "--"
    """
    return " -- ".join(line.split("--"))
cc9caa450d6cc25ea4a2482f1a85b8c73a68c479
109,619
def bytes_decode(data):
    """
    Parse bytes as a UTF-8 string, ignoring unsupported characters,
    and wrap the result in single quotes.

    :param data: raw data read from the disk
    :type data: bytes
    :rtype: string
    """
    decoded = data.decode("utf-8", errors="ignore")
    return f"'{decoded}'"
4fcacfcbb1fe3fbca5725699c94dc2275bc2914d
109,620
def kilometers_to_miles(L_kilometers):
    """
    Convert length in kilometers to length in miles.

    PARAMETERS
    ----------
    L_kilometers: float
        A kilometers expression of length

    RETURNS
    ----------
    L_miles: float
        The miles expression of length L_kilometers
    """
    return 0.621371192*L_kilometers
4650e1084ba3f8fd9d13f611a457d4c5e0737376
109,621
def _format_input_label(input_label): """ Formats the input label into a valid configuration. """ return { 'name': input_label['name'], 'color': (input_label['color'][1:] if input_label['color'].startswith('#') else input_label['color']), 'description': input_label['description'] if 'description' in input_label else '' }
5bd63c54e1cb290be7a208c76ddc86375cc0a646
109,626
def parse_list_response(list_resp):
    """
    Parse out and format the json response from the kube api
    Example response:
    Pod Name | Status | Pod IP | Node Name
    -------------------------------+-----------+----------------+------------
    landing-page-76b8b9677f-nmddz | Running | 10.144.420.69 | salt-work1
    """
    # Header row of the fixed-width table.
    response_message = (
        "Pod Name | Status | Pod IP | Node Name\n"
    )
    # Separator row: column widths 30/11/17/12 joined with '+', matching the
    # f-string paddings below.
    response_message += (
        (30 * "-") + "+" + (11 * "-") + "+" + (17 * "-") + "+" + (12 * "-")
    )
    # One row per pod; any missing field renders as "Not Found".
    for pod in list_resp.get("items"):
        pod_name = pod.get("metadata", {}).get("name", "Not Found")
        status = pod.get("status", {}).get("phase", "Not Found")
        pod_ip = pod.get("status", {}).get("podIP", "Not Found")
        node_name = pod.get("spec", {}).get("nodeName", "Not Found")
        response_message += f"\n{pod_name:30}| {status:10}| {pod_ip:16}| {node_name:11}"
    return response_message
679741e431827e38088223ca4b2c811fe6b81cbf
109,636
def combine_legend_handles_labels(*axes):
    """
    Given one or more axes, return a tuple of (lines, labels) where each
    label appears only once (later axes win on duplicate labels).

    Bug fix: with no axes, or axes that carry no labelled artists, the
    original ``zip(*{}.items())`` unpacking raised ValueError; we now
    return a pair of empty tuples instead.
    """
    line_by_label = {}
    for ax in axes:
        lines, labels = ax.get_legend_handles_labels()
        assert len(lines) == len(labels)
        line_by_label.update(dict(zip(labels, lines)))
    if not line_by_label:
        return (), ()
    labels, lines = zip(*line_by_label.items())
    return lines, labels
26932a075f25577e6017126dc781d6564ca0feca
109,637
import re


def ListInt(s: str) -> list[int]:
    """Parse a string of format <VALUE, ...> into a list of ints.

    Robustness fix: any run of commas and/or whitespace now acts as a single
    separator. The original pattern (``,\\s|,|\\s``) produced empty tokens
    for inputs like double spaces or ", ," and crashed on ``int('')``.
    """
    return [int(token) for token in re.split(r"[,\s]+", s.strip()) if token]
22bd4d3a6d944fb71e0cbbfb7b30a56dfa9c4f5c
109,639
def double_eights(n):
    """Determine if the input integer, n, contains two eight digits consecutively.

    Args:
        n (int): input integer >= 0

    Returns:
        (bool): True if two consecutive 8s are detected in n, False otherwise

    Raises:
        ValueError: if n is negative
        TypeError: if n is not an integer
        OverflowError: if n > 1e30

    Examples:
        >>> double_eights(8)
        False
        >>> double_eights(88)
        True
        >>> double_eights(346672)
        False
        >>> double_eights(28.88)
        Traceback (most recent call last):
        ...
        TypeError: n is not an integer
    """
    if not isinstance(n, int):
        # Message now matches the doctest (was 'n is not integer').
        raise TypeError('n is not an integer')
    if n < 0:
        raise ValueError('n is negative')
    if n > 1e30:
        raise OverflowError('n is too large')

    # Bug fix: the original state machine advanced on digit == 0, so even
    # double_eights(88) returned False, contradicting its own doctest.
    previous_was_eight = False
    while n > 0:
        digit = n % 10
        if digit == 8:
            if previous_was_eight:
                return True
            previous_was_eight = True
        else:
            previous_was_eight = False
        n = n // 10
    return False
49fd3df61a60b2e6c7560a26dab5c7ee37f64597
109,640
def red(msg: str) -> str:
    """Return red string in rich markdown"""
    return "[red]" + msg + "[/red]"
426e6b452298e90540030f526d359320cda5bfa6
109,642
from typing import Dict


def build_ldap_filter(kwargs: Dict[str, str]) -> str:
    """
    Builds up an LDAP filter from kwargs.

    :param kwargs: Dict of attribute name, value pairs
    :return: LDAP search filter representation of the dict; multiple pairs
        are AND-ed together with the ``(&...)`` prefix.
    """
    clauses = "".join(f"({name}={value})" for name, value in kwargs.items())
    return f"(&{clauses})" if len(kwargs) > 1 else clauses
3dec67dcbd754e8c267329154366b66d2d345e5a
109,644
def punct_space(token):
    """
    helper function to eliminate tokens
    that are pure punctuation or whitespace

    NOTE(review): expects a spaCy-style token exposing ``is_punct`` and
    ``is_space`` boolean attributes — confirm against callers.
    """
    return token.is_punct or token.is_space
cc4eeef8838d43c15f30560a4086191ec63092a3
109,648
def parse_command(data):
    """
    Parse received command and return a list of words.

    Any word starting with a ":" will be counted as final, and will "eat"
    the rest of the list.

    e.g::

        >>> parse_command('CAP LS')
        ['CAP', 'LS']
        >>> parse_command('NICK bruno')
        ['NICK', 'bruno']
        >>> parse_command('USER bruno bruno 127.0.0.1 :Bruno Bord')
        ['USER', 'bruno', 'bruno', '127.0.0.1', 'Bruno Bord']
    """
    head, _, trailing = data.partition(' :')
    words = head.split(' ')
    if trailing:
        words.append(trailing)
    return words
436f5a496d8ca7af88432629b95d73dc5e09a91c
109,660
def format_frr(cmd):
    """Prefixes FRR command with the appropriate vtysh prefix.

    Arguments:
        cmd {str} -- Unprefixed command
    Returns:
        {str} -- Prefixed command
    """
    return 'vtysh -uc "{}"'.format(cmd)
7feca919719898d4fe80806275216ddd04325bae
109,662
def get_user_attributes(obj, exclude_methods=True):
    """Returns a list of non-system attributes for an object.

    :param obj: object or class to inspect
    :param exclude_methods: [optional] do not include callable methods in
        the returned list, defaults to True
    :returns: list of non-system attributes of an object or class
    """
    # Attributes every plain object has; these are filtered out.
    base_attributes = set(dir(type('dummy', (object,), {})))
    results = []
    for name in dir(obj):
        if name in base_attributes:
            continue
        try:
            if exclude_methods and callable(getattr(obj, name)):
                continue
        except AttributeError:
            # some kinds of access cause problems, ignore them
            continue
        results.append(name)
    return results
7ae716c9de8e2ebb804f36129c6afc2c9534ee3d
109,666
def sum_of_kwargs_values(**kwargs):
    """
    :param kwargs: a dictionary with arbitrary number of keyword arguments
    :return: sum of values of all kwargs
    """
    total = 0
    for value in kwargs.values():
        total += value
    return total
74be2157e33cc6c568f4cfe084c88a0be4768e4f
109,667
def _stft_frames_to_samples( frames, size, shift, fading=None ): """ Calculates samples in time domain from STFT frames :param frames: Number of STFT frames. :param size: window_length often equal to FFT size. The name size should be marked as deprecated and replaced with window_length. :param shift: Hop in samples. :return: Number of samples in time domain. >>> _stft_frames_to_samples(2, 16, 4) 20 """ samples = frames * shift + size - shift assert fading in [None, True, False, 'full', 'half'], fading if fading not in [None, False]: pad_width = (size - shift) samples -= (1 + (fading != 'half')) * pad_width return samples
12e3889f499bf3c5404db51a6a54f77fa67efef2
109,672
def midpoint(x1, y1, x2, y2):
    """
    Computes midpoint between two points.

    NOTE(review): returns (y_mid, x_mid) — y first — exactly as the
    original did; confirm callers expect this (row, col) ordering.
    """
    mid_y = (y1 + y2) / 2
    mid_x = (x1 + x2) / 2
    return (mid_y, mid_x)
bc6b4a7ba22e4cecea21fb93d87e32687f30d976
109,679
def calc_num_terminal_mismatches(matches):
    """(Internal) Count the number of -1 entries at the end of a list of numbers.

    These -1 correspond to mismatches in the sequence to sequence search.

    Bug fix: the original walked backwards with an unbounded negative index
    and raised IndexError when every entry was -1 (or the list was empty);
    this version simply stops at the front of the list.
    """
    count = 0
    for value in reversed(matches):
        if value != -1:
            break
        count += 1
    return count
a4dbce034fc90cd7f2ef07bf9e4c16cffb247dcd
109,688
def v3_multimax(iterable):
    """Return a list of all maximum values.

    Returns an empty list for an empty iterable.

    Bug fix: the original iterated the input twice, so one-shot iterators
    (generators) were exhausted by the first pass and always produced [];
    the input is now materialized once up front.
    """
    items = list(iterable)
    max_item = None
    for item in items:
        if max_item is None or item > max_item:
            max_item = item
    return [item for item in items if item == max_item]
9ba3d17c510406ea85ab6f1d1806446165db0610
109,692
def spotify_id_from_url(url: str) -> str:
    """Extract the `Spotify ID`_ from a Spotify URL.

    The ID is the path segment after the last "/", with any "?query"
    suffix stripped.

    Args:
        url (str): The URL to extract the `Spotify ID`_ from.

    Returns:
        str: The extracted `Spotify ID`_.
    """
    last_segment = url.rsplit("/", 1)[-1]
    return last_segment.partition("?")[0]
8946ea44a06a2ec838636a5db25f061fb28300a5
109,694
def logout_user(resp): # pylint: disable=unused-argument
    """Log user out.

    Returns an empty JSON body and HTTP 200; any actual session teardown
    presumably happens in framework middleware — confirm the wiring.
    """
    return {}, 200
a3d4af3f8cb4534768671fa0c6f1449e88868fc3
109,697
def null_count(df):
    """Checks the Dataframe for null values and returns amount of null values"""
    per_column = df.isnull().sum()
    return per_column.sum()
59d1d50f3b1d7c26891576d2a98a803f95864d07
109,706
def information_gain_proxy(
    impurity_left, impurity_right, w_samples_left, w_samples_right,
):
    """Computes a proxy of the information gain (improvement in impurity)
    using the equation

        - n_v0 * impurity_v0 - n_v1 * impurity_v1

    where:
     * n_v0, n_v1 are the weighted number of samples in the left and right
       nodes
     * impurity_v0, impurity_v1 are the impurities of the left and right
       nodes

    It is used in order to find the best split faster, by removing constant
    terms from the formula used in the information_gain function.

    Parameters
    ----------
    impurity_left : float
        Impurity of the left node
    impurity_right : float
        Impurity of the right node
    w_samples_left : float
        Weighted number of samples in the left node
    w_samples_right : float
        Weighted number of samples in the right node

    Returns
    -------
    output : float
        Proxy of the information gain after splitting the parent into left
        and right child nodes
    """
    weighted_left = w_samples_left * impurity_left
    weighted_right = w_samples_right * impurity_right
    return -(weighted_left + weighted_right)
be7555afd4ace0c2b05e91c74efa27b5603c9394
109,714
def is_pow2(a):
    """Return True if the integer a is a power of 2.

    Uses the classic ``a & (a - 1)`` bit trick instead of slicing ``bin()``
    output. Robustness fix: the original raised ValueError for a == 0
    (``int('', 2)``); this version returns False for 0 and negatives.
    """
    return a > 0 and (a & (a - 1)) == 0
245670f52a3af9521ede1a2759fe84debc251801
109,719
import time


def get_current_time() -> str:
    """current time in unix epoch (milliseconds), as a string"""
    millis = int(time.time() * 1000)
    return str(millis)
c4804d7d18f37b20c23a78c02e8571dd93cc5965
109,721
import random


def random_string(size: int) -> str:
    """Generate a random alphanumeric string of length `size`.

    Uses SystemRandom (os.urandom-backed), suitable for unpredictable
    identifiers.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(size))
6f0df5baabcf5334c87707e86ace343debc9d1b2
109,722
import json


def load_paths_configuration(config_file_path):
    """
    Loads configuration from json file.

    :param config_file_path: path of the JSON configuration file
    :return: tuple of the 'paths' and 'windows_logs' entries of the file
    """
    with open(config_file_path) as json_file:
        config = json.load(json_file)
    return config['paths'], config['windows_logs']
aaf088846d5e05b64305c72e301c4d343c2b8e50
109,727
def incremental_weighted_mean(xbarj, j, xbarm, wbarm, wbarj):
    """incremental weighted mean assuming constant batch sizes

    Args:
        float: xbarj is the cumulative average of all previous batches
        float: j is the number of previous batches
        float: xbarm is the average of the mth batch (the one being added)
        float: wbarm is the average weight of the mth batch (the one being added)
        float: wbarj is the average weight of previous batches
    Returns:
        float: the new weighted mean with the new data added
    """
    numerator = j * xbarj * wbarj + xbarm * wbarm
    denominator = j * wbarj + wbarm
    return numerator / denominator
bc7ee1f4993de567c7ae6b91f1abae3dcca90074
109,728
def encode_pos(pos):
    """Encode song position as a list of bytes: [low 7 bits, remaining bits]."""
    high, low = divmod(pos, 128)
    return [low, high]
4e606647a89c88b25e8d4cb1525527577ce2929a
109,729
def get_unit(scale):
    """ Convert scale term to unit label

    NOTE(review): lookup is by exact numeric key; a computed scale that is
    not bit-identical to one of the float keys raises KeyError — confirm
    callers pass the literal constants.
    """
    scale2unit = {
        # length
        1e-9: 'nm',
        1e-6: u'\N{MICRO SIGN}m', #or hex id (lookup): u'\u00B5'
        1e-3: 'mm',
        0.01: 'cm',
        0.1:'dm',
        1:'m',
        1000:'km',
        # time
        8.6400e4:'day',
        3.1536e7:'yr',
        3.1536e10:'ka',
        3.1536e13:'Ma',
        #Pressure
        1e9: 'GPa',
        1e6: 'MPa',
    }
    return scale2unit[scale]
b95c0acba39f2a77f16a7544039549304aacdacd
109,732
def prox_trace_base(B, t, C):
    """Proximal operator of :math:`f(B) = tr(C^TB)`, the trace of
    :math:`C^TB`, where C is a given matrix quantity such that
    :math:`C^TB` is square.
    """
    step = t * C
    return B - step
b5c664215ad6321ac9a7f14a6b94e761d27ecebe
109,741
def clean_player_data(players_data):
    """Clean the list of players in place: height becomes an int (its unit
    is split off into "dimension"), experience becomes a bool, and the
    guardians string is split into a list on " and ".

    Parameters
    ----------
    players_data : list
        List of player dicts that require cleaning

    Returns
    -------
    list
        list of cleaned player data
    """
    for player in players_data:
        raw_height = player["height"]
        player["dimension"] = raw_height[3:]
        player["height"] = int(raw_height[:2])
        player["experience"] = player["experience"] == 'YES'
        player["guardians"] = player["guardians"].split(' and ')
    return players_data
791f29dc99b8435e387c59d68777eb21b7dccfe3
109,744
def T(name, content=None, **props):
    """Helper function for building components."""
    return {"_name": name, "text": content, "_props": props}
73fae9dfd8e28e5554d8634857701d133b682746
109,748
import math


def heading_from_camera(bbox, image_shape):
    """
    Calculates the heading angle (in degrees) of the object from the camera.

    PARMS
        bbox: Bounding box (startX, startY, endX, endY) [px]
        image_shape: Size of the image [px]; indexed as image_shape[1]
            for width and image_shape[0] for height.

    Returns a positive angle when the object center is right of the image
    center, negative otherwise.
    """
    # GoPro intrinsic camera settings
    ###################################
    focal_length_mm = 5.21
    unit_pixel_length = 1.12           # micrometres per pixel
    sen_res = (5663, 4223)             # sensor resolution (width, height) px
    sensor_width_mm = (unit_pixel_length * sen_res[0]) / 1000
    # (the original also computed an unused sensor height — removed)
    ###################################

    # Image center
    (cX, cY) = image_shape[1] / 2, image_shape[0] / 2

    # Object center
    (startX, startY, endX, endY) = bbox
    (centerX, centerY) = (startX + endX) / 2, (startY + endY) / 2

    # Pixel distance between the object center and the image center
    distance = math.sqrt((centerX - cX) ** 2 + (centerY - cY) ** 2)

    # Focal length in px
    img_width_px = image_shape[1]
    f_px = (focal_length_mm * img_width_px) / sensor_width_mm

    # Heading angle; sign encodes left/right of center
    angle = math.degrees(math.asin(distance / f_px))
    if centerX > cX:
        return angle
    return -angle
561ad833c369b996188a7eedf57c4b2593414cb9
109,753
def boundaries_to_knots(boundaries, degree):
    """Construct the knot sequences used at the boundaries of a B-spline.

    # Arguments
        boundaries : A tuple containing two floats (low, high).
        degree : The degree of the B-spline pieces.

    # Returns
        A 2-tuple containing the lower and upper knot sequences as lists,
        each the boundary value repeated `degree` times.
    """
    repeat = int(degree)
    low, high = boundaries
    return [low] * repeat, [high] * repeat
4c5e7c4ecbe50b1cbddcec2545003165601f7a8f
109,756
import importlib


def import_item(name):
    """
    Returns imported module, or identifier from imported namespace; raises
    on error.

    @param   name  Python module name like "my.module"
                   or module namespace identifier like "my.module.Class"
    """
    result, parts = None, name.split(".")
    for i, item in enumerate(parts):
        # Progressively longer dotted path: "my", "my.module", ...
        path, success = ".".join(parts[:i + 1]), False
        try:
            # First try to import the path as a module/package.
            result, success = importlib.import_module(path), True
        except ImportError:
            pass
        if not success and i:
            # Not importable: fall back to attribute lookup on the object
            # resolved so far (only possible past the first segment).
            try:
                result, success = getattr(result, item), True
            except AttributeError:
                pass
        if not success:
            raise ImportError("No module or identifier named %r" % path)
    return result
5e926b9a92120c8edf777a21c94b91bb16f4180a
109,760
def convert_to_list_dict(lst, label):
    """Convert a value or list into a list of dicts; None for falsy input."""
    if not lst:
        return None
    values = lst if isinstance(lst, list) else [lst]
    return [{label: value} for value in values]
dbe79465af081f2e0547fcd3304b51a993524d59
109,761
def get_order_type(order):
    """
    Returns the order type (Sell, Short Sell, Buy)
    :param order: See models.Order
    :return: String
    """
    type_by_attr = (
        ('sellorder', 'SELL'),
        ('shortsellorder', 'SHORT SELL'),
        ('buyorder', 'BUY'),
    )
    for attr, label in type_by_attr:
        if hasattr(order, attr):
            return label
    return 'UNKNOWN'
8bdcab3a0c76df6c1c96bc8677e63996dc256e61
109,764
def _dict_iteritems(dictionary):
    """Get an iterator or view on the items of the specified dictionary.

    This method is Python 2 and Python 3 compatible.
    """
    try:
        # Python 2: iteritems() returns a lazy iterator.
        return dictionary.iteritems()
    except AttributeError:
        # Python 3: items() already returns a view.
        return dictionary.items()
9bb95f263035bac6a2989eeb817c81ab15a4957f
109,767
import six


def assignment_to_plan(assignment):
    """Convert an assignment to the format used by Kafka to describe
    a reassignment plan.
    """
    # `assignment` maps (topic, partition) tuples to replica lists;
    # six.iteritems keeps this Python 2/3 compatible.
    return {
        'version': 1,
        'partitions':
        [{'topic': t_p[0],
          'partition': t_p[1],
          'replicas': replica
          } for t_p, replica in six.iteritems(assignment)]
    }
67798af28c1b2293367ec4700445bed9771db5e1
109,768
def no_stacktrace_for(exception_type):
    """ Suppress stack trace for exceptions of exception_type on the method. """
    def decorator(step_function):
        # Accumulate expected exception types on the function object itself.
        if hasattr(step_function, "_expected_exceptions"):
            step_function._expected_exceptions.append(exception_type)
        else:
            step_function._expected_exceptions = [exception_type]
        return step_function
    return decorator
fc09077e42a958ec429b863f0c82272ef9b6d8be
109,771
import torch


def quantize(batch: torch.Tensor, bins: int) -> torch.Tensor:
    """
    Returns the quantized version of the input batch given a number of bins.

    Args:
        batch (torch.Tensor): batch containing data (..., dim_data)
        bins (int): number of intervals for the quantization
    Returns:
        torch.Tensor: quantized tensor containing values from 0 to bins-1
    """
    lows = batch.min(dim=-2)[0].unsqueeze(-2)
    highs = batch.max(dim=-2)[0].unsqueeze(-2)
    bin_width = (highs - lows) / bins
    indices = torch.floor((batch - lows) / bin_width)
    # The maximum value lands exactly on `bins`; fold it into the last bin.
    indices[indices == bins] = bins - 1
    return indices
e612af87edbfa045e99848427158d183b2bc5683
109,776
import math


def coriolis_frequency(lat_deg):
    """
    Calculate the coriolis factor for a given latitude
    :param lat_deg: float deg
    :return: float hr**-1 coriolis factor
    """
    # Earth's angular rate: one revolution (2*pi rad) per 24 h.
    angular_rate = 2.0 * math.pi / 24
    return 2.0 * angular_rate * math.sin(math.radians(lat_deg))
846a04d0891f8d311bc2e89c66da70631c96b8f0
109,782
def parse_book_name(line):
    """
    Extracts book name and year from title

    :param line: string containing book title, e.g. "Name: Year"
    :return: name: book name (stripped)
             year: book year (stripped), or 'Unknown' when no ':' present
    """
    parts = line.split(':')
    name = parts[0].strip()
    # len() replaces the unidiomatic parts.__len__() call.
    year = parts[1].strip() if len(parts) > 1 else 'Unknown'
    return name, year
cb8ce7af221f66a8740ad04bb4a43a55fa13f539
109,784
def print_code(freq, codedict):
    """
    Given a frequency map (dictionary mapping symbols to their frequency)
    and a codedict, print them in a readable form.
    """
    # Printable names for common non-printing ASCII codes.
    special_ascii = {0: 'NUL', 9: 'TAB', 10: 'LF', 13: 'CR', 127: 'DEL'}
    def disp_char(i):
        # Printable ASCII gets its repr; otherwise fall back to the table.
        if 32 <= i < 127:
            return repr(chr(i))
        return special_ascii.get(i, '')
    print(' symbol char hex frequency Huffman code')
    print(70 * '-')
    # Most frequent symbols first; ties broken by symbol value.
    for i in sorted(codedict, key=lambda c: (freq[c], c), reverse=True):
        # NOTE(review): codedict values appear to be bitarray objects
        # (``to01`` renders bits as '0'/'1' text) — confirm.
        print('%7r %-4s 0x%02x %10i %s' % (
            i, disp_char(i), i, freq[i], codedict[i].to01()))
022b1cb7ffa108dfd8a7673b10d708e21e54451e
109,785
def get_url(line3):
    """Collects URL from the line.

    Args:
        line3 (str): 3.st line of data block

    Returns:
        str: URL — the second space-separated field — or '-' when that
        field is empty (e.g. a double space in the line).

    NOTE(review): raises IndexError when the line contains no space at
    all — confirm callers guarantee the expected field layout.
    """
    link = line3.split(' ')[1]
    return link if link != '' else '-'
71875ef6924c90c4d6e3759bcd46b0e8b063a2fe
109,786
def get_projection(id, tree, projection):
    """
    Like proj() above, but works with the tree data structure.
    Collects node ids (all descendants of `id`) in the set called projection.
    """
    for node in tree['children'][id]:
        if node in projection:
            # cycle is or will be reported elsewhere
            continue
        projection.add(node)
        get_projection(node, tree, projection)
    return projection
0706d31bd329912961ea082afc00192c5aa12e6c
109,789
def keys_list(d: dict) -> list:
    """Return list(d.keys())."""
    return [key for key in d]
e87d20e2a2044bd23a5a1a234c530e0a4a427127
109,794
def recursiveIndex(nestedList, query):
    """
    Find index of element (first occurrence) in an arbitrarily nested list.

    Args:
        nestedList(list): list object to search in
        query: target element to find
    Returns:
        list: Position indices ([] when not found)
    """
    for position, item in enumerate(nestedList):
        if isinstance(item, (list, tuple)):
            sub_path = recursiveIndex(item, query)
            if sub_path:
                return [position] + sub_path
        if item == query:
            return [position]
    return []
6386feee441e6c687f1b0b68e8e319ca79653041
109,796
def make_setter(name, var_type, user_setter):
    """Returns a setter function which sets the attribute `name', first
    casting it to `type' and passing it through the `user_setter' function."""
    def setter(self, new_val):
        setattr(self, name, user_setter(var_type(new_val)))
    return setter
4f2c46b21f542a2d40ce45e2794f81b7db1d9dcf
109,798
import time
import functools


def timeFunction(func):
    """
    source: https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods

    Decorator function to measure how long a method/function takes to run.
    It returns a tuple with:
    * wall clock time spent (seconds, rounded to 4 decimal places)
    * returned result of the function
    * the function name

    Uses ``time.perf_counter`` (monotonic, high resolution) instead of
    ``time.time`` so the measurement cannot go backwards with clock
    adjustments; ``functools.wraps`` preserves the wrapped function's
    name/docstring for introspection.
    """
    @functools.wraps(func)
    def wrapper(*arg, **kw):
        t1 = time.perf_counter()
        res = func(*arg, **kw)
        t2 = time.perf_counter()
        return round((t2 - t1), 4), res, func.__name__
    return wrapper
c23392aa41d9017ea2b5b56a13530cea9a2eef98
109,802
def repeat_str(repeat, string):
    """Repeat a string a given number of times.

    :param repeat: number of repetitions (int).
    :param string: the string to be repeated.
    :return: *string* concatenated *repeat* times.
    """
    repeated = string * repeat
    return repeated
7fadf06b400833addafd09d04461a974fb21078d
109,806
def performance_characteristics(signature):
    """Parse 802.11n/ac capabilities bitmasks from sig.

    Args:
        signature: the Wifi signature string, '|'-separated segments of
            comma-separated ``name:hexvalue`` fields.

    Returns:
        (standard, nss, width) where:
        standard: 802.11 standard like 'a/b/g', 'n', 'ac', etc.
        nss: number of spatial streams as an int, 0 for unknown.
        width: channel width as a string: '20', '40', '80', '160',
            '80+80', '??'

        NOTE(review): when no 'assoc:'/'beacon:' segment is present the
        function returns the empty string '' rather than a tuple.
    """
    segments = signature.split('|')
    vhtseg = ''
    for segment in segments:
        # There are a few kindof broken devices which include a vhtcap in
        # their Probe even though they are not 802.11ac devices.  They
        # didn't notice because its the Association which really counts.
        # For client devices, only look at the Association.  For APs, look
        # at the Beacon.  (Last matching segment wins.)
        if segment.startswith('assoc:') or segment.startswith('beacon:'):
            vhtseg = segment
    if not vhtseg:
        return ''
    fields = vhtseg.split(',')
    vht_nss = ht_nss = 0
    vht_width = ht_width = ''
    for field in fields:
        if field.startswith('vhtcap:'):
            try:
                bitmap = int(field[len('vhtcap:'):], base=16)
            except ValueError:
                vht_width = '??'
            else:
                # Bits 2-3 of the VHT capabilities: Supported Channel
                # Width Set.
                scw = (bitmap >> 2) & 0x3
                widths = {0: '80', 1: '160', 2: '80+80'}
                vht_width = widths.get(scw, '??')
        elif field.startswith('htcap:'):
            try:
                bitmap = int(field[len('htcap:'):], base=16)
            except ValueError:
                ht_width = '??'
            else:
                # Bit 1 of the HT capabilities: 40 MHz channel support.
                ht_width = '40' if bitmap & 0x2 else '20'
        elif field.startswith('htmcs:'):
            try:
                mcs = int(field[len('htmcs:'):], base=16)
            except ValueError:
                pass
            else:
                # HT MCS map: one byte per spatial stream; count the
                # non-zero bytes.
                ht_nss = ((mcs & 0x000000ff != 0) +
                          (mcs & 0x0000ff00 != 0) +
                          (mcs & 0x00ff0000 != 0) +
                          (mcs & 0xff000000 != 0))
        elif field.startswith('vhtrxmcs:'):
            try:
                mcs = int(field[len('vhtrxmcs:'):], base=16)
            except ValueError:
                pass
            else:
                # VHT RX MCS map: two bits per stream, value 0x3 meaning
                # "stream not supported"; count the 2-bit fields != 0x3.
                vht_nss = ((mcs & 0x0003 != 0x0003) +
                           (mcs & 0x000c != 0x000c) +
                           (mcs & 0x0030 != 0x0030) +
                           (mcs & 0x00c0 != 0x00c0) +
                           (mcs & 0x0300 != 0x0300) +
                           (mcs & 0x0c00 != 0x0c00) +
                           (mcs & 0x3000 != 0x3000) +
                           (mcs & 0xc000 != 0xc000))
    # VHT information takes precedence over HT; with neither present the
    # device is assumed to be a legacy single-stream 20 MHz client.
    if vht_width:
        return ('802.11ac', vht_nss, vht_width)
    if ht_width:
        return ('802.11n', ht_nss, ht_width)
    return ('802.11a/b/g', 1, '20')
57b6dc3b825eeae9d020c14473fc7b9bebf8d1a7
109,810
def one_obj_or_list(seq):
    """Unwrap single-element sequences: return the lone element when *seq*
    has exactly one item, otherwise return *seq* unchanged."""
    return seq[0] if len(seq) == 1 else seq
fc2eac976880f442602685811f885190c6c7e4ee
109,813
def estimate_numers_of_sites(linear_regressor, x_value):
    """Predict the y value for the stated x value.

    Parameters
    ----------
    linear_regressor : object
        Fitted linear regression object exposing ``predict``.
    x_value : float
        The x value we want to use to predict y.

    Returns
    -------
    result : float
        The predicted y value, or 0 when *x_value* equals 0.
    """
    if x_value == 0:
        return 0
    prediction = linear_regressor.predict(x_value)
    # ``predict`` returns a 2-D array; unwrap the single scalar.
    return prediction[0, 0]
707e349f0ba9468eb1d7fe88524ff50d7840e9ac
109,819
def Jobs(argv, job_state):
    """List jobs.

    *argv* is accepted for a uniform command signature but is unused.
    Always returns 0 (success).
    """
    job_state.List()
    return 0
88b8c4f7c2bc6141ef6a68f12e2416a65f345b2b
109,820
from typing import Any
import pickle


def import_from_pickle(
        file_path: str
) -> Any:
    """Load and return the object pickled at *file_path*.

    :param file_path: path to a pickle file.
    :return: the unpickled object.

    WARNING: ``pickle.load`` can execute arbitrary code; only use this on
    trusted files.
    """
    # ``with`` guarantees the handle is closed even when unpickling raises;
    # the original open()/close() pair leaked the file object on error.
    with open(file_path, 'rb') as f:
        return pickle.load(f)
fb0a96c3728d7c8a94dd5cd373132b6d291d1842
109,823
def filter_terms(p, degree_limit):
    """Drop from the n-variable polynomial *p* every term whose degree
    exceeds *degree_limit*.

    :param p: n-variable polynomial
    :param degree_limit: boundary for degree
    :return: n-variable polynomial with terms of degree at most
        *degree_limit*
    """
    for mono in p.monomials():
        if mono.degree() > degree_limit:
            # Subtracting coefficient * monomial removes the term from p.
            p -= mono * p.monomial_coefficient(mono)
    return p
0b50d00bd058464319325a003ea8878313d5315b
109,824
def getFieldsForType(callback, object_type, object_name):
    """Map an iRODS object type to its GenQuery metadata field names.

    :param callback: iRODS rule callback (used for msiSplitPath / msiExit)
    :param object_type: the object type, case-insensitive:
        -d data object, -R resource, -C collection, -u user
    :param object_name: the object (/nlmumc/P000000003,
        /nlmumc/projects/metadata.xml, user@mail.com, demoResc)
    :return: a dictionary with the attribute ('a'), value ('v') and
        units ('u') field names plus a WHERE clause selecting the object
    """
    kind = object_type.lower()
    fields = dict()
    # The three "simple" types share the same pattern and differ only in
    # the GenQuery prefix and the name column used in the WHERE clause.
    simple = {
        '-c': ("COLL", "COLL_NAME"),
        '-r': ("RESC", "RESC_NAME"),
        '-u': ("USER", "USER_NAME"),
    }
    if kind == '-d':
        fields['a'] = "META_DATA_ATTR_NAME"
        fields['v'] = "META_DATA_ATTR_VALUE"
        fields['u'] = "META_DATA_ATTR_UNITS"
        # A data object path must be split into collection + object name.
        ret_val = callback.msiSplitPath(object_name, "", "")
        object_name = ret_val['arguments'][2]
        collection = ret_val['arguments'][1]
        fields['WHERE'] = ("COLL_NAME = '" + collection +
                           "' AND DATA_NAME = '" + object_name + "'")
    elif kind in simple:
        prefix, name_column = simple[kind]
        fields['a'] = "META_" + prefix + "_ATTR_NAME"
        fields['v'] = "META_" + prefix + "_ATTR_VALUE"
        fields['u'] = "META_" + prefix + "_ATTR_UNITS"
        fields['WHERE'] = name_column + " = '" + object_name + "'"
    else:
        callback.msiExit("-1101000", "Object type should be -d, -C, -R or -u")
    return fields
f25aaf259d078e75807f0c067f6e5fc2f278faa7
109,829
def dictify_table_export_settings(table_export_settings):
    """Return a dict representation of a table_export_settings named tuple."""
    field_names = ("provider", "output_name", "iterate_daily", "sql")
    return {name: getattr(table_export_settings, name) for name in field_names}
6b9a315b08d3d320cfcc1225545a9fc237dfc4a6
109,830
from typing import Union import pathlib import hashlib def _resolve_experiment_buffer( buffer_root: Union[str, pathlib.Path], exp_id: int, experiment_name: str, filter_string: str, ) -> pathlib.Path: """Resolve the buffer root for a given experiment and filter string.""" filter_hash = hashlib.sha512(filter_string.encode(encoding="utf8")).hexdigest()[:8] buffer_root = pathlib.Path(buffer_root) / f"{exp_id}_{experiment_name}" / filter_hash return buffer_root
1474b5b8cbda58c76b3d15da1d13cbb0c7b39187
109,833
def int_to_bytes(i: int, b_len: int) -> bytes:
    """Encode a non-negative int as big-endian unsigned bytes.

    :param i: the non-negative int (OverflowError if it does not fit).
    :param b_len: the length of the resulting byte string.
    :return: the big-endian encoding of *i*.
    """
    return i.to_bytes(b_len, 'big')
6ef281513bff109aa5d605bff004d2cd677ae7b9
109,839
import binascii


def hex_string_to_latin1_string(hex_string):
    """Convert a hexadecimal string into a latin1 (ISO-8859-1) string."""
    raw = binascii.unhexlify(hex_string)
    return raw.decode("latin-1")
eb979aa95bf2ed20b40957c416d15fd2ba208421
109,840
def strp_brackets(text):
    """Strip surrounding whitespace and parentheses from a string."""
    cleaned = text.strip()
    # Remove leading/trailing '(' first, then ')' — same order as repeated
    # strip() calls, so composition with the original is preserved.
    for ch in ('(', ')'):
        cleaned = cleaned.strip(ch)
    return cleaned
5202268668d55816d7ae13fda95b2d57c7b68df5
109,849
def validate_actions(action, supported_actions):
    """Ensure *action* is supported, raising ValueError otherwise.

    :param action: the action name to check.
    :param supported_actions: iterable of valid action names.
    :return: True when the action is supported.
    :raises ValueError: when it is not.
    """
    if action not in supported_actions:
        # Build the whole message as f-strings.  The original implicitly
        # concatenated an f-string with a '... {}'.format(...) literal,
        # which applied .format() to the *entire* concatenated message and
        # crashed (KeyError/IndexError) whenever the action name itself
        # contained braces.
        raise ValueError(
            f'Action "{action}" is not supported.'
            f' the list of valid actions is: {", ".join(supported_actions)}'
        )
    return True
353e4f546428a9ae86f0133f1565a2aa6cd7108c
109,850
import six


def process_item(dryrun, verbose, item, coll_dict):
    """
    Process one movie or show item: two-way sync of its collection tags
    between the Plex server (PMS) and the collections-file dictionary.

    Parameters:

      dryrun (bool): If True, report what would be done but do not modify
        the item on the PMS.  Note: the collections dict is still updated.

      verbose (bool): If True, print a message for each change made.

      item (plexapi.video.Movie or plexapi.video.Show): movie or show
        item to be processed.

      coll_dict (dict): Collections dictionary from the collections file,
        with:
        * key (string): ID of the item
        * value (dict): Attributes of the item, as follows:
          - 'section': Title of the section of the item
          - 'title': Title of the item
          - 'year': Year of the item
          - 'collections': List of collection names of the item

    Returns:
      int: always 0.
    """
    dryrun_str = "Dryrun: " if dryrun else ""

    # If the item is not fully loaded, it may show only a subset of
    # collections.
    if not item.isFullObject():
        item.reload()

    item_collections = []  # List of collection names in item
    if item.collections:
        for c in item.collections:  # list of plexapi.media.Collection
            t = c.tag
            # Collection tags may come back as bytes; normalize to text.
            if isinstance(t, six.binary_type):
                t = t.decode('utf-8')
            item_collections.append(t)

    # The item is identified by the last component of its PMS key.
    item_id = item.key.split('/')[-1]
    item_section = item.section().title
    item_title = item.title
    item_year = item.year

    if item_id not in coll_dict:
        # First time this item is seen: create its file-dict entry.
        if verbose:
            print("{d}Creating {s!r} item in collections file: {t!r} ({y})".
                  format(d=dryrun_str, s=item_section, t=item_title,
                         y=item_year))
        coll_dict[item_id] = {
            'section': item_section,
            'title': item_title,
            'year': item_year,
            'collections': [],
        }
        file_item_dict = coll_dict[item_id]
    else:
        file_item_dict = coll_dict[item_id]
        # Keep the descriptive attributes in the file in sync with the
        # PMS (the PMS side wins).
        if item_section != file_item_dict.get('section', None) or \
                item_title != file_item_dict.get('title', None) or \
                item_year != file_item_dict.get('year', None):
            if verbose:
                print("{d}Updating section/title/year in collections file for "
                      "{s!r} item: {t!r} ({y})".
                      format(d=dryrun_str, s=item_section, t=item_title,
                             y=item_year))
            file_item_dict['section'] = item_section
            file_item_dict['title'] = item_title
            file_item_dict['year'] = item_year

    # Sync collections from PMS to collections file
    for coll in item_collections:
        if coll not in file_item_dict['collections']:
            if verbose:
                print("{d}Saving collection {c!r} to collections file for "
                      "{s!r} item: {t!r} ({y})".
                      format(d=dryrun_str, c=coll, s=item_section,
                             t=item_title, y=item_year))
            file_item_dict['collections'].append(coll)

    # Sync collections from collections file to PMS
    for coll in file_item_dict['collections']:
        if coll not in item_collections:
            if verbose:
                print("{d}Restoring collection '{c}' from collections file for "
                      "{s!r} item: {t!r} ({y})".
                      format(d=dryrun_str, c=coll, s=item_section,
                             t=item_title, y=item_year))
            item_collections.append(coll)
            # Only the PMS write is guarded by dryrun.
            if not dryrun:
                item.addCollection(coll)
    return 0
dd0eaedfbbd376a3e36c089873a0fdd7c50fc007
109,851
def inpath(entry, pathvar):
    """Return True when *entry* is one of the ':'-separated components of
    *pathvar* (a PATH-style string like ``entry1:entry2:entry3``)."""
    return entry in pathvar.split(':')
911c168d8bfab913e000f0286571b7e16813e970
109,853
from typing import Any
from typing import cast


def interpolate(format_: str, **context: Any) -> str:
    """
    Dynamically interpolates a format by using a given context.

    The format is evaluated as an f-string, so arbitrary expressions are
    allowed inside the braces.

    SECURITY: this evaluates *format_* with ``eval``; never pass an
    untrusted format string, as it can execute arbitrary code.

    Example:

        >>> interpolate('{payload}', payload=12)
        '12'
        >>> interpolate('{payload.upper()}', payload="a")
        'A'
        >>> interpolate('{(a - b):0.2f}', a=10, b=4.999)
        '5.00'
    """
    # Turn the template '{x}' into the source text f'{x}' and evaluate it
    # as an f-string, with *context* serving as the local namespace.
    return cast(str, eval(f'f{format_!r}', None, context))
5ededd69612fc75f27968790c452104c66a16fc2
109,854
def get_number_of_samples(dataset):
    """Return the number of samples held in *dataset*.

    Uses the first entry of ``dataset.inputs``: its leading ``shape``
    dimension when available (e.g. numpy arrays), otherwise ``len``.

    Raises AssertionError when the dataset has no inputs.
    """
    input_names = list(dataset.inputs.keys())
    if not input_names:
        raise AssertionError('Dataset has no inputs!')
    first = dataset.inputs[input_names[0]]
    return first.shape[0] if hasattr(first, 'shape') else len(first)
e9dfd6acdca79c8ed9695a0fae92fa557a6b3a33
109,856