Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def _confirm_prompt(message, prompt="\nAre you sure? [y/yes (default: no)]: ",
                    affirmations=("Y", "Yes", "yes", "y")):
    """
    Display a message, then confirmation prompt, and return true if the
    user responds with one of the affirmations.
    """
    answer = input(message + prompt)
    return answer in affirmations
19b17f5ceee3ce00fc27712eb771cff21fde323d
77,779
def are_all_equal(iterable):
    """
    Checks if all elements of a collection are equal.

    Args:
        iterable: iterator
            A collection of values such as a list.

    Returns:
        equal: boolean
            True if all elements of iterable are equal.
            Will also return true if iterable is empty.
    """
    return len(set(iterable)) <= 1
1588770e1501381db88821758ed2c8c58df5db91
77,785
def rgb_to_hsv(red: int, green: int, blue: int) -> list[float]:
    """
    Conversion from the RGB-representation to the HSV-representation.
    The tested values are the reverse values from the hsv_to_rgb-doctests.
    Function "approximately_equal_hsv" is needed because of small deviations due to
    rounding for the RGB-values.

    >>> approximately_equal_hsv(rgb_to_hsv(0, 0, 0), [0, 0, 0])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(255, 255, 255), [0, 0, 1])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(255, 0, 0), [0, 1, 1])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(255, 255, 0), [60, 1, 1])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(0, 255, 0), [120, 1, 1])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(0, 0, 255), [240, 1, 1])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(255, 0, 255), [300, 1, 1])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(64, 128, 128), [180, 0.5, 0.5])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(193, 196, 224), [234, 0.14, 0.88])
    True
    >>> approximately_equal_hsv(rgb_to_hsv(128, 32, 80), [330, 0.75, 0.5])
    True
    """
    if red < 0 or red > 255:
        raise Exception("red should be between 0 and 255")
    if green < 0 or green > 255:
        raise Exception("green should be between 0 and 255")
    if blue < 0 or blue > 255:
        raise Exception("blue should be between 0 and 255")

    float_red = red / 255
    float_green = green / 255
    float_blue = blue / 255
    value = max(max(float_red, float_green), float_blue)
    chroma = value - min(min(float_red, float_green), float_blue)
    saturation = 0 if value == 0 else chroma / value

    if chroma == 0:
        hue = 0.0
    elif value == float_red:
        hue = 60 * (0 + (float_green - float_blue) / chroma)
    elif value == float_green:
        hue = 60 * (2 + (float_blue - float_red) / chroma)
    else:
        hue = 60 * (4 + (float_red - float_green) / chroma)

    hue = (hue + 360) % 360
    return [hue, saturation, value]
b09c03d3a2830f5699f872653ebbcadbfb16aa0c
77,794
import math


def get_dct(input_values):
    """
    Apply DCT on list of numbers input_values, return list with same number of elements
    """
    matrix_size = len(input_values)
    result = [0.0] * matrix_size
    if matrix_size > 0:
        for outer_index in range(matrix_size):
            for index in range(matrix_size):
                result[outer_index] += (input_values[index] *
                                        math.cos(math.pi / matrix_size * outer_index * (index + 0.5)))
        result[0] *= 1.0 / math.sqrt(2.0)
        for index in range(matrix_size):
            result[index] *= math.sqrt(2.0 / matrix_size)
    return result
95c616137b779da97a1487f77e867db1385a5c85
77,796
def strip_tzinfo(value):
    """Strip timezone information from :class:`datetime.datetime` objects to enable comparison."""
    return value if value.tzinfo is None else value.replace(tzinfo=None)
0dc3125a8063b6960e250121e5674a83df78c3e7
77,798
def length_of_linkedlist(linkedlist):
    """ Find Length of Linked List """
    node = linkedlist.head
    count = 0
    # Walk node by node; an empty list (head is None) yields 0 instead of raising.
    while node is not None:
        count += 1
        node = node.next
    return count
857c3665e97fb458fda2e2d63dbe9e7f168d0557
77,800
import socket


def send_eth(ethernet_header, payload, server_ip, interface):
    """Send raw Ethernet packet on interface."""
    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    s.bind((interface, 0))
    s.send(ethernet_header + payload)
    return s.close()
4a397fd5328a05fe6f2697b3ef0f389296401802
77,803
import hashlib


def get_cache_key(text: str, voice: str, settings: str = "") -> str:
    """Get hashed WAV name for cache"""
    cache_key_str = f"{text}-{voice}-{settings}"
    return hashlib.sha256(cache_key_str.encode("utf-8")).hexdigest()
5051d38d8d4715bb2829f02718c8dc2eba416382
77,805
def initial(array):
    """Return all but the last element of `array`.

    Args:
        array (list): List to process.

    Returns:
        list: Initial part of `array`.

    Example:

        >>> initial([1, 2, 3, 4])
        [1, 2, 3]

    .. versionadded:: 1.0.0
    """
    return array[:-1]
fc579be391ffdf5c3445b5f4420a0c28cb572cad
77,807
def is_standard_atlas_job(release):
    """
    Is it a standard ATLAS job?
    A job is a standard ATLAS job if the release string begins with 'Atlas-'.

    :param release: Release value (string).
    :return: Boolean. Returns True if standard ATLAS job.
    """
    return release.startswith('Atlas-')
1eaa77eee19fbf835f96446f8e310fa5fb27f7f5
77,808
import hashlib


def string_to_md5(content: str) -> str:
    """
    Take a string and calculate its md5 hash (as a string).
    """
    encoded = content.encode("utf8")
    return hashlib.md5(encoded).hexdigest()
1949385c5f95af092147b6576647769f79318109
77,809
def all_field_names(model):
    """Return list of all field names for model"""
    return [f.name for f in model._meta.get_fields()]
6dbfc87a54e2851916fccf4ed08af7740eb32233
77,810
import re


def segment_words(volpiano: str) -> list:
    """Segment a volpiano string in segments corresponding to words.

    Any group of 3 or more dashes is a word boundary.

    >>> segment_words('f--g---h-g')
    ['fg', 'hg']
    >>> segment_words('---f-------g---')
    ['f', 'g']

    Parameters
    ----------
    volpiano : str
        the volpiano string to segment

    Returns
    -------
    list
        A list of word-segments
    """
    # Replace >3 dashes with word boundary
    volpiano = re.sub('-{4,}', '---', volpiano)
    # Word boundary --> space and remove neume/syll boundaries
    volpiano = re.sub('---', ' ', volpiano).replace('-', '').strip()
    words = volpiano.split()
    return words
d01acb8294aa9dc78f9531c612a754c880a98d4b
77,820
import re


def normalize_input_string(input):
    """Performs following actions on input, then returns the input:
        - make all characters lowercase
        - strip leading and trailing whitespace
        - replace underscores with spaces
        - substitute one or more whitespaces with a space
    """
    input = input.lower().strip()
    input = input.replace('_', ' ')
    input = re.sub(r'\s+', ' ', input)
    return input
60dc84ee309c15d40b31d2504eb053cf197701a7
77,822
def calc_speed_coefficient(thrust, total_mass):
    """
    Calculate a ship's maximum speed multiplier for the server speed limit.

    The maximum speed a ship can achieve on a server is a combination of the
    thrust:mass ratio of the ship and the server speed limit. This function
    returns a coefficient between 0.5 and 3.0 that, when multiplied with the
    server's speed limit, will give you the ship's maximum speed.

    Args:
        thrust: A float that represents the thrust of a ship. See calc_thrust()
        total_mass: A float that represents the total mass of the ship.

    Returns:
        A float between (inclusive) 0.5 and 3.0. The max speed coefficient
    """
    return min(thrust / total_mass, 2.5) + 0.5
f3d3ac861538af645402a1383ad5ed55cc6324a6
77,824
def sort_by_message(results):
    """Sort `results` by the message_id of the error messages."""
    return sorted(results, key=lambda r: r['message_id'])
2f357f56c6b6cea82e4ba26f7d77b462dd1a1ccc
77,828
import binascii
import socket


def ip2long(ip):
    """
    Wrapper function for IPv4 and IPv6 converters.

    :arg ip: IPv4 or IPv6 address
    """
    try:
        return int(binascii.hexlify(socket.inet_aton(ip)), 16)
    except socket.error:
        return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ip)), 16)
91f6705a608ba22fd79cd05c37c6f0c3b2d2206c
77,830
import numbers

import torch


def torch_isnan(x):
    """
    A convenient function to check if a Tensor contains any nan; also works with numbers
    """
    if isinstance(x, numbers.Number):
        return x != x
    return torch.isnan(x).any()
5efbe74d10b460f555d8b9aa092fa05e3b7b3b5f
77,837
def parse_args(parser):
    """
    Parse commandline arguments.
    """
    parser.add_argument('--tacotron2-checkpoint', type=str,
                        help='full path to the generator checkpoint file')
    parser.add_argument('-b', '--batch-size', default=32, type=int)
    parser.add_argument('--log-file', type=str, default='nvlog.json',
                        help='Filename for logging')
    # Mel extraction
    parser.add_argument('-d', '--dataset-path', type=str, default='./',
                        help='Path to dataset')
    parser.add_argument('--wav-text-filelist', required=True, type=str,
                        help='Path to file with audio paths and text')
    parser.add_argument('--text-cleaners', nargs='*', default=['english_cleaners'], type=str,
                        help='Type of text cleaners for input text')
    parser.add_argument('--max-wav-value', default=32768.0, type=float,
                        help='Maximum audiowave value')
    parser.add_argument('--sampling-rate', default=22050, type=int,
                        help='Sampling rate')
    parser.add_argument('--filter-length', default=1024, type=int,
                        help='Filter length')
    parser.add_argument('--hop-length', default=256, type=int,
                        help='Hop (stride) length')
    parser.add_argument('--win-length', default=1024, type=int,
                        help='Window length')
    parser.add_argument('--mel-fmin', default=0.0, type=float,
                        help='Minimum mel frequency')
    parser.add_argument('--mel-fmax', default=8000.0, type=float,
                        help='Maximum mel frequency')
    # Duration extraction
    parser.add_argument('--extract-mels', action='store_true',
                        help='Calculate spectrograms from .wav files')
    parser.add_argument('--extract-mels-teacher', action='store_true',
                        help='Extract Taco-generated mel-spectrograms for KD')
    parser.add_argument('--extract-durations', action='store_true',
                        help='Extract char durations from attention matrices')
    parser.add_argument('--extract-attentions', action='store_true',
                        help='Extract full attention matrices')
    parser.add_argument('--extract-pitch-mel', action='store_true',
                        help='Extract pitch')
    parser.add_argument('--extract-pitch-char', action='store_true',
                        help='Extract pitch averaged over input characters')
    parser.add_argument('--extract-pitch-trichar', action='store_true',
                        help='Extract pitch averaged over input characters')
    parser.add_argument('--train-mode', action='store_true',
                        help='Run the model in .train() mode')
    parser.add_argument('--cuda', action='store_true',
                        help='Extract mels on a GPU using CUDA')
    return parser
d9202b64da749740fe05d93508e4eb4c825eba9a
77,839
def feature_vectorizer(features, term_voc):
    """
    Produces vector of features

    term_voc : core.TermVocabulary

    returns: dict
        vector {index1: value1, ..., indexN: valueN}
    """
    vector = {}
    for feature_name in features.keys():
        if not term_voc.contains(feature_name):
            term_voc.insert_term(feature_name)
        index = term_voc.get_term_index(feature_name)
        vector[index] = features[feature_name]
    return vector
bf8ae554151f509d67e1c2a232aa04ca134e843c
77,840
def _encoded_x_field(encoded_x, path):
    """Returns a field from `encoded_x` returned by the `encode` method.

    In order to test the correctness of encoding, we also need to access the
    encoded objects, which in turn depends on an implementation detail (the
    specific use of `nest.flatten_with_joined_string_paths`). This dependence is
    constrained to a single place in this utility.

    Args:
        encoded_x: The structure returned by the `encode` method.
        path: A list of keys corresponding to the path in the nested dictionary
            before it was flattened.

    Returns:
        A value from `encoded_x` corresponding to the `path`.
    """
    return encoded_x['/'.join(path)]
656406daadad9b1ca41d51e6bc57103f61ce4fc3
77,842
def get_class_name(obj: object) -> str:
    """
    Get the full class name of an object.

    :param obj: A Python object.
    :return: A qualified class name.
    """
    module = obj.__class__.__module__
    if module is None or module == str.__class__.__module__:
        return obj.__class__.__name__
    return module + "." + obj.__class__.__name__
cbb80bfe03c62604ab5a1ecf9df35094f3f7e145
77,848
def parse_header_next_link(value):
    """Return a next link from headers."""
    link = None
    for val in value.split(","):
        try:
            url, params = val.split(";", 1)
        except ValueError:
            url, params = val, ''
        # only next params please
        if params and 'next' not in params:
            continue
        return url.strip("<> '\"")
    return link
c0249b855730f83cdd7f507618ee5411dcdfac39
77,851
import re


def get_insta_shortcode(url: str) -> str:
    """
    Return shortcode of post from instagram URL

    :param url: URL
    :return: shortcode of post
    """
    shortcode = re.findall(
        r"(?:(?:http|https):\/\/)?(?:www.)?(?:instagram.com|instagr.am)\/p\/([A-Za-z0-9-_.]+)",
        url,
    )[0]
    return shortcode
0e60013e1cb22eca71e44907c05d1dac7828b59a
77,852
import pickle


def load_calibration_data(path):
    """
    Load camera matrix and distortion coefficients from a pickled file

    Inputs
    ----------
    path: str
        Path to the file the calibration data will be loaded from

    Outputs
    -------
    mtx: numpy.ndarray
        Camera matrix
    dist: numpy.ndarray
        Camera distortion coefficients
    """
    with open(path, mode='rb') as f:
        data = pickle.load(f)
    mtx = data['mtx']
    dist = data['dist']
    return mtx, dist
ba0e4dc77ec7aad1b11aba15b9cd1c6e4f7e9a69
77,854
import re


def baseOfBaseCode(baseCode):
    """
    Return the base (jrnlCode) of the baseCode

    >>> print(baseOfBaseCode("IJP.001"))
    IJP
    >>> print(baseOfBaseCode("IJP001"))
    IJP
    >>> print(baseOfBaseCode("JOAP221"))
    JOAP
    >>> print(baseOfBaseCode("ANIJP-IT.2006"))
    ANIJP-IT
    >>> print(baseOfBaseCode("anijp-it.2006"))
    ANIJP-IT
    """
    retVal = re.split(r"\.|[0-9]", baseCode.upper())
    return retVal[0]
9597272d4ffa6f1b6a74a7beff50e1197cda0c54
77,856
def blocks_table_h() -> dict[int, int]:
    """The table is the number of blocks for the correction level H.

    Returns:
        dict[int, int]: Dictionary of the form {version: number of blocks}
    """
    table = {
        1: 1, 2: 1, 3: 2, 4: 4, 5: 4, 6: 4, 7: 5, 8: 6, 9: 8, 10: 8,
        11: 11, 12: 11, 13: 16, 14: 16, 15: 18, 16: 16, 17: 19, 18: 21, 19: 25, 20: 25,
        21: 25, 22: 34, 23: 30, 24: 32, 25: 35, 26: 37, 27: 40, 28: 42, 29: 45, 30: 48,
        31: 51, 32: 54, 33: 57, 34: 60, 35: 63, 36: 66, 37: 70, 38: 74, 39: 77, 40: 81
    }
    return table
796605469839c8ad08c42f8faa0eb19ce17d4f74
77,858
import pkg_resources


def get_string_asset(path):
    """Returns the content of the specified asset file"""
    return pkg_resources.resource_string(__name__, 'assets/{}'.format(path))
cfd3b0a02a7b1e71eab5343d4793a0f23024cd40
77,863
def set_list_as_str(l):
    """Small utility function to transform list in string

    Parameters
    ----------
    l : list
        Input list

    Returns
    -------
    str
        Stringified version of the input list, with items separated with a comma
    """
    if type(l) == list:
        return ",".join(str(e) for e in l)
99d616bc7c57ca1be746bd24017e7c56376f1d67
77,865
def tuple_of(conversion):
    """Generate conversion function to convert list of strings to tuple of given type.

    Note that a tuple may be preferable to a list, since a tuple is hashable.

    >>> a = ["1", "2"]
    >>> tuple_of(int)(a)
    (1, 2)

    Arguments:
        conversion (function): type conversion function for single entry

    Returns:
        (function): function to convert list of such entries
    """
    def f(data):
        return tuple(map(conversion, data))
    return f
8800099640c1ca0fb1e55f90bdec78f4d133fd32
77,868
def odd_numbers(n):
    """Return a list of the first n odd numbers

    eg odd_numbers(5) should return:
        [1, 3, 5, 7, 9]
    """
    odd_numbers = []
    for i in range(n * 2):
        if i % 2 == 1:
            odd_numbers.append(i)
    return odd_numbers
c0bc0a6cdbee6c195581bf9ca6369b5986fd212c
77,869
def tmAvgMidpoint(ref_point_1, ref_point_2):
    """
    Simplest version of a midpoint calculation. Simply the average of two positions

    Args:
        ref_point_1 (tm): position 1
        ref_point_2 (tm): position 2

    Returns:
        tm: midpoint average of positions 1 and 2
    """
    return (ref_point_1 + ref_point_2) / 2
daa4e747b75e246a04d1f7549d3cb56724eec85d
77,870
def caesar_encode(p_offset, p_input_text):
    """
    Encode a text using the Caesar method with a specific offset

    :param p_offset: offset that will be used with the alphabet
    :param p_input_text: text that will be cyphered
    :return: the text cyphered
    """
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    output_text = ""
    # It will take a char from the alphabet with the offset added
    for i in p_input_text:
        pos = alphabet.find(i)
        output_text += alphabet[(pos + p_offset) % len(alphabet)]
    return output_text
593aaed374ca191597dccc48cb6c17e06265169a
77,873
def center_text_in_cell(cell_width_px, cell_height_px, text_width_px, text_height_px):
    """
    Centers text in a cell.

    :param cell_width_px: cell width (pixels).
    :param cell_height_px: cell height (pixels).
    :param text_width_px: text width (pixels).
    :param text_height_px: text height (pixels).
    :return: upper left corner of centered area.
    """
    x = int((cell_width_px - text_width_px) / 2)
    y = int((cell_height_px - text_height_px) / 2)
    return x, y
11476d54e42b5652010735171e349f0ca78a39ba
77,876
from typing import Any


def isnum(val: Any) -> bool:
    """Check if string/number can be converted to number"""
    return str(val).replace('.', '', 1).isdigit()
3d616edb5c37334afa9a0dc6767a727d2006e5b9
77,878
import torch


def roll_pitch_yaw_to_rotation_matrices(roll_pitch_yaw):
    """Converts roll-pitch-yaw angles to rotation matrices.

    Args:
        roll_pitch_yaw: Tensor with shape [..., 3]. The last dimension contains
            the roll, pitch, and yaw angles in radians. The resulting matrix
            rotates points by first applying roll around the x-axis, then pitch
            around the y-axis, then yaw around the z-axis.

    Returns:
        Tensor with shape [..., 3, 3]. The 3x3 rotation matrices corresponding to
        the input roll-pitch-yaw angles.
    """
    cosines = torch.cos(roll_pitch_yaw)
    sines = torch.sin(roll_pitch_yaw)
    cx, cy, cz = torch.unbind(cosines, dim=-1)
    sx, sy, sz = torch.unbind(sines, dim=-1)
    # pyformat: disable
    rotation = torch.stack(
        [cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx,
         sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx,
         -sy, cy * sx, cy * cx], dim=-1)
    # pyformat: enable
    # shape = torch.cat([roll_pitch_yaw.shape[:-1], [3, 3]], axis=0)
    shape = list(roll_pitch_yaw.shape[:-1]) + [3, 3]
    rotation = torch.reshape(rotation, shape)
    return rotation
61440f46d0cd763614515de987ea258f8549d55e
77,881
def dist_rgb_weighted(rgb1, rgb2):
    """
    Determine the weighted distance between two rgb colors.

    :arg tuple rgb1: RGB color definition
    :arg tuple rgb2: RGB color definition
    :returns: Square of the distance between provided colors
    :rtype: float

    Similar to a standard distance formula, the values are weighted
    to approximate human perception of color differences.

    For efficiency, the square of the distance is returned,
    which is sufficient for comparisons.
    """
    red_mean = (rgb1[0] + rgb2[0]) / 2.0
    return ((2 + red_mean / 256) * pow(rgb1[0] - rgb2[0], 2) +
            4 * pow(rgb1[1] - rgb2[1], 2) +
            (2 + (255 - red_mean) / 256) * pow(rgb1[2] - rgb2[2], 2))
c9b229bcd5d6be1dc28f6a1db755ac9d448b6792
77,882
def single_type_count(clothes_list, type):
    """
    Returns an integer value of the number of a given type of clothing in a list.

    Args:
        clothes_list (list): List of clothing objects to count from
        type (str): Clothing type to count

    Returns:
        type_count (int): Number of garments of the specified type in the given
            list of clothing objects
    """
    type_count = 0
    for garment in clothes_list:
        if garment.db.clothing_type:
            if garment.db.clothing_type == type:
                type_count += 1
    return type_count
52219cae17bd67671904bd7ee9ae01fc4548ef96
77,886
def check_type_equal_ignore_nullable(sf1, sf2):
    """Checks StructField data types ignoring nullables.

    Handles array element types also.
    """
    dt1, dt2 = sf1.dataType, sf2.dataType
    if dt1.typeName() == dt2.typeName():
        # Account for array types by inspecting elementType.
        if dt1.typeName() == 'array':
            return dt1.elementType == dt2.elementType
        else:
            return True
    else:
        return False
c47051068f87dbe62e51f139d9f77c9833887f26
77,887
def _is_builtin(obj):
    """
    Check if the type of `obj` is a builtin type.

    Parameters
    ----------
    obj : object
        Object in question.

    Returns
    -------
    builtin : bool
        True if `obj` is a builtin type
    """
    return obj.__class__.__module__ == 'builtins'
225eb8569b0e866bed29be2783bf5ab5b5e9bd31
77,889
def demand_crosses_cut(demand, cut):
    """
    Determines whether a demand crosses a cut.

    :param demand: tuple of indices of the demand
    :param cut: tuple of indices of the cut
    :return: whether the given demand crosses the given cut
    """
    i, j = min(demand), max(demand)
    g, h = min(cut), max(cut)
    return (i <= g < j <= h) or (g < i <= h < j)
6c7628d01d09ec3e52e6968381dc6d70704b7df0
77,895
def snd(pair):
    """ Second of a pair."""
    return pair[1]
110d3663f7ce860dabb65ce608990cf29ebfc9b2
77,896
import re


def re_soap_tag(text, tag, limit=4000, pattern=r"<{t}>(.*?)</{t}>"):
    """Search for tag in text[:limit] using pattern.

    Args:
        text (:obj:`str`):
            Text to search for pattern.
        tag (:obj:`str`):
            Tag name to use in pattern as 't'.
        limit (:obj:`int`, optional):
            Length to limit text to when searching for pattern.
            Defaults to: 4000.
        pattern (:obj:`str`, optional):
            Pattern to use when searching for tag.
            Defaults to: r'<{t}>(.*?)</{t}>'

    Notes:
        Given text is 4 GB and pattern is expected at top of text:

        * if limit is None and pattern not found: 131 seconds
        * if limit is None and pattern found: 0 seconds
        * if limit is 4000 and pattern not found: 0 seconds
        * if limit is 4000 and pattern found: 0 seconds

    Returns:
        :obj:`str`
    """
    pattern_txt = pattern.format(t=tag)
    pattern_re = re.compile(pattern_txt, re.IGNORECASE | re.DOTALL)
    text_limit = text[:limit]
    match = pattern_re.search(text_limit)
    return match.group(1) if match else ""
0a01c24c628af8b9f5f1fd1925bf5440f1d0d055
77,897
def create_dict_from_columns(array, idx_key, idx_value):
    """
    Create a dictionary to assign item to their cluster_id based on a DataFrame

    Args:
        array: numpy array, shape=(n_samples, n_features).
        idx_key: int, index of the column used as a key in the dictionary.
        idx_value: int, index of the column used as a value in the dictionary.

    Returns:
        dict, item index (as key) and cluster assignation (as value)

    To do:
        check type idx_X -> must be integer
    """
    return dict(zip(array[:, idx_key].astype(int), array[:, idx_value].astype(int)))
3ac52fc71e6b7ec962f2481f29451a01b0b785c5
77,899
from typing import Dict
from typing import Any


def generate_workflow_response(status_code=201, **kwargs) -> Dict[str, Any]:
    """
    Generate CATCH workflow response

    Args:
        status_code (int, optional): status code like HTTP code. Defaults to 201.
        **kwargs: additional fields to add to the returned response

    Returns:
        Dict[str, Any]: Generated response
    """
    headers = {"Content-Type": "application/json"}
    response = dict(status_code=status_code, headers=headers)
    response.update(kwargs)
    return response
431c764ac96f2d09bb8333cfa662f105d5b5c1bc
77,901
import torch


def semantic_segmentation_iou(score: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
    """
    Calculate IOU per class.

    IOU (Intersection Over Union) = true positive / (TP + FP + FN)
    IOU = # (predict == label == class_i) / (predict==class_i or label==class_i)

    Args:
        score: [N, C, H, W] tensor of per-class scores
        label: [N, H, W] tensor of ground-truth labels

    Returns:
        (torch.Tensor): [N, C] tensor
    """
    n = score.shape[0]
    num_classes = score.shape[1]
    _, prediction = score.detach().max(dim=1)
    prediction = prediction.view(n, -1)
    label = label.detach().view(n, -1)
    out = []
    for i in range(num_classes):
        TP = ((prediction == i) & (label == i)).float().sum(dim=1)          # [N]
        union = ((prediction == i) | (label == i)).float().sum(dim=1) + 1   # [N]
        out.append(TP / union)
    out = torch.stack(out, dim=1)
    return out
ed1efb44d9cd5f02f3ff1e5188fd24ecff7afaf4
77,910
import re
import itertools
import random


def generate_spintax(text, single=True):
    """Return a list of unique spins of a Spintax text string.

    Args:
        text (string): Spintax text (i.e. I am the {President|King|Ambassador} of Nigeria.)
        single (bool, optional): Optional boolean to return a list or a single spin.

    Returns:
        spins (string, list): Single spin or list of spins depending on single.
    """
    pattern = re.compile('({[^}]+}|[^{}]*)')
    chunks = pattern.split(text)

    def options(s):
        if len(s) > 0 and s[0] == '{':
            return [opt for opt in s[1:-1].split('|')]
        return [s]

    parts_list = [options(chunk) for chunk in chunks]
    spins = []
    for spin in itertools.product(*parts_list):
        spins.append(''.join(spin))
    if single:
        return spins[random.randint(0, len(spins) - 1)]
    else:
        return spins
a3635958fad90ace9592f3e32c4d0a2b3c29a152
77,912
def scale_minmax(X, min=0.0, max=1.0):
    """
    Minmax scaler for a numpy array

    PARAMS
    ======
        X (numpy array) - array to scale
        min (float) - minimum value of the scaling range (default: 0.0)
        max (float) - maximum value of the scaling range (default: 1.0)
    """
    X_std = (X - X.min()) / (X.max() - X.min())
    X_scaled = X_std * (max - min) + min
    return X_scaled
1aed3f5f854d973bd81b821d637c17f741fa895a
77,918
def validateForInteger(val):
    """Check if the parameter can be converted to type int"""
    try:
        val = int(val)
        return val
    except (TypeError, ValueError):
        # Not convertible to int
        return -1
16d52ed13ce3d002b08d40d69d30f8b2e87c69b9
77,920
def sort_objects_left_to_right(objs):
    """
    Put the objects in order from left to right.
    """
    return sorted(objs, key=lambda k: k['bbox'][0] + k['bbox'][2])
179569bd2a7e065d041bbe9a72dbd1d0baebd969
77,921
from typing import Dict


def default_credentials() -> Dict[str, str]:
    """
    Returns default credentials used for easier reuse across tests in the project.
    """
    return {
        "email": "test@mail.com",
        "password": "testme",
    }
5f8e8ca15d2b3ee4961bce8fe680ce165f904159
77,922
def auto_parameterize(nn_dist, snn_dist, smear=None):
    """
    Automatically calculate fermi parameters from crystal properties so that the midpoint
    and width of the smearing depend on the distance between first and second nearest neighbours.

    Args:
        nn_dist (float): Nearest neighbour distance.
        snn_dist (float): Second-nearest neighbour distance.
        smear (float): Fermi sigma value to use. (Default is twenty percent of the first
            and second nearest neighbour distance difference.)

    Returns:
        2-element tuple containing

        - (*float*) -- Distance for half contribution.
        - (*float*) -- Smearing width.
    """
    center = 0.5 * (nn_dist + snn_dist)
    if smear is None:
        # Set smearing to 20% of the 2NN-1NN distance
        percentage = 0.2
        smear = percentage * (snn_dist - nn_dist)
    return center, smear
6152abd1a87bbd7cb1d37455718dda2694270c35
77,923
from typing import Iterable
from typing import Tuple


def get_text_width_height(
    iterable: Iterable, font_size: float, target_axis: str = "y"
) -> Tuple[float, float]:
    """Estimate width and height required for a sequence of labels in a plot

    This is intended to be used for axis tick labels.

    Args:
        iterable: Sequence, series, array etc. of strings which will be used as axis labels
        font_size: font size used for the labels (e.g. tick label fontsize)
        target_axis: determines which dimension is width and which dimension is height
            for the labels. For 'x' rotation=90 is assumed.

    Returns:
        width, height required for the labels
    """
    height = font_size * 1 / 72 + 2 / 72
    max_text_length = max([len(s) for s in iterable])
    max_width = height * 0.6 * max_text_length
    if target_axis == "y":
        return max_width, height
    elif target_axis == "x":
        return height, max_width
    else:
        raise ValueError(f"Unknown target axis {target_axis}")
dd90d36bb7d23e34e6cafbe27902756580b78e54
77,924
from typing import Tuple


def get_annotator_id_and_date(procedure_id) -> Tuple[str, str]:
    """
    Get the unique annotator_id and annotation date parameter ids for a procedure.

    Parameters
    ----------
    procedure_id

    Returns
    -------
    annotator_id, date_of_annotation
    """
    map_ = {
        'IMPC_EOL': ('IMPC_EOL_052_001', 'IMPC_EOL_053_001'),
        'IMPC_EML': ('IMPC_EML_057_001', 'IMPC_EML_058_001'),
        'IMPC_EMO': ('IMPC_EMO_178_001', 'IMPC_EMO_179_001')
    }
    return map_[procedure_id[:8]]
f7896cdf9973519ae4ce8a4d33171a4e39f863bb
77,927
def get_kmers(text, k=1):
    """Return k-mers from text"""
    return [text[i:i + k] for i in range(len(text) - k + 1)]
5942ded2c2e89644fb29ed6fd5896518eeb3d951
77,928
import json


def get_pr_ref(event_path):
    """Returns the PR ref from |event_path|."""
    with open(event_path, encoding='utf-8') as file_handle:
        event = json.load(file_handle)
        return 'refs/pull/{0}/merge'.format(event['pull_request']['number'])
3d95ea8c2091734a6e98dba9f6d1f6e594cce65b
77,931
def adc_map(current_val, from_Low, from_High, to_Low=0, to_High=100):
    """
    Re-maps a number from one range to another. That is, a value of 'from_Low'
    would get mapped to 'to_Low', a value of 'from_High' to 'to_High', values
    in-between to values in-between, etc.

    Does not constrain values to within the range, because out-of-range values
    are sometimes intended and useful.

        y = adc_map(x, 1, 50, 50, 1)

    The function also handles negative numbers well, so that this example

        y = adc_map(x, 1, 50, 50, -100)

    is also valid and works well.

    Note: unlike the Arduino map() function this was adapted from, this Python
    version uses floating-point division, so fractional results are returned
    rather than truncated.

    Parameters
    ----------
    current_val: the number to map.
    from_Low: the lower bound of the value's current range.
    from_High: the upper bound of the value's current range.
    to_Low: the lower bound of the value's target range.
    to_High: the upper bound of the value's target range.

    Adapted from https://www.arduino.cc/reference/en/language/functions/math/map/
    """
    return (current_val - from_Low) * (to_High - to_Low) / (from_High - from_Low) + to_Low
a17eb6053620245a676b852a52160db5c1fe2600
77,934
import torch


def normalize_features(mx):
    """Row-normalize sparse matrix"""
    rowsum = mx.sum(1)
    r_inv = torch.pow(rowsum, -1).flatten()
    r_inv[torch.isinf(r_inv)] = 0.
    r_mat_inv = torch.diag(r_inv)
    mx = torch.mm(r_mat_inv, mx)
    return mx
043a76475b4e7e3762572ea730f90ca0945ccf0d
77,936
from datetime import datetime


def date_parsing(date_str: str) -> datetime:
    """Parse string of varying length to datetime object

    Args:
        date_str (str): Input date string

    Returns:
        datetime: Output datetime object
    """
    if len(date_str) == 4:
        return datetime(int(date_str), 1, 1)
    elif len(date_str) == 10:
        return datetime.strptime(date_str, "%Y-%m-%d")
    else:
        return datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
34810c762a0dd54511552dfbbb35e66ff93ccd1d
77,937
import re


def valid_token(word: str) -> bool:
    """Accept alphanumeric or strings of form A.B"""
    return (word.isalnum() or
            len(re.findall(r'[A-Za-z0-9]+(?:\.[A-Za-z0-9]+)+$', word)) > 0)
524ac12fd932a4d9b68d9493946af298303f7e74
77,939
def descendants_count(tree):
    """For every node, count its number of descendant leaves, and the number of leaves before it."""
    n = len(list(tree.nodes()))
    root = n - 1
    left = [0] * n
    desc = [0] * n
    leaf_idx = 0
    children = [list(tree.neighbors(node))[::-1] for node in range(n)]  # children remaining to process
    stack = [root]
    while len(stack) > 0:
        node = stack[-1]
        if len(children[node]) > 0:
            stack.append(children[node].pop())
        else:
            children_ = list(tree.neighbors(node))
            if len(children_) == 0:
                desc[node] = 1
                left[node] = leaf_idx
                leaf_idx += 1
            else:
                desc[node] = sum([desc[c] for c in children_])
                left[node] = left[children_[0]]
            assert node == stack.pop()
    return desc, left
3fdea98bf542df9700da08dcbb297f0e9eaca4ee
77,941
def rgb_from_index(i):
    """Map SAM palette index to RGB tuple"""
    intensities = [0x00, 0x24, 0x49, 0x6d, 0x92, 0xb6, 0xdb, 0xff]
    red = intensities[(i & 0x02) | ((i & 0x20) >> 3) | ((i & 0x08) >> 3)]
    green = intensities[((i & 0x04) >> 1) | ((i & 0x40) >> 4) | ((i & 0x08) >> 3)]
    blue = intensities[((i & 0x01) << 1) | ((i & 0x10) >> 2) | ((i & 0x08) >> 3)]
    return (red, green, blue)
f9f42cf440af9243ff3dfe3c400c95ec80187aee
77,942
import random
import string


def randomid(length):
    """ Get a random id """
    return ''.join(random.choice(string.hexdigits) for i in range(length))
b21a178da5f13c72672347d5b87031d8d50c06f6
77,943
def check_color(board: list) -> bool:
    """
    Checks if there are identical numbers in every color row in the board.
    Returns True if not, else False.

    >>> check_color(["**** ****","***1 ****","** 3****","* 4 1****",\
 " 9 5 "," 6 83 *","3 1 **"," 8 2***"," 2 ****"])
    True
    """
    starting_row = 0
    starting_column = 4
    for i in range(5):
        i = starting_row
        j = starting_column
        color_temp = []
        while i + j != 8:
            if board[i][j] != ' ':
                if board[i][j] in color_temp:
                    return False
                else:
                    color_temp.append(board[i][j])
            i += 1
        while i + j != 13:
            if board[i][j] != ' ':
                if board[i][j] in color_temp:
                    return False
                else:
                    color_temp.append(board[i][j])
            j += 1
        starting_row += 1
        starting_column -= 1
    return True
2510bd36e5bb9693efd0edeb49b7c6b771c45f50
77,946
def v8p(d, t):
    """
    Compute horizontal wind v at mesh cell centers a.k.a. p-points

    :param d: open NetCDF4 dataset
    :param t: number of timestep
    """
    v = d.variables['V'][t, :, :, :]
    return 0.5 * (v[:, 0:v.shape[1] - 1, :] + v[:, 1:, :])
d0d409ac08709590ea55044edcaaddb8c6693ba1
77,960
def to_sequence_id(sequence_name) -> int:
    """
    Turn an arbitrary value to a known sequence id.
    Will return an integer in range 0-11 for a valid sequence, or -1 for all other values.
    Should handle names as integers, floats, any numeric, or strings.

    :param sequence_name:
    :return:
    """
    try:
        sequence_id = int(sequence_name)
    except ValueError:
        sequence_id = -1
    if 0 <= sequence_id < 11:
        return sequence_id
    return -1
34831a0ad009f488819a0e5f5bc969a67861c9ac
77,961
def getellipseAttrib(treesSvgfile, id, attrib='@*', namespaces={'n': "http://www.w3.org/2000/svg"}):
    """
    :param treesSvgfile: lxml.etree.parse('.svg')
    :param id: ['id14K5_Q1_4-8']
    :param attrib: '@*', '@style', '@cx', '@cy', '@rx', '@ry'
    :param namespaces: {'n': "http://www.w3.org/2000/svg"}
    :return: list of attribute values
    """
    sttr = "//n:ellipse[@id='" + id + "']" + '/' + attrib
    ec = treesSvgfile.xpath(sttr, namespaces=namespaces)
    return ec
02921a1edcee86ed638dc5ebba1f8ac49d45097d
77,963
from datetime import datetime


def format_date(date):
    """Format a timestamp with the datetime module and return %Y-%m-%d."""
    human_date = datetime.fromtimestamp(date)
    formatted_date = human_date.strftime('%Y-%m-%d')
    return formatted_date
71d84cc52ddb5234c7d599f8b6470065ebd7c6dc
77,964
def first_word(title):
    """Find the first word in a title that isn't an article or preposition."""
    split_title = title.split()
    articles = ['a', 'an', 'the', 'some']
    prepositions = [
        'aboard', 'about', 'above', 'across', 'after', 'against', 'along', 'amid', 'among',
        'anti', 'around', 'as', 'at', 'before', 'behind', 'below', 'beneath', 'beside',
        'besides', 'between', 'beyond', 'but', 'by', 'concerning', 'considering', 'despite',
        'down', 'during', 'except', 'excepting', 'excluding', 'following', 'for', 'from',
        'in', 'inside', 'into', 'like', 'minus', 'near', 'of', 'off', 'on', 'onto',
        'opposite', 'outside', 'over', 'past', 'per', 'plus', 'regarding', 'round', 'save',
        'since', 'than', 'through', 'to', 'toward', 'towards', 'under', 'underneath',
        'unlike', 'until', 'up', 'upon', 'versus', 'via', 'with', 'within', 'without']
    for word in split_title:
        if word.lower() not in articles and word.lower() not in prepositions:
            # '-' need to be removed for bibtex
            return word.lower().replace('-', '')
cd5e2ba961553482932ffec8a095533ef3da5480
77,966
import re


def lreplace(string, old_prefix, new_prefix):
    """Replaces leading occurrences of `old_prefix` in the given string by `new_prefix`."""
    return re.sub(r'^(?:%s)+' % re.escape(old_prefix),
                  lambda m: new_prefix * (m.end() // len(old_prefix)),
                  string)
6130ac8cefba69eb572cf71e7e0374ca315a1213
77,969
import torch


def get_parent_tensor(parent_sample_dict, continuous_parent_names):
    """
    Args:
        parent_sample_dict: contains dictionary of pyro.sample()s for all parents
        continuous_parent_names: parent name in order

    Returns:
        tensor with all the parent pyro.sample() values. This will get used to
        predict child value from train_network.
    """
    continuous_sample_list = []
    for parent_name in continuous_parent_names:
        try:
            continuous_sample_list.append(parent_sample_dict[parent_name])
        except KeyError:
            raise Exception("Something went wrong in get_parent_tensor")

    # converting 1-d tensor to 2-d
    output_tensor = torch.FloatTensor(continuous_sample_list).view(len(continuous_sample_list), 1)
    return output_tensor
60b3865411ecb2932169600f451cfd6edf3a4723
77,974
from typing import List


def keep_unique_ordered(my_list: List) -> List:
    """
    Keeps only 1 element of each in the list, keeping the order

    :param my_list: List you want to parse
    :return: A list with only unique elements, keeping the order of the previous list
    """
    return [x for i, x in enumerate(my_list) if x not in my_list[:i]]
a41673f368ba7e99de1f1c2236cfa231f55b9411
77,981
def mdquote(s):
    """Turn a block of text into a Markdown blockquote section"""
    return '\n'.join(map(lambda x: ' ' + x, s.split('\n')))
9328bc9c81ec3aaca92fd94c199282d91841f6fb
77,986
def uniform(a, b, u):
    """Given u in [0,1], return a uniform number in [a,b]."""
    return a + (b - a) * u
ce0d7f5fd61b1c6dbdae1937cecffa8391961081
77,991
def check_member(self, user):
    """
    Check whether a user is in the circle

    :param (Circle) self: The checked circle
    :param (User) user: checked user
    :return (bool): whether the user is in the circle
    """
    return len(list(filter(lambda member: member.id == user.id, self.members))) != 0
68eed99c87793f41d87911eb0c4172ea01a5be07
77,992
import json


def forbidden(*args):
    """ Mock requests to Misfit with Forbidden error: 403 """
    json_content = {'error_code': 403, 'error_message': 'Forbidden'}
    return {'status_code': 403, 'content': json.dumps(json_content)}
408569862fb104c84958790f9b47e5caa85cc717
77,993
def CheckSum(data):
    """
    Calculate an ASTM checksum for the supplied data

    Arguments:
    data -- whatever you want to calculate a checksum for. If manually
        calculating checksums, you need to be mindful of whether or not you
        want to include the <2> (STX) that begins the frame. Sometimes this
        is expected, sometimes not.
    """
    Output = ''
    Counter = 0
    for char in data:
        Counter += ord(char)        # Sum up all the bytes in the data
    CheckValue = Counter % 256      # Checksum = Sum mod 256
    Output = '%X' % CheckValue
    if len(Output) == 1:
        Output = '0' + Output
    return Output
af145ce0d558c26841f510aa45e0461eb512956f
77,996
def get_registry_for_env(environment: str) -> str:
    """
    Mapping of container registry based on current environment

    Args:
        environment (str): Environment name

    Returns:
        str: Connect registry for the current environment
    """
    env_to_registry = {
        "prod": "registry.connect.redhat.com",
        "stage": "registry.connect.stage.redhat.com",
        "qa": "registry.connect.qa.redhat.com",
        "dev": "registry.connect.dev.redhat.com",
    }
    return env_to_registry[environment]
b2fec8efe5a39cfc0cc740d914653788e6e8804a
78,010
def get_comment_for_domain(domain):
    """Describe a domain name to produce a comment"""
    if domain.endswith((
            '.akamaiedge.net.',
            '.akamaized.net',
            '.edgekey.net.',
            '.static.akamaitechnologies.com.')):
        return 'Akamai CDN'
    if domain.endswith('.amazonaws.com.'):
        return 'Amazon AWS'
    if domain.endswith('.cdn.cloudflare.net.'):
        return 'Cloudflare CDN'
    if domain.endswith('.mail.gandi.net.') or domain == 'webmail.gandi.net.':
        return 'Gandi mail hosting'
    if domain == 'webredir.vip.gandi.net.':
        return 'Gandi web forwarding hosting'
    if domain == 'dkim.mcsv.net.':
        return 'Mailchimp mail sender'
    if domain.endswith('.azurewebsites.net.'):
        return 'Microsoft Azure hosting'
    if domain.endswith('.lync.com.'):
        return 'Microsoft Lync'
    if domain == 'clientconfig.microsoftonline-p.net.':
        # https://docs.microsoft.com/en-gb/office365/enterprise/external-domain-name-system-records
        return 'Microsoft Office 365 tenant'
    if domain.endswith(('.office.com.', '.office365.com.')):
        return 'Microsoft Office 365'
    if domain.endswith('.outlook.com.'):
        return 'Microsoft Outlook mail'
    if domain in ('redirect.ovh.net.', 'ssl0.ovh.net.'):
        return 'OVH mail provider'
    if domain.endswith('.hosting.ovh.net.'):
        return 'OVH shared web hosting'
    if domain.endswith('.rev.sfr.net.'):
        return 'SFR provider'
    return None
08521967333e8c255f57e33c01bfcd233eab2a40
78,015
def poly5(p):
    """
    Function which returns another function to evaluate a polynomial.
    """
    C5, C4, C3, C2, C1, C0 = p

    def rfunc(x):
        y = C5*x**5.0 + C4*x**4.0 + C3*x**3.0 + C2*x**2.0 + C1*x + C0
        return y

    return rfunc
1f4119e76e926eb7759928e6a196e71e69f63d35
78,016
def get_field_name(field_dict, field):
    """
    Return a nice field name for a particular field

    Parameters
    ----------
    field_dict : dict
        dictionary containing field metadata
    field : str
        name of the field

    Returns
    -------
    field_name : str
        the field name
    """
    if 'standard_name' in field_dict:
        field_name = field_dict['standard_name']
    elif 'long_name' in field_dict:
        field_name = field_dict['long_name']
    else:
        field_name = str(field)
    field_name = field_name.replace('_', ' ')
    field_name = field_name[0].upper() + field_name[1:]
    return field_name
e05c8d9f2eb635b9d4f7f36a652ff23ba186ae72
78,019
def get_tag_line(lines, revision, tag_prefixes):
    """Get the revision hash for the tag matching the given project revision in
    the given lines containing revision hashes. Uses the given array of tag
    prefix strings if provided. For example, given an array of tag prefixes
    ["checker-framework-", "checkers-"] and project revision "2.0.0", the tags
    named "checker-framework-2.0.0" and "checkers-2.0.0" are sought."""
    for line in lines:
        for prefix in tag_prefixes:
            full_tag = prefix + revision
            if line.startswith(full_tag):
                return line
    return None
5f9aef43386407bf897251df8dee62611317ed1a
78,022
def slice_from_axis(array, *, axis, element):
    """Take a single index slice from array using slicing.

    Equivalent to :func:`np.take`, but using slicing, which ensures that the
    output is a view of the original array.

    Parameters
    ----------
    array : NumPy or other array
        Input array to be sliced.
    axis : int
        The axis along which to slice.
    element : int
        The element along that axis to grab.

    Returns
    -------
    sliced : NumPy or other array
        The sliced output array, which has one less dimension than the input.
    """
    slices = [slice(None) for i in range(array.ndim)]
    slices[axis] = element
    return array[tuple(slices)]
c02a50f9dad028029dc692d2ece43c1633594937
78,024
import math


def signal_relative_power(frequencies, scale):
    """Calculates the relative power in dB for the signal, centered around [-1, 1]

    Source: https://stackoverflow.com/questions/2445756/how-can-i-calculate-audio-db-level

    Parameters
    :param frequencies: list[float] List containing the frequency energies to be used in the calculation
    :param scale: list[int] List containing the high and low range for the signal, i.e. signed 8-bit scale is 128
    :return: list[float] List containing the relative powers of the frequencies passed in the same order
    """
    relative_power = []
    for frequency in frequencies:
        norm_freq = frequency / scale
        relative_power.append(20 * math.log10(max(norm_freq) + 1e-7))
    return relative_power
651bfe1ef80608bcf52bc9dc729b59c4685126b5
78,027
def is_modified(path, commit):
    """
    Test whether a given file was present and modified in a specific commit.

    Parameters
    ----------
    path : str
        The path of a file to be checked.
    commit : git.Commit
        A git commit object.

    Returns
    -------
    bool
        Whether or not the given path is among the modified files
        and was not deleted in this commit.
    """
    try:
        d = commit.stats.files[path]
        if (d["insertions"] == 0) and (d["deletions"] == d["lines"]):
            # File was deleted in commit, so cannot be tested
            return False
        else:
            return True
    except KeyError:
        return False
4f12b314a6525d7c7832fc4ef37f9cf4d39ee555
78,029
def extract_class_label(filename):
    """
    arg:
        filename: string, e.g. 'images/001.Black_footed_Albatross/Black_footed_Albatross_0001_2950163169.jpg'
    return:
        A class label as integer, e.g. 1
    """
    _, class_dir, _ = filename.split("/")
    return int(class_dir.split(".")[0])
0e0593cf8bcf8846e33113deb5fd223299334559
78,034
def derivative_cross_entropy(model_output, target):
    """ Compute derivative of cross-entropy cost function"""
    return model_output - target
972f6ee74f8c6674fc72bf3a72a8723439a060fa
78,037
def min_power_rule(mod, g, tmp):
    """
    **Constraint Name**: GenVar_Min_Power_Constraint
    **Enforced Over**: GEN_VAR_OPR_TMPS

    Power provision minus downward services cannot be less than zero.
    """
    return (
        mod.GenVar_Provide_Power_MW[g, tmp]
        - mod.GenVar_Downwards_Reserves_MW[g, tmp]
        >= 0
    )
fc39cbe2349430f0e8c2ae86e8aa8978783f2bbb
78,040
def numberOfChar(stringList):
    """ Return the total number of characters in a list of strings, as an int """
    return sum(len(s) for s in stringList)
d76eae43c6360e77226b876ebaa8007625f8cb26
78,043
from typing import Dict
import inspect


def extract_call_params(call_args, function) -> Dict:
    """
    Combines a dictionary out of Mock.call_args.

    Helper useful to validate specific call parameters of the Mocked function.
    When you are not sure if your function is called with args or kwargs, you just
    feed the call_args and the source of function to this helper and receive a dictionary.

    .. _code-block:: python

        call_kwargs = extract_call_params(your_mock_object.some_method.call_args,
                                          mocked_module.Class.method)

    :param call_args: call_args of Mock object
    :param function: Source object that was initially mocked
    :rtype: dict
    """
    # Specification of arguments of function
    function_args = inspect.getfullargspec(function).args
    # Mock call_arguments as a tuple
    call_args, call_kwargs = call_args
    result = {}
    for i, v in enumerate(call_args):
        position = i + 1 if function_args[0] == 'self' else i
        result[function_args[position]] = v
    for k, v in call_kwargs.items():
        result[k] = v
    return result
2794d163a167e72ec12e982ce1209893719e65b7
78,047
import shlex


def solc_arguments(libraries=None, combined='bin,abi', optimize=True, extra_args=None):
    """ Build the arguments to call the solc binary. """
    args = [
        '--combined-json', combined,
        '--add-std'
    ]
    if optimize:
        args.append('--optimize')
    if extra_args:
        try:
            args.extend(shlex.split(extra_args))
        except BaseException:
            # if not a parseable string then treat it as a list
            args.extend(extra_args)
    if libraries is not None and len(libraries):
        addresses = [
            '{name}:{address}'.format(name=name, address=address.decode('utf8'))
            for name, address in libraries.items()
        ]
        args.extend([
            '--libraries',
            ','.join(addresses),
        ])
    return args
dd31a7ad43f83247803e0a056f3c5d9e7bcb0996
78,048
def line_format(data, length=75, indent=0):
    """Format the input to a max row length

    Parameters
    ----------
    data: list
        list of items that are being formatted
    length: int
        how long is the max row length
    indent: int
        how many whitespaces should each line start with

    Returns
    -------
    str
    """
    returnstring = ""
    row = "" + (" " * indent)
    for i in data:
        if len(row + i) > length or len(i) >= length:
            returnstring += row + i + "\n"
            row = "" + (" " * indent)
        else:
            row += i + " "
    returnstring += row + "\n"
    return returnstring.rstrip()
67dd9438ff96868bac5f9e3c4e7827bc718a8a36
78,049
def formatDate(date):
    """Formats a date correctly for the Drive API."""
    return date.isoformat('T') + '.0Z'
522ad5df0307f79ef86406ad6b6920038f0549f1
78,054
def rules_used_in_problem(problem):
    """Returns the number of times each rule is used in this problem.

    Args:
        problem: an lib.InferenceProblem.

    Returns:
        A dictionary where keys are rule names, and values are the number of
        times the corresponding rule appears in this problem.
    """
    counts = {}
    for problem_inference in problem.inferences + problem.contradictions:
        for step in problem_inference[1]:
            rule_name = step[2]
            if rule_name not in counts:
                counts[rule_name] = 0
            counts[rule_name] += 1
    return counts
40128f83b53d0f9813ee78d1c729822b0b52a3a3
78,056
from typing import OrderedDict


def enumerate_fields(_, publication):
    """Flattens all currently published fields.

    By default, publications are deeply nested dict structures. This can be very
    hard to read when rendered in certain outputs. PagerDuty is one example where
    the default UI does a very poor job rendering nested dicts.

    This publisher collapses deeply nested fields into a single-leveled dict with
    keys that correspond to the original path of each value in a deeply nested dict.

    For example:

    {
        "top1": {
            "mid1": "low",
            "mid2": [
                "low1",
                "low2",
                "low3"
            ],
            "mid3": {
                "low1": "verylow"
            }
        },
        "top2": "mid"
    }

    .. would collapse into the following structure:

    {
        "top1.mid1": "low",
        "top1.mid2[0]": "low1",
        "top1.mid2[1]": "low2",
        "top1.mid2[2]": "low3",
        "top1.mid3.low1": "verylow",
        "top2": "mid"
    }

    The output dict is an OrderedDict with keys sorted in alphabetical order.
    """
    def _recursive_enumerate_fields(structure, output_reference, path=''):
        if isinstance(structure, list):
            for index, item in enumerate(structure):
                _recursive_enumerate_fields(item, output_reference, '{}[{}]'.format(path, index))
        elif isinstance(structure, dict):
            for key in structure:
                _recursive_enumerate_fields(structure[key], output_reference, '{prefix}{key}'.format(
                    prefix='{}.'.format(path) if path else '',  # Omit first period
                    key=key
                ))
        else:
            output_reference[path] = structure

    output = {}
    _recursive_enumerate_fields(publication, output)
    return OrderedDict(sorted(output.items()))
2861fe8ef839527d3d130fcccacfcb1ed89c249b
78,062
def _make_set(value):
    """
    Converts range/set specification to a concrete set of numbers

        '[1-3]'         => {1, 2, 3}
        '{1,2,3}'       => {1, 2, 3}
        '{[1-3]}'       => {1, 2, 3}
        '{[1-3],[5-7]}' => {1, 2, 3, 5, 6, 7}
    """
    result = set()
    for vrange in value.strip('{} ').split(','):
        if '[' not in vrange:
            try:
                result.add(int(vrange))
            except ValueError:
                pass
        else:
            try:
                start, end = vrange.strip('[] ').split('-')
                result.update(range(int(start.strip()), int(end.strip()) + 1))
            except ValueError:
                pass
    return result
bf5e65cb0688647f8be55157cfee3ec7dcaf175e
78,064
def linspace(s, e, n):
    """Get n points that are equi-spaced between s and e"""
    l = e - s
    # Return a concrete list (rather than a lazy map object) so the points
    # can be indexed and reused; the values are unchanged.
    return [s + 1.0 * l * i / (n - 1) for i in range(0, n)]
e23aaeae300f4bcfe6b2c6891da33133f620842e
78,066
def is_websocket(headers):
    """
    Determine whether a given set of headers is asking for WebSockets.
    """
    # Default the Upgrade header to "" so a missing header does not raise.
    return ("Upgrade" in headers.get("Connection", "") and
            headers.get("Upgrade", "").lower() == "websocket")
15c52ca95133a485e057c9cc819bff0c0edd8f04
78,073
import re


def find_thing_in_tag(texte, what_we_are_looking_4: str):
    """
    This function returns the content of a chosen attribute (class, id, href...).

    :param texte: str. The full HTML tag.
    :param what_we_are_looking_4: An attribute.
    :return: The text content of the attribute.
    """
    texte = str(texte)
    if rep := re.match(f'(<.*?{what_we_are_looking_4}=")(.*?)(".*)', texte):
        return rep.group(2)
    return f"{what_we_are_looking_4} attribute not in {texte}"
07243c3eefe7cf4330966b1410b39ebf03abe3fc
78,075
import re


def crush_invalid_field_name(name):
    """
    Log Insight field names must start with an underscore or an alpha character.

    :param name: A proposed field name
    :return: Field name with invalid characters replaced by underscores (_),
        and runs of underscores collapsed to a single underscore
    """
    if name[0].isdigit():
        name = "_%s" % name
    name = re.sub(r'[^a-z0-9_]', "_", name.lower())
    return re.sub(r'__*', "_", name, flags=re.I)
4aa53f7f43da28b59d23302b1675128291ee32e9
78,076