Columns: content (string), sha1 (string), id (int64)
def convert_to_list(xml_value):
    """An expected list of references is None for zero values, a dict for one
    value, and a list for two or more values. Use this function to standardize
    to a list. If an individual item contains an "@ref" field, use that in the
    returned list; otherwise return the whole item.

    Args:
        xml_value (dict or list): value to transform to a list

    Returns:
        list: processed list of values from the original xml_value
    """
    if not xml_value:
        return []
    elif isinstance(xml_value, dict):
        return [xml_value.get('@ref', xml_value)]
    else:
        return [val.get('@ref', val) for val in xml_value]
17bb8e21e4247a14a37bb98643cfe9f847168c4c
83,420
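A quick usage sketch for convert_to_list above (hypothetical reference values):

print(convert_to_list(None))                         # -> []
print(convert_to_list({'@ref': 'r1'}))               # -> ['r1']
print(convert_to_list([{'@ref': 'r1'}, {'id': 2}]))  # -> ['r1', {'id': 2}]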
def get_temp_ids_from_collection(collection):
    """From a collection, make a set of the tempIds."""
    id_set = set()
    for entity in collection:
        if entity is not None:
            temp_id = entity.tempId
            id_set.add(temp_id)
    return id_set
7647a05a2fb64e9dc5529db4a8333421c69d0958
83,423
from typing import OrderedDict


def split_main_supp_tags(tags):
    """Splits the labels into those for the main manuscript and those for the
    supplementary material."""
    supp_tags = OrderedDict()
    main_tags = OrderedDict()
    for tag in tags:
        store = supp_tags if tag.startswith("sup") else main_tags
        store[tag] = tags[tag]
    return main_tags, supp_tags
25eff2f43e85fdeb67e179eca032790437e1e6ec
83,424
def normalize(adc_reading, adc_max=4095):
    """Normalize an ADC reading to a percentage.

    The ADC reads a value in the range [0, 4095] across the voltage range
    [0.0V, 1.0V].

    Args:
        adc_reading: value read from the ADC pin
        adc_max: maximum value allowed by the ADC

    Returns:
        A normalized (percentage) value of the ADC reading
    """
    return (100 * adc_reading) / adc_max
35e5efcbaa76c411221e54a6249deb7d10d8b516
83,426
def _same_for_all(kretser):
    """Check if the result is for the whole municipality."""
    for _, krets_data in kretser.items():
        if krets_data['krets'] == 'Hele kommunen':
            return True, krets_data
    return False, None
9c4617b73d941a7f7c3f2d626302e87f0c90555c
83,429
def get_min_units(sell_order, buy_order):
    """Get the minimum units between orders.

    :param sell_order: sell order
    :param buy_order: buy order
    :return: minimum units between orders
    """
    sell_units = sell_order['volume_remain']
    buy_units = buy_order['volume_remain']
    return min(sell_units, buy_units)
2b3cf7b41d911dbf128f4ff254a3c02480ffbb1b
83,431
def request_too_large(e):
    """Generates a valid ELG "failure" response if the request is too large."""
    return {
        'failure': {
            'errors': [
                {'code': 'elg.request.too.large', 'text': 'Request size too large'}
            ]
        }
    }, 400
eac64f3ab4316a59e23c60a0d29146de953a615d
83,439
from typing import Sequence, Tuple, List, Dict
import csv


def parse(raw: Sequence[str]) -> Tuple[List[str], List[List[str]], List[List[float]], Dict[str, int]]:
    """Returns a tuple of (hdrs, data, timeseries, column idx map)."""
    data = list(csv.reader(raw))
    print(len(data))
    print(len(data[0]))
    # check that it's actually a matrix
    print(set(len(r) for r in data))
    hdrs, data = data[0], data[1:]
    ts = [[float(x) for x in r[12:]] for r in data]
    col = {l: i for i, l in enumerate(hdrs)}
    return hdrs, data, ts, col
b420ca46d86670dc9f97fac35166ffa5c98fa1ab
83,440
def experiment_type(filename):
    """Get an experiment type from a file name.

    Assuming that filename is the path to a file named
    measurements_<experiment_type>.csv, get the experiment type.

    :param filename: The name of the file (it can be a full path).
    :raise: AssertionError if <filename> is not a string
    :return: A string representing the experiment type.
    """
    assert isinstance(filename, str)
    exp_type = filename.split('/')[-1].split('.')[-2].split('_')[1:-1]
    exp_type = '_'.join(exp_type)
    return exp_type
503cc5c3dccb83efdaf0ef9815d4b138fc0468e6
83,446
def _shorten(code_list):
    """Shortens a list of numeric nomis geo codes into a string format where
    contiguous values are represented as ranges, e.g.

        1,2,3,6,7,8,9,10 -> "1...3,6...10"

    which can drastically reduce the length of the query url.
    """
    # empty evals to False
    if not code_list:
        return ""
    if len(code_list) == 1:
        return str(code_list[0])

    code_list.sort()  # assume this is a modifying operation

    short_string = ""
    index0 = 0
    index1 = 0  # appease lint
    for index1 in range(1, len(code_list)):
        if code_list[index1] != (code_list[index1 - 1] + 1):
            if index0 == index1:
                short_string += str(code_list[index0]) + ","
            else:
                short_string += str(code_list[index0]) + "..." + str(code_list[index1 - 1]) + ","
            index0 = index1
    if index0 == index1:
        short_string += str(code_list[index0])
    else:
        short_string += str(code_list[index0]) + "..." + str(code_list[index1])
    return short_string
fca49f6bc25118e2d68805544572b4883d819214
83,449
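A usage sketch for _shorten above (hypothetical nomis geo codes), showing the range compression:

codes = [1, 2, 3, 6, 7, 8, 9, 10]
print(_shorten(codes))  # -> "1...3,6...10"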
def prepare_config_uri(config_uri: str) -> str:
    """Make sure a configuration uri has the prefix ws://.

    :param config_uri: Configuration uri, i.e.: websauna/conf/development.ini
    :return: Configuration uri with the prefix ws://.
    """
    if not config_uri.startswith('ws://'):
        config_uri = 'ws://{uri}'.format(uri=config_uri)
    return config_uri
6379422e29748d5977a4c9b5a5206ec57bb9135a
83,450
def check_possible(nums):
    """Checks if it's possible to split 4 bags of candies equally between
    Dawid's two friends."""
    nums.sort()
    smallest = nums[0]
    biggest = nums[3]
    # The only possibilities are for the biggest to be the sum of the others,
    # or for the biggest and the smallest together to equal the sum of the
    # remaining two.
    return (biggest == (nums[0] + nums[1] + nums[2])
            or (biggest + smallest) == (nums[1] + nums[2]))
31e4f5f25c4b70576fc2233952dacc4cb213db38
83,451
def strftime(value, format):
    """Apply a strftime format to a date value."""
    return value.strftime(format)
35fc41fc7c91105b631cc0aa427fadbd9b775ea0
83,456
import re


def removeNonDna(seq):
    """Replaces stretches of non-DNA characters with one 'N'."""
    return re.sub(r'[^ATGCatgc]+', 'N', seq).upper()
5437ac1b512e92dbd56d345f932483fd2022c466
83,457
def add_centroid(zones_shp):
    """Returns a DataFrame with centroid attributes from a zoning
    pandas.DataFrame."""
    zones = zones_shp.copy()
    zones['centroid_geometry'] = zones['geometry'].apply(lambda g: g.centroid)
    zones['centroid_coordinates'] = zones['geometry'].apply(lambda g: g.centroid.coords[0])
    zones['latitude'] = zones['geometry'].apply(lambda g: g.centroid.y)
    zones['longitude'] = zones['geometry'].apply(lambda g: g.centroid.x)
    return zones
6b50e7b9b05ee1d1c45eda4f1c2c25f160fee8f7
83,458
def filter_languages(project_list, languages_set):
    """Filter the project list to contain only the languages in the
    languages_set."""
    filtered_projects = []
    for project in project_list:
        if project["repository_language"] in languages_set:
            filtered_projects.append(project)
    return filtered_projects
8b9619957bde78d591b76f8416cb73e984d56dfc
83,460
import torch


def soft_thresholding(x, b, a=None):
    """Remap values between [-a, b] to 0; keep the rest linear."""
    if a is None:
        a = b
    return (torch.clamp(x - b, min=0) * (x > 0)
            + torch.clamp(x + a, max=0) * (x <= 0))
f7825191f561eb9e80e28da5add52487c542c86a
83,463
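A small usage sketch (assumed inputs) for soft_thresholding above, showing how values inside [-a, b] collapse to zero:

import torch
x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(soft_thresholding(x, b=1.0))  # -> tensor([-1., 0., 0., 0., 1.])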
def multi_level_get(the_dict, key, default=None):
    """Given the level of nested data contained in some of the results, this
    function performs an iterative get.

    :param the_dict: The multi-level dict to get the key from.
    :param key: The key to look for, with each level separated by '.'
    :param default: The default to return if the key is not found.
        (None if not supplied)
    :returns: The value of the key or default if key does not exist in the dict.
    """
    if not isinstance(the_dict, dict):
        return default
    lvls = key.split('.')
    here = the_dict
    for lvl in lvls[:-1]:
        if lvl in here:
            here = here[lvl]
        else:
            return default
        if not isinstance(here, dict):
            return default
    return here.get(lvls[-1], default)
e0e64c076d9f6b984a46f1e0255e6c91d6fa3510
83,466
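A usage sketch for multi_level_get above (hypothetical nested data):

record = {'user': {'address': {'city': 'Oslo'}}}
print(multi_level_get(record, 'user.address.city'))        # -> 'Oslo'
print(multi_level_get(record, 'user.phone', default='-'))  # -> '-'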
def map_gene_to_pos(x):
    """Helper function to infer the gene based on nucleotide position of the
    SARS-CoV-2 genome."""
    pos = x
    if pos <= 265:
        return '5UTR'
    elif pos > 265 and pos <= 13466:
        return 'ORF1a'
    elif pos > 13466 and pos <= 21555:
        return 'ORF1b'
    elif pos > 21562 and pos <= 25384:
        return 'S'
    elif pos > 25392 and pos <= 26220:
        return 'ORF3a'
    elif pos > 26244 and pos <= 26472:
        return 'E'
    elif pos > 26522 and pos <= 27191:
        return 'M'
    elif pos > 27201 and pos <= 27387:
        return 'ORF6'
    elif pos > 27393 and pos <= 27759:
        return 'ORF7a'
    elif pos > 27755 and pos <= 27887:
        return 'ORF7b'
    elif pos > 27893 and pos <= 28259:
        return 'ORF8'
    elif pos > 28273 and pos <= 29533:
        return 'N'
    elif pos > 29557 and pos <= 29674:
        return 'ORF10'
    elif pos > 29674:
        return '3UTR'
    return 'nan'
c36f26a38e203aabc0af0e636a3b5672056af5db
83,468
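A usage sketch for map_gene_to_pos above (example genome positions):

for pos in [100, 22000, 28500, 29700]:
    print(pos, map_gene_to_pos(pos))
# 100 5UTR / 22000 S / 28500 N / 29700 3UTR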
def count_vowels(s):
    """Returns the number of letters 'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I',
    'O', 'U' in a given string s."""
    letters = "aeiouAEIOU"
    count = 0
    for letter in s:
        if letter in letters:
            count += 1
    return count
0a4021738689874d4784ab4cdd6e0a2df9ec81a9
83,470
def prediction_error(y, y_hat):
    """Calculate prediction accuracy.

    Arguments:
        y -- np.array / pd.Series
            The original value of the binary dependent variable;
        y_hat -- np.array / pd.Series
            The predicted value of the binary dependent variable;

    Return:
        error -- float
    """
    error = float(sum(y != y_hat)) / len(y)
    return error
f4903744d475fa7e111b8d9eaf95f4dbbebd4a01
83,472
def get_ijk_list(m):
    """Form all possible (i, j, k) exponents up to maximum total angular
    momentum m."""
    l = []
    for a in range(1, m + 2):
        for b in range(1, a + 1):
            i = m + 1 - a
            j = a - b
            k = b - 1
            l.append([i, j, k])
    return l
8a80b695e9c6825a71fb09ffa387add1ccccb70b
83,474
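A usage sketch for get_ijk_list above; for m = 2 it enumerates the six Cartesian d-shell exponent triples:

print(get_ijk_list(2))
# -> [[2, 0, 0], [1, 1, 0], [1, 0, 1], [0, 2, 0], [0, 1, 1], [0, 0, 2]]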
def select_test_metrics(metrics, data):
    """Util function to check which subset of the provided test metrics is
    available in the current data dictionary."""
    found = False
    eval_metrics = []
    for metric in metrics:
        if metric in data.keys():
            eval_metrics.append(metric)
            found = True
    return found, eval_metrics
b417f082ca9c1a3c1e85c34c515bd798ddac0b37
83,480
import hashlib


def getFileSha256Hash(file: str) -> str:
    """Calculate the sha256 hash of a given file.

    :param file: The full path to the file
    :return: string representing the file hash in sha256
    """
    sha256Hash = hashlib.sha256()
    with open(file, "rb") as f:
        for byteBlock in iter(lambda: f.read(4096), b""):
            sha256Hash.update(byteBlock)
    return sha256Hash.hexdigest()
128ad186ba48fc9af3ed494b14b8cb9cddde7673
83,481
def ValueOrNone(message):
    """Return message if message is a proto with one or more fields set or None.

    If message is None or is the default proto, it returns None. In all other
    cases, it returns the message.

    Args:
        message: A generated proto message object.

    Returns:
        message if message is initialized or None
    """
    if message is None:
        return message
    default_object = message.__class__()
    return message if message != default_object else None
3c7264bc1060873c969d8cc355de1c90794cb50b
83,490
def convert_truthy_or_falsy_to_bool(x):
    """Convert truthy or falsy value in .ini to Python boolean."""
    if x in [True, "True", "true", "1"]:
        out = True
    elif x in [False, "False", "false", "0"]:
        out = False
    elif x in [None, "None", "none"]:
        out = None
    else:
        raise ValueError(
            f"Input '{x}' is neither truthy (True, true, 1) nor falsy "
            f"(False, false, 0)."
        )
    return out
875083962ae680bceb67a2637cfc9850cb913211
83,497
import functools
import time


def _rate_limit(func=None, per_second=1):
    """Limit number of requests made per second.

    Will sleep for the remainder of 1/``per_second`` seconds if the last
    request was made too recently.
    """
    if not func:
        return functools.partial(_rate_limit, per_second=per_second)

    @functools.wraps(func)
    def wrapper(self, url, *args, **kwargs):
        if self.last_request is not None:
            now = time.time()
            delta = now - self.last_request
            if delta < (1 / per_second):
                # Sleep only for the remainder of the per-request interval.
                # (The original slept `1 - delta`, which is only correct when
                # per_second == 1.)
                time.sleep((1 / per_second) - delta)
        self.last_request = time.time()
        return func(self, url, *args, **kwargs)
    return wrapper
019963de08ec2cc5b1a44f1554fbe6b5cde5be6f
83,499
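A usage sketch for _rate_limit above, with a hypothetical Client class (the decorator expects a last_request attribute on the instance):

class Client:
    last_request = None

    @_rate_limit(per_second=2)
    def fetch(self, url):
        return url  # stand-in for a real HTTP request

c = Client()
for _ in range(3):
    c.fetch("http://example.com")  # calls are spaced at least 0.5 s apart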
def query_int(pregunta, opciones):
    """Query the user for a number in a predefined interval.

    :param pregunta: Question that will be shown to the user
    :param opciones: Available choices
    """
    opcion = None
    while opcion not in opciones:
        opcion = input(pregunta + ' [' + str(opciones[0]) + '-' + str(opciones[-1]) + ']: ')
        try:
            opcion = int(opcion)
        except ValueError:
            # A non-numeric input raises ValueError from int().
            print('Introduzca un número')
            opcion = None
    return opcion
15f3ddafc47107538d8bbfa46792ee0472e87231
83,500
def IsTryJobResultAtRevisionValid(result, revision):
    """Determines whether a try job's results are sufficient to be used.

    Args:
        result (dict): A dict expected to be in the format
            {
                'report': {
                    'result': {
                        'revision': (dict)
                    }
                }
            }
        revision (str): The revision to ensure is in the result dict.
    """
    return result and revision in result.get('report', {}).get('result', {})
1515c82d2673139fb0fec01e662ca62057f715de
83,503
def add(num1: int, num2: int) -> int:
    """add is a function used to give the addition of two inputted numbers.

    Args:
        num1 (int): first number
        num2 (int): second number

    Returns:
        int: addition of num1 and num2
    """
    result = num1 + num2
    return result
065c3af1066f68ad92f89dd8714237283d9f6096
83,506
def dataCleaning(df):
    """This function takes the dataframe as an argument.

    1) Filtering missing and meaningless values (e.g. Don't know, refused,
       etc.) by using the loc technique.
    2) Replacing feature values with their actual meaning by using
       dictionaries.

    :param df: dataframe that will be used to carry out the data cleaning
    :return: cleaned (without NAs, meaningless values, renamed values) dataframe
    """
    df = df.loc[(df['Overweight_Obese'] != 9) &
                (df['Education_Level'] != 9) &
                (df['Income_Categories'] != 9) &
                (df['Exercise'] != 7) & (df['Exercise'] != 9) &
                (df['Age_Categories'] != 14) &
                (df['Gender'] != 7) & (df['Gender'] != 9) &
                (df['Alcohol_Consumption'] != 7) &
                (df['Alcohol_Consumption'] != 9)]

    # =========================================================================
    # Creating a copy of the BRFSS dataframe while removing all the missing
    # values (NaNs) to ensure that the modifications to the data will not be
    # reflected in the original dataframe
    # =========================================================================
    df = df.dropna().copy()

    gender_dict = {1: 'Male', 2: 'Female'}
    for k1, v1 in gender_dict.items():
        df.Gender.replace(k1, v1, inplace=True)

    overweight_dict = {1: 'No', 2: 'Yes'}
    for k2, v2 in overweight_dict.items():
        df.Overweight_Obese.replace(k2, v2, inplace=True)

    education_dict = {1: 'No_HighSchool_Graduate', 2: 'HighSchool_Graduate',
                      3: 'Attended_College', 4: 'College_Graduate'}
    for k3, v3 in education_dict.items():
        df.Education_Level.replace(k3, v3, inplace=True)

    income_dict = {1: '<$15,000', 2: '$15,000_to_<$25,000',
                   3: '$25,000_to_<$35,000', 4: '$35,000_to_<$50,000',
                   5: '$50,000>='}
    for k4, v4 in income_dict.items():
        df.Income_Categories.replace(k4, v4, inplace=True)

    exercise_dict = {1: 'Yes', 2: 'No'}
    for k5, v5 in exercise_dict.items():
        df.Exercise.replace(k5, v5, inplace=True)

    age_dict = {1: '18-24', 2: '25-29', 3: '30-34', 4: '35-39', 5: '40-44',
                6: '45-49', 7: '50-54', 8: '55-59', 9: '60-64', 10: '65-69',
                11: '70-74', 12: '75-79', 13: '80>='}
    for k6, v6 in age_dict.items():
        df.Age_Categories.replace(k6, v6, inplace=True)

    alcohol_dict = {1: 'Yes', 2: 'No'}
    for k7, v7 in alcohol_dict.items():
        df.Alcohol_Consumption.replace(k7, v7, inplace=True)

    arthritis_dict = {1: 'Diagnosed', 2: 'Not_Diagnosed'}
    for k8, v8 in arthritis_dict.items():
        df.Arthritis.replace(k8, v8, inplace=True)

    return df
ae00f1f55445d743fa5ef28fad8d8f1dd88cfa35
83,512
import logging
from datetime import datetime


def TimedCommand(functor, *args, **kwargs):
    """Wrapper for simple log timing of other python functions.

    If you want to log info about how long it took to run an arbitrary
    command, you would do something like:
        TimedCommand(RunCommand, ['wget', 'http://foo'])

    Args:
        functor: The function to run.
        args: The args to pass to the function.
        kwargs: Optional args to pass to the function.
        timed_log_level: The log level to use (defaults to logging.INFO).
        timed_log_msg: The message to log after the command completes. It may
            have keywords: "name" (the function name), "args" (the args passed
            to the func), "kwargs" (the kwargs passed to the func), "ret" (the
            return value from the func), and "delta" (the timing delta).
        timed_log_callback: Function to call upon completion (instead of
            logging). Will be passed (log_level, log_msg, result,
            datetime.timedelta).
    """
    log_msg = kwargs.pop(
        'timed_log_msg',
        '%(name)s(*%(args)r, **%(kwargs)r)=%(ret)s took: %(delta)s')
    log_level = kwargs.pop('timed_log_level', logging.INFO)
    log_callback = kwargs.pop('timed_log_callback', None)

    start = datetime.now()
    ret = functor(*args, **kwargs)
    delta = datetime.now() - start

    log_msg %= {
        'name': getattr(functor, '__name__', repr(functor)),
        'args': args,
        'kwargs': kwargs,
        'ret': ret,
        'delta': delta,
    }
    if log_callback is None:
        logging.log(log_level, log_msg)
    else:
        log_callback(log_level, log_msg, ret, delta)
    return ret
36e86cf15657b258c2a085afe2c75eba90d21232
83,519
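A usage sketch for TimedCommand above, timing a hypothetical download function:

import logging
logging.basicConfig(level=logging.INFO)

def download(url, retries=3):  # hypothetical stand-in for real work
    return "ok"

TimedCommand(download, "http://foo", retries=1,
             timed_log_msg="%(name)s finished in %(delta)s")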
import torch


def nanmean(v: torch.Tensor, *args, **kwargs) -> torch.Tensor:
    """
    https://github.com/pytorch/pytorch/issues/21987#issuecomment-539402619

    :param v: A tensor
    :param args: Arguments one might pass to `mean` like `dim`.
    :param kwargs: Arguments one might pass to `mean` like `dim`.
    :return: The mean, excluding nans.
    """
    v = v.clone()
    is_nan = torch.isnan(v)
    v[is_nan] = 0
    return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs)
7c73b178f134e1afd5e0871d8853b9500c7c16bd
83,526
def crop(img, box=None):
    """Cut a rectangular region from this image. The box is a 4-tuple defining
    the left, upper, right, and lower pixel coordinate."""
    return img.crop(box)
316d03b6814b7596987c5ab7935c8501c28dd404
83,527
import io
import base64


def _to_base64(image_file: str) -> str:
    """Convert the PNG image to its base64 representation."""
    assert image_file.endswith(".png")
    with io.open(image_file, "rb") as f:
        image_content = base64.b64encode(f.read()).decode("utf-8")
    return "data:image/png;base64," + image_content
fbe46e1de7353b0e2968e0bfc093a1e4d2b0aabf
83,529
def signal_lf_hf_ratio(lf, hf):
    """Computes the ratio between the low and high frequency components of a
    signal.

    Parameters
    :param lf: scalar
        Summation of low frequency energies in a signal
    :param hf: scalar
        Summation of high frequency energies in a signal
    :return: scalar
        Ratio of the low to high frequency energies in a signal
    """
    return lf / hf
95f9033d964210ffc98716b6cb64ec0bb30d0e8b
83,532
def count_pairs(arr, target):
    """Counts the number of ordered pairs of integers in arr that sum to
    target."""
    comp_count = {}  # Counts the number of times each complement occurs in arr.
    for num in arr:
        comp = target - num
        comp_count[comp] = comp_count.get(comp, 0) + 1
    return sum(comp_count.get(num, 0) for num in arr)
e3a0f535cd9474f1766f483aa3c22e954ac79b77
83,533
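A usage sketch for count_pairs above (hypothetical values):

print(count_pairs([1, 2, 3, 4], 5))  # -> 4, i.e. (1,4), (4,1), (2,3), (3,2)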
from io import StringIO
import csv


def _csv_str_from_list_of_rows(ll):
    """Make a csv string from a list of lists.

    :param ll: list of lists (representing rows and cells)
    """
    csv_str = StringIO()
    writer = csv.writer(csv_str)
    writer.writerows(ll)
    return csv_str.getvalue()
1254cc9f78b01f103686532a8c02dc522b9c40b2
83,535
def hamming_distance(p, q):
    """Calculate the HammingDistance for two strings.

    We say that position i in k-mers p and q is a mismatch if the symbols at
    position i of the two strings are not the same. The total number of
    mismatches between strings p and q is called the Hamming distance between
    these strings. We will let you implement a function to compute this
    distance, called HammingDistance(p, q).

    Args:
        p (str): the first DNA string.
        q (str): the second DNA string, p and q of equal length.

    Returns:
        Integer, number of different base count between p and q, i.e. the
        Hamming distance between these strings.

    Examples:
        Solving the Hamming distance of two DNA Genomes.

        >>> p = 'GGGCCGTTGGT'
        >>> q = 'GGACCGTTGAC'
        >>> hammingdistance = hamming_distance(p, q)
        >>> hammingdistance
        3
    """
    count = 0
    for i in range(len(p)):
        if p[i] != q[i]:
            count += 1
    return count
de3b165f478d0bcef8200314055cafaa58f93d82
83,537
def encode_splits(data):
    """Encode splits as integers and return the array."""
    lookup = {'train': 0, 'val': 1, 'test': 2}
    return [lookup[datum['split']] for datum in data]
0569a2e2b62b21e6ac38746a9aa763cd7ec3ce95
83,539
def get_wait_time(retry_num):
    """Compute time for exponential backoff.

    Args:
        retry_num (int): Retry attempt number to determine wait time.

    Returns:
        (int): Amount of time to wait.
    """
    return 2 ** (retry_num + 3)
9539aec83eab77b42b445e861b3afa59c9d19286
83,543
def parse_dimension(line):
    """Parse a dimension line from CDL."""
    line = line.replace('=', ' ')
    words = line.split()
    name = words[0]
    if words[1].upper() == "UNLIMITED":
        length = 0
        isUnlimited = True
    else:
        length = int(words[1])
        isUnlimited = False
    return name, length, isUnlimited
18c37675f7067a0f686807f09e032d52fb92670d
83,544
from typing import List, Tuple


def go_to_next_student(
    student_names: List[str], individual_seconds: int, max_individual_seconds: int
) -> Tuple[List[str], int, int]:
    """Rotates the queue forwards.

    Parameters
    ----------
    student_names : List[str]
        The list of student names.
    individual_seconds : int
        The number of seconds remaining for the individual meeting.
    max_individual_seconds : int
        The maximum individual meeting duration in seconds.

    Returns
    -------
    List[str]
        The list of student names.
    int
        The number of seconds for the next individual meeting.
    int
        The number of seconds that remained in the previous individual meeting.
    """
    student_names.append(student_names.pop(0))
    previous_individual_seconds = individual_seconds
    if student_names[0].endswith("-minute break"):
        individual_seconds = int(student_names[0].split("-")[0]) * 60
    else:
        individual_seconds = max_individual_seconds
    return student_names, individual_seconds, previous_individual_seconds
c308c7cc20dbab369c288c746b9947b78d3f5cd3
83,557
from typing import Dict


def calc_post_depths_from_thread_structure(thread_structure: Dict) \
        -> Dict[str, int]:
    """Calculates the nested depth of each post in a thread.

    We determine post depth from the provided `structure.json` files in the
    dataset because this is easier than following the chain of a post's
    parents to the source post of a thread.

    Args:
        thread_structure: The parsed JSON dict from one of the dataset's
            `structure.json` files.

    Returns:
        A dictionary mapping post IDs to their nested depth. The source post
        of a thread always has depth `0`, first level replies `1`, etc.

    Example:
        If the `thread_structure` would look like the following::

            {
                'foo': {
                    'bar': [],
                    'baz': {
                        'boogy': []
                    },
                    'qux': []
                }
            }

        The parsed post depths would be::

            {
                'foo': 0,
                'bar': 1,
                'baz': 1,
                'boogy': 2,
                'qux': 1
            }
    """
    post_depths = {}

    def walk(thread: Dict, depth: int) -> None:
        for post_id, subthread in thread.items():
            post_depths[post_id] = depth
            if isinstance(subthread, dict):
                walk(subthread, depth + 1)

    walk(thread_structure, 0)
    return post_depths
11687977dfa107b9b94d8c097e750825e98a479e
83,558
import pytz


def to_utc(dt):
    """Convert a datetime to the UTC timezone, assuming it is in local time.

    :param dt: datetime obj
    :return: datetime obj
    """
    dt_local = dt.astimezone()
    return dt_local.astimezone(pytz.utc)
70c61e1dca71034651b6ebc90f3475997db14ba4
83,561
def _get_var_units(obsspace, variable):
    """Grabs units attribute from the requested variable."""
    var = obsspace.Variable(variable)
    units = var.read_attr('units')
    return units
a0853b34630ee42548210cf761a03ac5e0439aef
83,564
import re


def _equals(a, b, match=False):
    """True if a equals b.

    If match is True, b is treated as a regex pattern and regex matching is
    applied (only when a is a string).
    """
    if match:
        return re.match(b, a) is not None if isinstance(a, str) else False
    else:
        return a == b
114583df0acba8ac53fe89da3c96398e3a37bbb0
83,566
def _repeated_proto3_field_to_list(field):
    """Convert a proto3 repeated field to list.

    Repeated fields are represented as an object that acts like a Python
    sequence. It cannot be assigned to a list type variable. Therefore, doing
    the conversion manually.
    """
    result = []
    for item in field:
        result.append(item)
    return result
424ef2a64d2878a2dce547bcb22c6f265d417aea
83,573
import re


def initial_quotes(text):
    """Wraps initial quotes in ``class="dquo"`` for double quotes or
    ``class="quo"`` for single quotes. Works in these block tags
    ``(h1-h6, p, li, dt, dd)`` and also accounts for potential opening inline
    elements ``a, em, strong, span, b, i``.

    >>> initial_quotes('"With primes"')
    u'<span class="dquo">"</span>With primes"'
    >>> initial_quotes("'With single primes'")
    u'<span class="quo">\\'</span>With single primes\\''
    >>> initial_quotes('<a href="#">"With primes and a link"</a>')
    u'<a href="#"><span class="dquo">"</span>With primes and a link"</a>'
    >>> initial_quotes('&#8220;With smartypanted quotes&#8221;')
    u'<span class="dquo">&#8220;</span>With smartypanted quotes&#8221;'
    """
    quote_finder = re.compile(r"""
        ((<(p|h[1-6]|li|dt|dd)[^>]*>|^)       # start with an opening p, h1-6, li, dd, dt or the start of the string
        \s*                                   # optional white space!
        (<(a|em|span|strong|i|b)[^>]*>\s*)*)  # optional opening inline tags, with more optional white space for each.
        (("|&ldquo;|&\#8220;)|('|&lsquo;|&\#8216;))  # Find me a quote! (only need to find the left quotes and the primes)
                                              # double quotes are in group 7, singles in group 8
        """, re.VERBOSE)

    def _quote_wrapper(matchobj):
        if matchobj.group(7):
            classname = "dquo"
            quote = matchobj.group(7)
        else:
            classname = "quo"
            quote = matchobj.group(8)
        return """%s<span class="%s">%s</span>""" % (matchobj.group(1), classname, quote)

    output = quote_finder.sub(_quote_wrapper, text)
    return output
74fedd7ae81ee6f784e50cf5c36c3d43a6e40651
83,574
from typing import Dict, List


def extract_based_on_ids(dataset: Dict, id_file: str) -> List[str]:
    """Extract ids from dataset files.

    Args:
        dataset (Dict): dict of dataset
        id_file (str): id file of the dataset

    Returns:
        List[str]: return list of dataset entries
    """
    lines = []
    with open(id_file) as f:
        for line in f:
            id = line.strip()
            try:
                lines.append(dataset[id])
            except KeyError:
                # A missing dict key raises KeyError, not ValueError.
                print(f"Error: Invalid ID {id} in {id_file}")
    return lines
44388b89af7d1653155e2445889bd5af23ce0923
83,580
def unit_label(quantity):
    """Small function to return a string label: the unit alone when the value
    is 1.0, or the full quantity otherwise."""
    if quantity.value == 1.0:
        return str(quantity.unit)
    return str(quantity)
36d6225a5b5fdd67ff1a4ab05121e18eae7e2b18
83,582
def get_coordinates_array(xyz):
    """Given an xyz string, return a natom*3 array that contains the
    coordinates of the atoms."""
    lines = xyz.splitlines()
    n = int(lines[0])
    coords = [0.] * (n * 3)
    i = 0
    for line in lines[2:2 + n]:
        coords[i:i + 3] = [float(c) for c in line.split()[1:4]]
        i += 3
    return coords
32677a268512a2cbc2047c1d2486cc3a4e399787
83,583
def _interactions_to_list_of_lists(interactions):
    """Transforms interactions dataframe user, item to user, [item1, item2]
    dataframe.

    :param interactions: Interactions dataframe
    :return: Interactions dataframe format user, [item1, item2]
    """
    interactions = interactions.sort_values(by="timestamp")
    return interactions.groupby("user")["item"].apply(list)
55aaad623b217ff82dcb2ff7b83089506aafd374
83,584
def get_hex(input_list):
    """Convert a list of bytes into a hex string."""
    o = ""
    for i in input_list:
        o += "%02X" % ord(i)
    return o
3b47cc71326a625fe4a7b67cf7522bd029659c40
83,587
def length_comparator(value):
    """Return value length."""
    return len(value)
1b674b686481ca286c86f826620e01554e061e02
83,588
def _find_matching_node(parent, request):
    """Try to find a matching child node based on a test request.

    Node search order is as follows:
      - Node matching the exact test name
      - Node matching the class name + test name (minus 'test_' prefix for
        function name)
      - Node matching the class name
    """
    test_name = request.node.name
    # First try to find a node with the exact test name.
    names = [test_name]
    if request.cls is not None:
        cls_name = request.cls.__name__.lower()
        test_name = test_name[5:]
        # Look for a node with the class name + test name (minus test_ from
        # the function name)
        names.append("{}_{}".format(cls_name, test_name))
        # Finally try to find a node with the class name.
        names.append(cls_name)
    node = None
    for name in names:
        node = parent.node(name)
        if node is not None:
            break
    return node
3f8486e0772fc3c99deead1ab5fc90117beb938c
83,589
import torch


def centers_to_extents(boxes):
    """Convert boxes from [xc, yc, w, h] format to [x0, y0, x1, y1] format.

    Input:
    - boxes: Input boxes of shape (N, 4) in [xc, yc, w, h] format

    Returns:
    - boxes: Output boxes of shape (N, 4) in [x0, y0, x1, y1] format
    """
    xc, yc = boxes[:, 0], boxes[:, 1]
    w, h = boxes[:, 2], boxes[:, 3]
    x0 = xc - w / 2
    x1 = x0 + w
    y0 = yc - h / 2
    y1 = y0 + h
    boxes_out = torch.stack([x0, y0, x1, y1], dim=1)
    return boxes_out
ec2bb3fffdd71f80f652e1018beea21ca0777a7c
83,591
def _simple_init(parent, num):
    """Creates a list of parent copies."""
    return [parent.copy() for _ in range(num)]
cee30dd6878131687adaf19fa588ebab50b849f0
83,592
def get_netbox_docker_version_tag(netbox_version):
    """Get the repo tag to build netbox-docker in from the requested netbox version.

    Args:
        netbox_version (version.Version): The version of netbox we want to build

    Returns:
        str: The release tag for the netbox-docker repo that should be able to
            build the requested version of netbox.
    """
    major, minor = netbox_version.major, netbox_version.minor
    if (major, minor) == (2, 10):
        tag = "1.0.1"
    elif (major, minor) == (2, 9):
        tag = "0.26.2"
    elif (major, minor) == (2, 8):
        tag = "0.24.1"
    elif (major, minor) == (2, 7):
        tag = "0.24.0"
    else:
        raise NotImplementedError(
            "Version %s is not currently supported" % netbox_version
        )
    return tag
046e9f9992270f30c389242de267e6a1c0d56dad
83,593
def __add_left_0(stri: str, target_length: int) -> str:
    """Adds left 0s to the current string until the target length is reached.

    Parameters
    ----------------
    stri
        String
    target_length
        Target length

    Returns
    ----------------
    stri
        Revised string
    """
    while len(stri) < target_length:
        stri = "0" + stri
    return stri
0dad516d9eee40c206600616485a1e840377ccf1
83,594
def modeIsWriting(mode):
    """Determine whether a file mode will permit writing."""
    m = mode.lower()
    return m not in ("r", "rb", "ru", "rub")
9b6a9a54862d9d7531a9405e65329ba507f4f3eb
83,595
def indent(s, n_spaces=2, initial=True):
    """Indent all new lines.

    Args:
        n_spaces: number of spaces to use for indentation
        initial: whether or not to start with an indent
    """
    i = ' ' * n_spaces
    t = s.replace('\n', '\n%s' % i)
    if initial:
        t = i + t
    return t
8609f0e5c63aae73a5886c39c9516b5bb85718c8
83,598
def transition(old, new, jugs):
    """Returns a string explaining the transition from old state/node to new
    state/node.

    old: a list representing old state/node
    new: a list representing new state/node
    jugs: a list of two integers representing volumes of the jugs
    """
    a = old[0]
    b = old[1]
    a_prime = new[0]
    b_prime = new[1]
    a_max = jugs[0]
    b_max = jugs[1]
    if a > a_prime:
        if b == b_prime:
            return "Clear {0}-liter jug:\t\t\t".format(a_max)
        else:
            return "Pour {0}-liter jug into {1}-liter jug:\t".format(a_max, b_max)
    else:
        if b > b_prime:
            if a == a_prime:
                return "Clear {0}-liter jug:\t\t\t".format(b_max)
            else:
                return "Pour {0}-liter jug into {1}-liter jug:\t".format(b_max, a_max)
        else:
            if a == a_prime:
                return "Fill {0}-liter jug:\t\t\t".format(b_max)
            else:
                return "Fill {0}-liter jug:\t\t\t".format(a_max)
14adbe0ac18a3983016b7b39ef2dc42ef0084d2d
83,605
def build_command_line_parameter(name, value):
    """Some parameters are passed as command line arguments. In order to be
    able to recognize them they are passed following the expression below.

    Note that strings are always encoded to base64, so it is guaranteed that
    we will always have exactly two underscores in the parameter.

    :param name: Name of the parameter
    :param value: Value of the parameter
    :return: *INLINE_name_value. Example: variable y equals 3 => *INLINE_y_3
    """
    return '*INLINE_%s_%s' % (name, str(value))
7174811a70b4a3fa1cd581d4debe7e271063d417
83,610
def get_bottomup_nodes(tree, parameters=None):
    """Gets the nodes of a tree in a bottomup order (leafs come first, the
    master node comes after).

    Parameters
    --------------
    tree
        Process tree
    parameters
        Parameters of the algorithm

    Returns
    -------------
    bottomup_nodes
        Nodes of the tree in a bottomup order
    """
    if parameters is None:
        parameters = {}
    to_visit = [tree]
    all_nodes = set()
    while len(to_visit) > 0:
        n = to_visit.pop(0)
        all_nodes.add(n)
        for child in n.children:
            to_visit.append(child)
    # starts to visit the tree from the leafs
    bottomup = [x for x in all_nodes if len(x.children) == 0]
    # then add iteratively the parent
    i = 0
    while i < len(bottomup):
        parent = bottomup[i].parent
        if parent is not None and parent not in bottomup:
            is_ok = True
            for child in parent.children:
                if child not in bottomup:
                    is_ok = False
                    break
            if is_ok:
                bottomup.append(parent)
        i = i + 1
    return bottomup
ffbcb958fefe948846c94adc7aeb5fa78c63a714
83,611
def clean_doc_str(doc, overview=False):
    """Transform a Python doc string into a Markdown string.

    First, it finds the indentation of the whole block. It's assumed that
    everything in the block shares the same basic indentation. Any line that
    starts with ":" is considered to be Python meta-information, and is
    stripped out. If the line following the ":" has the same indentation and
    is not whitespace, it too is stripped out.

    For markdown support, lines with the same indentation immediately after
    each other are considered to be the same line, and are joined together.
    """
    doc = doc or ''
    ret = ''
    current = ''
    base_indent = 0
    prev_indent = 0
    strip_prev = False

    def get_indent(val: str) -> int:
        ind = 0
        while ind < len(val) and val[ind].isspace():
            ind += 1
        if ind >= len(val):
            return 0
        return ind

    for o_line in doc.splitlines():
        line = o_line.rstrip()
        curr_indent = get_indent(line)
        # Check for Python meta-data
        if line and line.lstrip()[0] == ':':
            prev_indent = curr_indent
            strip_prev = True
            continue
        if curr_indent == prev_indent and strip_prev:
            continue
        if not ret and line:
            # First non-empty line.
            base_indent = get_indent(line)
            prev_indent = base_indent
            current += ' ' + line[base_indent:]
            continue
        if not line:
            # Empty line
            if current:
                if ret and overview:
                    return ret.lstrip()
                ret += '\n\n' + current.lstrip()
                current = ''
            # leave the previous indent the same.
            continue
        # Note a single newline, not space. This is important.
        current += '\n' + line[base_indent:].lstrip()
    if current:
        ret += '\n\n' + current
    if not ret:
        ret = '(no documentation provided)'
    return ret.lstrip()
ed8d6e4ffd1be9384a2998e4eb3e806583e85a27
83,613
def foundation_profile(x, delta_f, L_o):
    """Return the idealised imperfection profile [m] - JIP.

    :param float x: Distance along profile [m]
    :param float delta_f: Imperfection height [m]
    :param float L_o: Natural wavelength [m]
    """
    return delta_f * (x / L_o) ** 3 * (4 - 3 * x / L_o)
891ac3307d84ffdf466a48c037e47285845d0606
83,614
def get_priority(filter_item):
    """Internal worker function to return the frame-filter's priority from a
    frame filter object.

    This is a fail free function as it is used in sorting and filtering. If a
    badly implemented frame filter does not implement the priority attribute,
    return zero (otherwise sorting/filtering will fail and prevent other
    frame filters from executing).

    Arguments:
        filter_item: An object conforming to the frame filter interface.

    Returns:
        The priority of the frame filter from the "priority" attribute, or
        zero.
    """
    # Do not fail here, as the sort will fail. If a filter has not
    # (incorrectly) set a priority, set it to zero.
    return getattr(filter_item, "priority", 0)
4b23c4d0b247f2f91845ddd5e1911208f3916c32
83,615
def get_box_center(boxes):
    """Get box center coordinates.

    Args:
        boxes: Bounding boxes.

    Returns:
        Center coordinates.
    """
    center_x = (boxes[:, 0] + boxes[:, 2]) / 2.0
    center_y = (boxes[:, 1] + boxes[:, 3]) / 2.0
    return center_x, center_y
d39679f61393f0215f69470dd9779c1c708a158a
83,616
import torch


def maybe_cat(tensors, dim, nullable=None):
    """Like torch.cat, but skips elements in `tensors` which are None.

    Args:
        tensors: List of tensors (compare torch.cat())
        dim: Dimension along which to concatenate (compare to torch.cat())
        nullable: List of the same length as `tensors`. If specified, throw a
            RuntimeError if the i-th element in `tensors` is None and the i-th
            element in nullable is False.

    Returns:
        Concatenation of all tensors in `tensors` along `dim` which are not
        None.

    Throws:
        RuntimeError if the `nullable` constraint is violated or all elements
        in `tensors` are None.
    """
    if nullable is not None and any(
        (t is None) and not n for t, n in zip(tensors, nullable)
    ):
        raise RuntimeError("Unexpected element in tensors is None.")
    filtered = [t for t in tensors if t is not None]
    if len(filtered) == 1:
        return filtered[0]
    return torch.cat(filtered, dim=dim)
978ca4f38b91e1ca134827e56b9a646f73026106
83,617
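A usage sketch for maybe_cat above (assumed tensor shapes):

import torch
a = torch.ones(2, 3)
c = torch.zeros(2, 2)
print(maybe_cat([a, None, c], dim=1).shape)  # -> torch.Size([2, 5])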
from typing import Any, Dict


def strip_empty_params(params: Dict[str, Any]) -> Dict[str, Any]:
    """Remove any request parameters with empty or ``None`` values."""
    return {k: v for k, v in params.items() if v or v is False}
aa8e320b93524ef13d25d522464ab22b48226e79
83,623
def get(role_id, client):
    """Request a role with certain ID."""
    return client.get_role(str(role_id))
e122a1e11f7c587d61aa265ec56e1e64e9699ade
83,625
def limits(cls):
    """Given a dict of fields, calculate the longest string lengths.

    This allows you to easily format the output of many results so that the
    various cols all line up correctly.
    """
    lims = {}
    for cl in cls:
        for k in cl.keys():
            # Use %s rather than str() to avoid codec issues.
            # We also do this so we can format integers.
            lims[k] = max(lims.get(k, 0), len('%s' % cl[k]))
    return lims
6863f8787eb1cbac3ea4a0ae8951f7a9aa6cbb8d
83,627
def client(app):
    """A test_client from an initialized Flask-App."""
    return app.test_client()
4ccc924b2204e873c5b75c63de89d2f79de81754
83,628
def centerel(elsize, contsize):
    """Centers an element of the given size in the container of the given
    size. Returns the coordinates of the top-left corner of the element
    relative to the container."""
    w, h = elsize
    W, H = contsize
    x = (W - w) // 2
    y = (H - h) // 2
    return (x, y)
72aa035924ea2fe89d76073607f3156b75885410
83,629
def diff(df):
    """Assuming df[0] and df[1] are lists, return the set difference between
    list 1 and list 2."""
    return list(set(df[0]) - set(df[1]))
a59223bee55ffcb1b7b75eb656abcea06ddbfda0
83,631
import string


def hexStr(data):
    """Convert binary data to a hex string."""
    h = string.hexdigits
    r = ''
    for c in data:
        i = ord(c)
        r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
    return r
aafd3d3c0ceca30cebfd6d2d7fc884075b0defc4
83,632
def writeSeg(BCFILE, title, segID, planeNodeIDs):
    """write face segments to BC input file

    Args:
        BCFILE: file IO object
        title (str): header comment line
        segID (int): segment ID #
        planeNodeIDs (int): 2D array

    Returns:
        segID (inc +1)
    """
    BCFILE.write('*SET_SEGMENT_TITLE\n')
    BCFILE.write('%s\n' % title)
    BCFILE.write('%i\n' % segID)
    for i in range(0, (len(planeNodeIDs) - 1)):
        (a, b) = planeNodeIDs.shape
        for j in range(0, (b - 1)):
            BCFILE.write("%i,%i,%i,%i\n" % (planeNodeIDs[i, j],
                                            planeNodeIDs[i + 1, j],
                                            planeNodeIDs[i + 1, j + 1],
                                            planeNodeIDs[i, j + 1]))
    segID = segID + 1
    return segID
a82848e3664229ca9d4bcaa78dc8036e731c96a0
83,634
import torch


def random_choice(a, size):
    """Generates a random sample of a given size from a 1-D tensor. The
    sample is drawn without replacement.

    Parameters
    ----------
    a: torch.tensor
        The input tensor from which the sample will be drawn.
    size: int
        The size of the generated sample.

    Returns
    -------
    sample: torch.tensor
        The generated sample.
    """
    permutation = torch.randperm(a.size(0))
    indices = permutation[:size]
    return a[indices]
d47202dd08a1a5f0845c6057fbee8a0a41a0a3f9
83,636
from typing import Dict, List, Optional, Tuple
import re


def parse_signature(sig: str) -> Optional[Tuple[str, List[str], List[str]]]:
    """Split function signature into its name, positional and optional arguments.

    The expected format is "func_name(arg, opt_arg=False)". Return the name of
    the function and lists of positional and optional argument names.
    """
    m = re.match(r'([.a-zA-Z0-9_]+)\(([^)]*)\)', sig)
    if not m:
        return None
    name = m.group(1)
    name = name.split('.')[-1]
    arg_string = m.group(2)
    if not arg_string.strip():
        # Simple case -- no arguments.
        return name, [], []

    args = [arg.strip() for arg in arg_string.split(',')]
    positional = []
    optional = []
    i = 0
    while i < len(args):
        # Accept optional arguments as in both formats: x=None and [x].
        if args[i].startswith('[') or '=' in args[i]:
            break
        positional.append(args[i].rstrip('['))
        i += 1
        if args[i - 1].endswith('['):
            break
    while i < len(args):
        arg = args[i]
        arg = arg.strip('[]')
        arg = arg.split('=')[0]
        optional.append(arg)
        i += 1
    return name, positional, optional
00ae934eedb781a26f32af5405b954721504fdf5
83,637
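A usage sketch for parse_signature above (hypothetical signature string):

print(parse_signature("requests.get(url, params=None)"))
# -> ('get', ['url'], ['params'])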
def mapping_file_to_dict(mapping_data, header):
    """Processes mapping data in list of lists format into a 2-deep dict."""
    map_dict = {}
    for i in range(len(mapping_data)):
        sam = mapping_data[i]
        map_dict[sam[0]] = {}
        for j in range(len(header)):
            if j == 0:
                continue  # sampleID field
            map_dict[sam[0]][header[j]] = sam[j]
    return map_dict
4f592f396bac1d8f7c696f057d8b02ecd1408010
83,643
import math


def item_based_sim(mean_data, movie_data, m1, m2):
    """Returns a similarity using adjusted cosine similarity.

    Parameters
    ----------
    mean_data : dict
        Dictionary of the users' mean of ratings
    movie_data : dict
        Dictionary of the data set.
    m1 : str
        Id of movie to be used for similarity measure.
    m2 : str
        Id of movie to be used for similarity measure.

    Returns
    -------
    similarity value : float
        Returns adjusted cosine similarity of m1 and m2.
    """
    joint = set([u1 for u1 in movie_data[m1]]) & set([u2 for u2 in movie_data[m2]])
    if len(joint) == 0:
        return 0
    numer = 0
    denom1 = 0
    denom2 = 0
    for u in joint:
        ra = movie_data[m1][u]
        rb = movie_data[m2][u]
        mean = mean_data[u]
        numer += (ra - mean) * (rb - mean)
        denom1 += math.pow(ra - mean, 2)
        denom2 += math.pow(rb - mean, 2)
    if denom1 == 0 or denom2 == 0:
        return 0
    return round(numer / (math.sqrt(denom1) * math.sqrt(denom2)), 15)
4f53faf3c2e3c95303c1eaaf36dcb314a9b5f369
83,644
from textwrap import dedent


def format_description(string: str, **kwargs) -> str:
    """Remove common leading whitespace from every line in text.

    Useful for processing triple-quote strings. If keyword arguments are
    supplied, they will serve as arguments for formatting the dedented string
    using str.format().

    :param string: The string to unwrap

    >>> format_description(" c'est \\n une chaine \\n de plusieurs lignes. ")
    "c'est \\nune chaine \\nde plusieurs lignes. "
    >>> format_description('''
    ...     Multi-lined,
    ...     indented,
    ...     triple-quoted string.
    ... ''')
    '\\nMulti-lined,\\nindented,\\ntriple-quoted string.\\n'
    >>> format_description('{foo}{bar!r}', foo=123, bar={})
    '123{}'
    """
    dedented = dedent(string)
    return dedented.format(**kwargs) if kwargs else dedented
1c8c98aea22c880aed2b2269b4077118b0782ab5
83,645
def longest_word(input_count):
    """Return one of the longest words in the dictionary.

    Parameter: input_count - a dictionary with items of the form word: count
    Return: a string - one of the longest words in the given dictionary
    """
    return max(input_count, key=len)
122de858f4b79f0f3163b364bc14ae37e02dd6cc
83,646
def _to_hass_temperature(temperature):
    """Convert percentage to Home Assistant color temperature units."""
    return int(temperature * 346) + 154
c30624266f6575c426bffb34433a51393ff67cc5
83,654
from pathlib import Path


def find_nearest_cmake_lists(component_dir: Path, deployment: Path, proj_root: Path):
    """Find the nearest CMakeLists.txt file

    The "nearest" file is defined as the closest parent that is not the
    "project root" that contains a CMakeLists.txt. If none is found, the same
    procedure is run from the deployment directory and includes the project
    root this time. If nothing is found, None is returned.

    In short, the following in order of preference:
     - Any Component Parent
     - Any Deployment Parent
     - Project Root
     - None

    Args:
        component_dir: directory of new component
        deployment: deployment directory
        proj_root: project root directory

    Returns:
        path to CMakeLists.txt or None
    """
    test_path = component_dir.parent
    # First iterate from where we are, then from the deployment to find the
    # nearest CMakeLists.txt nearby
    for test_path, end_path in [(test_path, proj_root), (deployment, proj_root.parent)]:
        while proj_root is not None and test_path != proj_root.parent:
            cmake_list_file = test_path / "CMakeLists.txt"
            if cmake_list_file.is_file():
                return cmake_list_file
            test_path = test_path.parent
    return None
c194b5e713a7033a6606319b0070457298a190fb
83,655
def get_numbers(files: list) -> list:
    """Get numbers.

    Can take a list returned by :meth:`get_filenames` and make an integer
    list of the numerical parts of the file names.

    Args:
        files: List of segmented file names.

    Returns:
        List of integers from file names in numerical order.
    """
    numlist = []
    for file in files:
        numlist.append(int(file[1]))
    return numlist
2c1fb99af25154153e842f74389d295240ea632c
83,656
def modify_segment_num(df):
    """modify_segment_num(df)

    Modifies stimulus segment numbers for the Gabors and visual flow if they
    repeat.

    Arguments:
        df (pandas): stimulus table

    Returns:
        df (pandas): stimulus table, with updated stimulus segment numbers.
    """
    df = df.copy()
    df = df.sort_values("start_frame_twop").reset_index(drop=True)
    df = df.reset_index()

    stimulus_types = ["gabors", "visflow"]
    for stimulus in stimulus_types:
        stimulus_location = df["stimulus_type"] == stimulus
        stimulus_df_start_idx = df.loc[
            stimulus_location & (df["orig_stimulus_segment"] == 0), "index"
        ].tolist() + [len(df)]
        add_segment_num = 0
        for i in range(len(stimulus_df_start_idx[:-1])):
            sub_stimulus_location = stimulus_location & df["index"].isin(
                range(stimulus_df_start_idx[i], stimulus_df_start_idx[i + 1])
            )
            df.loc[
                sub_stimulus_location, "orig_stimulus_segment"
            ] += add_segment_num
            add_segment_num = (
                df.loc[sub_stimulus_location, "orig_stimulus_segment"].max() + 1
            )

    df = df.drop(columns=["index"])
    return df
2c4225e9faaf6a69ec32a59ba5c18c9d5d59d271
83,662
import json


def dict_to_json_bytes(content):
    """Converts a dict into JSON and encodes it to bytes.

    :param content:
    :type content: dict[any, any]
    :return: The JSON bytes.
    :rtype: bytes
    """
    return json.dumps(content).encode("UTF-8")
228fab87b8c5c65b4816ec8f86bf76a44caaddbf
83,664
import secrets


def random_partition(keys, p):
    """Return a dict with keys such that values are a random partition of p.

    Parameters
    ----------
    keys : list of str
    p : int

    Returns
    -------
    result : dict
        keys : str
        values : int

    Notes
    -----
    Each value of the dict should be greater than or equal to 1.

    Example
    -------
    >>> random_partition(['alphanumeric', 'hexdigits', 'symbols'], 7)  # doctest: +SKIP
    {'alphanumeric': 1, 'hexdigits': 2, 'symbols': 4}
    """
    n = len(keys)
    # Each key should at least have one character
    values = [1] * n
    p -= n
    for _ in range(p):
        i = secrets.randbelow(n)
        values[i] += 1
    result = {keys[i]: values[i] for i in range(n)}
    return result
bcbf64d205561feaa75b706f5bc47b6853134f16
83,667
import torch


def _calculate_offsets(anchors: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    r"""Calculate offsets for bounding box regression.

    Offsets are calculated following the equations below:

    .. math::
        x'' = \frac{x'}{\sigma_{x}^{2}} = \frac{x_{gt} - x_{anchor}}{w_{anchor}} / \sigma_{X'}

        y'' = \frac{y'}{\sigma_{y}^{2}} = \frac{y_{gt} - y_{anchor}}{h_{anchor}} / \sigma_{Y'}

        w'' = \frac{w'}{\sigma_{w}^{2}} = \ln \left[ \frac{w_{gt}}{w_{anchor}} \right] / \sigma_{W'}

        h'' = \frac{h'}{\sigma_{h}^{2}} = \ln \left[ \frac{h_{gt}}{h_{anchor}} \right] / \sigma_{H'}

    Args:
        anchors (torch.Tensor): anchor boxes (priors). Shape `(A, 4)`. The
            boxes must be in :math:`(x_{center}, y_{center}, width, height)`
            format.
        targets (torch.Tensor): ground truth boxes. Shape `(A, 4)`. The boxes
            must be in :math:`(x_{center}, y_{center}, width, height)` format.

    Returns:
        (torch.Tensor): the calculated offsets (deltas) for
        :math:`(x_{center}, y_{center}, width, height)`. Shape `(A, 4)`.
    """
    # Note: this modifies `targets` in place.
    targets[:, :2] = (targets[:, :2] - anchors[:, :2]) / anchors[:, 2:]
    targets[:, 2:] = torch.log(targets[:, 2:] / anchors[:, 2:])
    return targets
a0d9a42562028b28cabc81d55b8ccbe694470620
83,671
def GetStartingPositions(lines: list[str]) -> list[int]:
    """Get the starting positions for the players, converted to the range 0-9."""
    p1 = int(lines[0].split()[-1]) - 1
    p2 = int(lines[1].split()[-1]) - 1
    return [p1, p2]
81370e82a26a14ad8baa0dfa1b3ccf4a5d7f8436
83,672
from typing import Any, Dict


def convert_dict_to_yaml(input_dict: Dict[str, Any], indent_spaces: int = 4,
                         indent_level: int = 0) -> str:
    """The original yaml.dump needed improvements; this is a recursive
    re-implementation of yaml.dump(config_dict).

    Args:
        input_dict: Dict to be converted to yaml.
        indent_spaces: How many spaces per indent level.
        indent_level: Current indent level for the recursion.

    Returns:
        YAML string.
    """
    # setup key-value collector and indent level
    ret_list = []
    indent = " " * (indent_level * indent_spaces)
    # loop input dict
    for key, value in input_dict.items():
        # setup collector for single key-value pair
        single_ret_list = [f"{indent}{key}:"]
        # check type
        if isinstance(value, bool):
            # bools as lower-case
            value_str = str(value).lower()
        elif isinstance(value, (int, float)):
            # leave float conversion to python
            value_str = str(value)
        elif isinstance(value, str):
            # put quotes around strings
            value_str = f"\"{value}\""
        elif value is None:
            # None is null in yaml
            value_str = "null"
        elif isinstance(value, dict):
            # iterate dictionaries recursively
            value_str = "\n" + convert_dict_to_yaml(
                value, indent_spaces=indent_spaces, indent_level=indent_level + 1)
        else:
            raise ValueError(f"dict to yaml, value type not understood: {value}")
        # concatenate the single key-value pair and add it to the key-value collector
        single_ret_list += [f" {value_str}"]
        ret_list += ["".join(single_ret_list)]
    # join the collected key-value pairs with newline
    return "\n".join(ret_list)
7dff030116522084089c497cab82967bbaed8b5e
83,674
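A usage sketch for convert_dict_to_yaml above (assumed config values):

cfg = {"lr": 0.1, "debug": False, "model": {"name": "mlp", "layers": 3}}
print(convert_dict_to_yaml(cfg))
# lr: 0.1
# debug: false
# model:
#     name: "mlp"
#     layers: 3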
def pretty(X, format='%0.3f'):
    """Returns a formatted string for a numpy array ``X``.

    Example: [1,2,3], %0.3f => 1.000 2.000 3.000
    """
    format = format + '\t'
    return ''.join(format % x for x in X)
952891312d05413bf2eb2d87fece3c935ccf2179
83,678
from pathlib import Path
from typing import Any, Dict

import yaml


def load_specs(specs_file: Path) -> Dict[str, Any]:
    """Read the specifications for benchmark scenarios from the given file.

    Returns:
        A dict where keys are scenario names and values are scenario
        configuration.
    """
    with open(specs_file, "r") as f:
        specs_list = yaml.safe_load(f)

    specs: Dict[str, Any] = {}
    for spec in specs_list:
        name = spec.pop("scenario")
        specs[name] = spec
    return specs
a94e76e9c711b840c33b32e75f9be765fb416532
83,680
def first(iterable):
    """Returns the first element of an iterable, or None if it is empty."""
    for element in iterable:
        return element
20eb077082ecbe6c073b507000bf148164f81503
83,686
def before_breadcrumb(crumb, hint):  # pylint: disable=unused-argument; callback function signature from sentry
    """Render nothing for the breadcrumb history."""
    return None
3d6969bd3634f72ae8297b021fa77e22bc346b4c
83,687
def broadcast(t, outsize, dim):
    """General function that broadcasts a tensor (copying the data) along a
    given dimension `dim` to a desired size `outsize`, if the current size of
    that dimension is a divisor of the desired size `outsize`.

    >>> x = broadcast(torch.randn(5, 2, 6), 6, 1)
    >>> y = broadcast(torch.randn(5, 6, 6), 6, 1)
    >>> x.size(1)
    6
    >>> y.size(1)
    6
    """
    tsize = t.size(dim)
    # do we need to broadcast?
    if tsize != outsize:
        div, mod = divmod(outsize, tsize)
        if mod != 0:
            # Report the actual source -> target sizes in the error message.
            raise ValueError(
                "Cannot broadcast {} -> {}".format(tsize, outsize))
        size = [1 if d != dim else div for d in range(len(t.size()))]
        return t.repeat(*size)
    return t
84b32e5c45f6f2f82a214b342fa3033e53b4b81c
83,688
def bits_between(number: int, start: int, end: int):
    """Returns bits between positions start and end from number."""
    return number % (1 << end) // (1 << start)
c061f3c4647c842089a07591c638c590b830f94d
83,692
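A usage sketch for bits_between above, extracting bits 4..8 (end exclusive):

print(bin(bits_between(0b10110110, 4, 8)))  # -> 0b1011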
import ast


def count_boolops(node: ast.AST) -> int:
    """Counts how many ``BoolOp`` nodes there are in a node."""
    return len([
        subnode
        for subnode in ast.walk(node)
        if isinstance(subnode, ast.BoolOp)
    ])
5024f953efca73ab3fa2f95f53648de4c9e0936c
83,698