content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def _find_properties(parent, name):
    """Return every child of *parent* whose 'name' attribute equals *name*."""
    matches = []
    for child in parent:
        if child.attrib.get('name') == name:
            matches.append(child)
    return matches
9a1b82d68dac9f34e26be75f4583d963bbb8f152
106,897
def vec_to_midpoint(array):
    """Computes the midpoint between consecutive elements in an array.

    Args:
        array: (np.array)

    Returns:
        (np.array) of length ``len(array) - 1``
    """
    return 0.5 * (array[:-1] + array[1:])
29dc067910e9ac7bb72ce5dd6cb19fada3d29fc4
106,899
def char_to_bool(letter):
    """Transform character (J/N) to Bool."""
    normalized = letter.upper()
    if normalized == 'J':
        return True
    if normalized == 'N':
        return False
    raise ValueError('Invalid character, only J or N allowed.')
0dcde16fce2c116f640c3eb8d21ac20ed6514bcb
106,901
def backend_extras(*requirements):
    """Construct list of requirements for backend integration.

    All built-in backends depend on PyOpenGL so add it as default requirement.
    """
    extras = ["PyOpenGL"]
    extras.extend(requirements)
    return extras
a7f96d47e4943c8e774671380d630afb215652d7
106,903
def _legend_add_subtitle(handles, labels, text, func):
    """Prepend an invisible 'subtitle' entry to legend handles/labels.

    Only applies when *text* is non-empty and there is more than one handle;
    the blank handle is made invisible so renderers can tell subtitles apart
    from real entries.
    """
    if not text or len(handles) <= 1:
        return handles, labels
    subtitle_handle = func([], [], label=text)
    subtitle_handle.set_visible(False)
    # subtitles are shown first
    return [subtitle_handle] + handles, [text] + labels
01175234617193b101a062d58649e1589d492340
106,904
def set_independent_variables(data, dependent_variable):
    """
    Set independent variables.

    Isolates the independent variables of a data frame by dropping the
    dependent column and returning the remaining values.

    :param data: An object of class pandas.DataFrame; the data to be analyzed.
    :param dependent_variable: A string; the dependent variable for this analysis.
    :return: Array of the independent variables of the analysis.
    """
    independent = data.drop(dependent_variable, axis=1)
    return independent.values
db29610c91a7a6351d162289744b4554ac41bf02
106,905
def normalize_constraints(constraints, flds):
    """
    Render local constraints so that the return value is:

    * a list, not None
    * a list of dicts (copies — the caller's mutable input is never modified)
    * a list of non-optional constraints, or optional ones whose field is in *flds*
    """
    copied = [dict(**c) for c in (constraints or [])]
    kept = []
    for constraint in copied:
        if constraint.get('field') in flds or not constraint.get('optional'):
            kept.append(constraint)
    return kept
3d603d3331fced20d1e3f3ca1e527c6dec957bea
106,906
def unify_columns(data, name1, name2, new_name, col_index):
    """Unify two columns using boolean disjunction.

    Args:
        data: Pandas DataFrame (modified in place)
        name1: Name of the first column
        name2: Name of the second column
        new_name: Name of the new column to introduce
        col_index: Column index where new column will be set
    """
    merged = data[name1].combine(data[name2], lambda x, y: x or y)
    data.insert(col_index, new_name, merged)
    # the source columns are no longer needed
    data.drop(name1, axis=1, inplace=True)
    data.drop(name2, axis=1, inplace=True)
a9750b048258183fe15eccf411f36732bdf20b64
106,913
def _get_unique_identifier(primary_unique_identifier: str,
                           secondary_unique_identifier: str,
                           tertiary_unique_identifier: str) -> str:
    """Return a unique identifier, starting with the last possibility.

    Falls back from tertiary to secondary, and returns the primary
    identifier unconditionally when both are falsy.
    """
    for candidate in (tertiary_unique_identifier, secondary_unique_identifier):
        if candidate:
            return candidate
    return primary_unique_identifier
5af639b64ca923dc423e6172713a71b2e4d1afaa
106,916
def simple_invert(data):
    """Invert the input image.

    Parameters
    ----------
    data : np.ndarray
        (D,H,W) Input image

    Returns
    -------
    np.ndarray
        (D,H,W) Inverted image
    """
    inverted = 1.0 - data
    return inverted
25c91894917e8ff93d112e33f45c38c3adb5935e
106,917
from typing import Sequence
from typing import List


def inverse_power_scale(data: Sequence[float], power: float) -> List[float]:
    """Scale a sequence of numbers based on the ``inverse_power_scale``.

    While the ``power_scale`` places more emphasis in scaling the lower
    values, the ``inverse_power_scale`` places more emphasis in scaling the
    higher values, keeping the distribution of numbers wide when scaling up.

    :param data: The sequence of floats to scale; the floats must be between
        zero and one, and are scaled within that range.
    :param power: A positive float representing the scaling factor. A larger
        number scales the numbers higher; a factor of 1 has no effect.
    :return: A new list of the scaled floats, in the order they were given;
        ``data`` is not modified.
    """
    scaled = []
    for value in data:
        scaled.append(1 - (1 - value) ** power)
    return scaled
93d41751c86cccc4b0180344252234183ccd32f3
106,918
import pytz


def parse_timezone(tz):
    """
    Parse a timezone description into a tzinfo object.

    Spaces in the description are replaced with underscores before the
    lookup, e.g. "America/Los Angeles" resolves the pytz timezone
    'America/Los_Angeles'.
    """
    canonical_name = tz.replace(" ", "_")
    return pytz.timezone(canonical_name)
7fb4f4f6506afb3f2057fae5020cb904f88e16a6
106,920
def dif(x, y, afrund=None):
    """Calculate the element-wise difference between two sequences.

    :param x: first sequence of numbers
    :param y: second sequence of numbers
    :param afrund: number of decimals to round each difference to (default 2)
    :return: list of rounded differences x[i] - y[i]
    """
    # fix: identity test for the None sentinel instead of '== None';
    # keep None as the default so explicit dif(x, y, None) still works
    if afrund is None:
        afrund = 2
    # zip pairs the sequences directly instead of indexing y by position
    return [round(a - b, afrund) for a, b in zip(x, y)]
f7a1ed998e66eb4f497124a3647a7ed04a82fce4
106,931
def get_dims(x):
    """
    Get values of each dimension.

    Arguments
    ----------
    x: tensor scalar or array

    Returns a list of dimension sizes; a scalar is reported as [1].
    """
    shape = x.get_shape()
    if len(shape) == 0:
        # scalar tensors have an empty static shape
        return [1]
    return [dim.value for dim in shape]
2dc27f628770eca2fd70c0b202c2873f09626a19
106,934
def get_unaligned_spans(x1, x2, ptrs):
    """Get unaligned spans from an edit distance pointer matrix.

    Walks the backpointer matrix *ptrs* from (len(x1), len(x2)) back to
    (0, 0) and collects maximal runs of mismatch/gap steps as
    ((start1, end1), (start2, end2)) index pairs.
    """
    i1, i2 = len(x1), len(x2)
    spans = []
    run_end1 = run_end2 = None
    while (i1, i2) != (0, 0):
        prev1, prev2 = ptrs[i1][i2]
        aligned = prev1 != i1 and prev2 != i2 and x1[prev1] == x2[prev2]
        if aligned:
            # a matched pair closes any open unaligned run
            if run_end1 is not None:
                spans.append(((i1, run_end1), (i2, run_end2)))
                run_end1 = run_end2 = None
        elif run_end1 is None:
            # mismatch/gap: open a new unaligned run at the current position
            run_end1, run_end2 = i1, i2
        i1, i2 = prev1, prev2
    if run_end1 is not None:
        spans.append(((0, run_end1), (0, run_end2)))
    return spans
ac6017a17bd0d666a28dac981f66c8254712a082
106,935
def format_remove_duplicates(text, patterns):
    """Removes duplicated line-basis patterns.

    Based on simple pattern matching, removes duplicated lines in a block of
    lines: lines matching a pattern that already matched an earlier line are
    dropped. Designed to be used as a filter function for Jinja2.

    Args:
        text: A str of multi-line text.
        patterns: A list of str, each an exact substring (not a regexp).

    Returns:
        A formatted str with duplicates removed.
    """
    found = [False] * len(patterns)
    kept = []
    for line in text.split('\n'):
        drop = False
        for idx, pattern in enumerate(patterns):
            if pattern not in line:
                continue
            if found[idx]:
                drop = True
            else:
                found[idx] = True
        if not drop:
            kept.append(line)
    # let |'\n'.join| emit the last newline
    if kept:
        kept.append('')
    return '\n'.join(kept)
0547ec746f0d2d8bce916916c2b235de1569886b
106,936
import torch


def _get_grid_locations(image_height, image_width, device):
    """Wrapper for `torch.meshgrid`: returns an (H, W, 2) tensor of (y, x) coordinates."""
    ys = torch.arange(0., image_height, device=device, requires_grad=False)
    xs = torch.arange(0., image_width, device=device, requires_grad=False)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    return torch.stack((grid_y, grid_x), dim=-1)
089945f4058b14d8508c690a4709f65006dd9bb4
106,940
def calc_bmi(mass, height):
    """Calculates BMI from specified height and mass.

    BMI(kg/m2) = mass(kg) / height(m)^2

    Arguments:
        mass {float} -- mass of the person in KG
        height {float} -- height of the person in meters

    Returns:
        bmi {float} -- BMI rounded to 1 digit after the decimal
    """
    raw_bmi = mass / height ** 2
    # round off to 1 digit after the decimal point
    return round(raw_bmi, 1)
2b8aa82cc627913e55d721b6cc3548c227b1e1b2
106,941
def parse_server(node):
    """Return hostname and presence_id for given server.presence node.

    A node without '#' is treated as a bare hostname with presence_id '-1'.
    """
    if '#' in node:
        hostname, presence_id = node.split('#')
        return hostname, presence_id
    return node, '-1'
53e0fb2c28ddac25f5592f3bf1b91b4123e3496a
106,942
from typing import Callable
import ast


def eval_lambda(val: str) -> Callable:
    """Parse a lambda from a string safely.

    Args:
        val: string representing a lambda.

    Returns:
        a callable.

    Raises:
        ValueError: in case the lambda can not be parsed.
    """
    tree = ast.parse(val).body[0].value  # type:ignore
    # only accept a bare lambda expression, and refuse any source that
    # mentions "eval" to avoid nested evaluation tricks
    if not isinstance(tree, ast.Lambda) or "eval" in val:
        raise ValueError(f"'{val}' can not be safely parsed as a lambda function")
    return eval(val)
aa2081f66471d483ea76cc9723c632cb354d5f42
106,943
def average_above_zero(items):
    """
    Compute the average of a given list, considering only non-negative
    int/float values.

    @type items: list
    @param items: List of values; only int/float items >= 0 are averaged
    @rtype: float
    @return: Return the average of the qualifying values
    @raise TypeError: if items is not a list
    @raise ValueError: if items is empty or holds no non-negative numbers
    """
    # Don't compute if items isn't a list (array)
    if type(items) is not list:
        raise TypeError('Type of items param need to be \'list\' and not ' + str(type(items)))
    # Don't compute if items is empty
    if not items:
        raise ValueError('Items list must not be empty')
    total = 0.0  # renamed: the original shadowed the builtin 'sum'
    count = 0
    for item in items:
        # only non-negative ints/floats participate in the average
        if (type(item) is float or type(item) is int) and item >= 0:
            total += float(item)
            count += 1
    # bug fix: the original divided by a decremented length that reached 0
    # (ZeroDivisionError) when every item was filtered out
    if count == 0:
        raise ValueError('Items list must contain at least one positive number')
    return total / count
ba9770de1c594c6bd5d0dbabe89ec9b8e55b06c5
106,947
from typing import OrderedDict


def state_dict_to_cpu(state_dict: OrderedDict):
    """Moves a state_dict to cpu and removes the 'module.' prefix added by DataParallel.

    Parameters
    ----------
    state_dict : OrderedDict
        State_dict containing the tensors to move to cpu.

    Returns
    -------
    OrderedDict
        State_dict on cpu with 'module.' stripped from every key.
    """
    cpu_state = OrderedDict()
    for key, tensor in state_dict.items():
        # drop the "module." prefix left over from DataParallel training
        cpu_state[key.replace('module.', '')] = tensor.cpu()
    return cpu_state
50c0ac1cd44c3983fa5a86086535ff3af422462c
106,948
def ndim(tensor):
    """Return the number of dimensions of *tensor* (length of its static shape)."""
    shape = tensor.get_shape()
    return len(shape)
47f3d2753a49ee049d4219fa7327f0ea7fa5fa00
106,951
def needs_binary_relocation(m_type, m_subtype):
    """Returns True if the file with MIME type/subtype passed as arguments
    needs binary relocation, False otherwise.

    Args:
        m_type (str): MIME type of the file
        m_subtype (str): MIME subtype of the file
    """
    relocatable_subtypes = ('x-executable', 'x-sharedlib', 'x-mach-binary')
    return m_type == 'application' and m_subtype in relocatable_subtypes
8dac0cd20e782be24a8ffb35c5870aadc1001d0f
106,952
import struct


def str64(s):
    """Convert a string (up to 8 chars) to an int64 via its ASCII bytes."""
    padded = s + "\0" * (8 - len(s))
    raw = padded.encode("ascii")
    # '@q' unpacks a native-endian signed 64-bit integer
    return struct.unpack("@q", raw)[0]
2017a9d25f1644a563506bf6e6e0e512b03ccbe3
106,958
import math


def rms_to_db(rms: float):
    """Root Mean Square to dB.

    Args:
        rms ([float]): root mean square (clamped below at 1e-16 to avoid log(0))

    Returns:
        float: dB
    """
    clamped = rms if rms > 1e-16 else 1e-16
    return 20.0 * math.log10(clamped)
24dd5a58320283d33641ddff501feb4dd6580566
106,959
def classify_code_type(raw_string):
    """
    A very simple function to detect HTML/XML.

    Classifies as 'HTML' only when every marker below is present; a single
    missing marker yields 'XML'.
    """
    html_markers = ('</div>', '</p>')
    if all(marker in raw_string for marker in html_markers):
        return 'HTML'
    return 'XML'
7f9963d6eb273c5b5f76e67943e8ee3ae6cb85ec
106,962
def read_file(f):
    """Read an entire file and return the contents."""
    with open(f) as handle:
        return handle.read()
5fd04024806fc529df539d051ab15da88eaf6562
106,964
import click


def include_option(func):
    """Option for including keyword(s)."""
    option = click.option(
        '--include',
        'include_keywords',
        help="Include pages with keyword(s) (comma-separated)."
    )
    return option(func)
6b9c5473480e9265be9604f7f51eae5a661ac6e9
106,969
import io


def isfile(f):
    """
    Returns True if the given object represents an OS-level file (that is,
    ``isinstance(f, file)``).

    On Python 3 this also returns True if the given object is a higher level
    wrapper on top of a FileIO object, such as a TextIOWrapper.
    """
    if isinstance(f, io.FileIO):
        return True
    # unwrap buffered/text wrappers and test the underlying object
    for attr in ('buffer', 'raw'):
        if hasattr(f, attr):
            return isfile(getattr(f, attr))
    return False
ac8f746c047f7ca616fd1c8d87d72f75fbfa0f9f
106,978
from typing import Union
import re


def join(*args: Union[None, str], trailing_slash: bool = False) -> str:
    """
    Return a url path joined from the arguments.

    It correctly handles blank/None arguments, and removes back-to-back
    slashes, eg::

        assert join('/', 'foo', None, 'bar', '', 'baz') == '/foo/bar/baz'
        assert join('/', '/foo', '/', '/bar/') == '/foo/bar'

    Trailing slashes are removed by default; pass ``trailing_slash=True``
    to keep one::

        assert join('/foo', 'baz', None, trailing_slash=True) == '/foo/baz/'
    """
    raw = '/'.join(part if part else '' for part in args)
    collapsed = re.sub(r'/+', '/', raw)
    if collapsed in ('', '/'):
        return '/'
    collapsed = collapsed.rstrip('/')
    if trailing_slash:
        return collapsed + '/'
    return collapsed
66cfb908d927091dc664809c3edbdc44cae3f323
106,989
def __consolidate_stability(df_vim_structured, ranked=True):
    """Calculates the variance of each feature's VIMs.

    :param df_vim_structured: DataFrame of VIMs (variance taken over axis 0).
    :param ranked: when True, return the variances sorted ascending.
    :return: pandas Series named 'VIM_var'.
    """
    variances = df_vim_structured.var(axis=0)
    variances.name = 'VIM_var'
    if ranked:
        # bug fix: Series.sort() was removed from pandas (0.20+);
        # sort_values() returns the sorted copy instead of sorting in place
        variances = variances.sort_values()
    return variances
8a9acf812af6f72d3c2d232f8f5f3c6156227dd6
106,991
import traceback


def fetch_data(datagen, idx):
    """Fetch one batch of data from a data generator.

    The traceback is printed before the exception is re-raised, so failures
    inside worker processes remain visible.
    """
    try:
        return datagen[idx]
    except Exception as err:
        traceback.print_exc()
        print()
        raise err
449ca1a8ee22022295dc989672e5cae1dc177338
106,995
import base64


def encode_to_b16(inp: str) -> bytes:
    """
    Encodes a given utf-8 string into base-16.

    >>> encode_to_b16('Hello World!')
    b'48656C6C6F20576F726C6421'
    >>> encode_to_b16('HELLO WORLD!')
    b'48454C4C4F20574F524C4421'
    >>> encode_to_b16('')
    b''
    """
    # encode to a bytes-like object, then base-16 encode it
    return base64.b16encode(inp.encode("utf-8"))
15e451f01d46525bb3a9c4a5e8b009cff6683191
106,997
from datetime import datetime


def generate_fig_save_string(filename):
    """Save Figures
    ======================================

    Builds the full save path for a figure from the prescribed filename, a
    date/timestamp and the project report directory.

    Args:
        filename (string) - Filename to be used for figure.

    Returns:
        save_string (string) - String to use for saving file.
    """
    # figures are always written to the project report directory
    directory = r'C:/Developer/electric_motor_thermal_modelling/Reports/Figures'
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M_%S')
    return directory + '/' + 'YC_' + filename + '_' + stamp + '.png'
4ede3fded795617defd74881dbef8d1c903b2046
106,999
import json


def load_json(file_path):
    """
    Loads json from a file, (re)initialising the file to an empty JSON
    object when it cannot be opened.

    :param file_path: the path to the file
    :return: dictionary
    """
    try:
        with open(file_path, "r+") as file:
            return json.loads(file.read())
    except (OSError, IOError):
        # bug fixes: "r+" raises for a missing file, so the fallback must
        # open with "w" (which also truncates); and dump the dict {} — the
        # original dumped the *string* "{}", leaving '"{}"' in the file,
        # which is a JSON string, not an empty object.
        with open(file_path, "w") as file:
            json.dump({}, file, indent=4)
        return {}
7203397948b2c227d697169f6bcd663f5f818674
107,001
import re


def match1(text, *patterns):
    """Scans through a string for substrings matched some patterns (first-subgroups only).

    Args:
        text: A string to be scanned.
        patterns: Arbitrary number of regex patterns.

    Returns:
        When only one pattern is given, returns a string (None if no match found).
        When more than one pattern are given, returns a list of strings ([] if no match found).
    """
    if len(patterns) == 1:
        found = re.search(patterns[0], text)
        return found.group(1) if found else None
    groups = []
    for pattern in patterns:
        found = re.search(pattern, text)
        if found:
            groups.append(found.group(1))
    return groups
1f01b9c2ba44cdbf84db67584af7524d67297dd1
107,002
import json


def load_element_list(filename):
    """
    Load the list of elements in the ADAS database as defined for this
    script in the 'elemenets.json' file, located in the same directory as
    this script.

    elements.json: Define which datasets to use. The value indicates which
    year the dataset corresponds to. Please check the Open_ADAS documentation
    (https://open.adas.ac.uk/man/appxa-11.pdf) for the quality of the dataset
    before adding it to this list.
    """
    with open(filename, 'r') as handle:
        return json.load(handle)
41e0bb79f4a234ea48251ab173717afeddf8ce8a
107,008
from json import dumps


def return_pyvpc_objects_json(pyvpc_objects):
    """
    Return list of PyVPCBlock as json.

    :param pyvpc_objects: list of PyVPCBlock
    :return: json formatted string
    """
    ranges = [
        {
            'start_address': str(obj.get_start_address()),
            'end_address': str(obj.get_end_address()),
            'num_of_addresses': obj.get_num_addresses(),
            'prefix': obj.get_network_prefix(),
            'available': obj.block_available,
            'id': obj.get_id(),
            'name': obj.get_name(),
        }
        for obj in pyvpc_objects
    ]
    return dumps({'ranges': ranges})
61dde328d9ce5ecdae2af8dc861980053321b836
107,010
def parse_line(line):
    """Parse a line of instructions and return a (step, requires) tuple."""
    assert line.startswith("Step ") and line.endswith(" can begin.")
    words = line.split()
    # word 7 is the dependent step, word 1 is its prerequisite
    return words[7], words[1]
47dca266cd19feccc41985947c646ada50b8da36
107,011
def read_blacklist_file(blacklist_file_path):
    """
    Read a blacklist file and return the set of taxonIDs it contains.

    Each line is either structured as '<taxonID>\\t<any other stuff, i.e.
    lineage...>' or just '<taxonID>'.
    """
    blacklist = set()
    with open(blacklist_file_path, 'r') as infile:
        for line in infile:
            fields = line.split('\t')
            if len(fields) > 1:
                # tab-separated: the taxonID is the first column
                blacklist.add(int(fields[0]))
            else:
                blacklist.add(int(line.rstrip('\n')))
    return blacklist
e59ebe40fd40db2365d334e993e788d4e2e2c3a0
107,012
import yaml


def load_yaml(yaml_file):
    """Load *yaml_file*. Return a dictionary."""
    with open(yaml_file) as handle:
        return yaml.safe_load(handle)
c24c64db5d491b784e018b3e2e5920c23366dd6d
107,014
def add_config(cmd, config):
    """
    Adds config information to cmd either by appending to an existing
    --config/-C argument or appending to end of command.

    cmd (list of strings): passed snakemake commands.
    config (list of strings): additional values to add.
    """
    flags_present = [flag for flag in ("--config", "-C") if flag in cmd]
    if not flags_present:
        # add config to end if arg not already used
        return cmd + ["--config"] + config
    # append right after the latest occurrence of a config flag
    idx = max(cmd.index(flag) for flag in flags_present)
    return cmd[: idx + 1] + config + cmd[idx + 1:]
57dda916a8f35a9c44f09ce60b65d9fabb885e1f
107,016
def slurp(path):
    """Read the contents of a file, stripping leading and trailing whitespace."""
    with open(path) as handle:
        content = handle.read()
    return content.strip()
a87d703ab8c85772195714d080418f3cb99794f1
107,026
import json


def format_json(json_object, indent, default):
    """Pretty-format json data, indenting every continuation line by *indent* spaces.

    *default* is passed through to ``json.dumps`` for non-serializable values.
    """
    prefix = "\n" + " " * indent
    dumped = json.dumps(json_object, indent=2, default=default)
    return prefix.join(dumped.split("\n"))
0b4942451524eb81bfa6d4396bee003de85181ca
107,028
import pickle


def read(fnIn):
    """
    Method to read a FourierICA object from disk.

    Parameters
    ----------
    fnIn: filename of the FourierICA object
    """
    # a context manager guarantees the handle is closed, even on error
    with open(fnIn, "rb") as filehandler:
        return pickle.load(filehandler)
07dae75599f1d3931a4d6c389762e1cb96c9da0f
107,030
def skip_label(what):
    """Generate a "skip" label name."""
    return "skip {}".format(what)
60c32ec2636cb3c14216b604dfa1987381f52dd5
107,035
from typing import Counter


def make_freq_table(stream):
    """
    Given an input stream, construct a frequency table (i.e. mapping of each
    byte to the number of times it occurs in the stream).

    The frequency table is actually a Counter.
    """
    freqs = Counter()
    chunk = bytearray(512)
    while True:
        n_read = stream.readinto(chunk)
        freqs.update(chunk[:n_read])
        if n_read < len(chunk):
            # a short read means end of stream
            break
    return freqs
ee34be52e8274d90dc74308aad6b1b27c42ccd9c
107,036
import importlib


def import_module(module):
    """Protected import of a module."""
    try:
        return importlib.import_module(module)
    except ImportError:
        raise ImportError("Requested module '{}' not found.".format(module))
7126404b74cf457e824f5ca316161ea78bd73a10
107,037
def _consonant_or_vowel_stem_verb(forms, *, strong):
    """Return the consonant root of a verb if it exists, otherwise the vowel root."""
    # https://kaino.kotus.fi/visk/sisallys.php?p=55
    # NOTE(review): the *strong* keyword is currently unused — confirm intended.
    imperative = forms.get('imperatiivi_yks_3', '')
    return imperative[:-4]
0580cfc2348f1b64a498968eec607c3824293d04
107,038
from typing import Union


def calculate_bmi(mass: Union[int, float], height: Union[int, float]) -> Union[int, float]:
    """Calculate the body mass index (bmi).

    mass in kg, height in m.
    """
    squared_height = height ** 2
    return mass / squared_height
1811b45cb0d971e9dc6da0bc7ec8e2cd4ff43994
107,039
import math


def cone_volume(radius, height):
    """Compute and return the volume of a right circular cone."""
    base_area = math.pi * radius ** 2
    return base_area * height / 3
d216dd45db741d085215a4b7842d17e97fcb3e00
107,042
def get_incoming_message_sub_id(ad):
    """Get incoming message subscription id.

    Falls back to the droid's default SMS subscription id when the device
    object has no incoming_message_sub_id attribute.
    """
    _missing = object()
    value = getattr(ad, "incoming_message_sub_id", _missing)
    if value is _missing:
        return ad.droid.subscriptionGetDefaultSmsSubId()
    return value
1fed30d31b0b515bff806aacec02816dcb782b05
107,045
def PmfMean(pmf):
    """Computes the mean of a PMF.

    Returns:
        float mean
    """
    # sum of value * probability over the pmf's dictionary
    return sum((p * x for x, p in pmf.d.items()), 0.0)
0dce710f7fdc02313ff75c2e873400470f78ef90
107,048
def primes_till_N(N: int):
    """
    Generate all primes less than N via a sieve of Eratosthenes.

    Args:
        N (int): upper bound (exclusive)

    Returns:
        list: list of prime numbers below N
    """
    is_candidate = [True] * N
    primes = []
    for value in range(2, N):
        if not is_candidate[value]:
            continue
        primes.append(value)
        # strike out every multiple of the newly found prime
        for multiple in range(value, N, value):
            is_candidate[multiple] = False
    return primes
f5324512b5db787e8353afd9ce5917dc9df7efc7
107,049
def is_parenthesis(char):
    """
    Checks whether a char is a parenthesis.

    Parameters
    ----------
    char : char
        The character that is going to be checked

    Returns
    -------
    bool:
        Whether the character is a parenthesis
    """
    return char in ("(", ")")
752fb4efb7bbd5ca5ba8f9dc4f12afa71ef4c0fc
107,050
import re
from pathlib import Path
from typing import Union
from typing import List
from typing import Tuple


def find_files(
    data_path: Union[str, Path], skip_str: Union[str, List[str]] = "sf6"
) -> List[Tuple[Path, Path]]:
    """A helper to find GCWERKS data and precisions files in a given folder.

    Searches for .C files of the format macehead.19.C, looks for a precisions
    file named macehead.19.precisions.C and, if it exists, pairs the two up.
    Please note the limited scope of this function: it will only work with
    files that are named in the correct pattern.

    Args:
        data_path: Folder path to search
        skip_str: String or list of strings; files whose matched names
            contain any of them are skipped

    Returns:
        list: Sorted list of (data file, precisions file) tuples
    """
    data_path = Path(data_path)
    skip_strings = skip_str if isinstance(skip_str, list) else [skip_str]
    data_regex = re.compile(r"[\w'-]+\.\d+.C")

    pairs = []
    for candidate in data_path.glob("*.C"):
        match = data_regex.match(candidate.name)
        if not match:
            continue
        if any(s in match.group() for s in skip_strings):
            continue
        data_filepath = data_path / match.group()
        prec_filepath = data_path / Path(Path(candidate).stem + ".precisions.C")
        if prec_filepath.exists():
            pairs.append((data_filepath, prec_filepath))

    pairs.sort()
    return pairs
d83baf3b8819a1edc580034799a05123b5a5c973
107,053
def r_split(_, text, char):
    """Strips string to left of and including specified characters."""
    # rsplit with maxsplit=1 yields the same final segment as split()[-1]
    return text.rsplit(char, 1)[-1]
7ec4ccbdb7920640e5fdbf45737915e3b8f64e8c
107,059
def filter_by_extension(ext):
    """
    A higher-order function that returns a function that given a filename
    filters by the extension argument.

    :param ext: the return function will return true if a filename ends with ext
    :return: a one-arg function that takes a filename and returns true if
        filename ends with ext
    """
    return lambda filename: filename.endswith(ext)
f4ec9e30bf867af29101226c23335c6328e6d032
107,071
import operator


def max_tuple_dict(tuple_dict, key_index):
    """Calculates the maximum of a dict of tuples.

    Uses the element at key_index of each (key, value) item as the key for
    comparisons. In case of an empty dict, this returns (None, None).
    """
    if not tuple_dict:
        return None, None
    return max(tuple_dict.items(), key=operator.itemgetter(key_index))
6504cbe9ad39ac79ba6f6ed50395db31b3b476e6
107,077
def _incorrect_data_type_message(
    data_dict: dict, field_name: str, expected_type: str
) -> str:
    """
    Creates a string explaining to the user that the field input did not
    have the expected type.

    :param data_dict: The dictionary containing the different data fields
        for the disk chopper.
    :param field_name: The name of the field that failed the check.
    :param expected_type: The expected data type.
    :return: A message naming the field, the expected type and the dtype of
        the value the user entered.
    """
    actual_type = data_dict[field_name].dtype
    return (
        f"Wrong {field_name} type. Expected {expected_type} but found"
        f" {actual_type}."
    )
a8e39b48e75b8aa42e745891716ffd6726dcaef2
107,080
def parse_values_to_remove(options):
    """Manual parsing of remove arguments.

    :param options: list of arguments following --remove argument
    :return: dictionary containing key paths with values to be removed
    :rtype: dict

    EXAMPLE: {'identity.username': [myname],
              'identity-feature-enabled.api_extensions': [http, https]}
    """
    parsed = {}
    for argument in options:
        parts = argument.split('=')
        if len(parts) != 2:
            # missing equal sign, all values in section.key will be deleted
            parsed[argument] = []
            continue
        section, values = parts
        if len(section.split('.')) != 2:
            raise Exception("Missing dot. The option --remove has to "
                            "come in the format 'section.key=value[,value"
                            "]', but got '%s'." % argument)
        parsed[section] = values.split(',')
    return parsed
0d9ff5fbc8f5db5a40c38c84206699b98fe49af6
107,082
import base64


def B64(val: bytes) -> str:
    """Return base64-encoded string representation of input binary data."""
    encoded = base64.b64encode(val)
    return encoded.decode()
4525d982cbb80d97dce14b446fc8e203b9176d68
107,083
def s_and(*args):
    """Logical and.

    Mirrors Python's ``and`` chaining: returns the first falsy argument,
    the last argument when all are truthy, and True for no arguments.
    """
    result = True
    for value in args:
        if not value:
            return value
        result = value
    return result
022f3a14e0430210a636828daf2056c653107c58
107,088
def is_frequent_individual(itemset, dico, ds, minFrequency):
    """
    Checks if the itemset contains only frequent items.

    :param itemset: the itemset
    :param dico: dictionary containing the transactions
    :param ds: dataset
    :param minFrequency: the minimum frequency for the items
    :return: True if all items in itemset are frequent, False otherwise
    """
    for item in itemset:
        item_support = len(dico[(item,)])
        frequency = item_support / ds.trans_num()
        if frequency < minFrequency:
            return False
    return True
7de7f137710fe92490db564a5000b8a1074f793d
107,091
def fatorial(n, show=False):
    """
    -> Compute the factorial of a number.

    :param n: The number whose factorial is computed.
    :param show: (optional) Whether to print the computation.
    :return: The value of the factorial of n.
    """
    result = 1
    # walk down from n to 1, multiplying as we go
    for factor in range(n, 0, -1):
        if show:
            print(factor, end='')
            # ' x ' between factors, ' = ' before the last one
            if factor > 1:
                print(' x ', end='')
            else:
                print(' = ', end='')
        result *= factor
    return result
e901e36904dddd820973820be19e3549a3946c87
107,095
import random


def random_book(json_data):
    """
    Select a random book out of all books on a given user shelf via json_data.

    :param json_data: json data provided by the Goodreads API for a given
        user shelf
    :return: string of a book's title and author
    """
    reviews = json_data["GoodreadsResponse"]["reviews"]
    total = int(reviews["@total"])
    # bug fix: the review list is 0-indexed, so valid indices are
    # 0 .. total-1; randint(1, total) could raise IndexError at the top end
    # and could never pick the first book
    book = reviews["review"][random.randint(0, total - 1)]["book"]
    title = book["title"]
    author = book["authors"]["author"]["name"]
    return f"{title} by {author}"
b36c800461240e3844c3586c75b6d2be8b18968a
107,096
def read_dataset(spark, file_path, file_type, file_configs=None):
    """
    This function reads the input data path and returns a Spark DataFrame.
    Under the hood, this function is based on generic Load functionality of
    Spark SQL.

    Parameters
    ----------
    spark
        Spark Session
    file_path
        Path to input data (directory or filename). Compatible with local
        path and s3 path (when running in AWS environment).
    file_type
        "csv", "parquet", "avro", "json". Avro data source requires an
        external package to run, which can be configured with spark-submit
        (--packages org.apache.spark:spark-avro_2.11:2.4.0).
    file_configs
        This optional argument is passed in a dictionary format as key/value
        pairs e.g. {"header": "True","delimiter": "|","inferSchema": "True"}
        for csv files. All the key/value pairs in this argument are passed as
        options to DataFrameReader, which is created using SparkSession.read.
        (Default value = {})

    Returns
    -------
    DataFrame
    """
    # fix: avoid a shared mutable default argument; None stands in for {}
    if file_configs is None:
        file_configs = {}
    return spark.read.format(file_type).options(**file_configs).load(file_path)
e9fb4d2195162ce880048fee262bfa489f741bbd
107,097
def compute_scale_down(input_size, output_size):
    """Compute scale down factor of a neural network, given input and output
    size (first dimension only)."""
    in_dim = input_size[0]
    out_dim = output_size[0]
    return out_dim / in_dim
fa1ebfa7fc0adc850121736f54ffd89cb560be93
107,100
def compute_line_col(text, size_tab):
    """Computes the line and column in which each letter of text is located.

    :param text: the text to index
    :param size_tab: how many columns a tab character advances
    :return: (at_what_line, at_what_col) lists with one entry per character,
        both 1-based
    """
    at_what_line = [0] * len(text)
    at_what_col = [0] * len(text)
    line = 1
    col = 1
    for position, letter in enumerate(text):
        at_what_line[position] = line
        at_what_col[position] = col
        if letter == "\n":
            line += 1
            col = 1
        # bug fix: these branches must be elif/else — in the original a
        # newline also fell into the trailing else, bumping col to 2 so the
        # first character of every new line was reported at column 2
        elif letter == "\t":
            col += size_tab
        else:
            col += 1
    return at_what_line, at_what_col
438ba529d67e196e9f7f14c1c4653c51994c0e85
107,101
def encode_byte_array(value: bytes) -> bytes:
    """Encodes a byte array; None is encoded as the empty byte string."""
    if value is None:
        return b""
    return value
98377bdab13bd4dce7e19efe5ab4392b1343e35f
107,102
def read_other(config):
    """Read the 'other' options from a configuration object."""
    section = 'other'
    other = {}
    if 'print models' in config[section]:
        other['print_models'] = config.getboolean(section, 'print models')
    # times are shifted by 2450000 (presumably JD -> reduced JD; confirm)
    plot_times = config.get(section, 'plot time').split()
    other['plot_time'] = [float(t) - 2450000. for t in plot_times]
    return other
d66e1f1145dea774263fdd4fe1a74fd1eb153c6c
107,104
def les(prop, value):
    """Check if a property is less than a value."""
    result = prop < value
    return result
3793119aeb6f053b3fd240ec1d74d1d562d473d4
107,108
from typing import Sequence


def extract_coordinates(dicom, field):
    """Given a field that is provided for a dicom, extract coordinates.

    The field name may carry a "from:" prefix, which is stripped before
    lookup.  Each qualifying region yields an "xmin,ymin,xmax,ymax" string;
    the caller blanks image[ymin:ymax, xmin:xmax] with these values.
    """
    field = field.replace("from:", "", 1)
    if field not in dicom:
        return []

    raw = dicom.get(field)
    # The attribute may hold a single region or a sequence of regions.
    # NOTE(review): this isinstance check is against typing.Sequence
    # (collections.abc.Sequence) — confirm a pydicom Sequence was intended.
    regions = list(raw) if isinstance(raw, Sequence) else [raw]

    required = (
        "RegionLocationMinX0",
        "RegionLocationMinY0",
        "RegionLocationMaxX1",
        "RegionLocationMaxY1",
    )
    coordinates = []
    for region in regions:
        if all(key in region for key in required):
            coordinates.append(
                "%s,%s,%s,%s"
                % (
                    region.RegionLocationMinX0,
                    region.RegionLocationMinY0,
                    region.RegionLocationMaxX1,
                    region.RegionLocationMaxY1,
                )
            )
    return coordinates
e51e4e2eefa1f2ce370e565ab9b02b28f6eeb33c
107,111
import jinja2


def get_jinja2_template(
    template_file: str,
) -> jinja2.Template:
    """Load a Jinja2 template by filename, resolved relative to the
    current working directory, with autoescaping enabled."""
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader("."),
        autoescape=True,
    )
    return environment.get_template(template_file)
b6b1d08c8e0c1db8fcb11962606bd9cfdad32760
107,113
from typing import Set
from typing import Dict
from typing import Optional
import math


def compute_minimum(open_set: Set[str], f_scores: Dict[str, float]) -> Optional[str]:
    """Return the node id in *open_set* with the smallest f_score.

    Returns None when the set is empty or every score is infinite.
    """
    best_id: Optional[str] = None
    best_score = math.inf
    for node_id in open_set:
        score = f_scores[node_id]
        if score < best_score:
            best_score = score
            best_id = node_id
    return best_id
5e88285aef816c7a3d4bc4584b76ffedb689c865
107,115
import re


def is_relative_path(s):
    """
    Return True if s looks like a relative posix path.
    Example: usr/lib/librt.so.1 or ../usr/lib
    """
    # Two accepted shapes: a path starting with a parent reference "../",
    # or a path whose first segment is a plain name followed by "/".
    # The previous pattern could never match the documented "../usr/lib"
    # form, because it required word characters immediately after "..".
    pattern = re.compile(r'^(?:\.\./.*|[^/][\w_\-]+/.*)$', re.IGNORECASE)
    return bool(pattern.match(s))
7b817849c79d92ef442da5c609df450379d1d9f8
107,121
def get_collection(collection, key, default=None):
    """
    Retrieves a key from a collection, replacing None and unset values
    with the default value. If default is None, an empty dictionary will
    be the default. If key is a list, it is treated as a key traversal.

    This is useful for configs, where the configured value is often None,
    but should be interpreted as an empty collection.
    """
    if default is None:
        default = {}
    if isinstance(key, list):
        # Traverse step by step; once a step is missing (None), stay None so
        # the caller-supplied default is applied at the end.  (Previously a
        # missing path returned {} and silently ignored a custom default.)
        out = collection
        for k in key:
            out = out.get(k, None) if out is not None else None
    else:
        out = collection.get(key, None)
    return out if out is not None else default
e6983b67ec746c6839e332471ef540b6199a861b
107,123
from typing import List
from typing import Set


def reference_descendent(
    current_package: List[str], imports: Set[str], py_package: List[str], py_type: str
) -> str:
    """
    Return a quoted reference to a python type that lives in a package
    descending from the current one, registering (in *imports*) the aliased
    import required to reach it without name conflicts.
    """
    descendent = py_package[len(current_package):]
    from_path = ".".join(descendent[:-1])
    module = descendent[-1]
    if not from_path:
        # Direct child module: no alias needed.
        imports.add(f"from . import {module}")
        return f'"{module}.{py_type}"'
    # Deeper descendent: alias with underscores to avoid shadowing.
    alias = "_".join(descendent)
    imports.add(f"from .{from_path} import {module} as {alias}")
    return f'"{alias}.{py_type}"'
47115a27317a340f9b467a9834a930512bb86979
107,124
def get_home_page(pypi_pkg):
    """Return the package's home-page URL from its PyPI metadata."""
    info = pypi_pkg["pypi_data"]["info"]
    return info["home_page"]
4ccdcc48cf6329fad6e338be88fc80150e38826c
107,127
def create_nodenames_from_branching_factors(BFS):
    """
    Create the node names of a scenario tree without building the tree.

    Parameters
    ----------
    BFS : list of integers
        Branching factors.

    Returns
    -------
    nodenames : list of str
        The node names induced by the branching factors, including leaves.
    """
    current_stage = ["ROOT"]
    nodenames = ["ROOT"]
    if len(BFS) == 1:
        # Two-stage problem: only the root is named.
        return nodenames
    for bf in BFS:
        current_stage = [
            '%s_%i' % (parent, child)
            for parent in current_stage
            for child in range(bf)
        ]
        nodenames += current_stage
    return nodenames
aa869af9a7929b63ec3a02ccb6dd955e7472efb0
107,137
def strip_all(lst):
    """ Strips leading and trailing whitespace from all strings in a list.

    Args:
        lst (list of str): The list of strings to strip.

    Returns:
        list of str: The list of stripped strings.
    """
    return [item.strip() for item in lst]
faae74c3b868d02ba4e69fde72101ef5bbb4ca3b
107,140
def DictGetNotNone(d, k, dflt):
    """Return d[k] unless the key is absent or maps to None; else the default."""
    value = d.get(k)
    if value is None:
        return dflt
    return value
19b987f3f862352e64ef26449979a08b37754eda
107,141
def _sum_frts(results:dict): """ Get total number of FRTs Arguments: results {dict} -- Result set from `get_total_frts` Returns: int -- Total number of FRTs in the nation """ if results == None: return 0 total_frts = 0 for result in results: if result['state_total']: total_frts += result['state_total'] return total_frts
44d56d9e35c2b910ea10e25bb3de7ba9ae0c6294
107,152
def get_file_ext(f):
    """
    Return the file extension of a filename: the text after the last '.'.

    When the name contains no '.', the whole name is returned (this matches
    the original rfind-based slicing behaviour).

    :param f: str, required; filename with extension
    :return: file extension
    """
    return f.rsplit('.', 1)[-1]
bfb9b77aa14325c4888511473a975839992a0f18
107,154
def filter_to_economic(df):
    """Keep GeoID plus the attainment indicator columns.

    NOTE(review): the function name says "economic" but the column patterns
    (LTHS, HSGrd, SClgA, BchD) and the original comment describe educational
    attainment — confirm the intended naming.
    """
    keep_pattern = "GeoID|P25pl|LTHS|HSGrd|SClgA|BchD"
    return df.filter(regex=keep_pattern)
0d4b78f177af6a02407e069e7b4bb89a4a14eb39
107,164
def name_sorted(indexes):
    """Return a list of the given indexes ordered by their 'name' entry."""
    def by_name(index):
        return index['name']
    return sorted(indexes, key=by_name)
5dcf99e99f98d686c4afa9def73c44f46116c6de
107,165
def _reactome_wrapper(pathways): """Filter down the human pathways. :param list[Pathway] pathways: list of pathways :rtype: list[Pathway] :return: human pathways """ return [ pathway for pathway in pathways if pathway.species.name == 'Homo sapiens' ]
e6c56438ea12ddfb4968511ea2721a6643f8b4d5
107,173
import collections


def build_hitmap(data):
    """
    Build a hitmap from the given list of addresses.

    A hitmap maps address --> number of executions.  The input can be any
    runtime trace, coverage, or profiling data one wants a hitmap for.
    An empty / None input yields an empty (default)dict.
    """
    hitmap = collections.defaultdict(int)
    if data:
        for address in data:
            hitmap[address] += 1
    return hitmap
38bea2a1e2814edd5f5a81dc1c5c54bd93fede7e
107,175
def sqrt(x):
    """For x >= 0, return non-negative y such that y*y ~= x (Newton's method).

    Raises ValueError for negative input.  Handles x == 0 explicitly (the
    original divided by a zero initial estimate) and terminates even when
    floating-point rounding makes successive estimates oscillate instead of
    becoming exactly equal.
    """
    if x < 0:
        raise ValueError("sqrt() requires a non-negative argument")
    if x == 0:
        return 0.0
    estimate = x / 2.0 or x  # guard against underflow to 0 for subnormal x
    # One Newton step puts the estimate at or above the true root; from
    # there the sequence is non-increasing until convergence.
    estimate = (estimate + x / estimate) / 2.0
    while True:
        better = (estimate + x / estimate) / 2.0
        if better >= estimate:  # stopped improving => converged
            return estimate
        estimate = better
34112bd08bfbf64699503548556380313e032320
107,177
def _get_shared_bottom_tops(top, layers): """ Get the layers that have the same bottom layer as given top layer's. This happens for most pooling layers and all concat layers, where outputs are branched to several convolution layers on top. """ tops = [] for l in layers: if set(top.bottom).intersection(l.bottom) and l.type != "Noise": tops.append(l) return tops
a05f4ecff3144e92d06ebacd8680998f13cd3fa7
107,179
def moedaBR(vlr):
    """
    -> Format a number as Brazilian currency (R$).

    :param vlr: the value to format, e.g. 32.65
    :return: the formatted string, e.g. R$ 32,65
    """
    # Format with '_' thousands separators, then swap separators to the
    # Brazilian convention ('.' thousands, ',' decimals).
    digits = f'{vlr:_.2f}'
    digits = digits.replace('.', ',').replace('_', '.')
    return f'R$ {digits}'
d846e0d57faebcc6141d730657873929ad8d0586
107,182
def create_steer_command(packer, angle_cmd, enabled, action, angleReq):
    """Build the Ford ParkAid_Data CAN message carrying a steering request.

    NOTE(review): `enabled` is accepted but unused here — confirm upstream.
    """
    signal_values = {
        "ApaSys_D_Stat": action,
        "EPASExtAngleStatReq": angleReq,
        "ExtSteeringAngleReq2": angle_cmd,
    }
    return packer.make_can_msg("ParkAid_Data", 2, signal_values)
75cfc9a881f11bfdb46de7f1dfb2c09fb0096518
107,185
def convert_to_str(value):
    """Convert booleans to "yes"/"no" and any other value to its str() form."""
    if isinstance(value, bool):
        return "yes" if value else "no"
    return str(value)
dfa9dbaa31e18ec8c1433dd81e80b578fd8fe541
107,191
def indent(level, size=2):
    """Return a whitespace string for the given nesting level (size spaces per level)."""
    return level * size * ' '
8208680da30f43b582dc25fe106e1e805a85064d
107,192
def Hex8_node_coords(n):
    """Return the local coordinates of hex8 node n (0-7)."""
    local = [
        [-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1],
        [-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1],
    ]
    return local[n]
f063cd266d31b24e84cf101ee0ae961624e0fe5d
107,196
def binary_search(target, lst):
    """
    Perform binary search over the specified list with the specified target.

    The type of the target should match the types of the elements of the
    list and the list should be sorted.

    Parameters
    ----------
    target : Any
        The element to search for in the list.
    lst : list of Any
        The list.

    Returns
    -------
    bool
        A boolean value indicating whether the target is found in the list.
    int
        The index in the list matching the target (-1 when absent).
    """
    low = 0
    high = len(lst) - 1
    # `low <= high` is required: with the previous `low < high` the search
    # never examined a window of size one, so targets sitting at the final
    # probe position (e.g. the last element) were reported missing.
    while low <= high:
        mid = low + (high - low) // 2
        if lst[mid] == target:
            return True, mid
        elif lst[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return False, -1
b7a1c8e9e9212107bef66835b7eece7e8562d9aa
107,199
def is_power_of_two(a):
    """
    Return True when the int `a` is a positive power of two.

    Powers of two are exactly the positive ints with a single bit set.
    The previous expression `not ((a-1) & a)` wrongly reported 0 as a
    power of two; the positivity guard fixes that and handles negatives.
    """
    return a > 0 and (a - 1) & a == 0
af8e7eef4c74267642345cf2d47ef6f4919c76cd
107,201
def read_data(filepath: str):
    """
    Read a comma-separated data file.

    :param filepath: the relative path to the file containing the data
    :return: a two-dimensional list where each row is one line of the
             file split on ','
    """
    with open(filepath, "r") as handle:
        return [line.split(",") for line in handle.read().splitlines()]
2f37b90c3831a9d64a7d0081083bdcfadcfcd179
107,205
def vec_dotprod(vector1, vector2):
    """
    Dot product of 2 vectors

    Parameters
    ----------
    vector1 : list
        Input vector 1.
    vector2 : list
        Input vector 2.

    Returns
    -------
    dotp_res : float
        Resulting dot product.
    """
    total = 0
    for left, right in zip(vector1, vector2):
        total += left * right
    return total
b910b2e978e46665a6e977d7686b90813ef30157
107,206