Columns: content (string, 39 to 9.28k characters), sha1 (string, 40 characters), id (int64, 8 to 710k)
def extract_email_addresses(emails, sep=","):
    """
    Transforms a string of multiple email addresses (separated by `sep`) into a list

    :param str emails: Single string of emails separated by a specific character
    :param str sep: The separator used in the emails parameter. Defaults to ','
    :return: A list of email addresses
    :rtype: list(str)
    """
    if isinstance(emails, str):
        emails = emails.split(sep)
    return [email.strip() for email in emails]
d2e04b1c36abd80856ebe47105d4a25bb4553f7c
559,804
def find_exchange_or_parameter_by_id(dataset, uuid):
    """Find exchange or parameter in ``dataset`` with id ``uuid``.

    Raises ``ValueError`` if not found in dataset."""
    for exc in dataset['exchanges']:
        if exc['id'] == uuid:
            return exc
    for param in dataset.get('parameters', []):
        if param['id'] == uuid:
            return param
    raise ValueError("Exchange id {} not found in dataset".format(uuid))
33619132cc574b8b1e23034138f22c8519e6d2f4
608,229
def field_items_text(content):
    """ Creates a comma separated string of values for a particular field """
    if len(content):
        tag = content[0].find_all(class_="field-items")[0]
        if len(tag.contents):
            return ', '.join([c.text for c in tag.contents])
        else:
            return tag.text
    return None
7356c8c4b850f8bd8dc3ad1eff38c5074275bdab
366,644
def assign_y_height_per_read(df, phased=False, max_coverage=100):
    """Assign height of the read in the per read traces

    Gets a dataframe of read_name, posmin and posmax.
    Sorting by position, and optionally by phase block.
    Determines optimal height (y coordinate) for this read

    Returns a dictionary mapping read_name to y_coord
    """
    if phased:
        dfs = df.sort_values(by=['HP', 'posmin', 'posmax'],
                             ascending=[True, True, False])
    else:
        dfs = df.sort_values(by=['posmin', 'posmax'],
                             ascending=[True, False])
    heights = [[] for i in range(max_coverage)]
    y_pos = dict()
    for read in dfs.itertuples():
        for y, layer in enumerate(heights, start=1):
            if len(layer) == 0:
                layer.append(read.posmax)
                y_pos[read.Index] = y
                break
            if read.posmin > layer[-1]:
                layer.append(read.posmax)
                y_pos[read.Index] = y
                break
    return y_pos
d2723371fda2ccff7822b5a1cec7ed88f474c370
71,947
def _BuildOutputFilename(filename_suffix):
    """Builds the filename for the exported file.

    Args:
      filename_suffix: suffix for the output file name.

    Returns:
      A string.
    """
    if filename_suffix is None:
        return 'results.html'
    return 'results-{}.html'.format(filename_suffix)
ae4087a3bcf50b0715685f5359962813e2fdc70d
55,568
import re


def snake_case(camelCase):
    """
    Function to convert a camelCaseString to a snake_case_string

    :param camelCase: camelCaseString
    :type camelCase: str
    :returns: snake_case_string
    """
    first_cap_re = re.compile('(.)([A-Z][a-z]+)')
    all_cap_re = re.compile('([a-z0-9])([A-Z])')
    return all_cap_re.sub(r'\1_\2', first_cap_re.sub(r'\1_\2', camelCase)).lower()
12dd5de3853ccc016f73942fb583336e76a90cec
131,845
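A minimal usage sketch of the two-pass regex conversion above; the sample strings are illustrative assumptions, not taken from the original source:

# Illustrative checks of the camelCase to snake_case conversion
assert snake_case("camelCaseString") == "camel_case_string"
assert snake_case("HTTPResponseCode") == "http_response_code"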
def get_cdxj_line_closest_to(datetime_target, cdxj_lines):
    """ Get the closest CDXJ entry for a datetime and URI-R """
    smallest_diff = float('inf')  # math.inf is only py3
    best_line = None
    datetime_target = int(datetime_target)
    for cdxj_line in cdxj_lines:
        dt = int(cdxj_line.split(' ')[1])
        diff = abs(dt - datetime_target)
        if diff < smallest_diff:
            smallest_diff = diff
            best_line = cdxj_line
    return best_line
35c6b1a374e153982abe6edf2d2e6625cf8e8937
72,211
def display_binary_4bit(num: int):
    """Displays the binary string representation for a given integer.

    If the number is within 4 bits, the output will be 4 binary digits;
    if the number is 8 bits then 8 binary digits will be the output.

    Examples:
    >>> display_binary_4bit(151)
    '10010111'
    >>> display_binary_4bit(11)
    '1011'
    """
    return format(num, "04b")
52cdab625a043b97e73d5e87298483268e4def76
241,995
def definition_area_ref(t, x0):
    """
    reference solution for ode with restricted definition area.

    :param t: time
    :param x0: initial value
    :return: samples of reference solution for time t
    """
    x_ref = 1. / (1. / x0 - 1. / 2. * (t ** 2))  # analytical solution of this ODE
    return x_ref
5342eb8004f7720689823a3a61709cd516de56dc
415,478
def get_min_max(ints):
    """
    Return a tuple (min, max) out of a list of unsorted integers.

    Args:
       ints(list): list of integers containing one or more integers
    """
    # If the list has no elements, return None
    if not ints:
        return None

    # If the size of the list is 1, return the single element as both min and max
    if len(ints) == 1:
        return (ints[0], ints[0])

    # Initially assign the first element as both the min and the max element
    minimum = ints[0]
    maximum = ints[0]

    for num in ints:
        # Compare the running min and max with each element in the list
        if num < minimum:
            minimum = num
        if num > maximum:
            maximum = num
    return (minimum, maximum)
6641cf0d3f9f55a487e67df590c592547c0ea5c7
518,274
def quartic_easeinout(pos):
    """ Easing function for animations: Quartic Ease In & Out """
    if pos < 0.5:
        return 8 * pos * pos * pos * pos
    fos = pos - 1
    return -8 * fos * fos * fos * fos + 1
e317c61db1a100104a4647e6db75ddde647340e7
162,015
def has_substr(s, subs):
    """Returns true if `s` contains `subs`"""
    return subs in s
db4e10151a15bf6d7e8e0e6a6aaa88b8bdabbe60
190,575
def list_from_string(list_str=None):
    """Get list of items from `list_str`

    >>> list_from_string(None)
    []
    >>> list_from_string("")
    []
    >>> list_from_string(" ")
    []
    >>> list_from_string("a")
    ['a']
    >>> list_from_string("a ")
    ['a']
    >>> list_from_string("a b c")
    ['a', 'b', 'c']
    """
    value = (list_str or '').split(' ')
    return [v for v in value if v]
949d93c83c8242210458d448ebabc0845de7e4b3
264,686
def get_namespace(type_or_context):
    """
    Utility function to extract the namespace from a type (@odata.type) or context (@odata.context)

    :param type_or_context: the type or context value
    :type type_or_context: str
    :return: the namespace
    """
    if '#' in type_or_context:
        type_or_context = type_or_context.rsplit('#', 1)[1]
    return type_or_context.rsplit('.', 1)[0]
6b84732b23c5e09731927a75b4aeeda8752750b0
74,093
import re


def isEndStoryText(string):
    """ Return True if we have reached the end of the stories. """
    match = re.search(r'^\*\*\*', string)
    if match is None:
        r = False
    else:
        r = True
    return r
36f2b8f333c4188c2c3a53a6da036308e8a8152e
686,281
def add_args(parser):
    """
    Create parser for command line utility.

    :meta private:
    """
    parser.add_argument("--model", help="Trained prediction model", required=True)
    parser.add_argument("--test", help="Test Data", required=True)
    parser.add_argument("--embedding", help="h5 file with embedded sequences", required=True)
    parser.add_argument("-o", "--outfile", help="Output file to write results")
    parser.add_argument("-d", "--device", type=int, default=-1, help="Compute device to use")
    return parser
1b9d9bb2cbe5d2d0431dc529fc8b4826852c5f11
83,885
def prepare_prediction_column(self, prediction):
    """Return the class label of highest probability."""
    return prediction.argmax(axis=-1)
f51e54ff59ac1084c147dec37eaaafa23567466f
511,088
import hashlib


def md5(*txt):
    """
    Returns the md5-checksum for `txt`.

    This can be used to test if some piece of text, for example a grammar
    source file, has changed.
    """
    md5_hash = hashlib.md5()
    for t in txt:
        md5_hash.update(t.encode('utf8'))
    return md5_hash.hexdigest()
7333fcfd53c6b0e40d6cc6797b7f10453c32a048
261,063
def _additional_env_args(additional_env):
    """Build arguments for adding additional environment vars with env"""
    if additional_env is None:
        return []
    return ['env'] + ['%s=%s' % pair for pair in additional_env.items()]
ccd0fbb99c9b133d0ea0bee3ea2acbfe8a7105d4
135,944
import fnmatch


def _is_globbed(name, glob):
    """ Determine if the name is globbed based on the glob list """
    if not glob:
        return True
    return any(fnmatch.fnmatchcase(name, i) for i in glob)
9411f2f36fc1f8ed67027a3490eec0a58929c20f
416,479
def decode_rgb565(val):
    """Decode a RGB565 uint16 into a RGB888 tuple."""
    r5 = (val & 0xf800) >> 11
    g6 = (val & 0x7e0) >> 5
    b5 = val & 0x1f
    return (
        int((r5 * 255 + 15) / 31),
        int((g6 * 255 + 31) / 63),
        int((b5 * 255 + 15) / 31)
    )
0c9a67016df686eb23282de74f663493caa305a9
687,811
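A few spot checks of the RGB565 decoding above (pure red, white, and black); the literal input values are illustrative assumptions:

# 5-bit red, 6-bit green, 5-bit blue are rescaled to the 0..255 range
assert decode_rgb565(0xF800) == (255, 0, 0)
assert decode_rgb565(0xFFFF) == (255, 255, 255)
assert decode_rgb565(0x0000) == (0, 0, 0)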
def latest_date_between(date_1, date_2):
    """Utility function to compare the dates and return latest date."""
    if date_1 > date_2:
        return date_1
    else:
        return date_2
6ef511dc35812ca2cf1842cec8a9454fe0503891
615,373
def H_generator(N, adjacency_matrix):
    """
    This function maps the given graph via its adjacency matrix to the
    corresponding Hamiltonian H_c.

    Args:
        N: number of qubits, or number of nodes in the graph, or number of
           parameters in the classical problem
        adjacency_matrix: the adjacency matrix generated from the graph
           encoding the classical problem

    Returns:
        the problem-based Hamiltonian H's list form generated from the
        adjacency matrix for the given graph
    """
    H_list = []
    # Generate the Hamiltonian H_c from the graph via its adjacency matrix
    for row in range(N):
        for col in range(N):
            if adjacency_matrix[row, col] and row < col:
                # Construct the Hamiltonian in the list form for the
                # calculation of the expectation value
                H_list.append([1.0, 'z' + str(row) + ',z' + str(col)])
    return H_list
f220146f0ed5682f77963ebed60d7b91acceaa82
548,823
def collect_hmeans(path):
    """ Collects hmean values in log file. """
    hmeans = []
    keyword = 'hmean='
    with open(path) as read_file:
        content = [line.strip() for line in read_file]
    for line in content:
        if keyword in line:
            hmeans.append(float(line.split(keyword)[-1]))
    return hmeans
9ea5cef5120cdda245e5e51ff164ca31d0872ed9
423,027
import torch


def length_penalty(sequence_lengths, penalty_factor):
    """
    Calculate the length penalty according to https://arxiv.org/abs/1609.08144

        lp(Y) = (5 + |Y|)^α / (5 + 1)^α

    Input:
        sequence_lengths: the sequence lengths of all hypotheses of size
            [batch size x beam size x vocab size]
        penalty_factor: A scalar that weights the length penalty.

    Returns:
        The length penalty factor, a tensor of shape [batch size x beam size].
    """
    return torch.div((5. + sequence_lengths) ** penalty_factor,
                     (5. + 1.) ** penalty_factor)
2b5742ee0514d1f33f94d82d5fb8c5f2d0d095eb
245,655
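A small sketch of the penalty formula above with alpha = 1.0, using an assumed [batch size x beam size] tensor of lengths (not from the original source):

lengths = torch.tensor([[7.0, 13.0]])
# lp = (5 + |Y|) / (5 + 1): gives 2.0 for length 7 and 3.0 for length 13
print(length_penalty(lengths, 1.0))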
def refbasis(reading, ref):
    """Argument: raw ADC reading, raw ADC basis.

    Returns an absolute potential based on the ADC reading against the 2.5 V
    reference (reading from pot as a value between 0 and 1023, reference
    value in V (e.g. 2.5))"""
    return round((float(reading) / float(ref)) * 2.5, 3)
91dccf15e520b3054fc6502d0346d7002cf5116d
142,797
import torch


def one_hot_to_class_indices(labels: torch.Tensor) -> torch.Tensor:
    """
    Converts one hot encoded label tensor to a tensor representing class ids.

    :param labels: One-hot encoded label tensor
    """
    if not labels.is_floating_point():
        raise TypeError("Input `label` tensor is not a float tensor")
    # Check that labels do not overlap with each other
    if not (labels.sum(dim=1) == 1.0).all():
        raise ValueError("Input `label` tensor contains multiple foreground labels for some pixels")
    # Derive class indices
    _, class_ids = labels.max(dim=1)
    return class_ids
4fbd0612787ed31248cf09238e03bacc21b765ae
467,766
def devilry_multiple_users_verbose_inline(users):
    """
    Returns the provided iterable of user objects HTML formatted.

    Perfect for formatting lists of users inline, such as when showing
    examiners or candidates on a group.
    """
    return {
        'users': users
    }
9cec004b7362bd1a513b5d02a154e2af5cbca7ef
620,442
import re


def replace_chars(token, chars_to_replace):
    """Replaces all characters in the dict with their specified replacement."""
    for c in token:
        if c in chars_to_replace:
            token = re.sub(c, chars_to_replace[c], token)
    return token
57b543f56baa9fb89ac32e3fe4f9fe6997a6df6f
413,005
def find_a_sequence_inside_another_sequence(seq, seq_to_find):
    """Check if a sequence exists in another sequence

    Example:
    seq = [1,2,4,5,6,2,3,1,2,3,4]
    seq_to_find = [4,5,6]
    find_a_sequence_inside_another_sequence(seq, seq_to_find)
    >> True

    :type seq: list
    :type seq_to_find: list
    :rtype: boolean
    """
    if len(seq) < len(seq_to_find):
        return False
    n = len(seq_to_find)
    # Compare a sliding window of length n against seq_to_find, so that a
    # failed partial match can still be found at a later starting position
    for start in range(len(seq) - n + 1):
        if seq[start:start + n] == seq_to_find:
            return True
    return False
0d115a34ecc6dde3220b878324fc4414c358b908
271,510
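A short sketch exercising the sublist search above, including a case where a partial match has to restart mid-stream; the inputs are illustrative assumptions:

assert find_a_sequence_inside_another_sequence([1, 2, 4, 5, 6, 2, 3], [4, 5, 6]) is True
assert find_a_sequence_inside_another_sequence([4, 4, 5, 6], [4, 5, 6]) is True
assert find_a_sequence_inside_another_sequence([4, 5], [4, 5, 6]) is False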
from typing import List


def arrayToFloatList(x_array) -> List[float]:
    """Convert array to list of float.

    Args:
        x_array: array[Any]

    Returns:
        List[float]
    """
    return [float(x_item) for x_item in x_array]
617377ae4bf5e55db2882451d91d6839df0bec3d
672,391
def get_attrs(obj, config_attrs):
    """
    Given an object obtains the attributes different to None.

    :param obj: object containing the attributes.
    :param config_attrs: a list of all the configurable attributes within the object.
    :return attr_data_dict: A dictionary containing all the attributes of the
        given object that have a value different to None.
    """
    attr_data_dict = {}
    for attr_name in config_attrs:
        attr_data_dict[attr_name] = getattr(obj, attr_name)
    return attr_data_dict
18348e05d799406961169dcb195531b25fe03946
693,119
def as_list(x):
    """A function to convert an item to a list if it is not, or pass it through otherwise

    Parameters
    ----------
    x : any object
        anything that can be entered into a list that you want to be converted into a list

    Returns
    -------
    list
        a list containing x
    """
    if not isinstance(x, list):
        return [x]
    else:
        return x
68415bfe9aeee8b69d5e03d0c7af3c9459803f7e
603,670
def get_count_words(document, words):
    """
    Takes in a Document object and a list of words to be counted.

    Returns a dictionary where the keys are the elements of 'words' list
    and the values are the numbers of occurrences of the elements in the
    document. Not case-sensitive.

    :param document: Document object
    :param words: a list of words to be counted in text
    :return: a dictionary where the key is the word and the value is the count

    >>> from gender_analysis import document
    >>> from gender_analysis import common
    >>> from pathlib import Path
    >>> document_metadata = {'filename': 'test_text_2.txt',
    ...                      'filepath': Path(common.TEST_DATA_PATH, 'document_test_files', 'test_text_2.txt')}
    >>> doc = document.Document(document_metadata)
    >>> get_count_words(doc, ['sad', 'and'])
    {'sad': 4, 'and': 4}
    """
    dic_word_counts = {}
    for word in words:
        dic_word_counts[word] = document.get_count_of_word(word)
    return dic_word_counts
30643557c047899a4a92514e991e15b39f6b6d90
266,984
from datetime import datetime


def format_time_from_arg(time_string):
    """
    Format a time string from args and return a datetime.

    :param time_string: String of the form HH:MM:SS.
    :type time_string: str
    :return: datetime
    """
    time_from_arg = datetime.strptime(time_string, '%H:%M:%S')
    current_time = datetime.today()
    check_time = time_from_arg.replace(year=current_time.year,
                                       month=current_time.month,
                                       day=current_time.day)
    return check_time
9339532ea13ad0024b57eaa10bd88ea9b10282f3
277,524
import torch


def fMAPE(preds, targs, *args):
    """
    Loss function

    Mean Absolute Percentage Error
    N-BEATS paper definition
    """
    return torch.mean(torch.abs((targs - preds) / targs))
a847550230a046d14effced268df4c135edabaef
228,085
def map_components_by_name(components):
    """Given a dictionary or list of components, map them into a dictionary by name"""
    if isinstance(components, dict):
        components = components.values()
    return {
        component.name: component
        for component in components
    }
2f7e4b061cbef754d979c6c12d0cb4bc67f8dc22
420,670
import warnings


def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.

    It will result in a warning being emitted when the function is used.
    Taken from http://code.activestate.com/recipes/391367-deprecated/
    """
    def newFunc(*args, **kwargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    newFunc.__name__ = func.__name__
    newFunc.__doc__ = func.__doc__
    newFunc.__dict__.update(func.__dict__)
    return newFunc
d9d3681fb3cd3c48ace662c06cbba3690391a7c1
343,939
def path_segments(path, **kwargs):
    """
    Create an array of vertices and a corresponding array of codes from a
    :class:`matplotlib.path.Path`.

    Parameters
    ----------
    path
        A :class:`matplotlib.path.Path` instance.

    Other Parameters
    ----------------
    kwargs
        See :func:`matplotlib.path.iter_segments` for details of the keyword arguments.

    Returns
    -------
    vertices, codes
        A (vertices, codes) tuple, where vertices is a numpy array of
        coordinates, and codes is a numpy array of matplotlib path codes.
        See :class:`matplotlib.path.Path` for information on the types of
        codes and their meanings.
    """
    pth = path.cleaned(**kwargs)
    return pth.vertices[:-1, :], pth.codes[:-1]
6073bcad156548dad5d6bd43a3d9c797b830de41
336,836
import re


def parse_size(size, binary=False):
    """
    Parse a human readable data size and return the number of bytes.

    Match humanfriendly.parse_size

    :param size: The human readable file size to parse (a string).
    :param binary: :data:`True` to use binary multiples of bytes (base-2) for
                   ambiguous unit symbols and names, :data:`False` to use
                   decimal multiples of bytes (base-10).
    :returns: The corresponding size in bytes (an integer).
    :raises: :exc:`InvalidSize` when the input can't be parsed.

    This function knows how to parse sizes in bytes, kilobytes, megabytes,
    gigabytes, terabytes and petabytes. Some examples:

    >>> parse_size('42')
    42
    >>> parse_size('13b')
    13
    >>> parse_size('5 bytes')
    5
    >>> parse_size('1 KB')
    1000
    >>> parse_size('1 kilobyte')
    1000
    >>> parse_size('1 KiB')
    1024
    >>> parse_size('1 KB', binary=True)
    1024
    >>> parse_size('1.5 GB')
    1500000000
    >>> parse_size('1.5 GB', binary=True)
    1610612736
    """
    def tokenize(text):
        tokenized_input = []
        for token in re.split(r'(\d+(?:\.\d+)?)', text):
            token = token.strip()
            if re.match(r'\d+\.\d+', token):
                tokenized_input.append(float(token))
            elif token.isdigit():
                tokenized_input.append(int(token))
            elif token:
                tokenized_input.append(token)
        return tokenized_input

    tokens = tokenize(str(size))
    if tokens and isinstance(tokens[0], (int, float)):
        disk_size_units_b = \
            (('B', 'bytes'), ('KiB', 'kibibyte'), ('MiB', 'mebibyte'),
             ('GiB', 'gibibyte'), ('TiB', 'tebibyte'), ('PiB', 'pebibyte'))
        disk_size_units_d = \
            (('B', 'bytes'), ('KB', 'kilobyte'), ('MB', 'megabyte'),
             ('GB', 'gigabyte'), ('TB', 'terabyte'), ('PB', 'petabyte'))
        disk_size_units_b = [(1024 ** i, s[0], s[1])
                             for i, s in enumerate(disk_size_units_b)]
        k = 1024 if binary else 1000
        disk_size_units_d = [(k ** i, s[0], s[1])
                             for i, s in enumerate(disk_size_units_d)]
        disk_size_units = (disk_size_units_b + disk_size_units_d) \
            if binary else (disk_size_units_d + disk_size_units_b)

        # Get the normalized unit (if any) from the tokenized input.
        normalized_unit = tokens[1].lower() \
            if len(tokens) == 2 and isinstance(tokens[1], str) else ''

        # If the input contains only a number, it's assumed to be the number of
        # bytes. The second token can also explicitly reference the unit bytes.
        if len(tokens) == 1 or normalized_unit.startswith('b'):
            return int(tokens[0])

        # Otherwise we expect two tokens: A number and a unit.
        if normalized_unit:
            # Convert plural units to singular units, for details:
            # https://github.com/xolox/python-humanfriendly/issues/26
            normalized_unit = normalized_unit.rstrip('s')
            for k, low, high in disk_size_units:
                # First we check for unambiguous symbols (KiB, MiB, GiB, etc)
                # and names (kibibyte, mebibyte, gibibyte, etc) because their
                # handling is always the same.
                if normalized_unit in (low.lower(), high.lower()):
                    return int(tokens[0] * k)
                # Now we will deal with ambiguous prefixes (K, M, G, etc),
                # symbols (KB, MB, GB, etc) and names (kilobyte, megabyte,
                # gigabyte, etc) according to the caller's preference.
                if (normalized_unit in (low.lower(), high.lower()) or
                        normalized_unit.startswith(low.lower())):
                    return int(tokens[0] * k)
    raise ValueError("Failed to parse size! (input {} was tokenized as {})".format(size, tokens))
fc53c07d81df5e4fb39b5dca7c9a62c4a9a6f3b2
103,609
def correct_position(first, second):
    """
    :param first: a string.
    :param second: a string.
    :return: the number of characters in first and second which are equal and
        are located at the same index.
    """
    return sum([f == s for f, s in zip(first, second)])
e374e2fa1fcd224f3d5b4bf471177af1d38e36db
236,758
def first(a, fn):
    """
    Example: first([3,4,5,6], lambda x: x > 4)

    :param a: array
    :param fn: function to evaluate items
    :return: None or first item matching result
    """
    return next((x for x in a if fn(x)), None)
72fd47eff62a406f42a0fcd6901f010c98954464
55,697
import re


def replace_string(text, old_string, new_string):
    """
    Check that `old_string` is in `text`, and replace it by `new_string`
    (`old_string` and `new_string` use regex syntax)
    """
    # Check that the target line is present
    if re.findall(old_string, text) == []:
        raise RuntimeError('Did not find expected string: %s' % old_string)
    # Return the modified text
    return re.sub(old_string, new_string, text)
93811fe03ac720baab12b3bd0733a8b05cf6b3a0
663,918
def quantize_duration(dur):
    """Quantize the duration to the nearest 500 milliseconds.

    This function was adapted from Sven Marnach's answer to this question:
    http://stackoverflow.com/questions/9810391/round-to-the-nearest-500-python

    Arguments:
        dur (int)

    Returns:
        An int
    """
    if dur <= 500:
        return 500
    elif dur >= 2000:
        return 2000
    else:
        return int(round(dur / 500.0) * 500.0)
3d3ba93e6f6ca46c2c2392a3bd18492465fa0e80
152,836
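A brief sketch of the clamping and rounding behaviour of quantize_duration above; the durations are assumed examples:

assert quantize_duration(100) == 500    # clamped to the lower bound
assert quantize_duration(1700) == 1500  # rounded to the nearest 500 ms
assert quantize_duration(2600) == 2000  # clamped to the upper bound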
import logging
import json


def is_newformat(filename):
    """
    Check if app is new format (with FolderDefinition as top object)

    :param filename
    :return: True if new format, throws exception otherwise
    """
    with open(filename, 'r') as mf:
        logging.info("Reading file: " + filename)
        content_obj = json.load(mf)
        if ((not ('type' in content_obj)) or
                not (content_obj['type'] == "FolderSyncDefinition" or
                     content_obj['type'] == "Folder")):
            raise Exception("Invalid app file format")
        else:
            if content_obj['type'] == "FolderSyncDefinition":
                return True
            else:
                return False
c169360882777dd9897870da0c90203c268ac528
577,481
import re


def structure_from_viewer(status, atlas_layer, atlas):
    """
    Get brain region info from mouse position in napari viewer.

    Extract nx3 coordinate pair from napari window status string.
    Return brainglobe (BG) structure number, name, hemisphere, and a
    "pretty" string that can be displayed for example in the status bar.

    Parameters
    ----------
    status : str
        Napari viewer status (napari.viewer.Viewer.status)
    atlas_layer : Napari viewer layer
        Layer, which contains the annotation / region information for every
        structure in the (registered) atlas
    atlas : Brainglobe atlas (bg_atlasapi.bg_atlas.BrainGlobeAtlas)

    Returns
    -------
    If any error is raised, (None, None, None, "") is returned

    structure_no : int
        BG Structure number
        Returns none if not found
    structure : str
        Structure name
        Returns none if not found
    hemisphere : str
        Hemisphere name
        Returns none if not found
    region_info : str
        A string containing info about structure and hemisphere
        Returns empty string if not found
    """
    # Using a regex, extract list of coordinates from status string
    assert hasattr(atlas_layer, "data"), "Atlas layer appears to be empty"
    assert (
        atlas_layer.data.ndim == 3
    ), f'Atlas layer data does not have the right dim ("{atlas_layer.data.ndim}")'

    try:
        coords = re.findall(r"\[\d{1,5}\s+\d{1,5}\s+\d{1,5}\]", status)[0][1:-1]
        coords_list = coords.split()
        map_object = map(int, coords_list)
        coord_list = tuple(map_object)
    except (IndexError, ValueError):
        # No coordinates could be extracted from status
        return None, None, None, ""

    # Extract structure number
    try:
        structure_no = atlas_layer.data[coord_list]
    except IndexError:
        return None, None, None, ""

    if structure_no in [0]:  # 0 is "Null" region
        return None, None, None, ""

    # Extract structure information
    try:
        structure = atlas.structures[structure_no]["name"]
    except KeyError:
        return None, None, None, ""

    # ... and make string pretty
    region_info = []
    for struct in structure.split(","):
        region_info.append(struct.strip().capitalize())
    hemisphere = atlas.hemisphere_from_coords(
        coord_list, as_string=True
    ).capitalize()
    region_info.append(hemisphere)
    region_info = " | ".join(region_info)

    return structure_no, structure, hemisphere, region_info
a0f92a90cf13b1bc01081167f95c248a1b74c046
690,208
def _bowtie2_args_from_config(config):
    """Configurable high level options for bowtie2.
    """
    qual_format = config["algorithm"].get("quality_format", "")
    if qual_format.lower() == "illumina":
        qual_flags = ["--phred64-quals"]
    else:
        qual_flags = []
    num_cores = config["algorithm"].get("num_cores", 1)
    core_flags = ["-p", str(num_cores)] if num_cores > 1 else []
    return core_flags + qual_flags
c40a518fd4ec5b98f36c04e0f93a673bef76a4cb
417,860
from typing import Any
from typing import List
from typing import Optional


def get_path(obj: Any, path: List[str]) -> Optional[Any]:
    """
    Get a nested value by a series of keys inside some nested indexable
    containers, returning None if the path does not exist, avoiding any errors.

    Args:
        obj: any indexable (has __getitem__ method) obj
        path: list of accessors, such as dict keys or list indexes

    Examples:
        get_path([{'x': {'y': 1}}], [0, 'x', 'y']) -> 1
    """
    for key in path:
        try:
            obj = obj[key]
        except Exception:
            return None
    return obj
58a92a461c9ce20eaca2f879e8c9a90d7546f4fc
519,096
def ensure_evidence(badge_data: dict) -> dict:
    """Given badge_data, ensure 'evidence' key exists with list value"""
    if 'evidence' not in badge_data:
        badge_data['evidence'] = [{}]
    return badge_data
755eb002d1b8c79e000c785c4049c29b5e30975f
313,189
def _find_new_name(old_name, node_names):
    """Disambiguate a node's name from a list of existing node names by adding
    successively larger integers.
    """
    count = 0
    new_name = old_name + "." + str(count)
    while new_name in node_names:
        count += 1
        new_name = old_name + "." + str(count)
    return new_name
37f899fc5ffc4983709aa893be7332f200287819
396,192
def points_to_bbox(p):
    """ from a list of points (x,y pairs) return the lower-left xy and upper-right xy """
    llx = urx = p[0][0]
    lly = ury = p[0][1]
    for x in p[1:]:
        if x[0] < llx:
            llx = x[0]
        elif x[0] > urx:
            urx = x[0]
        if x[1] < lly:
            lly = x[1]
        elif x[1] > ury:
            ury = x[1]
    return (llx, lly, urx, ury)
5303bd02af4ab8e14929b43723c5591832c66b92
466,599
def to_int(x, error=0):
    """Convert argument to int."""
    try:
        return int(x)
    except (ValueError, TypeError):
        return error
2c363a1d9125e396a76007d9986748b98130e1ab
12,142
def merge_components(local_component, target_component):
    """
    Find resulting component from merging the first (local) component into the second.

    The resulting component will maintain the parent identifier of the target component.
    """
    local_bounds, local_center, local_size, local_parent = local_component
    target_bounds, target_center, target_size, target_parent = target_component

    merged_bounds = [
        min(local_bounds[0], target_bounds[0]),
        max(local_bounds[1], target_bounds[1]),
        min(local_bounds[2], target_bounds[2]),
        max(local_bounds[3], target_bounds[3]),
        min(local_bounds[4], target_bounds[4]),
        max(local_bounds[5], target_bounds[5]),
    ]
    merged_size = local_size + target_size
    # use weighted averaging to find center. the center point is not guaranteed to occur at a
    # position containing the component (eg if it is "C" shape)
    merged_center = (
        local_center * local_size + target_center * target_size
    ) / merged_size

    return merged_bounds, merged_center, merged_size, target_parent
bdb9b437d0981f46b676c427034b7716c8727560
36,799
def split(s, esc='\\', sep=' '):
    """
    enhance the split func of str, support escape

    default '\' single-back-slash

    >>> split('www.abc.com\.a', sep='.')
    ['www', 'abc', 'com.a']
    """
    l = []
    ss = []
    for c in s:
        if c == esc:
            l.append(c)
        else:
            [ss.append(esc) for i in range(len(l) // 2)]
            if c == sep:
                if len(l) % 2 != 0:
                    # is escaped char
                    ss.append(sep)
                else:
                    ss.append(' ')
                l.clear()
            else:
                ss.append(c)
    return ''.join(ss).split(' ')
13b835d5ddb22609c5e40ea669d3ffeec24b59c8
267,323
import re


def axes_from_cmd(cmd, alt_names=None):
    """
    Get axes name from command string

    :param cmd: str
    :param alt_names: dict {name_in_cmd: name_in_file}
    :return: str
    """
    alt_names = {} if alt_names is None else alt_names
    cmd = cmd.split()
    axes = cmd[1]
    if axes in alt_names:
        axes = alt_names[axes]

    # These are specific to I16...
    if axes == 'hkl':
        if cmd[0] == 'scan':
            hstep, kstep, lstep = cmd[8:11]
        elif cmd[0] == 'scancn':
            hstep, kstep, lstep = cmd[2:5]
        else:
            raise Warning('Warning unknown type of hkl scan')

        if float(re.sub("[^0-9.]", "", hstep)) > 0.0:
            axes = 'h'
        elif float(re.sub("[^0-9.]", "", kstep)) > 0.0:
            axes = 'k'
        else:
            axes = 'l'
    elif axes == 'energy':
        axes = 'energy2'
    elif axes == 'sr2':
        axes = 'azimuthal'  # 'phi' in pre-DiffCalc scans
    elif axes == 'th2th':
        axes = 'delta'
    elif axes == 'ppp_energy':
        axes = 'ppp_offset'
    return axes
443acce54aaec1f9bf1fda3031464bcc1566399e
646,684
def _between_lines(lines, beg, end, single=False):
    """
    Extract lines from `beg` to `end`

    :param lines: content list which is split for each newline
    :param beg: starting point of parsing
    :param end: end point of parsing
    :param single: whether there are multiple sections to exist
    :return: extracted lines
    """
    # Initialize variables
    start_idx = -1
    end_idx = -1
    indices = []

    # Parsing
    for idx, line in enumerate(lines):
        if line.startswith(beg):
            start_idx = idx
        if line.startswith(end) and start_idx != -1:
            end_idx = idx
        if start_idx != -1 and end_idx != -1:
            assert end_idx >= start_idx, "must be 'end_idx >= start_idx'"
            indices.append([start_idx, end_idx])
            start_idx = -1
            end_idx = -1
            if single:
                break

    return [lines[item[0]:item[1]] for item in indices]
6e9fd61a5415e60acfc6562b2feb66b06f909391
493,384
import re


def remove_html_elements(string: str) -> str:
    """Removes any html elements and attributes from any string passed"""
    regex = re.compile("<.*?>")
    clean_text = re.sub(regex, "", string)
    return clean_text
2aaca8e085ef73bc765adb48692cdab823998236
407,392
def _indexing(x, indices):
    """
    :param x: array from which indices has to be fetched
    :param indices: indices to be fetched
    :return: sub-array from given array and indices
    """
    # np array indexing
    if hasattr(x, 'shape'):
        return x[indices]

    # list indexing
    return [x[idx] for idx in indices]
8061aee1464ffef0bc13eab0c59b472fd5dc140c
115,670
def terms_accept_url() -> str:
    """Path to accepted terms."""
    return '/terms/accept'
44f3409491591eb5284d249d34da4cf06f150a3a
64,256
def reproject(link, node, epsg):
    """
    reproject link and node geodataframes

    for nodes, update X and Y columns
    """
    link = link.to_crs(epsg=epsg)
    node = node.to_crs(epsg=epsg)

    node["X"] = node["geometry"].apply(lambda p: p.x)
    node["Y"] = node["geometry"].apply(lambda p: p.y)

    return link, node
5ead99d074ea1d643f598d790b083dda511caa1a
8,345
def dicom_age_in_years(age_string):
    """Helper function to extract DICOM age into float

    Parameters
    ----------
    age_string : str
        The age string as defined in the DICOM standard, see
        http://dicom.nema.org/medical/dicom/current/output/chtml/part05/sect_6.2.html

    Returns
    -------
    float or None
        The age or None if any conversion went wrong.
    """
    try:
        units = age_string[-1]
        value = age_string[0:-1]
    except IndexError:
        return
    try:
        age = float(value)
    except ValueError:
        return
    if units == "Y":
        # default
        pass
    elif units == "M":
        age /= 12
    elif units == "W":
        age /= 52
    elif units == "D":
        age /= 365
    else:
        # unknown
        return
    return age
f537617fc7b3d9481019c2c684097e917d8bc780
278,028
def _parse_expected_tuple(arg, default=tuple()):
    """
    Parse the argument into an expected tuple.

    The argument can be None (i.e., using the default), a single element
    (i.e., a length-1 tuple), or a tuple
    """
    try:
        _ = iter(arg)
    except TypeError:
        tpl = tuple(default) if arg is None else (arg,)
    else:
        tpl = tuple(arg)
    return tpl
39b4925ffc7af098f8a8c9746ee246a1ea944c5b
446,687
def get_iwp_label_key(iwp_label):
    """
    Retrieves a key that locates the supplied IWP label within the underlying
    dataset. The key returned locates the label both temporally and spatially.

    Takes 1 argument:

      iwp_label - IWP label to locate.

    Returns 1 value:

      label_key - Tuple identifying iwp_label's location within a dataset.
                  Comprised of (time step index, z index).
    """
    return (iwp_label["time_step_index"], iwp_label["z_index"])
e5814e14f3d1b4c40074e4429ae5729ea7087321
684,160
def any_a_in_b(a, b):
    """Return true if any element *s* of <a> also exists in <b>."""
    return any(s in b for s in a)
1573f5b446768894c89a12840300be1268041504
466,728
import importlib


def load_module(module_path):
    """
    Loads a python module.

    ex: module_path = "confidant.authnz.rbac:no_acl"

    Will load the module confidant.authnz.rbac and return the function no_acl
    """
    module_name, function_name = module_path.split(':')
    module = importlib.import_module(module_name)
    function = getattr(module, function_name)
    return function
2536293f8b2556b91fdd34c9f81bf7f29c8ee7d1
464,207
def find_min_id_ind(tmp_sel_anch_dict):
    """
    Finds the index of minimum ID from the selected anchors' IDs.
    """
    selected_id = list(tmp_sel_anch_dict['AnchorID'])
    min_id_ind = selected_id.index(min(selected_id))
    return min_id_ind
36669853ead3531b3925c95c8ed669b1a0058b71
536,234
from typing import List
from typing import Any


def contains_sublist(list_: List[Any], sublist: List[Any]) -> bool:
    """Determine if a `list` contains a `sublist`.

    :param list_: list to search for the `sublist` in.
    :param sublist: Sub list to search for.
    :return: True if `list` contains `sublist`.
    """
    # Adapted from: https://stackoverflow.com/a/12576755
    if not sublist:
        return False
    for i in range(len(list_)):
        if list_[i] == sublist[0] and list_[i : i + len(sublist)] == sublist:
            return True
    return False
3c6a9cd063f19b9d712be94970f29f017af0a89f
421,946
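A minimal sketch of contains_sublist above on assumed inputs, showing that only contiguous, ordered matches count:

assert contains_sublist([1, 2, 3, 4], [2, 3]) is True
assert contains_sublist([1, 2, 3, 4], [3, 2]) is False
assert contains_sublist([1, 2, 3, 4], []) is False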
def DictToEnvVarsProperty(env_vars_type_class=None, env_vars=None):
    """Sets environment variables.

    Args:
      env_vars_type_class: type class of environment variables
      env_vars: a dict of environment variables

    Returns:
      A message with the environment variables from env_vars
    """
    if not env_vars_type_class or not env_vars:
        return None
    return env_vars_type_class(additionalProperties=[
        env_vars_type_class.AdditionalProperty(key=key, value=value)
        for key, value in sorted(env_vars.items())
    ])
52f4663376d18cba6ceca9116668d35b658db81a
406,714
def url_join(parts):
    """ Take various parts of a url and join them """
    return "/".join(map(lambda part: part.strip('/'), parts))
778377572592a814913f734000f57fc080c586f9
99,506
def is_cap(word: str) -> bool:
    """Return True if the word is capitalized, i.e. starts with an
    uppercase character and is otherwise lowercase"""
    return word[0].isupper() and (len(word) == 1 or word[1:].islower())
78787ca4ed8a4c70b0b19a0c92aa6ae9d72ee2f1
656,629
import re


def proto_should_have_icon(f):
    """Check if this PROTO file should have an icon.

    Hidden and deprecated PROTO nodes don't need an icon.
    """
    file = open(f, 'r')
    row = file.readlines()
    for line in row:
        if re.match(r'^#[^\n]*tags[^\n]*:[^\n]*hidden', line) or \
                re.match(r'^#[^\n]*tags[^\n]*:[^\n]*deprecated', line):
            return False
        if not line.startswith('#'):
            return True
2c95a137779bddb5bee2b96655e35ccaf32d82b0
288,516
import requests


def check_orb_response(response):
    """Check Orbital API response."""
    try:
        response.raise_for_status()
        return True
    except requests.exceptions.HTTPError as err:
        r_json = response.json()
        print("\nFAILED:")
        print('Response Error: ', err)
        print(str(r_json) + "\n")
        return False
3665bb08674e637c627dc79564e0976f2df292e4
335,719
import random


def random_policy(state):
    """ Ignore the state, move randomly. """
    action = {
        'command': random.randint(0, 1)
    }
    return action
675e489f3490ebe4e0279c1704536ba6282767f9
588,780
def startswith(this, that):
    """Returns **True** if *this* or *that* starts with the other."""
    if len(this) < len(that):
        return that.startswith(this)
    else:
        return this.startswith(that)
5f68940f0656507c64f7906f60aaa8834191fa8c
486,034
def general_spatial_relation(sp_el1, sp_el2, f):
    """General function for computing spatial relations with a given function f.

    Parameters
    ----------
    sp_el1: optional
        the spatial information of element 1.
    sp_el2: optional
        the spatial information of element 2.
    f: function
        function to compute spatial relation between spatial objects.

    Returns
    -------
    rel: float
        number of the relation between the spatial objects.
    """
    rel = f(sp_el1, sp_el2)
    return rel
92e4dc07778212b43996df23b41a36fa3047ce4f
66,477
def _format_date_time(date, time):
    """
    Helper func to format date & time strings from the case coords file

    Parameters
    ----------
    date : str
        Format: MMDDYYYY
    time : str
        Format: HHMM

    Returns
    -------
    str
        Format: MM-DD-YYYY-HH:MM
    """
    month = date[:2]
    day = date[2:4]
    year = date[-4:]
    hour = time[:2]
    mint = time[2:]

    return '{}-{}-{}-{}:{}'.format(month, day, year, hour, mint)
f9b88d02026cb6c0b9ba339d65e9adff7d06f539
351,546
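A one-line sketch of the date/time reformatting above; the input strings are illustrative assumptions:

assert _format_date_time('07041776', '1230') == '07-04-1776-12:30'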
import re


def isfilepath(value):
    """
    Return whether or not given value is Win or Unix file path and returns its type.

    If the value is Win or Unix file path, this function returns
    ``True, Type``, otherwise ``False, Type``.

    Examples::

        >>> isfilepath('c:\\path\\file (x86)\\bar')
        True, 'Win'

        >>> isfilepath('/path')
        True, 'Unix'

        >>> isfilepath('c:/path/file/')
        False, 'Unknown'

    :param value: string to validate file path
    """
    win_path = re.compile(r'^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$')
    nix_path = re.compile(r'^(/[^/\x00]*)+/?$')

    if win_path.match(value):
        # check windows path limit see:
        # http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
        if len(value[3:]) > 32767:
            return False, 'Win'
        return True, 'Win'
    elif nix_path.match(value):
        return True, 'Unix'
    return False, 'Unknown'
56a423a3b27df5ad0e66291db0bd2698fef5a8b5
16,996
def get_sets(df_raw, num=0):
    """
    Group raw data into measurement sets conducted at the same temperature.

    Parameters
    ----------
    df_raw : pandas.DataFrame
        Raw measurement data.
    num : int, optional
        Number of measurement points per temperature level. Default is 0,
        which means that the number of measurement points within a set is
        evaluated based on the provided frequency range of the measurement
        points. The first occurrence of the maximum frequency is used to
        identify the number of measurement points per temperature level.

    Returns
    -------
    df_raw : pandas.DataFrame
        Contains additional column `Set` compared to input data frame.

    Notes
    -----
    This function is intended to be used in combination with input files
    provided by the Eplexor software. Hence, it is limited to measurements
    in the frequency domain.
    """
    iset = -1
    lset = []
    if num == 0:
        # Identify measurement sets based on frequency range
        num = df_raw['f_set'].idxmax() + 1
    for i in range(df_raw.shape[0]):
        if i % num == 0:
            iset += 1
        lset.append(iset)
    df_raw['Set'] = lset
    return df_raw
eb7e5faf1cb91fffe44a1cb69d4fd50f08bc8391
438,814
def calc_sales_price(price):
    """
    Calculate the discounted price.

    :param price: price before the discount
    :return: price after the discount
    """
    if price < 0:
        raise ValueError("price should not < 0!")
    sales = 0.9 * price
    return sales
743a1e9d535c5f65d5db2b2a512996686104d6bd
211,813
def uncapitalize(string: str):
    """De-capitalize first character of string

    E.g. 'How is Michael doing?' -> 'how is Michael doing?'
    """
    if len(string):
        return string[0].lower() + string[1:]
    return ""
1a294f171d16d7a4c41fb0546feca3c03b7ae37a
1,430
def _calcgbar(Ip, Vclamp, A=1):  # Unused
    """
    Estimate (lower bound) the cell's maximum conductance from its peak current

    Ip     := Peak current [nA]
    Vclamp := Clamp Voltage [mV]
    A      := Cell surface area [um^2]
    return gbar [pS/um^2]
    """
    Gmax = Ip / Vclamp  # Maximum conductance for the whole cell
    gbar = Gmax / A     # Maximum conductance pS / um^2
    return gbar * (1e6)
30d42870e928286052a446867279375430ba161f
220,347
def get_obsid_beam_dir(obsid, beam, mode='happili-01'):
    """
    Get directory path for obsid + beam

    Default assumes happili-01 setup / access to 02-04
    Can also run in happili-05 mode where everything is local

    Parameters
    ----------
    obsid : str
        Obsid provided as a string
    beam : int
        beam provided as an int
    mode : string
        Running mode - happili-01 or happili-05
        Default is happili-01

    Returns
    -------
    obsid_beam_dir : str
        path to obsid / beam
    """
    if mode == 'happili-05':
        obsid_beam_dir = '/data/apertif/{0}/{1:02d}'.format(obsid, beam)
    else:
        # if not happili-05 mode, default to happili-01 mode
        if beam < 10:
            obsid_beam_dir = '/data/apertif/{0}/{1:02d}'.format(obsid, beam)
        elif beam < 20:
            obsid_beam_dir = '/data2/apertif/{0}/{1:02d}'.format(obsid, beam)
        elif beam < 30:
            obsid_beam_dir = '/data3/apertif/{0}/{1:02d}'.format(obsid, beam)
        else:
            obsid_beam_dir = '/data4/apertif/{0}/{1:02d}'.format(obsid, beam)

    return obsid_beam_dir
aafed805e36772a0c665e25ff6d1c59229052fb4
525,149
import gzip


def gz_open(fname, omode):
    """
    Use :py:mod:`gzip` library to open compressed files ending with .gz.

    Parameters
    ----------
    fname : str
        Path to file to open.
    omode : str
        String indicating how the file is to be opened.

    Returns
    -------
    file
        File Object.
    """
    if fname.endswith(".gz"):
        return gzip.open(fname, omode)
    return open(fname, omode)
1016ef102586d36e8949d4bad36d79ec13af1e14
64,300
import csv


def get_csv_collection(filename, **kwargs):
    """
    Loads a record collection from a CSV file.

    The CSV file MUST include columns for Artist, Title, and Year

    Arguments:
        filename (str) - CSV filename
        **kwargs (dict) - Optional kwargs:
            skip=0, don't load the first <skip> rows

    Returns:
        list - List of record dicts
    """
    skip = kwargs.get('skip', 0)
    collection = []
    with open(filename, newline='') as collection_file:
        collection_dict = csv.DictReader(collection_file)
        count = 1
        for record in collection_dict:
            if count <= skip:
                count += 1
                continue
            collection.append({'artist': record['Artist'],
                               'release_title': record['Title'],
                               'year': record['Year'],
                               'type': 'release',
                               'country': 'US'})
            count += 1
    return collection
72761b39b200bb94e9759ffc63a1cf52beb7c264
89,009
def naorthreshold(lmbda, mu, costofbalking):
    """
    Function to return Naor's threshold for optimal behaviour in an M/M/1 queue.

    This is taken from Naor's 1969 paper: 'The regulation of queue size by Levying Tolls'

    Arguments:
        lmbda - arrival rate (float)
        mu - service rate (float)
        costofbalking - the value of service, converted to time units. (float)

    Output:
        A threshold at which optimal customers must no longer join the queue (integer)
    """
    n = 0  # Initialise n
    center = mu * costofbalking  # Center mid point of inequality from Naor's paper
    rho = lmbda / mu
    while True:
        LHS = (n * (1 - rho) - rho * (1 - rho ** n)) / ((1 - rho) ** 2)
        RHS = ((n + 1) * (1 - rho) - rho * (1 - rho ** (n + 1))) / ((1 - rho) ** 2)
        if LHS <= center and center < RHS:
            return n
        n += 1
d43c11a8c0fa98102cfeabfc98de6076c0d97f98
636,396
import json


def readPalette(filePath):
    """Read the contents of a palette file."""
    with open(filePath, 'r') as f:
        return json.loads(f.read())
19b85df302b4af1e652908a38ddd8c04e23b890a
178,990
def is_default_extra(extra: bytes) -> bool:
    """Checks if the tx_extra follows the standard format of:
    0x01 <pubkey> 0x02 0x09 0x01 <encrypted_payment_id>

    :param extra: Potential default extra bytes.
    :type extra: bytes

    :return: True if the passed in bytes are in the default tx_extra format
    :rtype: bool
    """
    if len(extra) != 1 + 32 + 1 + 1 + 1 + 8:
        return False
    if (
        extra[0] == 0x01
        and extra[33] == 0x02
        and extra[34] == 0x09
        and extra[35] == 0x01
    ):
        return True
    return False
c84eb2324723224397028a0ab8d9f70610e4b682
392,224
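A short sketch of the default tx_extra layout checked above, built from an assumed all-zero pubkey and payment id (illustrative values only):

# tag byte, 32-byte pubkey, nonce header 0x02 0x09 0x01, 8-byte encrypted payment id
good = bytes([0x01]) + bytes(32) + bytes([0x02, 0x09, 0x01]) + bytes(8)
assert is_default_extra(good) is True
assert is_default_extra(b"\x00" * 44) is False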
def plain_text_to_html(string):
    """Convert plain text to HTML markup."""
    string = string.replace("&", "&amp;")
    return string
8dfaf9287ec8f765d77c241e2161c1729cb8c422
624,374
def read_input_samples(data_dict):
    """
    Function that takes only the input property from the dictionary

    Ignores train or test and just takes all inputs as equal

    :param data_dict: data dictionary with the full file input structure loaded
    :return: a dictionary of just input values

    >>> dict = {'train': [{'input': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 'output': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]}, {'input': [[0, 0, 0, 8, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 'output': [[0, 0, 0, 8, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 4, 0, 4, 0]]}], 'test': [{'input': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 'output': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 7, 0, 0]]}]}
    >>> read_input_samples(dict)
    {0: [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 1: [[0, 0, 0, 8, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], 2: [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]}
    """
    inputs_dict = {}
    i = 0
    for train_inputs in data_dict["train"]:
        inputs_dict[i] = train_inputs["input"]
        i += 1
    for test_inputs in data_dict["test"]:
        inputs_dict[i] = test_inputs["input"]
        i += 1
    return inputs_dict
edfafe34b2b005be13105d79153683a7a9c2a1d2
238,705
def _table_create_query_var(model):
    """Returns the name of the variable to hold the model's table creation query"""
    return '{}_TABLE_CREATE'.format(model.get_table_name().upper())
fa6c37ee711df16a2cb2cf20db9575ee614f8b5a
567,178
def etl_path(ctx, branch: str):
    """ append branch to container path """
    return ctx.obj['container_path'] + '\\' + branch if branch else ctx.obj['container_path']
4efa07bc9a2f8718a5de200c4ba1fbd174c06a69
453,563
def year_quarter(target_date_obj):
    """Returns the yearly quarter (coded as 0,1,2,3) in which a given target
    datetime object lies.

    Args:
        target_date_obj: target date as a datetime object
    """
    m = target_date_obj.month
    if m >= 12 or m <= 2:
        # December - February
        return 0
    elif m >= 3 and m <= 5:
        # March - May
        return 1
    elif m >= 6 and m <= 8:
        # June - August
        return 2
    elif m >= 9 and m <= 11:
        # September - November
        return 3
    else:
        raise ValueError(f"Invalid month {m}")
fee09a686eff67127972918d7f0c31063c1e6015
138,067
import gzip
import pickle


def load_pickle_gz(filename):
    """Loads a compressed object from disk
    """
    with gzip.GzipFile(filename, 'rb') as fin:
        obj = pickle.load(fin)
    return obj
506231dc173fd498946bb1df590193a59184e19d
458,330
def compose(f, g):
    """
    Return the composition of one-variable functions: `f \circ g`

    See also :func:`self_compose()` and :func:`nest()`

    INPUT:

    - `f` -- a function of one variable
    - `g` -- another function of one variable

    OUTPUT:

    A function, such that compose(f,g)(x) = f(g(x))

    EXAMPLES::

        sage: def g(x): return 3*x
        sage: def f(x): return x + 1
        sage: h1 = compose(f,g)
        sage: h2 = compose(g,f)
        sage: _ = var ('x')
        sage: h1(x)
        3*x + 1
        sage: h2(x)
        3*x + 3

    ::

        sage: _ = function('f g')
        sage: _ = var ('x')
        sage: compose(f,g)(x)
        f(g(x))
    """
    return lambda x: f(g(x))
00cb40c299778f8f867b7c1419842910975ca368
484,577
def num_param_Gauss(d):
    """ count number of parameters for Gaussian d-dimension.

    input
        d [int] : dimension of data
    """
    return 0.5 * d * (d + 3.0)
84a967bff81277ec8c391a5a504e3a8a6747a22a
661,100
import re


def stripout(txt):
    """
    Replace white space and non \w characters in text

    :param txt: some text to clean
    :return: clean text
    """
    txt = txt.replace(' ', '_')
    return re.sub(r'\W', '', txt)
38bf70d52662b285553b399fad73901d64a6024e
356,877
def entry_exists(entry):
    """
    Check if entry exists.

    :param entry: Text of entry, scraped from the wiki.
    :type entry: str
    """
    return "This page doesn't exist yet!" not in entry
ddbaa0998220b79a8f7b09df884b7d0a074ae47f
541,323
def get_hk_variable_names(my_df):
    """
    This procedure will return a list of variables in the housekeeping file.

    Parameters
    ----------
    my_df: ACT Dataset
        The dataframe to get the variable names from

    Returns
    -------
    var_names: list
        The names of each variable in the file.
    """
    return [my_str for my_str in my_df.variables.keys()]
54c2034306c77ab44560ef58c78065c8c7adecfb
120,098
import json


def parseOutput(outputMsg):
    """
    parse the full stdOut from call to endUser_sim to get the returned result
    """
    resultStr = outputMsg.split('\n')[-2]
    resultStr = resultStr.replace("'", '"')  # make single quotes double
    resultStr = resultStr.replace('True', '"True"')  # put quotes around 'True'
    resultDict = json.loads(resultStr)
    return resultDict['testResult']
f23b5f8b8dcfe23752c841fb64170d68e5195bea
533,991
def get_session_length(row):
    """ Calculate length of session in seconds"""
    time_delta = row['session_end'] - row['session_start']
    session_length = time_delta.total_seconds()
    return session_length
f6ce61db58635a5da494707ed7d4b651df250fbd
96,060