content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def alignedLoadFromStoredValue(load_size_bytes, stored_value, stored_size_bytes, load_endness, store_endness):
    """
    Return the correct data when loading from the given stored_value, when sizes may be different.
    Assumes the load and store were to the _same address_.
    Also assumes load_size <= stored_size (otherwise we don't have all the data needed).
    """
    # Mixed-endianness load/store is unsupported; fail loudly rather than guess.
    if load_endness != store_endness:
        raise ValueError("not yet implemented: load and store have different endianness")
    # len(stored_value) is in bits here — presumably a claripy-style bitvector; TODO confirm.
    if len(stored_value) != stored_size_bytes * 8:
        raise ValueError("expected stored_value to be size {} bytes, got size {} bits".format(stored_size_bytes, len(stored_value)))
    # Exact-size load: the stored value is already the answer.
    if load_size_bytes == stored_size_bytes:
        return stored_value
    # This is mostly a guess at what the correct way to do this is
    # Note many things interacting here: endianness of the load, endianness of the store,
    # the fact that angr reverses bitvectors on loads and stores depending on endianness,
    # the comment on claripy.ast.bv.BV that for an AST 'a', a[31] is the LEFT-most (or most-significant) bit,
    # the fact that the first argument to get_bytes() is always in big-endian order regardless of system endianness...
    return stored_value.get_bytes(0, load_size_bytes)
2178c292428245af933f7c83e681c331a1fa68d3
23,333
import functools

def stateguard(action_guard):
    """
    Decorator guard refines the criteria for picking an action to run by
    stating a function with THE SAME signature as the guarded action,
    returning a boolean (True if action allowed).

    If the specified function is unbound or a lambda expression, you must
    account for 'self', e.g. 'lambda self, a, b: a > 0'.
    """
    def wrap(action_method):
        @functools.wraps(action_method)
        def guard_wrapper(self, *args):
            # Guard rejected: report (did-not-run, still-enabled, no-output).
            if not action_guard(self):
                return (False, True, ())
            return action_method(self, *args)
        return guard_wrapper
    return wrap
988c165fd4496f7c4f4d16a3d213fa98718a1d96
23,334
import copy

def get_true_abstract_mask_spriteworld(sprites, config, action=(0.5, 0.5)):
    """Return a mask with interactions for the next transition given true sprites.

    E.g., returns [[1,0,0],[0,1,0],[0,0,1]] for 3 sprites.
    """
    # Step a deep copy so the caller's sprites stay untouched.
    cloned_sprites = copy.deepcopy(sprites)
    config['action_space'].step(action, cloned_sprites)
    return config['renderers']['mask_abstract'].render(cloned_sprites)
4dbb0f6c65d52684077d2a97ee668e02ec4eb31f
23,335
import glob
import os

def subdirectory_basenames(parent_directory):
    """Return the basenames of the immediate subdirectories of parent_directory.

    Args:
        parent_directory (str): path to the parent directory; a trailing
            separator is optional.

    Returns:
        list[str]: basename of each immediate subdirectory.
    """
    # os.path.join handles a parent_directory given with or without a
    # trailing slash; plain concatenation ('parent' + '*/') globbed the
    # wrong pattern when the slash was missing.
    pattern = os.path.join(parent_directory, '*/')
    return [os.path.basename(subdir.rstrip('/')) for subdir in glob.glob(pattern)]
f0df3c304ec7146d30ee75b759a65bbf678c96be
23,336
def VLT_distortion(measured_wave, cutoff=10000., slope1=0.06, intercept1=-100.0, slope2=0.160, intercept2=-1500.0):
    """Telescope dependent distortion function for the VLT sample.

    Piecewise linear: (slope1, intercept1) below cutoff, (slope2,
    intercept2) at or above it.
    """
    below_cutoff = measured_wave < cutoff
    slope = slope1 if below_cutoff else slope2
    intercept = intercept1 if below_cutoff else intercept2
    return measured_wave * slope + intercept
b2fd724e217e0a0ee11ce0df9d872a7bdbba069d
23,337
def lift(a):
    """Lift an element a of Q[y] / (y ** (n // 2) + 1) up to Q[x] / (x ** n + 1).

    The lift of a(y) is simply a(x ** 2): coefficient i of a becomes
    coefficient 2 * i of the result; odd-index coefficients are zero.

    Input:
    a           A polynomial of Q[y] / (y ** (n // 2) + 1)

    Output:
    res         The lift of a in Q[x] / (x ** n + 1)

    Format:     Coefficient
    """
    res = [0] * (2 * len(a))
    # Spread the coefficients onto the even positions.
    res[::2] = a
    return res
eff5fd7ac4e63ac1887d99ee0c45e2820262bf59
23,338
def get_jaccard_similarity(s, t):
    """Compute the Jaccard similarity |s ∩ t| / |s ∪ t| of two sets.

    Two empty sets are treated as identical (similarity 1.0); the
    original raised ZeroDivisionError on an empty union.
    """
    union = s.union(t)
    if not union:
        return 1.0
    return len(s.intersection(t)) / len(union)
12c17780e3ca51b9948b7b89b8b22f89aed6690d
23,343
def strip_list_items(items):
    """Apply str.strip to all items in a list and return the new list."""
    return [item.strip() for item in items]
f34e5d483eb15b4936bff6df57afd0574b05397b
23,345
import random

def best_index(seq):
    """best_index(seq)

    Given a sequence, find the position of the largest value.
    Ties are broken randomly.
    """
    largest = max(seq)
    winners = [idx for idx, value in enumerate(seq) if value == largest]
    # Shuffle only when there is an actual tie.
    if len(winners) > 1:
        random.shuffle(winners)
    return winners[0]
7d9506220d9b216c4016102f48853783d049f33d
23,349
def inask(question: str) -> str:
    """Prompt the user with *question* and return the raw input string.

    Kept as a thin wrapper around input() so tests can patch this single
    function instead of the builtin.

    Args:
        question (str): any string

    Returns:
        str: user input
    """
    return input(question)
e0ad0b9fb4ff518b103b35a1855b780e6d323172
23,350
def borough():
    """
    Ensures the borough entered is a string

    Parameters
    ----------
    none
    """
    value = "Brooklyn"
    if not isinstance(value, str):
        print("Please enter a word")
        return None
    return value
742c04ced6a6c2d69a2d2c1c2c768f659f6f4963
23,351
import ast def _node_filter_for_globals(globals): """Filters ast nodes in support of setting globals for exec. Removes initial assigns of any variables occuring in `globals`. This is to allow globals to provide the initial value. Subsequent assigns are not removed under the assumption that are re-defining the initial variable value. """ names = set(globals.keys()) removed = set() def f(node): if isinstance(node, ast.Assign): for target in node.targets: if not isinstance(target, ast.Name) or target.id in removed: return True if target.id in names: removed.add(target.id) return False return True return f
5ffc5fb128ab297d0cbc2efef6684fb455dfaa2e
23,353
def nvmf_subsystem_add_ns(client, nqn, bdev_name, tgt_name=None, ptpl_file=None, nsid=None, nguid=None, eui64=None, uuid=None):
    """Add a namespace to a subsystem.

    Args:
        nqn: Subsystem NQN.
        bdev_name: Name of bdev to expose as a namespace.
        tgt_name: name of the parent NVMe-oF target (optional).
        nsid: Namespace ID (optional).
        nguid: 16-byte namespace globally unique identifier in hexadecimal (optional).
        eui64: 8-byte namespace EUI-64 in hexadecimal (e.g. "ABCDEF0123456789") (optional).
        uuid: Namespace UUID (optional).

    Returns:
        The namespace ID
    """
    ns = {'bdev_name': bdev_name}
    # Forward only the options the caller actually set (truthy values).
    for key, value in (('ptpl_file', ptpl_file), ('nsid', nsid),
                       ('nguid', nguid), ('eui64', eui64), ('uuid', uuid)):
        if value:
            ns[key] = value
    params = {'nqn': nqn, 'namespace': ns}
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_add_ns', params)
7cced4de1411e5b082be8a7f19730d56134a85be
23,354
from typing import List
from typing import Dict

def get_initial_state(procs: int, tasks: List[int]) -> Dict[int, List[int]]:
    """Generate the initial state of the system.

    All processors start with an empty task list except processor 0,
    which receives every task (sorted ascending). Example::

        {0: [1, 2, 2, 2, 3], 1: [], 2: []}

    :param procs: number of available processors (must be >= 1)
    :param tasks: list of tasks to distribute
    :return: the initial state
    :raises SystemExit: if fewer than one processor is provided
    """
    if procs < 1:
        # Raise SystemExit directly instead of calling the site-provided
        # exit() helper, which is not guaranteed to exist everywhere.
        raise SystemExit('Must provide at least one processor')
    state: Dict[int, List[int]] = {i: [] for i in range(procs)}
    state[0] = sorted(tasks)
    return state
04ac8b0d99d7b248772894ea5356065e555c5250
23,357
from typing import List

def list2str(l: List[int]) -> str:
    """Convert a list of ints to a space-separated string."""
    return ' '.join(map(str, l))
fde657c2143ab73fbf0e9a28f3cf47fa377de7dc
23,358
def a_function():
    """A pretty useless function"""
    result = "1+1"
    return result
692f03d4f6a600137c97d0e6af475cc167475146
23,359
import math def _pos_sqrt(value: float) -> float: """Returns sqrt of value or raises ValueError if negative.""" if value < 0: raise ValueError('Attempt to take sqrt of negative value: {}'.format(value)) return math.sqrt(value)
7b4e76f67b2f3f3dfab3e3c441f084685464a994
23,360
def three_sum(number_list, required_sum):
    """Three sum question using hashing method.

    Parameters
    ----------
    number_list: list[float]
    required_sum: int

    Returns
    -------
    tuple(float)
        (second_number, first_number, third_number) for the first triple
        found summing to required_sum, else (None, None, None).
    """
    for first_index in range(len(number_list)):
        first_number = number_list[first_index]
        seen = set()
        # The sum still needed from two numbers other than the first.
        sum_to_satisfy = required_sum - first_number
        # Pretty much the two-sum problem from here on. (The original
        # also incremented the for-loop variable by hand — a no-op.)
        for second_index in range(first_index + 1, len(number_list)):
            second_number = number_list[second_index]
            if (sum_to_satisfy - second_number) in seen:
                return second_number, first_number, sum_to_satisfy - second_number
            seen.add(second_number)
    return None, None, None
2aa7fa178b16da3d5b7150b1bcfaab27b6947884
23,362
def get_type():
    """Collects the graph type the user desires.

    Prompts the user as to whether they want to draw an Epitrochoid or a
    Hypotrochoid and loops until valid input is achieved ('loop and a
    half' pattern).
    """
    types = ['Epitrochoid', 'Hypotrochoid']
    while True:
        selection = input('Please tell me if you\'d like to draw an ' 'Epitrochoid or a Hypotrochoid.\n>> ')
        # Exact (case-sensitive) match against the two accepted names.
        if selection in types:
            print('{} selected'.format(selection))
            return selection
        # Invalid entry: nag and prompt again.
        print('You didn\'t correctly select :( Please write either ' 'Epitrochoid or Hypotrochoid when prompted.\n')
4d1ece158138b141f5ac5a303dcd41a751097041
23,365
import re

def filter(phrase, isSpecCharsFiltered=True):
    """Processes key and value strings for dictionary storage, including
    condensation of whitespace, removal of bracketed references, and (if
    not specified otherwise) removal of non-word/space/digit characters.
    """
    # Raw strings keep the regex escapes intact; the original non-raw
    # '\s', '\[' etc. rely on deprecated invalid-escape behavior.
    phrase = re.sub(r'[\-\s]+', ' ', phrase)
    phrase = re.sub(r'\[.+?\]', '', phrase)
    if isSpecCharsFiltered:
        phrase = re.sub(r'[^\w\s\d]+', '', phrase)
    return phrase
f97a421539f07fa28dc695055108c9de9eedd5b7
23,366
def find_nth_prime(n: int) -> int:
    """Return the n-th prime number (1-indexed: n == 1 gives 2).

    Modified sieve: each odd candidate is checked against all previously
    found primes until n primes have been collected. The original
    omitted 2 from the prime list (so every answer was shifted) and
    contained unreachable n == 1 / current == 2 branches.

    Raises:
        ValueError: if n < 1.
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    primes = [2]
    candidate = 3
    while len(primes) < n:
        if all(candidate % p for p in primes):
            primes.append(candidate)
        candidate += 2  # even numbers > 2 are never prime
    return primes[n - 1]
1b131ea3f0a00174220f5ed7969bba9a828aa4bf
23,367
from typing import List
from typing import Dict

def generate_create_sqls(tbl_name: str, columns: List[tuple], keys: Dict) -> str:
    """Build a CREATE TABLE statement for table pyjj_{tbl_name}.

    tbl_name: string   a table name after pyjj_{tbl_name}
    columns:  list of tuples (column_name, data_type, options)
    keys:     dict of keys  key: (columns, options)
    """
    assert tbl_name
    column_stmt = ",".join(f"{name} {dtype} {options}" for name, dtype, options in columns)
    if keys:
        parts = [f"{key} ({','.join(cols)}) {opts}" for key, (cols, opts) in keys.items()]
        key_stmt = "," + ",".join(parts)
    else:
        key_stmt = ""
    return f"CREATE TABLE IF NOT EXISTS pyjj_{tbl_name} ({column_stmt} {key_stmt});"
2bcf1e568ce093426666b8e10f9fa28128b7e995
23,368
import random import os def _set_ants_seed(): """Fix random seed for antsRegistration, antsAI, antsMotionCorr""" val = random.randint(1, 65536) os.environ["ANTS_RANDOM_SEED"] = str(val) return val
2edc4bf148f581950f96287bb990398c2be9a8f4
23,369
import csv

def csv2dict(csv_filename, deli=',', encoding=None, key_is_header=False):
    """Read a csv file into a dict (or a list of dicts).

    The original docstring documented parameters that do not exist
    (del_blank_row, encode) and claimed a list return unconditionally.

    :param:
        * csv_filename: (string) full path of the csv file
        * deli: (string) csv delimiter, default is comma
        * encoding: (string) file encoding
        * key_is_header: (bool) if True, treat the first row as the
          header and return a list of dicts (one per data row);
          otherwise return a dict mapping column 0 to column 1.
    :return:
        * csv_data: (dict or list) the parsed data

    Example::

        csv_dict = csv2dict('test_csv.csv')
    """
    with open(csv_filename, encoding=encoding) as csv_file:
        if key_is_header:
            reader = csv.reader(csv_file, delimiter=deli)
            # First row provides the dict keys for every following row.
            fieldnames = next(reader)
            dict_reader = csv.DictReader(csv_file, fieldnames=fieldnames, delimiter=deli)
            return [dict(row) for row in dict_reader]
        reader = csv.reader(csv_file, delimiter=deli)
        # Skip blank rows; map first column to second.
        return {row[0]: row[1] for row in reader if row}
71ebf02d85bf9fceaf3374c4233940e5794cc8cc
23,370
import json

def put_content(request):
    """Validate a content-update request body and return an OK response.

    Body should be {
        'type': 'page',
        'title': 'title',
        'body': {'storage': {'value': 'value', 'representation': 'storage'}},
        'version': {'number': number},
        'ancestors': [{'id': parent_id}]  (optional)
    }

    Raises:
        Exception: if any required key is missing.
    """
    body = request["body"]
    if not all(key in body for key in ('type', 'title', 'body', 'version')):
        raise Exception('Missing keys from body')
    # Both the storage node and the version number are required; the
    # original used 'and', which only rejected requests missing *both*
    # and then crashed on the storage lookup below.
    if "storage" not in body.get('body') or "number" not in body.get('version'):
        raise Exception('Missing keys from body')
    if not all(key in body.get('body').get('storage') for key in ('value', 'representation')):
        raise Exception('Missing keys from body')
    return json.dumps({"result": "OK"})
ded8c5f78e93ba4e3ae7c96d4b2ddfd64a4cb10b
23,372
def tokenize_text(text):
    """
    Fake tokenizer; Return a list of words
    """
    words = text.split()
    return words
c96764e5d504dcdf0285b0c2117f6a355d0358c5
23,373
def build(lineage):
    """build(lineage) takes as input the lineage
    of a stream and constructs the stream.
    """
    if not lineage:
        raise ValueError("Plan is empty")
    # Start from the root and apply each subsequent op via indexing.
    root, *ops = lineage
    for op in ops:
        root = root[op]
    return root
7b34c1147515497e8858ea92d57d50709f5f839d
23,374
def validate_and_build_instance_fleets(parsed_instance_fleets):
    """
    Helper method that converts --instance-fleets option value in
    create-cluster to Amazon Elastic MapReduce InstanceFleetConfig
    data type.

    The original contained a dead inner loop over InstanceTypeConfigs
    that only bound an unused variable; it is removed here.
    """
    instance_fleets = []
    for instance_fleet in parsed_instance_fleets:
        config = {}
        # Name defaults to the fleet type when not given explicitly.
        config['Name'] = instance_fleet.get('Name', instance_fleet['InstanceFleetType'])
        config['InstanceFleetType'] = instance_fleet['InstanceFleetType']
        for key in ('TargetOnDemandCapacity', 'TargetSpotCapacity', 'InstanceTypeConfigs'):
            if key in instance_fleet:
                config[key] = instance_fleet[key]
        if 'LaunchSpecifications' in instance_fleet:
            launch_specs = instance_fleet['LaunchSpecifications']
            config['LaunchSpecifications'] = {}
            for spec in ('SpotSpecification', 'OnDemandSpecification'):
                if spec in launch_specs:
                    config['LaunchSpecifications'][spec] = launch_specs[spec]
        instance_fleets.append(config)
    return instance_fleets
4afede0fbee45f5eb4cfeb29aa0eaf759ce31bd9
23,375
def sort(pinyin_d):
    """
    :rtype: list
    """
    # Dict keys are unique, so sorting the (key, value) pairs directly
    # never falls through to comparing values.
    return sorted(pinyin_d.items())
19763f57d70b088b869da4d01cd9de8ef3f6d247
23,377
from collections.abc import Iterable

def split_iterable(ls: list, n: int):
    """Split indexable data into chunks of size n.

    :param ls: sliceable, indexable data (list, str)
    :param n: number of items per chunk (n >= 1)
    :return: list of chunks, or None when the arguments are invalid
    """
    if not (isinstance(ls, Iterable) and isinstance(n, int) and n >= 1):
        return None
    return [ls[start:start + n] for start in range(0, len(ls), n)]
a9c79078bc89095b942343d866f64fff0800a15c
23,379
def get_node_datatext(node):
    """Returns a string with data node text if it exists on the node,
    otherwise returns an empty string"""
    if not node.attributes["id"].value:
        return ""
    # Scan all <data> children; as in the original, the LAST matching
    # key="d5" element with text wins.
    text = ""
    for data_node in node.getElementsByTagName('data'):
        if data_node.attributes["key"].value == "d5" and data_node.firstChild:
            text = data_node.firstChild.wholeText
    return text
b686cc52e0194440f0c86b9e6dfb12e4b7f2a1b4
23,380
import math
import numpy
import random

def partition(array_to_partition, start_index, end_index, partition_style):
    """
    There are various ways to choose a partition. The function returns
    the index of the passed array that is to be used as the pivot point.

    Options for partition_style:
        f or first:  index of the first element
        l or last:   index of the last element
        m or median: index of the median of the first, last, and middle elements
        r or random: a random element of the array
    """
    style = partition_style.lower()[0]
    if style == "f":
        return start_index
    if style == "l":
        return end_index - 1
    if style == "m":
        middle_index = int(math.floor((end_index + start_index - 1) / 2))
        first_value = array_to_partition[start_index]
        last_value = array_to_partition[end_index - 1]
        middle_value = array_to_partition[middle_index]
        median_value = int(numpy.median([first_value, last_value, middle_value]))
        # Map the median value back to the index it came from.
        if median_value == first_value:
            return start_index
        if median_value == last_value:
            return end_index - 1
        return middle_index
    if style == "r":
        return math.floor(random.random() * end_index + 1)
30ebe40ace8796d4e06ff50956cd27a3b87481e1
23,381
def clean_up_string(string: str) -> str:
    """
    Cleaning up string from invalid chars: keep only parentheses.

    :param string:
    :return:
    """
    return ''.join(ch for ch in string if ch in '()')
719feee7d955ad7a788bccfe19998bbdcccf7464
23,382
def make_constructed_utts(utt_tokens):
    """
    Converts utterances into correct form for composition

    Args:
        utt_tokens: List of utterances (utterance = list of tokens)
    Returns:
        utts: List of utterances for individual tokens and compositions
        compose_idx: List of indices in utts to be composed

    [["green", "-ish"]] =>
      [["#start#", "green", "#end#"],
       ["#start#", "-ish", "#end#"],
       ["#start#", "green", "-ish", "#end#"]], [(0,2)]
    """
    start_tok = "#start#"
    end_tok = "#end#"
    utts = []
    compose_idx = []
    for utt in utt_tokens:
        # Record the [start, end) span the per-token utterances occupy.
        compose_idx.append((len(utts), len(utts) + len(utt)))
        utts.extend([start_tok, tok, end_tok] for tok in utt)
        utts.append([start_tok] + utt + [end_tok])
    return utts, compose_idx
57eaa008883b2cfde95b438251dc305478dac3f9
23,383
import functools

def once(callable_):
    """
    Call the given function at most once per set of parameters
    """
    memoize = functools.lru_cache(maxsize=None, typed=True)
    return memoize(callable_)
b95c38a1ae6120bbd9296457bd201503cd639b5d
23,384
import re

def isbase64(value):
    """
    Return whether or not given value is base64 encoded.

    If the value is base64 encoded, this function returns ``True``,
    otherwise ``False``.

    Examples::

        >>> isbase64('U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==')
        True

        >>> isbase64('Vml2YW11cyBmZXJtZtesting123')
        False

    :param value: string to validate base64 encoding
    """
    pattern = re.compile(r"^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})$")
    return pattern.match(value) is not None
02da2cfe0b32288aa599534dea08f4bc85a41123
23,386
def find_next_biggest_with_same_1s(n):
    """Finds the next biggest number with the same number of 1 bits.

    - Flips the rightmost 0 that has ones on its right (increases the value)
    - Rearranges the 1s on its right to the lowest positions (decreases
      the value while keeping the same popcount)

    Example: xxxx_0_111_0000 --> xxxx_1_111_0000 --> xxxx_1_000_0011

    Args:
        n: A positive integer.
    Raises:
        ValueError on non-positive input.
    Returns:
        Next biggest number with same number of 1s.
    """
    if n <= 0:
        raise ValueError('Input argument has to be positive.')
    # Count the trailing zeros, then the run of ones just above them.
    shifted = n
    trailing_zeros = 0
    while shifted & 1 == 0:
        shifted >>= 1
        trailing_zeros += 1
    ones = 0
    while shifted & 1:
        shifted >>= 1
        ones += 1
    pivot = ones + trailing_zeros
    # Set the 0 just above the block of ones, clear everything below it,
    # then fill (ones - 1) bits back in at the very bottom.
    result = (n | (1 << pivot)) & ~((1 << pivot) - 1)
    return result | ((1 << (ones - 1)) - 1)
d323a3d828929f9f6ae0c0ad6f2849e067a2eb8d
23,388
import argparse

def parse_args():
    """Argument Parser"""
    parser = argparse.ArgumentParser(description="Car Lane Joint Detection")
    # (flags, keyword arguments) table keeps the option list scannable.
    options = [
        (("-m", "--mode"), {"choices": ["image", "video"], "default": "image"}),
        (("--fps",), {"type": int, "default": 20, "help": 'registered frames-per-second for videos'}),
        (("-dp", "--data_path"), {"default": "data/images", "help": "path to an image directory or a explicit path to a video"}),
        (("-lcf", "--lane_cfg_path"), {"default": "cfgs/lane.yml", "help": "Path to lane-model-config file"}),
        (("-ccf", "--car_cfg_path"), {"default": "cfgs/car.yml", "help": "Path to car-model-config file"}),
        (("-odr", "--out_dir"), {"default": "output", "help": "Saving directory"}),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
52dde0e6bd7aed1f285c9fc171175291d6670a71
23,390
import math

def board_valid(board, current, i):
    """
    Check whether placing number *i* at cell *current* keeps the board valid.

    :param board: matrix (n x n, n a perfect square)
    :param current: tuple (row, column) of the candidate cell
    :param i: number to place
    :return: True if row, column and sub-square are all free of i
    """
    # Row check.
    if not all(i != board[current[0]][x] for x in range(len(board))):
        return False
    # Column check — the original indexed the column with current[0]
    # (the row coordinate), so it validated the wrong column.
    if not all(i != board[x][current[1]] for x in range(len(board))):
        return False
    # Sub-square check.
    sqrt_n = int(math.sqrt(len(board)))
    top_point = (sqrt_n * (current[0] // sqrt_n), sqrt_n * (current[1] // sqrt_n))
    for x in range(top_point[0], top_point[0] + sqrt_n):
        for y in range(top_point[1], top_point[1] + sqrt_n):
            if board[x][y] == i:
                return False
    return True
186601934dfa97bf3c4b7a14b0e1cfb64e52a44e
23,392
from typing import List
from typing import Dict

def get_single_color_dicts(self) -> List[List[Dict]]:
    """Converts the text in the editor based on the line_string_list into
    a list of lists of dicts; every line is one sublist. Since only one
    color is applied, each line yields exactly one dict."""
    return [
        [{'chars': line, 'type': 'normal', 'color': self.textColor}]
        for line in self.line_string_list
    ]
9ebcb28e59f6b05591c0c5465513aa9408952a62
23,393
def is_last_ts_in_thousands(timestamps):
    """Detect if the largest timestamp in a sequence is a multiple of 1000.

    Args:
        timestamps (list): timestamps (in picoseconds), or None.

    Returns:
        True if the max timestamp is a multiple of 1000 (or the input is
        None), False otherwise.
    """
    if timestamps is None:
        return True
    return max(timestamps) % 1000 == 0
cb26d134eada9407415fefb4761ec265fa3c8f28
23,394
def fixture_srvr_jenny():
    """Define example server data: Jenny."""
    return dict(
        name='jenny',
        fullName='minecraft-main-server-jenny',
        environment='main',
        instanceId='8675309',
        state='stopped',
        publicIpAddress='10.11.12.13',
    )
b56d6606dae6b0e4df8ca537707eb24564bf7d37
23,395
def is_pos_int(num_str):
    """
    Args:
        num_str (str): The string that is checked to see if it represents
            a positive integer (not 0)

    Returns:
        bool

    Raises:
        TypeError: if num_str is not a str. (The original used a bare
            assert, which disappears under ``python -O``.)
    """
    if not isinstance(num_str, str):
        raise TypeError('num_str must be a str, got {!r}'.format(type(num_str)))
    # isdigit() rejects signs, spaces and decimals; then exclude 0.
    return num_str.isdigit() and int(num_str) != 0
52b5dc71f9253518ee07e91cdf5e21e9d5e5588f
23,396
def create_group(key, name):
    """create a group of keys: node of our tree"""
    return {
        'name': name,
        'key': key,
        'keys': [],
        'groups': [],
        'children': [],
    }
f49e2c481b0f41a8e1cf15cb746f605b16abd52a
23,397
from typing import Callable from typing import List def _excluded_save_params( self, super_: Callable[[], List[str]], additionals: List[str], ) -> List[str]: """Method, exclude additional attributes. """ return super_() + additionals
38f39befc2974adf43e5591a5d5aca03f41d0c18
23,398
from typing import List

def argv_pars(arguments: List[str]) -> int:
    """Returns second argv or 30.

    Args:
        arguments (List[str]): sys.argv

    Returns:
        int: i >= 1, or 30 when the second argument is missing/invalid
    """
    try:
        parsed = int(arguments[1])
    except Exception:
        return 30
    return parsed if parsed >= 1 else 1
b96340f9d547e3fabd959f2fe9feb8cd0d3f4c47
23,399
def get_pentagonal_number(n: int) -> int:
    """Get Pentagonal number `P_n=n*(3n−1)/2` for a given number `n`."""
    product = n * (3 * n - 1)
    return product // 2
4ec89c0428ea83ede1084877790edadb7b61b6d5
23,400
import secrets def _generate_token_value(): """Return a cryptographic, URL-safe token.""" return secrets.token_urlsafe()
4c5f4247260f2decb83c3f7a0e2049f169f78e3d
23,402
import os

def get_duplicate_notebooks(repo):
    """
    The function takes a repository and checks whether two or more
    notebooks with the same filename are present.

    Args:
        repo(Repository): python object representing the repository
    Returns:
        paths: list containing paths to notebooks with duplicate filenames

    A way you might use me is

    duplicate_nb_in_repo = get_duplicate_notebooks(repo)
    """
    seen_names = []
    duplicate_names = []
    for notebook in repo.notebooks:
        basename = os.path.basename(notebook.path)
        if basename in seen_names:
            duplicate_names.append(basename)
        else:
            seen_names.append(basename)
    # Collect every notebook whose filename appears in the duplicate list.
    paths = []
    for basename in duplicate_names:
        for notebook in repo.notebooks:
            if os.path.basename(notebook.path) == basename:
                paths.append(notebook.path)
    return paths
d08f1d3e643079cbec951e90f9e9329df0980adb
23,404
import numpy


def _detector_camera(
    positions,
    intensities,
    shape=(256, 256),
    position=(0, 0),
    exposure=1,
    binning=(1, 1),
    axes=(-2, -1),
):
    """Accumulate per-emitter intensities into binned camera frames.

    Assumptions to confirm against the callers (shapes are not visible here):
    positions appears to be indexed as [emitter, time, coordinate] with the
    y/x coordinates selected via `axes`; intensities as [emitter, time].
    `shape` is the detector size in pixels, `position` its center,
    `exposure` the number of time steps folded into one frame, and
    `binning` the (y, x) pixel-binning factors.

    Returns a uint32 array of shape
    (timesteps // exposure, shape[0] // binning[0], shape[1] // binning[1]).
    """
    yax, xax = axes
    # Field of view covered by the detector, centered on `position`
    # (the % 2 term keeps odd shapes symmetric).
    ymin = position[0] - shape[0] // 2
    ymax = position[0] + shape[0] // 2 + shape[0] % 2
    xmin = position[1] - shape[1] // 2
    xmax = position[1] + shape[1] // 2 + shape[1] % 2
    images = numpy.zeros(
        (
            positions.shape[1] // exposure,
            shape[0] // binning[0],
            shape[1] // binning[1],
        ),
        dtype=numpy.uint32,
    )
    for p in range(positions.shape[0]):
        for t in range(positions.shape[1]):
            y = positions[p, t, yax]
            x = positions[p, t, xax]
            # Only accumulate hits that land inside the field of view.
            if y >= ymin and y < ymax and x >= xmin and x < xmax:
                images[
                    t // exposure,
                    (y - ymin) // binning[0],
                    (x - xmin) // binning[1],
                ] += intensities[p, t]
    return images
3ad2e3ea6dc31785b5fa89afed3f47b8d65604bb
23,405
def cpf_checksum(cpf):
    """
    CPF Checksum algorithm.

    Returns True when both check digits (positions 10 and 11) match the
    values computed from the preceding digits. Repeated-digit CPFs such
    as '11111111111' are rejected outright.
    """
    if cpf in map(lambda x: str(x) * 11, range(0, 10)):
        return False

    def dv(partial):
        s = sum(b * int(v) for b, v in zip(range(len(partial) + 1, 1, -1), partial))
        return s % 11

    # A remainder below 2 maps to check digit 0. The original applied
    # this rule only to the second digit, wrongly rejecting valid CPFs
    # whose first check digit is 0.
    q1 = dv(cpf[:9])
    dv1 = 11 - q1 if q1 >= 2 else 0
    q2 = dv(cpf[:10])
    dv2 = 11 - q2 if q2 >= 2 else 0
    return dv1 == int(cpf[9]) and dv2 == int(cpf[10])
cbe634578d50687d2b2aec5aa996123f25a03327
23,407
import argparse

def get_arguments():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description="Subset an alignment")
    # (short flag, long flag, keyword arguments) table of options.
    specs = [
        ('-a', '--alignment', {'help': 'fasta alignment', 'required': True}),
        ('-m', '--missing', {'help': 'missing data file gen with missingness.py', 'required': True}),
        ('-p', '--percent', {'help': 'percent missing threshold to exclude', 'required': True, 'type': float}),
        ('-o', '--outfile', {'help': 'outfile name (file.fasta)', 'required': True}),
    ]
    for short_flag, long_flag, kwargs in specs:
        parser.add_argument(short_flag, long_flag, **kwargs)
    return parser.parse_args()
2ba1deb591844e845262c6125f889794a0a26ced
23,408
def _findBeginning(file_name, loc): """Scan given TRY dat file and find end of header (start of data) Arguments: file_name {str} -- Name of TRY data file loc {str} -- Location of TRY data file Returns: (int, string) -- (lines before data starts, column header) """ with open(loc + file_name, 'r') as dat_file: last_line = dat_file.readline() current_line = dat_file.readline() dat_start = 2 while current_line[:3] != '***': last_line = current_line # save column header current_line = dat_file.readline() dat_start += 1 if dat_start == 100: break # get header as list of string last_line = last_line.split() return (dat_start, last_line)
20be3ccefafc61bfa3f769b993e5f53c667b37ba
23,409
def mhz_to_freq_khz(mhz):
    """ Convert MHz to exact frequency in kHz """
    band_table = {
        14: 14100,
        18: 18110,
        21: 21150,
        24: 24930,
        28: 28200,
    }
    # Unknown bands raise KeyError, as before.
    return band_table[mhz]
d7ec477c88b7e212e852aef407f2a31064d806a0
23,410
import base64

def encode_b16(msg: str) -> bytes:
    """
    >>> encode_b16('ini contoh')
    b'696E6920636F6E746F68'
    """
    return base64.b16encode(msg.encode("utf-8"))
7c0bb90c591d83d01e3bbbe84edd5f10c4a35bf3
23,411
def price_table_to_price_mapping(table):
    """Convert price table to a dict mapping from region to instance
    type to instance info
    """
    region_price_mapping = {}
    for region_entry in table['config']['regions']:
        region_price_mapping[region_entry['region']] = {
            size['size']: size
            for category in region_entry['instanceTypes']
            for size in category['sizes']
        }
    return region_price_mapping
d39887b82be8ae37a20d73c830e7ef724553600e
23,413
import hashlib

def hash16(data):
    """
    Return a hex string of the data's hash. Currently uses md5.
    """
    return hashlib.md5(data.encode('utf-8')).hexdigest()
bed6d51832f1354990c08c0c27f883e99f00ddd7
23,414
def tranche99(filt, cutoff=99.6):
    """Return True if the tranche is below the cutoff (default 99.6),
    e.g. VQSRTrancheINDEL90.00to99.00; a missing FILTER (None) passes.
    """
    if filt is None:
        return True
    if filt[:4] != "VQSR":
        return False
    try:
        return float(filt.split("to")[1]) < cutoff
    except (IndexError, ValueError):
        # Malformed tranche string (no 'to' part or non-numeric bound);
        # the original's bare except also swallowed unrelated errors.
        return False
039090762623500b804ac222fbb7bf2f8a55402f
23,415
def get_empty_string(submission_json):  # pylint: disable=unused-argument
    """
    Empty String - For consistency in defining the map
    """
    return str()
23550c53f7b6e8d22e0a553c1642b1ef480faaab
23,418
def dict_to_css(css_dict, pretty=False):
    """Takes a dictionary and creates CSS from it

    :param css_dict: python dictionary containing css rules
    :param pretty: if css should be generated as pretty
    :return: css as string
    """
    separator = '\n' if pretty else ''
    tab = '\t' if pretty else ''
    blocks = []
    for selector, rules in css_dict.items():
        block = selector + '{' + separator
        if isinstance(rules, dict):
            declarations = [tab + prop + ':' + value + ';' for prop, value in rules.items()]
            block += separator.join(declarations)
        block += '}'
        blocks.append(block)
    return separator.join(blocks)
03c489e2cb3f855fad2e2476c33fc2806be9043f
23,419
def list_insert(collection, position, value):
    """:yaql:insert
    Returns collection with inserted value at the given position.

    :signature: collection.insert(position, value)
    :receiverArg collection: input collection
    :argType collection: sequence
    :arg position: index for insertion. value is inserted in the end if
        position greater than collection size
    :argType position: integer
    :arg value: value to be inserted
    :argType value: any
    :returnType: sequence

    .. code::

        yaql> [0, 1, 3].insert(2, 2)
        [0, 1, 2, 3]
    """
    # Work on a shallow copy so the input collection is untouched.
    result = list(collection)
    result.insert(position, value)
    return result
d4a6bf1af3845112b688a867221f6e070a5eac95
23,420
import pandas as pd

def load_exercise_threshold_app_data(data_dict=None):
    """Loads data from data dict with format provided by
    https://www.exercisethresholds.com/

    Parameters:
        data_dict (dict): Dictionary with format like
            test/exercise_threshold_app_test.json

    Returns:
        df (pandas df): Pandas data frame usable by Pyoxynet for
            inference (columns needed: 'VO2_I', 'VCO2_I', 'VE_I',
            'PetO2_I', 'PetCO2_I', 'VEVO2_I', 'VEVCO2_I')
    """
    # The original used a mutable default ({}), which is shared across
    # calls; None is the safe sentinel.
    if data_dict is None:
        data_dict = {}
    # Output column -> source key in each data point.
    column_sources = {
        'VO2_I': 'VO2',
        'VCO2_I': 'VCO2',
        'VE_I': 'VE',
        'PetO2_I': 'PetO2',
        'PetCO2_I': 'PetCO2',
        'VEVO2_I': 'VE/VO2',
        'VEVCO2_I': 'VE/VCO2',
    }
    points = data_dict[0]['data']
    time = [point['t'] for point in points]
    df = pd.DataFrame()
    # Re-zero the time axis on the first sample.
    df['time'] = [t - time[0] for t in time]
    for column, source_key in column_sources.items():
        df[column] = [point[source_key] for point in points]
    return df
e872a00d6cb32e2976c393e175fd5580e663c4df
23,421
def avg(iterable):
    """Simple arithmetic average function. Returns `None` if the length
    of `iterable` is 0 or no items except None exist."""
    values = [value for value in iterable if value is not None]
    if not values:
        return None
    return float(sum(values)) / len(values)
1489cf0a828e8c1613453d04abb8773658d60e8e
23,422
def _validate_alphabet(seq):
    """Validate the encoding alphabet *seq*.

    A valid alphabet is an iterable of 85-95 unique, printable ASCII
    characters (code points 32-126 inclusive).  Returns None for invalid
    input, otherwise the alphabet joined into a single string.

    Works even for non-terminating iterables: by the pigeonhole principle a
    duplicate or out-of-range character must appear within a bounded prefix.
    """
    seen = set()
    chars = []
    for ch in seq:
        code = ord(ch)
        # Reject duplicates and anything outside printable ASCII.
        if ch in seen or code < 32 or code > 126:
            return None
        seen.add(ch)
        chars.append(ch)
    # Size check uses the accumulated list, since seq may lack len().
    if not 85 <= len(chars) <= 95:
        return None
    return "".join(chars)
c3d9e5a243c13a1de5802f8a48faaa29f9f24bd8
23,423
def remove_negative_sum(arr):
    """Drop the leading portion of *arr* whose running sum goes negative.

    Whenever the prefix sum dips below zero, scanning restarts just past
    that point (iterative equivalent of the original recursive definition).
    Returns the surviving tail of the list.
    """
    start = 0
    while True:
        running = 0
        restarted = False
        for offset, num in enumerate(arr[start:]):
            running += num
            if running < 0:
                # Cut everything up to and including the offending element.
                start += offset + 1
                restarted = True
                break
        if not restarted:
            return arr[start:] if start else arr
e1324c0d4ecb8e7382e33f70c0aaf76fa5f87718
23,424
import os


def _envvar_to_bool(envvar: str) -> bool:
    """Interpret the environment variable *envvar* as a boolean.

    Unset, empty, "0", and "false" (case-insensitive) map to False; any
    other non-empty value maps to True.
    """
    value = os.environ.get(envvar, "")
    # Bug fix: the original compared the variable *name* (envvar) against
    # "0"/"false" instead of its value, so e.g. FLAG=0 evaluated to True.
    return bool(value) and value != "0" and value.lower() != "false"
1eb0acd5437fe6a564f26b584cd7b865053cbd65
23,426
def dict_from_tokens(tokens, value):
    """Build a dict-tree from a list of tokens defining a unique branch
    within the tree.

    Args:
        tokens (list): A list of tokens defining a branch within the nested
            dict.  Tokens are lower-cased when used as keys.
        value (any): An object set as the leaf of a branch

    Returns:
        dict: A nested dictionary
    """
    if not tokens:
        return value
    # Fix: the original popped from the caller's list, emptying it as a
    # side effect; slicing leaves the input untouched.
    return {tokens[0].lower(): dict_from_tokens(tokens[1:], value)}
dd3b9e72208ba404354f9bd98c8a4a8d80609611
23,427
def mk_falls_description(data_id, data):  # measurement group 8
    """Transform an h-falls-description.json form into measurement triples.

    :param data_id: unique id from the json form
    :param data: data array from the json form
    :return: list of (typeid, valType, value) triples consumed by
        insertMeasurementGroup to add the measurements
    """
    triples = [
        (220, 2, data_id),
        (48, 7, data['fallint']),
        (49, 2, data['falldesc']),
        (50, 2, data['fallinjury']),
    ]
    return triples
1e0c304538159b9bb01d677bdacdfa9a0b3b4e4d
23,428
import inspect
import sys


def get_all_psf_models():
    """Used to display choices in generate.py

    Returns the lower-cased names of all classes in this module whose name
    contains 'Psf', excluding the abstract base 'PsfModel'.
    """
    this_module = sys.modules[__name__]
    names = []
    for _, member in inspect.getmembers(this_module, inspect.isclass):
        cls_name = member.__name__
        if 'Psf' in cls_name and cls_name != 'PsfModel':
            names.append(cls_name.lower())
    return names
fbec1f06fdc971fdcdaf38d803869d9b27cc1e29
23,429
def sequence_producer(templates_num, sequence_storage):
    """Convert a timestamp dictionary into ordered frequency vectors.

    @input parameters:
        templates_num    -> total number of distinct templates.
        sequence_storage -> dict mapping timestamp -> {template_id: count}.
    @output:
        sequence_list -> one frequency vector (length templates_num) per
        timestamp, in ascending timestamp order, for time-series modelling.
    """
    sequence_list = []
    for ts in sorted(sequence_storage):
        row = [0] * templates_num
        counts = sequence_storage[ts]
        if counts:
            for template_id in counts:
                row[template_id] = counts[template_id]
        sequence_list.append(row)
    return sequence_list
3dd4130d38951c46863f662bf83bf2c0d0f4d14a
23,431
def addMessage(row_num, valid, new_msg, messages):
    """Add an error message to the list of errors and update validity.

    Messages are deduplicated by text: a repeated message gets the new row
    number appended (unless it is already the last recorded row).  Any
    message containing "Error" marks the data invalid.
    """
    if not new_msg:
        return valid, messages
    if "Error" in new_msg:
        valid = False
    for entry in messages:
        if entry[1] == new_msg:
            # Same text already recorded; just note the extra row.
            if entry[0][-1] != row_num + 1:
                entry[0].append(row_num + 1)
            return valid, messages
    messages.append([[row_num + 1], new_msg])
    return valid, messages
899f2c4168ccfbc1fe66ffd35f6b4ad008d6f032
23,432
def get_lines_from(hand, track_num, fps, ticks_per_ms):
    """
    goes through a hand array and converts its information into the form required for the csv file
    :param hand: a matrix that has the information of what is being played by a hand
        (rows = frames, columns = notes; cell values 0/1 for off/on —
        presumably a numpy array, since .shape and 2-D indexing are used)
    :param track_num: what track number to give this hand
    :param fps: frames per second of the video downloaded. Used to calculate the time
        at which a note should be played
    :param ticks_per_ms: number of ticks that occur per millisecond
    :return: a list of lines to be put into the csv for the hand given
    """
    def to_intervals(array):
        # Run-length encode each column: [value, run_length] pairs.
        in_intervals = []
        for column_index in range(array.shape[1]):
            col = array[:, column_index]
            new = []
            count = 1
            for i in range(1, col.shape[0]):
                if col[i] == col[i - 1]:
                    count += 1
                    if i == col.shape[0] - 1:  # if on the last element of column. have to record now.
                        new.append([col[i], count])
                else:
                    # NOTE(review): when the final element differs from its
                    # predecessor, only the previous run is recorded here and
                    # the last element itself is never appended — confirm
                    # whether that single-frame tail is intentionally dropped.
                    new.append([col[i - 1], count])
                    count = 1
            in_intervals.append(new)
        return in_intervals

    def to_abs(array):
        # Convert run lengths into absolute start frames: [value, start_frame].
        in_abs = []
        for note in array:
            new = []
            time_count = 0
            for occurrence in note:
                new.append([occurrence[0], time_count])
                time_count += occurrence[1]
            in_abs.append(new)
        return in_abs

    hand = to_abs(to_intervals(hand))
    lines = []
    for note_num in range(len(hand)):
        note = hand[note_num]
        for record in note:
            # Translate the start frame into MIDI ticks via seconds → ms → ticks.
            frame_num = record[1]
            second = frame_num / fps
            millisecond = second * 1000
            tick = int(millisecond * ticks_per_ms)
            # note_num + 21 maps column 0 to MIDI note 21 (A0, lowest piano key).
            if record[0] == 0:  # note off event
                lines.append(f"{track_num}, {tick}, Note_off_c, 0, {note_num + 21}, 0\n")
            elif record[0] == 1:  # note on event
                lines.append(f"{track_num}, {tick}, Note_on_c, 0, {note_num + 21}, 127\n")
    return lines
85fab955961704250c00314fafadd013dfed97fa
23,433
from typing import Dict


def word_count_helper(results: Dict) -> int:
    """Compute the word count over OCR results for a single image.

    Parameters
    ----------
    results : Dict
        OCR results from a clapperboard instance; each element carries a
        "text" field whose words are separated by single spaces.

    Returns
    -------
    int
        Total number of space-separated tokens across all elements.
    """
    # Note: split(" ") (not split()) is kept deliberately, so an empty text
    # still counts as one token, matching the original behavior.
    return sum(len(entry["text"].split(" ")) for entry in results)
aaf4b9b6e430c13b804ed96374eecddb134a31ae
23,434
def _findString(text, s, i): """Helper function of findString, which is called recursively until a match is found, or it is clear there is no match.""" # Find occurrence i2 = text.find(s, i) if i2 < 0: return -1 # Find newline (if none, we're done) i1 = text.rfind("\n", 0, i2) if i1 < 0: return i2 # Extract the part on the line up to the match line = text[i1:i2] # Count quotes, we're done if we found none if not line.count('"') and not line.count("'") and not line.count("#"): return i2 # So we found quotes, now really count them ... prev = "" inString = "" # this is a boolean combined with a flag which quote was used isComment = False for c in line: if c == "#": if not inString: isComment = True break elif c in "\"'": if not inString: inString = c elif prev != "\\": if inString == c: inString = "" # exit string else: pass # the other quote can savely be used inside this string prev = c # If we are in a string, this match is false ... if inString or isComment: return -i2 # indicate failure and where to continue else: return i2
a5a87b6c023d09b10d9473c852350a2b56d999da
23,435
import argparse


def cli_mode():
    """Parse CLI arguments for running the program.

    Both positionals are optional and fall back to defaults, so the script
    also works when launched from other environments.

    :return: parsed argparse.Namespace with 'mode' and 'model' attributes
    """
    parser = argparse.ArgumentParser(
        description="Manual to use this script:",
        usage="python main.py mode model",
    )
    parser.add_argument(
        'mode', type=str, nargs='?', default="test",
        help='Choose whether you want to train a model or test one',
    )
    parser.add_argument(
        'model', type=str, nargs='?', default="lstm",
        help='Choose the model you wish to train/test',
    )
    return parser.parse_args()
cdedd9f98a78c9eba09543b293c48e8c8b386a24
23,436
def get_main_logon_do_form():
    """
    Assemble form for get_main_logon_do
    :return: form in dict
    """
    return {
        'continueUrl': 'http://www.sd.10086.cn/eMobile/jsp/common/prior.jsp?menuid=index',
    }
6ff17e1729c5d54a2ed8f68f04f0cf0a2462000b
23,437
import sys


def getFormByName(qualifiedName):
    """Convert 'module_name.forms.FormName' to a class object.

    Imports the 'module_name.forms' module and returns its FormName
    attribute.
    """
    app_name, forms_part, class_name = qualifiedName.split('.', 2)
    module_path = '%s.%s' % (app_name, forms_part)
    __import__(module_path)
    module = sys.modules[module_path]
    return getattr(module, class_name)
54a1bbb671f2bce077298bde9770300fbe70399a
23,438
def get_num_train_images(hparams):
    """Returns the number of training images according to the dataset.

    Looks up the dataset size by hparams.input_data.input_fn and caps it at
    hparams.input_data.max_samples when that is positive.

    Raises:
        ValueError: if the input_fn is not a known dataset.
    """
    dataset_sizes = {
        'imagenet': 1281167,
        'cifar10': 50000,
    }
    input_fn = hparams.input_data.input_fn
    if input_fn not in dataset_sizes:
        raise ValueError(
            f'Unknown dataset size for input_fn {input_fn}')
    num_images = dataset_sizes[input_fn]
    max_samples = hparams.input_data.max_samples
    return min(num_images, max_samples) if max_samples > 0 else num_images
e75e827026b247158ca76890990b04d51cd99a6a
23,439
def all_constant_input(node):
    """Check whether every input of *node* is a constant node.

    :param node: node with a Node structure (``proto`` and ``parents``)
    :return: False if node.proto is missing or any parent is not a
        'Constant' op; True otherwise (including when there are no parents)
    """
    if node.proto is None:
        return False
    for parent in node.parents:
        # A parent without a proto, or with any other op type, breaks it.
        if parent.proto is None or parent.proto.op_type != 'Constant':
            return False
    return True
2fd23ad7dec2bb8c77da2d18b875e7030e1c122a
23,440
import os


def getfilenames():
    """Build the file paths of the sample dynamic trial, static trial, vsk
    file, and output file.

    The example files live in the SampleData folder of the git repository;
    the working directory is changed to resolve them and restored afterwards.

    Returns
    -------
    tuple
        Four strings: paths to dynamic_trial, static_trial, vsk_file, and
        the output csv.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(script_dir)
    os.chdir("..")  # relative to github
    os.chdir("./SampleData/59993_Frame/")  # Directory from github
    base = os.getcwd() + os.sep

    dynamic_trial = base + '59993_Frame_Dynamic.c3d'
    static_trial = base + '59993_Frame_Static.c3d'
    vsk_file = base + '59993_Frame_SM.vsk'
    outputfile = base + 'pycgm_results.csv'

    # Restore the caller's working directory expectation.
    os.chdir(script_dir)
    return dynamic_trial, static_trial, vsk_file, outputfile
637d11a01397b53ee82bb45b0876357fc2f1995f
23,441
def _same_file_up_to_epsilon(filen1, filen2, eps=1e-9):
    """_same_file_up_to_epsilon
    Return True if filen1 and filen2 contain the same float data up to
    epsilon.

    The first line (header) must match exactly; on every following line,
    columns 1 through 38*3 (the 38 3-D landmarks) are compared as floats.

    Args:
        filen1 (str): The path and name of the first filename
        filen2 (str): The path and name of the second filename
        eps (float): The maximum tolerance for asserting that two floating
            point numbers are different

    Returns:
        A bool indicating if the two files contain the same data
    """
    assert filen1 != filen2, "File names must be different."
    with open(filen1, "r") as filep_ref, open(filen2, "r") as filep_test:
        line_ref = next(filep_ref)
        line_test = next(filep_test)
        assert line_ref == line_test, "Invalid line generated for " + filen1 + ":\n" + line_test + "\nthat is different from the reference file " + filen2 + ":\n" + line_ref
        for line_ref, line_test in zip(filep_ref, filep_test):
            # Bug fix: the original zipped line_test.split(...) with itself,
            # so reference values were never compared against test values.
            for val_ref, val_test in zip(line_ref.split(",")[1:38 * 3 + 1],
                                         line_test.split(",")[1:38 * 3 + 1]):
                assert abs(float(val_ref) - float(val_test)) < eps, "Invalid value detected for " + filen1 + ":\n" + line_test + "\nthat is different from the reference file " + filen2 + ":\n" + line_ref
    return True
5aea46a44ce5d5df0e8c32ea4c59f34b4751d492
23,442
import sys


def convert_to_unicode(value):
    """Decode *value* to text using the filesystem encoding when possible.

    Byte strings are decoded; anything without a ``decode`` method (already
    text, numbers, ...) is returned unchanged.

    :param value: value to be converted to unicode
    :return: decoded string, or the original value
    """
    decoder = getattr(value, 'decode', None)
    if decoder is None:
        return value
    return decoder(sys.getfilesystemencoding())
06f75c9c3b4b6ea4664c71dd389d4fd0de9739ad
23,444
import mimetypes


def get_extension(value):
    """Take in mimetype and return extension.

    Prefers .txt/.jpg/.mp3 among the known extensions for the type, falls
    back to the first known extension, and returns '???' for unknown types.
    The returned extension is upper-cased without the leading dot.
    """
    mimetypes.add_type("audio/wav", '.wav')
    preferred = [".txt", ".jpg", ".mp3"]
    candidates = mimetypes.guess_all_extensions(value)
    if candidates:
        # First candidate (in mimetypes order) that is in the preferred set,
        # otherwise the very first candidate.
        chosen = next((ext for ext in candidates if ext in preferred),
                      candidates[0])
    else:
        chosen = ".???"
    return chosen.upper()[1:]
b559f3762cf7ad2bf5ad20c59f2bbea21ee6a932
23,445
import argparse


def _comma_separated_pyramiding_policies(string):
    """Parses an input consisting of comma-separated pyramiding policies.

    Each item must be one of mean/sample/min/max/mode (case-insensitive);
    the lower-cased list is returned, otherwise ArgumentTypeError is raised.
    """
    error_msg = ('Argument should be a comma-separated list of: '
                 '{{"mean", "sample", "min", "max", "mode"}}: {}')
    allowed = {'mean', 'sample', 'min', 'max', 'mode'}
    tokens = string.split(',')
    if not tokens:
        raise argparse.ArgumentTypeError(error_msg.format(string))
    policies = []
    for token in tokens:
        lowered = token.lower()
        if lowered not in allowed:
            raise argparse.ArgumentTypeError(error_msg.format(string))
        policies.append(lowered)
    return policies
513449b4b2cad4ac3831bb16975aa5c2266889a0
23,447
def unite_statuses(statuses, update):
    """Merge two <hostname, hoststatus> dictionaries.

    For hosts present in both, the exitstatus becomes the max of the two and
    the logs are concatenated; hosts only in *statuses* are kept as-is.

    :param statuses: base dict of host -> {"exitstatus": int, "log": str}
    :param update: dict of updates in the same shape
    :return: merged dict
    """
    result = {}
    # Fix: .iteritems() is Python 2-only; use .items() for Python 3.
    for host, status in statuses.items():
        if host in update:
            upd_status = update[host]
            result[host] = {
                "exitstatus": max(status["exitstatus"], upd_status["exitstatus"]),
                "log": status["log"] + "\n" + upd_status["log"],
            }
        else:
            result[host] = status
    return result
c0454e3fdc0ccda0c6cabf3ac2ee479aacbfee27
23,448
def stringify(value):
    """
    Escapes a string to be usable as cell content of a CSV formatted data.

    None becomes the empty string; values containing a comma are wrapped in
    double quotes with embedded quotes doubled.
    """
    text = '' if value is None else str(value)
    if ',' not in text:
        return text
    escaped = text.replace('"', '""')
    return f'"{escaped}"'
74d5683a79e7efab48ec24767d1c912b66c0e65b
23,450
import math


def get_bpd(log_p, dimentions=28*28):
    """Convert a log-probability tensor into mean bits-per-dimension.

    bpd = (nll / num_pixels) / log(2)

    log_p: log probability (tensor exposing .mean().item())
    dimentions: number of dimensions (resolution) of the image
    """
    nll_per_dim = -log_p / dimentions
    bits_per_dim = nll_per_dim / math.log(2)
    return bits_per_dim.mean().item()
a1ba8c8e688988ef0b02ec555e5b31ffc5408d2a
23,452
def nb_coverage_distance(epitope, peptide, mmTolerance = 0):
    """Determine whether *peptide* covers *epitope*; handles different
    lengths.

    To be a consistent distance matrix:
        covered     = 0
        not-covered = 1

    An epitope longer than the peptide is never covered.  Otherwise every
    alignment of the epitope within the peptide is tried; if any alignment
    has at most mmTolerance mismatches, the epitope is covered.

    Parameters
    ----------
    epitope : np.array
    peptide : np.array
    mmTolerance : int
        Number of mismatches tolerated; covered when mismatches <= tolerance.

    Returns
    -------
    covered : int
        Covered (0) or not-covered (1)"""
    epi_len, pep_len = len(epitope), len(peptide)
    if epi_len > pep_len:
        return 1
    for start in range(pep_len - epi_len + 1):
        mismatches = 0
        for offset in range(epi_len):
            if epitope[offset] != peptide[start + offset]:
                mismatches += 1
                if mismatches > mmTolerance:
                    # Over budget at this alignment; try the next one.
                    break
        if mismatches <= mmTolerance:
            return 0
    return 1
46b88f83934465e8bb4b30f144b5acc2791c809a
23,455
def get_timespan():
    """
    function to ask the user for the two timespans which he wants to compare
    returns a list of the 4 border years
    author: Leo

    Raises
    ------
    ValueError
        if they aren't in ascending order or outside the datarange

    Returns
    -------
    timespan: list of the 4 years
    """
    # Keep prompting until all four years parse as ints and pass validation.
    while True:
        try:
            year1 = int(input('start year of first timespan: '))
            year2 = int(input('end year of first timespan: '))
            year3 = int(input('start year of second timespan: '))
            year4 = int(input('end year of second timespan: '))
            # NOTE(review): the condition deliberately omits year2>=year3
            # (the stricter variant is commented out below), so the two
            # timespans are allowed to overlap — confirm this is intended.
            if (1901>year1 or year1>=year2 or year1>=year3 or year1>=year4
                or year2>=year4 or year3>=year4 or year4>2018):
                #if 1901>year1 or year1>=year2 or year2>=year3 or year3>=year4 or year4>2018:
                raise ValueError('''The data includes the timespan from 1901 - 2018,
                                 the years have to be given in ascending order!''')
            break
        except ValueError:
            # Covers both bad int() parses and the explicit raise above;
            # the loop re-prompts after printing the hint.
            print('''the years has to be given as an integer and
                  in the range of 1901-2018''')
    timespan = [year1, year2, year3, year4]
    return timespan
fa27f08e54d1432e4b9a153a4ee159cc374ee798
23,457
def construct_unsent_berichten_query(naar_uri, max_sending_attempts):
    """
    Construct a SPARQL query for retrieving all messages for a given recipient
    that haven't been received yet by the other party.

    Messages are filtered to those without a schema:dateReceived and with
    fewer than max_sending_attempts recorded failed attempts (missing
    ext:failedSendingAttempts counts as 0 via COALESCE).

    NOTE(review): naar_uri and max_sending_attempts are interpolated
    directly into the query string — if either can come from untrusted
    input this is a SPARQL-injection risk; confirm callers only pass
    trusted/internal values.

    :param naar_uri: URI of the recipient for which we want to retrieve messages
        that have yet to be sent.
    :param max_sending_attempts: upper bound (exclusive) on failed attempts.
    :returns: string containing SPARQL query
    """
    q = """
        PREFIX schema: <http://schema.org/>
        PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>

        SELECT DISTINCT ?referentieABB ?dossieruri ?bericht ?betreft ?uuid ?van ?verzonden ?inhoud
        WHERE {{
            GRAPH ?g {{
                ?conversatie a schema:Conversation;
                    schema:identifier ?referentieABB;
                    schema:about ?betreft;
                    schema:hasPart ?bericht.
                ?bericht a schema:Message;
                    <http://mu.semte.ch/vocabularies/core/uuid> ?uuid;
                    schema:dateSent ?verzonden;
                    schema:text ?inhoud;
                    schema:sender ?van;
                    schema:recipient <{0}>.
                FILTER NOT EXISTS {{ ?bericht schema:dateReceived ?ontvangen. }}
                OPTIONAL {{ ?conversatie ext:dossierUri ?dossieruri. }}
                BIND(0 AS ?default_attempts)
                OPTIONAL {{ ?bericht ext:failedSendingAttempts ?attempts. }}
                BIND(COALESCE(?attempts, ?default_attempts) AS ?result_attempts)
                FILTER(?result_attempts < {1})
            }}
        }}
        """.format(naar_uri, max_sending_attempts)
    return q
ff380f1dd2edc77f6ee41376f6c1d8ee50448d43
23,458
def _roman_to_int(r):
    """ Convert a Roman numeral to an integer.

    Subtractive notation (IV, IX, CM, ...) is handled by subtracting a
    value when the following numeral is larger.

    Raises:
        TypeError: if r is not a string.
        ValueError: if r contains a character that is not a Roman numeral.
    """
    if not isinstance(r, str):
        # Bug fix: the original f-string was missing braces and literally
        # rendered the text "type(input)" in the error message.
        raise TypeError(f'Expected string, got {type(r)}')

    r = r.upper()
    nums = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    integer = 0
    for i in range(len(r)):
        try:
            value = nums[r[i]]
            # Subtractive notation: a smaller value before a larger one
            # (the lookahead lookup stays inside the try so an invalid next
            # character also raises ValueError, as before).
            if i + 1 < len(r) and nums[r[i + 1]] > value:
                integer -= value
            else:
                integer += value
        except KeyError:
            raise ValueError('Input is not a valid Roman numeral: %s' % r)
    return integer
f91c88cbdd6ca31ae811a2300b45fb5a6df3ea91
23,459
def find_min_cost(M, start_node, memo):
    """Returns minimum length of tour.

    Connects each solved subtour of length n back to the starting node and
    keeps the cheapest total.
    """
    n_nodes = len(M)
    # All-visited state: bitmask with every node's bit set.
    end_state = (1 << n_nodes) - 1

    best = float('inf')
    for last_node in range(n_nodes):
        if last_node == start_node:
            continue
        # Cost of the subtour ending at last_node plus the closing edge.
        tour_cost = memo[last_node][end_state] + M[last_node][start_node]
        if tour_cost < best:
            best = tour_cost
    return best
e291572734669c9aefd3557de0e9e69280012e35
23,461
import numpy as np


def weighted_avg_and_std(values, weights, axis=None):
    """
    Computes weighted average and standard deviation.

    Parameters
    ----------
    values : :obj:`array`
        Input array
    weights : :obj:`array`
        Must be same shape as ``values``
    axis : int, optional
        axis to perform weighting
    """
    mean = np.average(values, weights=weights, axis=axis)
    # Weighted variance around the weighted mean — fast and numerically precise.
    variance = np.average((values - mean) ** 2, weights=weights, axis=axis)
    return (mean, np.sqrt(variance))
4b2cbd2b412174e0035414de23d36f66e29470b6
23,462
import re


def match_absolute_path(path: str) -> bool:
    """
    Return true if the path starts with ``http(s)://``, false otherwise.

    Args:
        path (str): URL

    Returns:
        bool: True for absolute URL, false for relative URL.
    """
    # Bug fix: the original pattern r"http(s)*://*" applied '*' to 's' and
    # to the second '/', so it also accepted e.g. "httpss://x" and "http:/x".
    return re.match(r"https?://", path, re.IGNORECASE) is not None
8b508b76fc0f5102c687a202a0ffab26631eaf8b
23,463
def fps(branch):
    """
    extracts function #, process #, and scan # from the idstring of a
    spectrum branch

    returns function, process, scan as integers
    """
    # id attribute looks like "function=1 process=0 scan=3"
    tokens = branch.getAttribute('id').split()
    return [int(token.split('=')[1]) for token in tokens]
a470d609d2c8c15c88bbaba539c587410c03394a
23,464
import timeit


def stages_of_timing():
    """ Show when timeit stages are invoked.

    Demonstrates that 'setup' runs once per repetition, the statement runs
    'number' times, and state is shared across numbers within a repetition
    (but not across repetitions).

    :Expected:
    >>> stages_of_timing():
    in setup
    real statement 1
    real statement 2
    real statement 3
    in setup
    real statement 1
    real statement 2
    real statement 3
    """
    timed_code = '''
print('real statement', val)
val += 1'''
    setup_code = '''
print('in setup')
val = 1'''
    timings = timeit.repeat(stmt=timed_code, setup=setup_code,
                            repeat=2, number=3)
    return min(timings)
bfcaa41ae745e39144899ecaf7a4ff4e60ef8e2d
23,465
def partition(alist, indices):
    """A function to split a list based on item indices

    Parameters:
    -----------------------------
    : alist (list): a list to be split
    : indices (list): list of indices on which to divide the input list

    Returns:
    -----------------------------
    : splits (list): a list of sublists cut at the given indices
    """
    # Boundaries: 0, each cut index, then None (open end).
    bounds = [0] + list(indices) + [None]
    return [alist[lo:hi] for lo, hi in zip(bounds, bounds[1:])]
b14040058d96d66acf81e0bff8eedfe23df20738
23,466
from typing import Counter


def count_elements(data_lst):
    """Count how often each element occurs in a list.

    None entries are excluded from the result.

    Parameters
    ----------
    data_lst : list
        List of items to count.

    Returns
    -------
    counts : collections.Counter
        Counts for how often each item occurs in the input list.
    """
    counts = Counter(data_lst)
    # Drop the None bucket if present; pop with a default never raises.
    counts.pop(None, None)
    return counts
a12f0a35a228e8a8627a8fcfc703d3231984e3f4
23,467
def i_love_python():
    """Return a short declaration of why we love Python."""
    return "I love Python!"
0ce72c35822f52b6cf9153b79555418c1b739f09
23,468
import re


def clean_text_from_private_unicode(line):
    """Cleans the line from private unicode characters and replaces these with space.

    Covers the Basic Multilingual Plane Private Use Area (U+E000-U+F8FF)
    and the emoji/pictograph ranges U+1F300-U+1F5FF.
    """
    # Bug fix: the original used UTF-16 surrogate-pair escapes
    # (\uD83C[\uDF00-\uDFFF] and \uD83D[\uDC00-\uDDFF]); lone surrogates
    # never appear in well-formed Python 3 str, so those alternatives were
    # dead code.  Use the real astral code points they encode instead.
    return re.sub(r"[\uE000-\uF8FF\U0001F300-\U0001F5FF]", " ", line)
0ad7f47446dfb91069003c3fce0d1129dcb71113
23,469