content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import itertools
import random


def reservoir(iterator, k):
    """Reservoir-sample ``k`` items from a once-only iterator.

    Make sure the iterator is exhaustible (i.e. not recreated from
    ``range`` on each pass).

    :param iterator: stream of items to sample from
    :param k: number of items to sample
    :return: list of up to ``k`` sampled items
    """
    # Fill the reservoir with the first k items.
    sample = list(itertools.islice(iterator, 0, k))
    # Each later item overwrites a random slot with decreasing probability.
    for seen, item in enumerate(iterator):
        slot = random.randint(0, seen + k)
        if slot < k:
            sample[slot] = item
    return sample
c8ef11758246cbda4d07223c94daedf834513f9e
46,685
import sys


def string_class():
    """Return the parent class for strings for the running Python version."""
    # ``basestring`` only exists on Python 2.
    return str if sys.version_info[0] >= 3 else basestring
d69b5ef03c20f07524084650acbe90859858d217
46,689
import os


def getScreenSize():
    """Ask for the terminal dimensions via the ``stty size`` shell command.

    :return: (rows, columns) as integers
    """
    stty_output = os.popen('stty size', 'r').read()
    rows, columns = stty_output.split()
    return (int(rows), int(columns))
fca39202281d92dddab8672123c08b3864edb6ea
46,690
def calculate_kinetic_energy(mass, velocity):
    """Return the kinetic energy of ``mass`` [kg] at ``velocity`` [m/s]."""
    return 0.5 * mass * velocity ** 2


def test_calculate_kinetic_energy():
    """Sanity check: 0.5 * 10 * 4**2 == 80."""
    mass = 10  # [kg]
    velocity = 4  # [m/s]
    assert calculate_kinetic_energy(mass, velocity) == 80
f5552b919a671f072ae4bf1e06f2b28239f158e8
46,691
import shutil


def dependency_check(dependency):
    """
    Check whether a command-line dependency is installed.

    Only presence on PATH is checked, never the version.

    :param dependency: The dependency as it would be called on the command
        line (i.e. for blastn, would be blastn)
    :return: True if dependency is present, False if it is not found.
    """
    # shutil.which returns the resolved path, or None when absent --
    # the if/else returning True/False literals was redundant.
    return shutil.which(dependency) is not None
4bfb07814492eb7257e81653ef0ea816c71a3341
46,692
def cleanup_user(user):
    """Given a user dict, return a new dict ready for JSON output."""
    # Stringify the id so it is JSON serializable.
    return {"id": str(user["_id"])}
4ebd6abefbac839c26ebfd9c6a0503e51b3d48a5
46,694
def int2bin(n, nbits):
    """Convert a non-negative integer into a list of bits.

    Despite the historical name, the result is a list of booleans,
    most-significant bit first, left-padded with False to ``nbits``
    entries. (The old docstring incorrectly claimed a string.)

    Parameters
    ----------
    n : int
        The integer value.
    nbits : int
        The number of bits used to encode the value.
    """
    bits = []
    while n:
        n, remainder = divmod(n, 2)
        bits.insert(0, bool(remainder))
    # Pad on the left up to the requested width.
    while len(bits) < nbits:
        bits.insert(0, False)
    return bits
68e430840bc6c5b9509bd9315d7d5617cc3b131c
46,695
import math
import numpy


def quaternion_to_sphere_points(q):
    """Return two points on the unit sphere derived from quaternion ``q``.

    ``q`` is indexed (x, y, z, w); note that q[2] is never read here.
    NOTE(review): v1 combines q[3] with both v0 components in a way that
    never touches q[2] -- verify the formula against its derivation.
    """
    # Length of the quaternion's projection onto the xy-plane.
    l = math.sqrt(q[0]*q[0] + q[1]*q[1])
    # Unit vector perpendicular to (q[0], q[1]) in the xy-plane; fall back
    # to the y-axis when that projection is degenerate.
    v0 = numpy.array((0.0, 1.0, 0.0) if l==0.0 else \
                     (-q[1]/l, q[0]/l, 0.0), dtype=numpy.float64)
    v1 = numpy.array((q[3]*v0[0] - q[3]*v0[1],
                      q[3]*v0[1] + q[3]*v0[0],
                      q[0]*v0[1] - q[1]*v0[0]), dtype=numpy.float64)
    # Flip v0 when w is negative (v1 is left unchanged).
    if q[3] < 0.0:
        v0 *= -1.0
    return v0, v1
d66b11188848fa580e0d285f1cfeeb488658229e
46,696
import copy
from typing import Mapping


def merge_with_updates(initial_dict, updates, extend_only=False,
                       merge_lists=False):
    """
    Creates copy of initial_dict with updates applied

    **Parameters**

    initial_dict : dictionary used as base for updates
    updates : dictionary with changes to be applied
    extend_only: boolean flag that prohibits to overwrite values, only add new
    merge_lists: boolean flag; when True, matching lists are merged
        (first dict element merged recursively) instead of replaced

    **Returns**

    A copy of initial dict with all updates applied

    **Examples**

    initial_dict is None:
    >>> merge_with_updates(None, {2: 2})
    {2: 2}

    Add new value in dict:
    >>> merge_with_updates({1: 1}, {2: 2})
    {1: 1, 2: 2}

    Update value in dict:
    >>> merge_with_updates({1: 1, 2: 2}, {2: 'two', 3: 3} )
    {1: 1, 2: 'two', 3: 3}

    Update value in inner dict:
    >>> merge_with_updates({1: 1, 2: {21: 21}}, {2: {21: 'twenty-one'}})
    {1: 1, 2: {21: 'twenty-one'}}

    Override list:
    >>> merge_with_updates({1: [{2: 2, 3: 3}]}, {1: [{2: 'two'}]})
    {1: [{2: 'two'}]}

    Update value in list:
    >>> merge_with_updates(
    ...     {1: [{2: 2, 3: 3}]}, {1: [{2: 'two'}]},
    ...     merge_lists=True)
    {1: [{2: 'two', 3: 3}]}

    Update value in inner list:
    >>> merge_with_updates(
    ...     {1: {2: [{3: [{4: 4}]}]}},
    ...     {1: {2: [{3: [{4: 'four'}]}]}},
    ...     merge_lists=True)
    {1: {2: [{3: [{4: 'four'}]}]}}

    Extend dict:
    >>> merge_with_updates({1: 1}, {2: 2}, extend_only=True)
    {1: 1, 2: 2}

    Extend list:
    >>> merge_with_updates({1: [2]}, {1: [2, 3]}, merge_lists=True)
    {1: [2, 2, 3]}

    Extend list of dicts:
    >>> merge_with_updates({1: [{2: 2}]}, {1: [{3: 3}]}, merge_lists=True)
    {1: [{2: 2, 3: 3}]}

    Extend empty list with list of dicts:
    >>> merge_with_updates({1: []}, {1: [{2: 2}]}, merge_lists=True)
    {1: [{2: 2}]}

    Extend list of dicts with empty inner dict:
    >>> merge_with_updates({1: [{2: 2}]}, {1: [{}]}, merge_lists=True)
    {1: [{2: 2}, {}]}

    Extend inner dict:
    >>> merge_with_updates({1: 1, 2: {21: 21}}, {2: {22: 22}})
    {1: 1, 2: {21: 21, 22: 22}}

    Overwrite value with dict:
    >>> merge_with_updates({1: 1}, {1: {2: 2}})
    {1: {2: 2}}

    Do not modify initial dict:
    >>> initial_dict = {1: 1, 2: {21: 21}}
    >>> merge_with_updates(initial_dict, {1: 'one', 2: {21: 'two'}})
    {1: 'one', 2: {21: 'two'}}
    >>> print(initial_dict)
    {1: 1, 2: {21: 21}}

    Do not override value in extend mode:
    >>> merge_with_updates({1: 1}, {1: 'one'}, extend_only=True)
    Traceback (most recent call last):
    ...
    ValueError: Can not overwrite "1" value in extend mode

    Do not override inner value in extend mode:
    >>> merge_with_updates({1: {2: 2}}, {1: {2: 0}}, extend_only=True)
    Traceback (most recent call last):
    ...
    ValueError: Can not overwrite "2" value in extend mode

    Do not override list value in extend mode:
    >>> merge_with_updates(
    ...     {1: [{2: 2}]}, {1: [{2: 0}]}, merge_lists=True, extend_only=True)
    Traceback (most recent call last):
    ...
    ValueError: Can not overwrite "2" value in extend mode
    """
    initial_dict = initial_dict or {}
    # Deep copy so the caller's dict is never mutated.
    initial_copy = copy.deepcopy(initial_dict)
    for key, value in updates.items():
        if isinstance(value, Mapping) and isinstance(initial_copy.get(key), Mapping):
            # Both sides are mappings: merge them recursively.
            initial_copy[key] = merge_with_updates(
                initial_copy.get(key, {}), value,
                extend_only=extend_only, merge_lists=merge_lists
            )
        elif merge_lists and isinstance(value, list) and isinstance(
                initial_copy.get(key), list):
            # NOTE: only element [0] of a list of dicts is merged; any
            # other list content (or an empty update dict) is appended.
            if len(initial_copy[key]) == 0 or not isinstance(
                    value[0], Mapping) or value[0] == {}:
                initial_copy[key].extend(value)
            else:
                initial_copy[key][0] = merge_with_updates(
                    initial_copy.get(key, [{}])[0], value[0],
                    extend_only=extend_only, merge_lists=merge_lists
                )
        else:
            # Plain overwrite -- forbidden for existing keys in extend mode.
            if extend_only and key in initial_copy:
                raise ValueError(
                    f'Can not overwrite "{key}" value in extend mode'
                )
            initial_copy[key] = updates[key]
    return initial_copy
8137a276a27c18cb56b6f845696f829fa4481e60
46,697
def _content(obj): """Return content of obj as bytes""" if type(obj) is bytes: return obj if not isinstance(obj, memoryview): obj = memoryview(obj) return obj.tobytes()
5241b542c2d94b118c447c93a39b784bbe0d2ba5
46,698
def parse_posicao(f_element):
    """
    Helper for the constructor: parse one XML position element.

    Recognized tags: tipocoord (coordinate type), latitude (X/cpoA),
    longitude (Y/cpoB), cpoC (Z), cpoD.

    @param f_element: element to parse
    @return: data dictionary with at most one entry keyed by the tag name
    """
    parsed = {}
    tag = f_element.tagName()
    if tag in ("tipocoord", "latitude", "longitude", "cpoC", "cpoD"):
        parsed[tag] = f_element.text()
    return parsed
7725917f2b153e790c63fa7b67790e345970132a
46,699
def invert_colors_manualV2(img):
    """
    Return an image with reversed colors, without using numpy.

    Parameters:
        img : image to process
    Returns:
        image with every value v replaced by 255 - v
    """
    inverted = 255 - img
    return inverted
a8e86adbd86bf252651426af3d12b6e9bdf1cddc
46,700
import torch


def divide_sequence(
        x: torch.Tensor,
        seq_len: int,
        pad: bool) -> torch.Tensor:
    """
    Break full_len -> n_samples * seq_len

    Args:
        x: tensor: (full_len, ...)
        seq_len: Divided sequence length, the second dimension of the
            output tensor
        pad: Pad with zeros or discard the remainder sequence
    Returns:
        tensor, where the first input dimension (full_len, ) is split
        into (n_samples, seq_len)
    """
    full_len = x.size()[0]
    trailing_dims = list(x.size()[1:])
    remainder = full_len % seq_len
    if remainder != 0:
        if pad:
            # Append a zero block so full_len becomes divisible by seq_len.
            filler = torch.zeros(
                size=[seq_len - remainder] + trailing_dims, dtype=x.dtype)
            if x.is_cuda:
                filler = filler.cuda()
            x = torch.cat([x, filler], dim=0)
        else:
            # Drop the trailing remainder items.
            x = x[0:-remainder]
    return x.view(*([-1, seq_len] + trailing_dims))
c128ee735b92287b450d74f706714eb0a5dc6c25
46,701
def generate_verification(module_id, frame_id, data):
    """Generate the checksum field for a CAN frame.

    :param module_id: module ID (hex string)
    :param frame_id: frame ID (sequence of single hex-digit strings)
    :param data: frame data part (sequence of single hex-digit strings)
    :return: checksum string
    """
    id1 = int(module_id, base=16)
    id2 = 0
    for i in range(len(frame_id)):
        id2 += int(frame_id[i], base=16)
    data_sum = 0
    for i in range(len(data)):
        data_sum += int(data[i], base=16)
    verification = id1 + id2 + data_sum
    # NOTE(review): hex() yields '0x...'; slicing [3:] strips '0x' AND the
    # first hex digit. Presumably intended to keep only the low bits when
    # the sum is exactly three hex digits wide -- confirm against the
    # protocol spec.
    return hex(verification)[3:]
9cd5f9d9a470e6423c56bc5b721e1d980175a792
46,703
def read_log(log):
    """
    Parse a tab-separated training log file.

    The first (header) line is skipped. Each remaining line must hold
    epoch, loss, lr, train perplexity, val loss, val perplexity plus one
    trailing field that is ignored.

    Returns lists: epochs, losses, lrs, t_perps, vlosses, v_perps.
    """
    epochs, losses, lrs = [], [], []
    t_perps, vlosses, v_perps = [], [], []
    with open(log, 'r') as handle:
        rows = handle.readlines()
    for row in rows[1:]:
        epoch, loss, lr, t_perp, vloss, v_perp, _ = row.split('\t')
        epochs.append(float(epoch))
        losses.append(float(loss))
        lrs.append(float(lr))
        vlosses.append(float(vloss))
        v_perps.append(float(v_perp))
        t_perps.append(float(t_perp))
    return epochs, losses, lrs, t_perps, vlosses, v_perps
fff7b80e4b6f539925b14e7bf2daeb93b8214692
46,704
def sdfClearAllProps(mol):
    """
    sdfClearAllProps() -- strip every property from molecule ``mol``.

    Mutates ``mol`` in place and also returns it.
    """
    mol["keyvals"] = []
    return mol
f376498106b8541878ce63c65b6e89c0e3404fa0
46,706
def get_multi_values(columns_to_insert, insert_values_dict_lst):
    """
    Return the values for the placeholders in a multi-row INSERT query.

    :param columns_to_insert: column names in placeholder order
    :param insert_values_dict_lst: one column->value dict per row
    :return: flat list of values, row by row
    """
    return [
        row[column]
        for row in insert_values_dict_lst
        for column in columns_to_insert
    ]
9a7d9213f42eee303b3845ef8d1564ef72668c89
46,707
import re


def is_literature(paragraph: str) -> bool:
    """
    Check if a paragraph is a literature entry.

    A paragraph qualifies when it carries an ISBN/ISSN, a DOI, a volume
    marker, or a URL.

    Parameters
    ----------
    paragraph : str

    Returns
    -------
    is_literature : bool
    """
    doi_regex = re.compile(r"""(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])\S)+)""")
    issn_regex = re.compile(r"""ISSN \d+""", re.IGNORECASE)
    vol_regex = re.compile(r"""vol\. [IVCXL\d]+""", re.IGNORECASE)
    if "ISBN" in paragraph or "https://" in paragraph or "http://" in paragraph:
        return True
    return any(
        pattern.search(paragraph) is not None
        for pattern in (doi_regex, issn_regex, vol_regex)
    )
861c6332fb4eea0a696e1705c68dd49c6bab0885
46,708
def compute_MIF(ic):
    """
    Args:
        ic (IC): independent component.

    Returns:
        float: Myogenic identification feature -- the fraction of the
        channel-averaged PSD power lying above 20 Hz.
    """
    freqs, psd = ic.psd(verbose=False)
    avg_psd = psd.mean(axis=0)
    high_band = avg_psd[freqs > 20].sum()
    return high_band / avg_psd.sum()
423a96a9dfcc3c05b2fb05f97e7912a534b41c95
46,711
def pixel_to_world(geo_trans, x, y):
    """Return the top-left world coordinate of the pixel.

    :param geo_trans: geo transformation (tuple with six values)
    :param x: pixel column
    :param y: pixel row
    :return: (world_x, world_y)
    """
    world_x = geo_trans[0] + x * geo_trans[1]
    world_y = geo_trans[3] + y * geo_trans[5]
    return world_x, world_y
799a7a4cc7b2f9c1947f47a6474e6fbf625447da
46,713
import os
import numpy


def fsl_save_custom_timings(slice_orders, output_directory):
    """ Save acquisition slice timings in order to perform the slice
    timing with FSL.

    <process>
        <return name="timings_file" type="File" desc="the acquisition slice timings."/>
        <input name="slice_orders" type="List_Int" desc="the sequence slice orders."/>
        <input name="output_directory" type="Directory" desc="the output directory where fsl slice times are saved."/>
    </process>
    """
    # The output directory must already exist.
    if not os.path.isdir(output_directory):
        raise ValueError("'{0}' is not a valid directory.".format(
            output_directory))

    # Convert 1-based slice orders to 0-based float offsets.
    timings = numpy.asarray(slice_orders).astype(numpy.single) - 1

    # Express slice times as fractions of TR.
    timings = timings / numpy.max(timings)

    # FSL expects the timings as a text file on disk.
    timings_file = os.path.join(output_directory, "custom_timings.txt")
    numpy.savetxt(timings_file, timings)
    return timings_file
d81a41b1e61891f56d0dc091d4c2126b3e17e971
46,714
def convert(s, numRows):
    """Zigzag-convert a string and read it row by row (LeetCode #6).

    :type s: str
    :type numRows: int
    :rtype: str

    Fix: the row accumulator previously shadowed the builtin ``list``.
    """
    # Trivial cases: nothing to rearrange.
    if len(s) <= 0 or len(s) <= numRows or numRows == 1:
        return s
    rows = {}  # row index -> accumulated characters
    row = 0
    moving_down = True
    for ch in s:
        rows[row] = rows.get(row, '') + ch
        row += 1 if moving_down else -1
        # Reverse direction at the bottom and top rows.
        if row == numRows - 1:
            moving_down = False
        elif row == 0:
            moving_down = True
    return ''.join(rows.values())
309f9ba69b4f64eb00aeabeebe34dd807895b701
46,715
def preformatted(text: str) -> str:
    """Wrap *text* in triple backticks (pre-formatted markup)."""
    return '```' + text + '```'
152c9cf6ce78ffed74b23562f7f09195340ab9b0
46,716
def interest1(b, p, n):
    """
    INTEREST1(b, p, n): new balance after ``n`` years for an initial
    balance ``b`` and an annual interest rate ``p`` in per cent.
    """
    growth_factor = 1 + p / 100
    return b * growth_factor ** n
351ec07ed8e9c12728a6ae033eaaba7585ccf29d
46,719
import functools


def chain(func):
    """
    Decorator that makes class methods chainable by implicitly returning
    the object.

    :param func: method to wrap
    :return: wrapper that calls ``func`` and returns ``self``
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        func(self, *args, **kwargs)
        return self
    return wrapper
fe603c769d2ca7a3f9bbc31cb37a82c948062825
46,720
def find_factors(b):
    """Find factors of a number (printing each one as it is found)."""
    factors = []
    for candidate in range(1, b + 1):
        if b % candidate == 0:
            print(candidate)
            factors.append(candidate)
    return factors
61a2d8dc3727eed32752ac6dbd58ac74fdff9d67
46,721
def rename_dict_keys(input_dict, key_sets):
    """Rename keys in a dictionary.

    Parameters
    ----------
    input_dict : dict
        Dictionary for which to change the keys.
    key_sets : list
        Tuples of the form ``(old_key, new_key)``.

    Returns
    -------
    dict
        Shallow copy of ``input_dict`` with old keys swapped for new
        ones; a missing old key maps the new key to None.
    """
    renamed = input_dict.copy()
    for old_key, new_key in key_sets:
        renamed[new_key] = renamed.pop(old_key, None)
    return renamed
ebfe7eb12c16d8e9ba2d8f9f4c5b1d7a9b0f4716
46,722
def split(separator=None):
    """Create a generator factory that splits each input line on *separator*."""
    def _split(lines):
        for line in lines:
            yield line.split(separator)
    return _split
d9339dcb01ae0367251dda37679f053aad85852b
46,723
def get_img(item):
    """
    Get img data from item.

    The item itself already holds the image data, so it is returned
    unchanged.

    :param item:
    :return:
    """
    img = item
    return img
8244f271af81140dc28822f8f59f6c23e9073958
46,724
def check_args(args):
    """Validate parsed CLI arguments and return them unchanged.

    Only ``batch_size`` is actively checked; the result-dir and epoch
    checks are kept below as a disabled string-literal block.
    """
    # --result_dir
    """
    check_folder(os.path.join(args.result_dir, args.dataset, 'model'))
    check_folder(os.path.join(args.result_dir, args.dataset, 'img'))
    check_folder(os.path.join(args.result_dir, args.dataset, 'fakeA'))
    check_folder(os.path.join(args.result_dir, args.dataset, 'fakeB'))

    # --epoch
    try:
        assert args.epoch >= 1
    except:
        print('number of epochs must be larger than or equal to one')
    """
    # --batch_size
    # NOTE(review): a failed check only prints -- it does not abort --
    # and the bare except also masks AttributeError when ``args`` lacks
    # ``batch_size``. Kept as-is to preserve behavior.
    try:
        assert args.batch_size >= 1
    except:
        print('batch size must be larger than or equal to one')
    return args
f187403e78db81348a46eae0b4fa0fc4a5422cce
46,725
import requests


def check_german_wikipedia(title: str) -> bool:
    """
    Checks whether a page with the exact same title exists on German
    Wikipedia.

    :param title: page title to search for
    :return: True if an exact-title match appears in the top results
    """
    response = requests.get(
        url="https://de.wikipedia.org/w/api.php",
        params={
            "action": "query",
            "format": "json",
            "list": "search",
            "srlimit": 5,
            "srsearch": title,
        },
    )
    hits = response.json()['query']['search']
    print(len(hits))
    return any(hit['title'] == title for hit in hits)
3984aca101b3f0e0d5e6a323fe55a3ecb5cd7749
46,726
def setup_cmd_input(multi, sequences, ordering, structure=''):
    """Return the command-line input string to be given to NUPACK.

    :param multi: True for multi-strand (complex) input
    :param sequences: list of sequence strings
    :param ordering: list of 1-based strand indices, or None for the
        default order 1..n
    :param structure: optional structure line appended to the input
    :return: NUPACK stdin payload, stripped of surrounding whitespace
    """
    if not multi:
        # Single-complex form: strands joined by '+'.
        return ('+'.join(sequences) + '\n' + structure).strip()
    n_seqs = len(sequences)
    # `is None` instead of the former `== None` comparison.
    if ordering is None:
        seq_order = ' '.join(str(i) for i in range(1, n_seqs + 1))
    else:
        seq_order = ' '.join(str(i) for i in ordering)
    cmd_input = (str(n_seqs) + '\n' + '\n'.join(sequences) + '\n'
                 + seq_order + '\n' + structure)
    return cmd_input.strip()
ac5804b2caac14df875e323eeb404de55c6f15ae
46,728
from typing import Optional


def icon_for_battery_level(battery_level: Optional[int] = None,
                           charging: bool = False) -> str:
    """Return a battery icon valid identifier."""
    base = 'mdi:battery'
    if battery_level is None:
        return base + '-unknown'
    if charging:
        if battery_level > 10:
            # Snap to the nearest lower multiple of 20.
            step = int(round(battery_level / 20 - .01)) * 20
            return base + '-charging-{}'.format(step)
        return base + '-outline'
    if battery_level <= 5:
        return base + '-alert'
    if 5 < battery_level < 95:
        # Snap to the nearest lower multiple of 10.
        return base + '-{}'.format(int(round(battery_level / 10 - .01)) * 10)
    return base
6907255c3f2225ef382b39db02e75e97feba942b
46,729
def build_caching_info_message(
    job_spec, job_id, workflow_workspace, workflow_json, result_path
):
    """Build the caching info message with correct formatting."""
    return {
        "job_spec": job_spec,
        "job_id": job_id,
        "workflow_workspace": workflow_workspace,
        "workflow_json": workflow_json,
        "result_path": result_path,
    }
6573ca89698390ebb1d54e913ba1ba0a35b0566d
46,730
def build_queue_config_param(group_id, task_name, rt_id, topic, tasks,
                             dest_kafka, topic_prefix, sasl_config):
    """
    Build the configuration for a queue connector.

    :param group_id: cluster (consumer group) name
    :param task_name: unused; kept for interface compatibility
    :param rt_id: rt_id
    :param topic: source data topic
    :param tasks: max consumer concurrency
    :param dest_kafka: destination kafka bootstrap servers
    :param topic_prefix: destination topic prefix
    :param sasl_config: {
            "use.sasl": True/False,
            "sasl.user": "",
            "sasl.pass": "",
        }
    :return: configuration dict
    """
    base = {
        "group.id": group_id,
        "rt.id": rt_id,
        "connector.class": "com.tencent.bk.base.datahub.databus.connect.queue.QueueSinkConnector",
        "tasks.max": "%s" % tasks,
        "topics": topic,
        "dest.topic.prefix": topic_prefix,
        "producer.bootstrap.servers": dest_kafka,
    }
    base.update(sasl_config)
    return base
3039ca46ab0be00a040eb9888bf67ddfc8356e25
46,731
def url_is_new(url, object_store):
    """
    Check whether ``url`` (or any trivial variant of it) is absent from
    the store of already-reviewed URLs.
    """
    variants = (
        url,
        url.replace('www.', ''),
        url.replace('://', '://www.'),
        url.replace('http://', 'https://'),
        url.replace('https://', 'http://'),
        url + '/',
        url[:-1],
    )
    return not any(variant in object_store for variant in variants)
47f0aa686f9f9d1295f89315c84e3637ab0ff56e
46,733
def get_annotated_tweets(collection_name):
    """
    Dataframe of:
    text label
    txt1 l1
    txt2 l2

    Currently unimplemented -- always returns None.
    """
    return None
24cdb85192b71ca63c546505c95b2748118d3f7f
46,734
import os


def getInstrumentserverPath(*subfolder: str) -> str:
    """get the absolute path of the instrumentserver module

    by specifying a subfolder, get the absolute path of that.

    :example:
        >>> getInstrumentserverPath('foo', 'bar')
        /path/to/instrumentserver/foo/bar
    """
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, *subfolder)
b58f55e6cab90c291a7b504d63653bafa0687d44
46,736
def import_object(name):
    """
    Import an object from a module, by name.

    :param name: The object name, in the ``package.module:name`` format.
    :return: The imported object
    """
    if name.count(':') != 1:
        raise ValueError("Invalid object name: {0!r}. "
                         "Expected format: '<module>:<name>'."
                         .format(name))
    module_name, attr_name = name.split(':')
    # fromlist forces __import__ to return the leaf module.
    module = __import__(module_name, fromlist=[attr_name])
    return getattr(module, attr_name)
7822570779519954f2e06c5451c704fd905eb48a
46,738
import numpy


def no_fuse_unhandled(x, y):
    """No fuse unhandled"""
    shifted_x = x + 0.7
    shifted_y = y + 1.3
    total = shifted_x + shifted_y
    return total.astype(numpy.int32)
2a1812b006b0695e02f3d294aac22dd88e44ed6e
46,739
def buildEdgeDict(faces):
    """Build a vertex adjacency lookup from a face list.

    Arguments:
        faces ([[vIdx, ...], ...]): A face representation

    Returns:
        {vIdx: set(vIdx, ...)}: A dictionary keyed by vert index whose
        values are the set of verts sharing a face edge with it.
        (The old docstring incorrectly described the values as lists of
        "adjacent edges".)
    """
    edgeDict = {}
    for face in faces:
        for f in range(len(face)):
            # face[f-1] wraps around, so each vert is paired with its
            # two neighbors around the polygon.
            neighbors = edgeDict.setdefault(face[f - 1], set())
            neighbors.add(face[f])
            neighbors.add(face[f - 2])
    return edgeDict
d11c803d954087815362ee3ede910d894b473c1c
46,740
def merge(dict1, dict2):
    """
    Gives a single dictionary with all the most recent commands.

    ``dict2`` is updated in place with ``dict1``'s entries (``dict1``
    wins on conflicts) and the merged dict is returned.

    Fix: the function previously returned ``dict.update``'s None instead
    of the merged dictionary its docstring promised.
    """
    dict2.update(dict1)
    return dict2
e4b97c2da2876a47ed65caab95acc0cc7d6563e0
46,741
def add(x, y):
    """
    >>> add(3, 4)
    7
    >>> add(4, -1)
    3
    >>> add(0, 5)
    5
    """
    total = x + y
    return total
499614e650179171b5f0d694f55d36df8e19cd6f
46,742
def parse_input(puzzle_data):
    """Format the data from aocd: one integer per line."""
    return [int(line) for line in puzzle_data.split("\n")]
f115de7fd71ff8f987cb945332b18b282a3697cd
46,743
def my_sum_squares1(n):
    """Closed-form sum of squares 1^2 + ... + n^2 (returned as a float).

    >>> my_sum_squares1(3)
    14.0
    """
    # Same multiplication order as before so float rounding is identical.
    sixth = 1 / 6
    return sixth * n * (n + 1) * (2 * n + 1)
67d0ebc1f26f36f09ef4aed288697aa8b3217bd1
46,745
import inspect
import weakref
import functools


def weakref_cache(func):
    """
    (This cache is designed for the functions in the
    ``introspection.typing.introspection`` module.)

    Caches a function's return values based on the first argument. The
    cached input values are weakly referenced.

    Example::

        @weakref_cache
        def demo_func(foo, bar):

    Here, every call to ``demo_func`` with a new value for ``foo`` would be
    cached. The values passed to ``bar`` are completely ignored::

        >>> demo_func(int, 'bar')  # first call with foo=int, result is cached
        <some output>
        >>> demo_func(int, None)  # second call with foo=int, cached result is returned
        <the same output as before>

    If an input value can't be hashed or weakly referenced, the result of
    that call is not cached.
    """
    sig = inspect.signature(func)
    # Name of the first parameter -- the cache key is taken from it.
    cache_param = next(iter(sig.parameters))
    # Weak keys: entries vanish when the key object is garbage collected.
    cache = weakref.WeakKeyDictionary()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        bound_args = sig.bind(*args, **kwargs)
        cache_key = bound_args.arguments[cache_param]
        # TypeError covers unhashable or non-weak-referenceable keys.
        try:
            return cache[cache_key]
        except (KeyError, TypeError):
            pass
        result = func(*args, **kwargs)
        # If the cache_key isn't hashable, we simply won't cache this result
        try:
            cache[cache_key] = result
        except TypeError:
            pass
        return result

    return wrapper
b5c4bd2ed00fd7f0d4ebeaf1dd8510665f59ffba
46,746
async def add_subtractions_to_analyses(db): """ Add subtraction fields to analysis documents based on the subtraction of their parent samples. :param db: :return: """ # Return early if all analyses have subtraction fields. if await db.analyses.count_documents({"subtraction": {"$exists": False}}) == 0: return pipeline = [ { "$match": { "subtraction": { "$exists": True } } }, { "$group": { "_id": "$subtraction.id", "id_list": { "$addToSet": "$_id" } } } ] async for item in db.samples.aggregate(pipeline): sample_ids = item["id_list"] await db.analyses.update_many({"sample.id": {"$in": sample_ids}}, { "$set": { "subtraction.id": item["_id"] } }) return True
d53be684ecd89b16b603be7c6455af97461e424c
46,747
from typing import Any


def get_from_dict(dct: dict, key: tuple[str, ...]) -> Any:
    """Get value from dict using a multi-part key."""
    node: Any = dct
    for segment in key:
        # Each intermediate node must itself be a dict.
        assert isinstance(node, dict)
        node = node[segment]
    return node
64366f80dd896f31561f1ace2b768aa36c8058ad
46,748
def digit(n, k, base):
    """Return the k-th digit of n (0-indexed from the right) in ``base``.

    >>> digit(1234, 0, 10)
    4
    >>> digit(1234, 1, 10)
    3
    >>> digit(1234, 2, 10)
    2
    >>> digit(1234, 3, 10)
    1
    """
    shifted = n // base ** k
    return shifted % base
818c19539298c6ec05691a08ee1425ea800c5850
46,749
def howManyWeightsInTheANN(hiddenLayers, numHiddenLayers, numInputs):
    """
    Calculate how many weights the ANN needs in order to function.

    The first term includes one bias weight per first-layer neuron; the
    final term adds one weight per neuron of the last hidden layer.
    ``numHiddenLayers`` is unused (kept for interface compatibility).
    """
    total = (numInputs + 1) * hiddenLayers[0]
    for left, right in zip(hiddenLayers, hiddenLayers[1:]):
        total += left * right
    total += hiddenLayers[-1]
    return total
1a5d74d92dc5da892bf2b302d78b39d598db556e
46,751
def majority(samples, ignore_none=True): """ Find the most frequent element in a list. Arguments: samples (list): Input list. Its elements must be hashable. ignore_none (bool): If None is a valid value. Returns: object: The most frequent element in samples. Returns none if the input list is empty. """ freq_dict = {} most_freq_ele = None highest_freq = 0 for element in samples: if ignore_none and element is None: continue if element not in freq_dict: freq_dict[element] = 0 freq = freq_dict[element] + 1 freq_dict[element] = freq if freq > highest_freq: highest_freq = freq most_freq_ele = element return most_freq_ele
5929ed5ab7c19a1a77ef8c4d4d7205e6181e53cc
46,753
def new_layer(layer=None, n=1, activation='linear', regval=float(0),
              lreg=int(1), desc='fully connected'):
    """specify a new bodyplan layer

    Returns a dict with keys: layer, n, activation, regval, lreg, desc.

    NOTE(review): the ``layer`` parameter is immediately discarded by
    ``layer = {}``, and ``layer["layer"] = layer`` then stores the dict
    inside itself (a self-reference). This looks unintended -- the
    parameter was probably meant to carry a layer index. Confirm before
    relying on the "layer" entry.
    """
    layer = {}
    layer["layer"] = layer
    layer["n"] = n
    layer["activation"] = activation
    layer["regval"] = regval
    layer["lreg"] = lreg
    layer["desc"] = desc
    return layer
c61ce7c4a34f64214677d784987e5f47fb46f8ad
46,754
def get(k):
    """k -> (k -> v) -> v"""
    def lookup(d):
        return d[k]
    return lookup
e834a343ccdc8f37e3bc62d7572902da499324ba
46,758
def exclude_zero_decimals(value):
    """
    Remove the decimal part of ``value`` when every digit after the
    decimal point is zero.

    Returns None unchanged; otherwise returns a string, truncated at the
    decimal point only when the fraction is all zeros.

    Fix: inputs without a '.' (e.g. plain ints) previously crashed with
    ValueError from ``str.index``; they are now returned as strings
    unchanged. Also replaced ``== None`` with ``is None``.
    """
    if value is None:
        return value
    str_value = str(value)
    if '.' not in str_value:
        return str_value
    whole, _, decimals = str_value.partition('.')
    # All-zero fraction -> drop it entirely.
    if decimals and set(decimals) == {'0'}:
        return whole
    return str_value
04238f45d9e1e51d664341312db70bde82cf6498
46,759
import requests
from datetime import datetime


def commit_in_last_year(commits_url: str, headers: dict) -> str:
    """ 11. Has there been a commit in the last year?

    :param commits_url: API endpoint returning the latest commit (the
        response is assumed to carry commit.author.date -- confirm)
    :param headers: request headers (e.g. auth token)
    :return: rich-formatted message string
    """
    # Fetch the latest commit metadata.
    r = requests.get(commits_url, headers=headers).json()
    last_commit_date = r.get("commit").get("author").get("date")
    last_commit_date = datetime.strptime(last_commit_date, "%Y-%m-%dT%H:%M:%SZ")
    days_since_last_commit = (datetime.utcnow() - last_commit_date).days
    if days_since_last_commit > 365:
        message = f"[red]No. The last commit was {days_since_last_commit} days ago"
    else:
        # NOTE(review): the trailing space in the green message suggests the
        # "which was ..." suffix belongs only to this branch -- confirm the
        # original indentation intent.
        message = f"[green]Yes. The last commit was on {datetime.strftime(last_commit_date, '%m-%d-%Y')} "
        message += f"which was {days_since_last_commit} days ago"
    return message
93af56c1b71dac407fac84a0ea659a27100c87d7
46,760
from typing import Callable
from typing import Any
import inspect


def ignore_input(inner: Callable[[], Any]) -> Callable:
    """Returns `inner` function ignoring the provided inputs.

    >>> ignore_input(lambda: 0)(1)
    0
    """
    if inspect.iscoroutinefunction(inner):
        async def run_async(*args, **kwargs):
            return await inner()
        return run_async

    def run(*args, **kwargs):
        return inner()
    return run
9fd11556c0dcfcd045dc73027eea9dae9f034d40
46,762
import struct


def read_string_weight(weights_data, offset, num_strings):
    """Decode binary weight data for a tfjs string tensor.

    Each string is stored as a 4-byte little-endian unsigned length
    followed by that many raw bytes.

    :return: (list of byte strings, number of bytes consumed)
    """
    strings = []
    cursor = offset
    for _ in range(num_strings):
        (length,) = struct.unpack('<I', weights_data[cursor:cursor + 4])
        cursor += 4
        strings.append(weights_data[cursor:cursor + length])
        cursor += length
    return strings, cursor - offset
16289590de0b196f8ad6bb4925e2de8fd792c612
46,764
def get_milking_events():
    """
    Return milking events as a dictionary with date as key and quantity
    of milk as value, parsed from Calendar.txt.

    Each line is expected to look like ``<date>-MILKING-<quantity>``;
    lines with a different event type are skipped. (Note: the quantity
    keeps any trailing newline, as before.)

    Fixes: the file handle is now closed via ``with`` even on error, and
    the dead ``content = list()`` assignment was removed.
    """
    with open("Calendar.txt", "r") as container:
        content = container.readlines()
    milking_dictionary = {}
    for line in content:
        one_event_list = line.split("-")
        if one_event_list[1] == "MILKING":
            milking_dictionary[one_event_list[0]] = one_event_list[2]
    return milking_dictionary
d35418a2fa989e45a9e6542293aa3275c26260d1
46,766
import string
import random


def gen_random_str(length=32):
    """
    Generate a random string (letters+numbers).

    Args:
        length: string length (default: 32)
    """
    alphabet = string.ascii_letters + '0123456789'
    return ''.join(random.choice(alphabet) for _ in range(length))
9d7244a747c09455de0b7d9c3858022fcecf13af
46,768
def extract_source_phrase(line):
    """Extract the source phrase from an extract-file line."""
    # Everything before the first '|||' separator.
    phrase, _sep, _rest = line.partition(b'|||')
    return phrase
a1fe16c9bace30ab110920080d1b6eed97803d28
46,769
def cig_start_clip(cigar_tuple):
    """Return True when the CIGAR starts with a soft or hard clip.

    ``cigar_tuple`` is a pysam-style list of (operation, length) pairs;
    operation 4 is a soft clip (S) and 5 a hard clip (H).

    Fix: the original tested ``cigar_tuple[0][1] == 5`` -- the *length*
    field -- instead of the operation code for hard clips, so any read
    whose first CIGAR op had length 5 was misreported as clipped.
    """
    return cigar_tuple[0][0] in (4, 5)
cc209b83294db5ea0d269899c98a31900c0a2f23
46,770
import uuid


def transcript_to_gpd_line(tx, transcript_name=None, gene_name=None, direction=None):
    """Get the genpred format string representation of the mapping

    Overrides fall back to the values in ``tx._options`` when not given.

    :param tx: transcript object exposing ``_options`` and ``exons``
    :param transcript_name:
    :param gene_name:
    :param direction: strand override
    :type transcript_name: string
    :type gene_name: string
    :type direction: string
    :return: GPD line
    :rtype: string
    """
    tname = tx._options.name
    if transcript_name:
        tname = transcript_name
    gname = tx._options.gene_name
    if gene_name:
        gname = gene_name
    dir = tx._options.direction
    if direction:
        dir = direction
    # check for if we just have a single name
    if not tname:
        tname = str(uuid.uuid4())
    if not gname:
        gname = tname
    out = ''
    out += gname + "\t"
    out += tname + "\t"
    out += tx.exons[0].chr + "\t"
    out += dir + "\t"
    # start-1 throughout: presumably converting 1-based exon starts to
    # GPD's 0-based coordinates -- confirm against the exon objects.
    out += str(tx.exons[0].start-1) + "\t"
    out += str(tx.exons[-1].end) + "\t"
    out += str(tx.exons[0].start-1) + "\t"
    out += str(tx.exons[-1].end) + "\t"
    out += str(len(tx.exons)) + "\t"
    out += str(','.join([str(x.start-1) for x in tx.exons]))+','+"\t"
    out += str(','.join([str(x.end) for x in tx.exons]))+','
    return out
4b09d195b1beafb4cf7f0e3dc3b70443bd52477f
46,771
import re


def parse_fasta_header(line):
    """
    Returns gene_name, [(start, end), ..], strand for a given fasta header line.

    >>> parse_fasta_header(">lcl|NC_000913.2_cdsid_NP_417358.2 [gene=xanQ] [protein=xanthine permease] [protein_id=NP_417358.2] [location=3022373..3023773]")
    ('xanQ', [(3022373, 3023773)], '+')
    >>> parse_fasta_header(">lcl|NC_000913.2_cdsid_NP_414616.1 [gene=leuA] [protein=2-isopropylmalate synthase] [protein_id=NP_414616.1] [location=complement(81958..83529)]")
    ('leuA', [(81958, 83529)], '-')
    >>> parse_fasta_header(">lcl|NC_000913.2_cdsid_NP_417367.1 [gene=prfB] [protein=peptide chain release factor RF-2] [protein_id=NP_417367.1] [location=complement(join(3033206..3034228,3034230..3034304))]")
    ('prfB', [(3033206, 3034228), (3034230, 3034304)], '-')
    """
    # Regular expressions to match id and location
    #exp_id = re.compile("\[protein_id=([a-zA-Z0-9_\.]+)\]")
    exp_id = re.compile("\[gene=([a-zA-Z0-9]+)\]")
    exp_loc = re.compile("\[location=([a-zA-Z0-9_\.(),]+)\]")

    positions = []
    strand = '+'
    protein_id = None

    m = exp_id.search(line)
    if m:
        protein_id = m.group(1)

    start, end = None, None
    m = exp_loc.search(line)
    if m:
        loc_str = m.group(1)
        # "complement(...)" marks the reverse strand; strip the wrapper.
        if loc_str.startswith("complement"):
            strand = '-'
            loc_str = loc_str[11:-1]
        # "join(a..b,c..d)" lists several ranges; strip the wrapper.
        if loc_str.startswith("join"):
            loc_str = loc_str[5:-1]
            for pair in loc_str.split(","):
                start, end = map(int, pair.split(".."))
                positions.append((start, end))
        else:
            start, end = map(int, loc_str.split(".."))
            positions.append((start, end))

    return protein_id, positions, strand
d22648282247b30ef871e85602a4874e33ca4f10
46,772
def count_lines(fd):
    """
    Count the lines remaining in the file, returning to the current
    position afterwards.

    !!! This function assumes each line has the same number of bytes !!!

    Raises ValueError when the computed count is not an integer, i.e.
    the fixed-width assumption is violated.
    """
    start = fd.tell()
    fd.readline()
    after_one_line = fd.tell()
    fd.seek(0, 2)  # jump to EOF to learn the total size
    end = fd.tell()
    fd.seek(start, 0)  # restore the caller's position
    count = (end - start) / (after_one_line - start)
    if count != int(count):
        raise ValueError('Number of lines is not an integer: '
                         'perhaps the bytes / line is not constant?')
    return int(count)
d927fd9d3a534118b64dbff2863b5e1a19a119f3
46,773
def look_at_all_vizible_cells(data, i, j, simbol):
    """
    Look along all eight directions from (i, j) and count how many of
    the first visible non-floor ('.') cells are occupied ('#').

    Note: the ``simbol`` parameter is unused; '#' is hard-coded.
    """
    directions = [(0, 1), (1, 0), (0, -1), (-1, 0),
                  (1, 1), (1, -1), (-1, 1), (-1, -1)]
    occupied = 0
    for step_y, step_x in directions:
        y, x = i, j
        # Walk until the grid edge or the first non-floor cell.
        while 0 <= (y + step_y) < len(data) and 0 <= (x + step_x) < len(data[0]):
            y += step_y
            x += step_x
            if data[y][x] == "#":
                occupied += 1
            if data[y][x] != ".":
                break
    return occupied
7765d1c8442e13354adbe40fa3b1582c3f4145bd
46,775
def is_file_type(file, extension=["psd", "tga"]):
    """
    Return True if ``file`` ends with one of the given extensions
    (case-insensitive).

    Args:
        file (str): File name or full path.
        extension (list, optional): example: [
            "PSD", "MB", "MAX", "TGA", "BMP", "GIF",
            "JPEG", "MNG", "PBM", "PGM", "PNG",
            "PPM", "XBM", "XPM"
            ]

    Returns:
        Bool
    """
    # str.endswith accepts a tuple, so a single call tests every extension.
    suffixes = tuple(ext.lower() for ext in extension)
    return file.lower().endswith(suffixes)
0d472a6ba94ed3e2a8a48063ff2398b71257f0c7
46,776
def mvw_standard(prices, weight_bounds=(0.,1.), rf = 0., options = None):
    """
    Calculates the mean-variance weights given a DataFrame of returns.
    Wraps mean_var_weights with standard covariance calculation method

    NOTE(review): despite the summary above and the function name, this
    implementation stops after computing the sample covariance and returns
    it directly; ``weight_bounds``, ``rf`` and ``options`` are currently
    unused. Presumably the covariance was meant to be fed into a
    mean-variance optimizer -- confirm against callers before relying on
    the return value being weights.

    Args:
        * prices (DataFrame): Prices for multiple securities. Must provide
          a ``to_returns()`` method (a non-pandas extension, e.g. ffn/bt).
        * weight_bounds ((low, high)): Weigh limits for optimization.
        * rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
        * options (dict): options for minimizing, e.g. {'maxiter': 10000 }

    Returns:
        Sample covariance matrix of the NaN-dropped returns (not weights).
    """
    # Convert prices to returns, dropping rows with missing values.
    r = prices.to_returns().dropna()
    covar = r.cov()
    return covar
98d0e2bee27984fd2229e39450050ac85e6a3e4f
46,778
def change_user_password(
    self,
    username: str,
    current_password: str,
    new_password: str,
) -> bool:
    """Update an existing user's password

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - user
          - POST
          - /users/{newUser}/password

    :param username: Username for user
    :type username: str
    :param current_password: Current password for user
    :type current_password: str
    :param new_password: New password for user, Password must be at
        least 8 characters long and contain the following items:
        upper case letter, lower case letter, a number, a special
        character
    :type new_password: str
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    # 204 No Content is the expected success status for this endpoint.
    payload = {
        "oldPassword": current_password,
        "newPassword": new_password,
    }
    return self._post(
        "/users/{}/password".format(username),
        data=payload,
        expected_status=[204],
        return_type="bool",
    )
01c4d0d6fdb5592db96c44d37e6cb5eccad61803
46,779
def get_Di(M, A, Ds, S=None):
    """
    Obtain the IFOV vector in GCI coordinates.

    Parameters
    ----------
    M : numpy.matrix
        3x3 nadir-to-GCI rotation matrix M
    A : numpy.matrix
        3x3 Spacecraft Attitude Matrix
    Ds : 1-dimensional array of floats
        3-element IFOV vector in sensor coordinates
    S : numpy.matrix or None
        Optional 3x3 Sensor Alignment Matrix

    Returns
    -------
    Di : 1-dimensional array of floats
        3-element IFOV vector in GCI coordinates
    """
    # Without a sensor alignment matrix, rotate the sensor vector
    # directly through the attitude and nadir-to-GCI transforms.
    if S is None:
        return M * A.T * Ds
    # Otherwise include the sensor-to-body alignment first.
    return M * A.T * S.T * Ds
95aa7582eafe12be434fb3377d5d2ddc6a0b3137
46,781
import os

def extract_sample_id(filename):
    """
    Extract the sample id from ``filename``.

    Filenames are in the format 'ldbs<measurement id>_<sample id>.txt';
    the sample id is the part between the underscore and the extension.
    """
    # Drop the extension first, then keep what follows the underscore.
    stem, _ext = os.path.splitext(filename)
    return stem.split('_')[1]
eef3af63a855f05c7b03ea1736fc86bf606a0a3e
46,782
import colorsys def colorFromAngle2(angle, h=136, s=118, maxx = 0.8): """ Converts simple numbers into colors using HSL color scale. This can be used to shade surfaces more exposed to light brighter. args: angle: Higher values of this argument correspond to brighter colors. If a plane of a polyhedron makes a large angle with a light source, it will look brighter. """ l = 96+64*angle/maxx #/450 r, g, b = colorsys.hls_to_rgb(h/255.0, l/255.0, s/255.0) r, g, b = [x*255.0 for x in (r, g, b)] return (int(r),int(g),int(b))
fa17cded0bf880bdbeb92bec2d5874dbeec4fdff
46,785
def check_troposphere(altitude: float = 0.0) -> bool:
    """Check whether the input altitude is in the Troposphere.

    :param altitude: altitude to test (same units as the -610..11000
        bounds -- metres, presumably; confirm with callers).
    :return: True when -610.0 <= altitude <= 11000.0, else False.
    """
    # The chained comparison already yields the desired bool, so the
    # original if/else returning True/False literals was redundant.
    return -610.0 <= altitude <= 11000.0
bc3247a44358e8cf175ab8c06f11eaf7eae77a14
46,787
def MIPSsimulation(fadds,adds,s,a,b):
    """Execute one binary MIPS-like instruction and return updated state.

    (Docstring translated from Chinese.)

    Args:
        fadds: base address of the data segment.
        adds: current instruction address (program counter).
        s: 32-character binary instruction string.
        a: register file (list of register values).
        b: data segment (list of words, indexed by word offset).

    Returns:
        (adds, a, b): the next instruction address plus the register and
        data lists (both mutated in place by the instruction).
    """
    # Decode the fixed instruction fields from the binary string.
    op=s[0:6]
    rd=(int(s[16:21],2))
    rs=(int(s[6:11],2))
    rt=(int(s[11:16],2))
    sa=(int(s[21:26],2))
    im=(int(s[16:32],2))
    if(op=='010000'):#J
        adds=(int(s[6:32],2)<<2) #Jump to the target
    elif(op=='010001'):#JR
        adds=a[rs]
    elif(op=='010010'):#BEQ
        if(a[rs]==a[rt]):
            adds=adds+(im<<2)
        adds=adds+4
    elif(op=='010011'):#BLTZ
        if(a[rs]<0):
            adds=adds+(im<<2)
        adds=adds+4
    elif(op=='010100'):#BGTZ
        if(a[rs]>0):
            adds=adds+(im<<2)
        adds=adds+4
    elif(op=='010101'): #BREAK
        pass #BREAK performs no operation here
    elif(op=='010110'):#SW
        tmp=int((a[rs]+im-fadds)/4)
        b[tmp]=a[rt] #store the value of a[rt] into the data segment
        adds=adds+4
    elif(op=='010111'):#LW
        tmp=int((a[rs]+im-fadds)/4)
        a[rt]=b[tmp] #load the data word into a[rt]
        adds=adds+4
    if(op=='011000'):#SLL
        a[rd]=(a[rt])<<sa
        adds=adds+4
    elif(op=='011001'):#SRL
        a[rd]=(a[rt])>>sa#logical
        adds=adds+4
    elif(op=='011010'):#SRA
        a[rd]=(a[rt])>>sa#arithmetic
        adds=adds+4
    elif(op=='011011'):#NO
        pass #To perform no operation.
    elif(op=='110000'):#ADD
        a[rd]=a[rs]+a[rt]
        adds=adds+4
    elif(op=='110001'):#SUB
        a[rd]=a[rs]-a[rt]
        adds=adds+4
    elif(op=='110010'):#MUL
        a[rd]=a[rs]*a[rt]
        adds=adds+4
    elif(op=='110011'):#AND
        a[rd]=a[rs]&a[rt]
        adds=adds+4
    elif(op=='110100'):#OR
        a[rd]=a[rs]|a[rt]
        adds=adds+4
    elif(op=='110101'):#XOR
        a[rd]=a[rs]^a[rt]
        adds=adds+4
    elif(op=='110110'):#NOR
        a[rd]=~(a[rs]|a[rt])
        adds=adds+4
    elif(op=='110111'):#SLT
        a[rd]=(int)(a[rs]<a[rt])
        adds=adds+4
    elif(op=='111000'):#ADDI
        a[rt]=a[rs]+im
        adds=adds+4
    elif(op=='111001'):#ANDI
        a[rt]=a[rs]&im
        adds=adds+4
    elif(op=='111010'):#ORI
        a[rt]=a[rs]|im
        adds=adds+4
    elif(op=='111011'):#XORI
        a[rt]=a[rs]^im
        adds=adds+4
    return adds,a,b
f556d88b92fd5c2d3a532efbaf11c4057dd404aa
46,788
import requests

def scrape():
    """Fetch the AccurateRip drive-offsets page and return its HTML text."""
    # Network I/O: download the page body.
    response = requests.get('http://www.accuraterip.com/driveoffsets.htm')
    return response.text
162bb99c92b90c3a9e20e4bd632332b3bbce6570
46,789
def read_lifepo4wered(eLiFePO4weredVar):
    """ Read data from LiFePO4wered/Pi """
    # NOTE(review): stub -- ignores ``eLiFePO4weredVar`` and always
    # returns 1; the real hardware read is presumably not implemented yet.
    return 1
f424d8babd3edae88497704d4b14cc4495beeab3
46,790
def last_dig(a: int, b: int, c: int) -> bool:
    """Determine if last digit of c equals last digit of a * b."""
    # (a % 10) * (b % 10) shares its final digit with a * b, so the
    # product's last digit can be compared to c's directly.
    product_last = (a % 10) * (b % 10) % 10
    return product_last == c % 10
b95a3eed84b776b760071b10d1d1557a1fbfcd43
46,791
def tftransform(jet, tf):
    """Apply a fitted transform (e.g. a RobustScaler) to a single jet.

    Mutates ``jet`` in place: its "content" entry is replaced by the
    transformed values. The same dict is also returned for convenience.
    """
    jet["content"] = tf.transform(jet["content"])
    return jet
d49dbbbbb168c070ef914cdbb3f6cdac2a178006
46,792
import math

def safeArgs(args):
    """Iterate over valid, finite values in an iterable.

    Skip any items that are None, NaN, or infinite.
    """
    for arg in args:
        # None is skipped outright; NaN and +/-inf are filtered out.
        if arg is None:
            continue
        if math.isnan(arg) or math.isinf(arg):
            continue
        yield arg
d455bcd0cef7e6a47d1e967f17ba0e2dd08c25f4
46,793
from unittest.mock import patch

def mock_get_metadata():
    """Mock recorder.statistics.get_metadata."""
    mocks = {}

    def _get_metadata(_hass, *, statistic_ids):
        # Only ids present in ``mocks`` are reported; a None entry means
        # "use the default metadata" (1, {}).
        return {
            statistic_id: (
                mocks[statistic_id]
                if mocks[statistic_id] is not None
                else (1, {})
            )
            for statistic_id in statistic_ids
            if statistic_id in mocks
        }

    with patch(
        "homeassistant.components.recorder.statistics.get_metadata",
        wraps=_get_metadata,
    ):
        yield mocks
f5b50d2ae75cac42fa79c6b11ce570871b5522bf
46,795
def grabKmer(seq, starti, k=9):
    """Grab the kmer from seq starting at position starti with length k

    Return the gapped and non-gapped kmer
    If seq[starti] is a gap then the non-gapped kmer is None.
    If there are not enough non-gap AA to return after starti then it returns None

    Parameters
    ----------
    seq : str
        Sequence from which peptide will be grabbed.
    starti : int
        Starting position of the kmer (zero-based indexing)
    k : int
        Length of the peptide to return.

    Returns
    -------
    gapped : str
        A k-length peptide starting at starti from seq.
    nonGapped : str
        A k-length peptide starting at starti from seq. If seq[starti]
        is a gap then returns None. If not then all gaps are removed
        before taking the k-length peptide (if there aren't k AAs then
        return is None)"""
    if not isinstance(starti, int):
        starti = int(starti)
    # Out of range, or not enough residues left for a full k-mer.
    if starti < 0 or starti + k > len(seq):
        return None, None
    tail = seq[starti:]
    gapped = tail[:k]
    # A k-mer beginning on a gap has no meaningful non-gapped form.
    if gapped[0] == '-':
        return None, None
    if '-' not in gapped:
        return gapped, gapped
    # Remove gaps from the rest of the sequence, then retake k residues.
    degapped = tail.replace('-', '')
    nongapped = degapped[:k] if len(degapped) >= k else None
    return gapped, nongapped
10f2e135c27cf2986512017b2cf165520efae655
46,796
import os

def get_tests_to_be_skipped_path(skip_tests_file='tests_to_be_skipped_conditionally.yaml'):
    """
    Get path to file with dynamic skip information

    :param skip_tests_file: skip test file name
    :return: full path to skip test file name
    """
    # The skip file is expected to live next to this module on disk.
    return os.path.join(os.path.dirname(__file__), skip_tests_file)
77f2a3db38bde6359331a79c4538b169f51c8ff9
46,797
from pathlib import Path
import os

def get_cache_dir():
    """Return the cache dir under the home directory if possible,
    otherwise the one under the current working directory."""
    try:
        home = Path.home()
    except RuntimeError:
        # No resolvable home directory: fall back to the cwd cache.
        return '{}/.cache'.format(os.getcwd())
    return '{}/.abeja/.cache'.format(home)
30e3091fbf0731d0e9a9169d3ebb5a2b3543c727
46,798
import re

def parse_config(args):
    """Parse ``name=value`` config entries from ``args.config``.

    Each value is parsed with the first succeeding parser among int,
    float, eval and str, so ``x=1`` yields an int and ``x=foo`` a string.

    :param args: namespace with a ``config`` attribute (list of
        "name=value" strings, or None).
    :return: dict mapping entry names to parsed values.
    :raises ValueError: if an entry lacks '=' or the name is not a valid
        identifier.
    """
    parsers = [int, float, eval, str]
    config = dict()
    if args.config is not None:
        # Fix: raw string so \w is a regex word-char class, not a
        # (deprecated) string escape.
        valid = re.compile(r"[a-zA-Z_]\w*$")
        for entry in args.config:
            try:
                key, val = entry.split("=", 1)
            except ValueError:
                raise ValueError(
                    "Config entries have to be defined as name=value pairs."
                )
            if not valid.match(key):
                raise ValueError("Config entry must start with a valid identifier.")
            v = None
            for parser in parsers:
                try:
                    # SECURITY: ``eval`` in the parser list executes
                    # arbitrary code -- only feed this trusted input.
                    v = parser(val)
                    # avoid accidental interpretation as function
                    if not callable(v):
                        break
                except Exception:
                    # Fix: narrowed from a bare ``except:`` so Ctrl-C and
                    # SystemExit propagate; a failed parser just falls
                    # through to the next one.
                    pass
            assert v is not None  # str() always succeeds, so v is set
            config[key] = v
    return config
e5288542449fb16ad30e58e8569ba6958e817870
46,799
import importlib.metadata

def dbt_installed_version():
    """Returns: dbt version string, or None when dbt-core is not installed.

    Fixed: the previous implementation caught ImportError around
    ``pkg_resources.get_distribution``, but a missing package raises
    DistributionNotFound, so the None fallback never fired. The stdlib
    importlib.metadata API (the replacement for deprecated pkg_resources)
    raises PackageNotFoundError, which is handled here.
    """
    try:
        return importlib.metadata.version("dbt-core")
    except importlib.metadata.PackageNotFoundError:
        return None
9d337a00b6544fee050d3a0919c926c4473c352c
46,800
import time

def get_data_points(id, bps=72, presence=True):
    """
    Build a datastream payload stamped with the current Unix time.

    Epoch or Unix time: https://en.wikipedia.org/wiki/Unix_time
    On-line tool: https://www.epochconverter.com/
    Example: 1593099453

    Assuming that this timestamp is in seconds:
    GMT: Thursday, June 25, 2020 3:37:33 PM
    Your time zone: Thursday, June 25, 2020 5:37:33 PM GMT+02:00 DST

    :param id: device identifier placed in the payload.
    :param bps: value reported on the ccare.bps stream.
    :param presence: value reported on the ccare.presence stream.
    :return: dict in the datastream envelope format.
    """
    # Both streams share a single timestamp taken once.
    now = int(time.time())
    return {
        "version": "1.0.0",
        "device": id,
        "datastreams": [
            {"id": "ccare.bps", "datapoints": [{"at": now, "value": bps}]},
            {"id": "ccare.presence", "datapoints": [{"at": now, "value": presence}]},
        ],
    }
babd0f5516ba671eab85b507f54e7385d42eebc0
46,801
import fractions

def format_fraction(val, digits):
    """Format a Fraction as a decimal string with ``digits`` places,
    rounding the last place half-to-even (banker's rounding).

    :param val: fractions.Fraction value to render.
    :param digits: number of decimal places; must be positive.
    :raises ValueError: if digits <= 0.
    """
    if digits <= 0:
        raise ValueError()
    if val < 0:
        # Format the magnitude and prepend the sign.
        return "-" + format_fraction(-val, digits)
    scaler = 10 ** digits
    scaled = val * scaler
    whole = scaled.numerator // scaled.denominator
    remainder = scaled % 1
    half = fractions.Fraction(1, 2)
    # Round half to even: bump up past .5, or at exactly .5 when the
    # truncated value is odd.
    if remainder > half or (remainder == half and whole % 2 == 1):
        whole += 1
    return "{}.{}".format(whole // scaler, str(whole % scaler).zfill(digits))
67d37e233846e3e52c4d93d83f3e1c18b623b45e
46,802
import numpy

def dynamic_range(im):
    """Simplistic dynamic range: peak value divided by standard deviation.

    Fixed a misleading local name: the divisor is ``numpy.std`` (the
    standard deviation), but it was previously stored in a variable
    called ``mean``, contradicting both the name and the old "max/mean"
    docstring. Behavior is unchanged.

    :param im: array-like image data.
    :return: max(im) / std(im).
    """
    peak = numpy.max(im)
    std = numpy.std(im)
    return peak / std
57a394347c05bf608be122a25b840e2800e1e278
46,803
def dict_to_parameter_list(d):
    """Convert a mapping into a list of name/value parameter dicts.

    :type d: dict
    :rtype: list[dict]
    """
    result = []
    for key, value in d.items():
        result.append({u'name': key, u'value': value})
    return result
42cba15a14a85882665abebf576221874e3e7924
46,804
def isEven(v):
    """Return True when ``v`` is evenly divisible by two.

    >>> isEven(2)
    True
    >>> isEven(1)
    False
    """
    # divmod yields (quotient, remainder); even numbers leave remainder 0.
    _quotient, remainder = divmod(v, 2)
    return remainder == 0
4a21862b479c812ce4aa98835a53b65933f1cdbf
46,805
def _resolve_dotted_attribute(obj, attr):
    """Resolves a dotted attribute name to an object.

    Raises an AttributeError if any attribute in the chain starts
    with a '_'.
    """
    for name in attr.split('.'):
        # Refuse to traverse into private attributes.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
            )
        obj = getattr(obj, name)
    return obj
3463dfea3a617cd132df2d85e8003b1e320576cb
46,806
import random

def rand_filename(length: int = 6) -> str:
    """
    Return a random filename made of ``length`` uppercase ASCII letters.

    :param length: number of characters to generate.
    :return: random string of characters in 'A'..'Z'.
    """
    # 65..90 are the ASCII codes for 'A'..'Z'.
    codes = (random.randint(65, 90) for _ in range(length))
    return "".join(map(chr, codes))
f089c7d163aa384fbfcf1414e486d4ba7444022b
46,807
def extract_turls(indata):
    """
    Extract TURLs from indata for direct i/o files.

    Only files marked for remote I/O (status == 'remote_io') contribute;
    their TURLs are joined in input order. (The old loop-and-concatenate
    implementation that was left here commented out has been removed --
    the join below is its exact equivalent.)

    :param indata: list of FileSpec.
    :return: comma-separated list of turls (string).
    """
    return ",".join(
        fspec.turl for fspec in indata if fspec.status == 'remote_io'
    )
4bc769096b7a7d9cda485366ef47d5eeca4c4a12
46,808
def repeat_selection(base_algorithm, sampler, min_success, num_tries):
    """
    Repeat a set-returning selection algorithm `num_tries` times,
    returning all elements that appear at least `min_success` times.

    :param base_algorithm: callable taking ``sampler`` and returning an
        iterable of selected items.
    :param sampler: passed through to ``base_algorithm`` on every try.
    :param min_success: minimum number of appearances required.
    :param num_tries: how many times to run the algorithm.
    :return: set of items selected at least ``min_success`` times.
    """
    # Local import keeps this block self-contained within the file.
    from collections import Counter

    # Counter replaces the manual setdefault/increment bookkeeping.
    counts = Counter()
    for _ in range(num_tries):
        counts.update(base_algorithm(sampler))
    return {item for item, count in counts.items() if count >= min_success}
f25c292870ff0a50973f3259f3849cf81609e363
46,809
import json
import re

def parse_chap(contant):
    """Parse a chapter JSON payload into its text and title.

    Strips the <p>/</p> paragraph markup from the chapter content.
    """
    payload = json.loads(contant)
    chapter = payload['chapter']
    # Remove both opening and closing paragraph tags.
    text = re.sub("</?p>", "", chapter['chapter_content'])
    return {
        "text": text,
        "title": chapter["title"],
    }
2ad556fa6f722e414f8596360a8b6ad7959ef4fa
46,813
def chunk_products(products, product_limit):
    """Split products to list of chunks.

    Each chunk represents products per page; ``product_limit`` defines
    the chunk size (the final chunk may be shorter).
    """
    return [
        products[start:start + product_limit]
        for start in range(0, len(products), product_limit)
    ]
58e400ed7606486cb0adf586f85dde9d8519ed42
46,815
from typing import Callable

def compose2(f: Callable, g: Callable) -> Callable:
    """Compose two functions: the result applies ``g`` first, then ``f``."""
    def composed(*args, **kwargs):
        # Feed g's output straight into f.
        return f(g(*args, **kwargs))
    return composed
de7e7da7192cee12bceafa2939810eecedffc72d
46,816
def decode(node):
    """Convert cmdx Node to shortest unique path

    This is the same as `node.shortestPath()`
    To get an absolute path, use `node.path()`

    :param node: cmdx node-like object.
    :return: shortest unique path string, or the namespace-qualified
        name for node types that do not implement ``shortestPath``.
    """
    try:
        return node.shortestPath()
    except AttributeError:
        # Fallback for node types without shortestPath(): use the
        # namespaced name instead.
        return node.name(namespace=True)
2ffcc6884877d95a228c6c74106bbd36bc2ea4c1
46,817
def _escape_strings(strings):
    """Escape square brackets, double quotes and backticks with a backslash.

    >>> print(_escape_strings("hoge"))
    hoge
    >>> print(_escape_strings("[hoge"))
    \\[hoge
    >>> print(_escape_strings("hoge]"))
    hoge\\]
    >>> print(_escape_strings("[hoge]"))
    \\[hoge\\]
    >>> print(_escape_strings('[ho"ge]'))
    \\[ho\\"ge\\]
    """
    special = '[]"`'
    # Prefix each special character with a backslash, pass others through.
    return "".join(
        "\\" + ch if ch in special else ch
        for ch in strings
    )
e1a80def54cfe40da9634b5bbe7f157539a864d1
46,818
def strip_from_end(text, suffix):
    """
    Strip a substring from the end of a string

    Parameters
    ----------
    text : str
        The string to be evaluated
    suffix : str
        The suffix or substring to remove from the end of the text string

    Returns
    -------
    str
        A string with the substring removed if it was found at the end
        of the string.
    """
    # Slice off the suffix only when it actually terminates the text.
    if text.endswith(suffix):
        return text[:len(text) - len(suffix)]
    return text
01227c75cee0fc153dcebc153dd89cc5ea35c1d4
46,819
import struct

def _convert_unsigned(data, fmt):
    """Convert data from signed to unsigned in bulk.

    Packs the values with the signed format character, then re-reads the
    same bytes using the unsigned (upper-case) counterpart.
    """
    count = len(data)
    signed_fmt = "{}{}".format(count, fmt).encode("utf-8")
    unsigned_fmt = "{}{}".format(count, fmt.upper()).encode("utf-8")
    raw = struct.pack(signed_fmt, *data)
    return struct.unpack(unsigned_fmt, raw)
b65fa5fb1c7243ff831e95961bcc6528c5c57aae
46,821