content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def no_nans(curve):
    """Return True when the curve's dataframe is entirely NaN-free.

    Checks every cell of ``curve.df``, including the top and bottom rows.
    """
    nan_count = curve.df.isnull().sum().sum()
    return not bool(nan_count)
cbb5be578111e6d085a1f036873f9593a3db8a28
37,295
def has_changes(statements):
    """Return True if any SQL statement in *statements* is more than a comment."""
    # A statement counts as a change unless it starts as a comment line.
    return any(
        not stmt.startswith(('--', '\n\n--'))
        for stmt in statements
    )
d357443e34a575af2cb1827064fa372668097895
37,296
def disconnect(signal, slot):
    """Disconnect a Qt signal from a slot.

    This method augments Qt's Signal.disconnect():

    * Return bool indicating whether disconnection was successful, rather
      than raising an exception
    * Attempt to disconnect prior versions of the slot when using pg.reload

    :param signal: the Qt signal to disconnect from
    :param slot: the previously connected slot
    :return: True when the disconnect succeeded, False otherwise
    """
    try:
        signal.disconnect(slot)
        return True
    except (TypeError, RuntimeError):
        # Qt raises TypeError/RuntimeError when the pair was never
        # connected (or already disconnected); report as a soft failure.
        return False
6aee43431c153a48111f21541448009496f27dd3
37,297
def get_cidr(**kwargs):
    """Return the CIDR string ("<network>/<prefix>") from the given dc
    configuration dict (keyword argument ``dc_cfg``)."""
    dc_cfg = kwargs.get('dc_cfg', {})
    network = dc_cfg['dc_network']
    prefix = dc_cfg['dc_cidr']
    return network + '/' + prefix
afea2b8ced5841a8130d3b3647e5823b527da8c8
37,298
def generate_attribute_string(self, attributes):
    """Return one newline-terminated "name: value" line per attribute."""
    lines = [f"{name}: {getattr(self, name)}\n" for name in attributes]
    return "".join(lines)
142739db31b617934ac97a79794dfcd835deb98c
37,299
def parser_callback(ctx, param, value: str):
    """click callback that parses a raw option value to None, bool, int,
    float, or falls back to the original string.

    Args:
        ctx: click context (ignored)
        param: click parameter (ignored)
        value (str): value to parse

    Returns:
        The parsed value.
    """
    # Already typed (click may hand us a typed default).
    if isinstance(value, (int, float)):
        return value
    # Fix: a missing option arrives as None; the original crashed with
    # AttributeError on value.lower(). Treat it like the "None" string.
    if value is None or value == "None":
        return None
    # boolean ?
    if value.lower() in ["true", "ok", "yes"]:
        return True
    if value.lower() in ["false", "ko", "no"]:
        return False
    # maybe int
    try:
        return int(value)
    except (ValueError, TypeError):
        pass
    # maybe float
    try:
        return float(value)
    except (ValueError, TypeError):
        pass
    # return as str
    return value
fe0d028343cbcf3734fda630f15599524a6d27d9
37,300
from typing import Dict
from typing import List
from typing import Optional
import queue


def find_node_distance(
    i: int,
    vertex_connections: Dict[int, List[int]],
    target: Optional[int] = None,
):
    """BFS distances from vertex ``i`` to every other vertex.

    Args:
        i: starting vertex idx
        vertex_connections: adjacency lists; expects the vertices to be in
            a contiguous range starting from zero.
        target: optional vertex to measure the distance to.

    Returns:
        distance between vertex ``i`` and ``target`` if ``target`` is not
        None, otherwise the list of distances to every vertex.
    """
    n_vertices = len(vertex_connections.keys())
    # visited[n] for keeping track of visited nodes in BFS
    visited = [False] * n_vertices
    # Initialize distances as 0
    distance = [0] * n_vertices

    queue_: queue.Queue = queue.Queue()
    distance[i] = 0
    queue_.put(i)
    visited[i] = True

    while not queue_.empty():
        x = queue_.get()
        # Fix: the original reused ``i`` (the start-vertex parameter) as
        # the inner loop index, shadowing it; use a dedicated name.
        for neighbour in vertex_connections[x]:
            if visited[neighbour]:
                continue
            distance[neighbour] = distance[x] + 1
            queue_.put(neighbour)
            visited[neighbour] = True

    if target is not None:
        return distance[target]
    return distance
38f68e536d1863e1f00d5b429ce1ba359ff65977
37,301
import os def _expand_path(path): """Expands ~ and variables like $HOME from paths""" return os.path.expanduser( os.path.expandvars( path ) )
effdeebf0628ad51ebb075e4c46e8af8d1526631
37,303
import _warnings


def discard_short_fixations(fixation_sequence, threshold=50):
    """
    Deprecated in 0.4. Use
    `eyekit.fixation.FixationSequence.discard_short_fixations()`.

    :param fixation_sequence: the FixationSequence to filter
    :param threshold: minimum fixation duration to keep (default 50)
    :return: whatever the replacement method returns
    """
    # Emit a FutureWarning, then delegate to the replacement method so
    # existing callers keep working during the deprecation period.
    _warnings.warn(
        "eyekit.tools.discard_short_fixations() is deprecated, use FixationSequence.discard_short_fixations() instead",
        FutureWarning,
    )
    return fixation_sequence.discard_short_fixations(threshold)
dfd09cc1db421e480e569d94caf479c4d496906b
37,304
def sort_notes_into_folders(notes, folders): """ sorts notes into a dictionary with the folder names as the key :param notes: A list of notes objects :param folders: a list of tuples with each tuple holding (folder_id, folder_name) :return: a dictionary with all the notes, sorted into their folders as well as a all key which stores all the notes Note 1: the values in the keys is a list of note objects that is correlated to the folder name Eg. { 'test_folder': [Note_obj_1, Note_obj_2, ...], 'all': [all_note_objs] } """ # so that when the note gets appended to the dictionary, it will already be sorted notes.sort(key=lambda note: note.last_edited, reverse=True) folders = dict(folders) notes_sorted_by_folders = {'All':[]} # to store all the folder_names even when it has no notes for folder_id in folders: folder_name = folders[folder_id] notes_sorted_by_folders.setdefault(folder_name, []) for note in notes: if note.parent_folder_id != 0: # note has a folder folder_name = folders[str(note.parent_folder_id)] notes_sorted_by_folders[folder_name].append(note) notes_sorted_by_folders['All'].append(note) return notes_sorted_by_folders
fb4d788e59b2796c6a01a0b9ad132f44bb1693c1
37,305
def collision(obj1 : dict, obj2 : dict) -> bool:
    """ detects collision between 2 objects and returns some properties in list """
    # Axis-aligned bounding-box overlap test: returns False as soon as
    # the boxes are separated on either axis, True otherwise.
    # NOTE(review): the x axis treats 'x' as the box CENTRE (offset by
    # width/2) while the y axis treats 'y' as an EDGE (no height/2
    # offset) — confirm the coordinate convention is intentionally
    # asymmetric before relying on this.
    if (obj1['x'] + obj1['width'] /2 < obj2['x'] - obj2['width'] / 2
            or obj1['x'] - obj1['width'] / 2 > obj2['x'] + obj2['width'] / 2
            or obj1['y'] + obj1['height'] < obj2['y']
            or obj1['y'] > obj2['y'] + obj2['height']):
        return False
    return True
e80a740617f5467f5656c068b136c3b0fee9ba03
37,306
def get_url(post):
    """Return the URL of the actual JPG file for a post object.

    :param post: post dict; the link is read from post['data']['url']
    :return: a URL ending in '.jpg'
    """
    url = post['data']['url']
    # Fix: the original compared the bound method itself
    # (`url.endswith == 'jpg'`), which is always False, so every URL —
    # even one already ending in .jpg — got '.jpg' appended.
    if url.endswith('.jpg'):
        return url
    elif url.endswith('/'):
        return url.rstrip('/') + '.jpg'
    else:
        return url + '.jpg'
f4a953a1602e80fff50e0e2730f4976ffb79ff78
37,308
import uuid def _generate_token(): """Generates a new random CSRF token. :returns: A random CSRF token. """ return str(uuid.uuid4())
926a34a7625417d3944571d4f525f48405d9fe71
37,309
def cleanString(s):
    """Collapse all whitespace in *s* to single spaces and strip the ends.

    Fix: the doctests used Python-2 ``u'...'`` reprs, which fail under
    Python 3; they are rewritten below.

    >>> cleanString('foo     bar')
    'foo bar'
    >>> cleanString('foo bar')
    'foo bar'
    >>> cleanString('\\n  foo \\n\\n  bar ')
    'foo bar'
    >>> cleanString('')
    ''
    """
    # str.split() with no separator already discards leading, trailing
    # and repeated whitespace, so no per-word strip is needed.
    return ' '.join(s.split())
5a816b6bb0b93e869b2ce144cf8aaed305c0e9e8
37,310
from pathlib import Path
import os


def get_config_path() -> Path:
    """Get the configuration file's path from the TOM_CONFIG environment
    variable (empty path when the variable is unset)."""
    return Path(os.getenv('TOM_CONFIG', ''))
8b50b594147871e4d77b7d779c9a5865a708f829
37,312
def compute_pe(estimates, d):
    """Computes the Percentage Error.

    :param np.ndarray estimates: estimated values
    :param np.ndarray d: ground-truth
    :return: percentage error of *estimates* relative to *d*
    :rtype: float, np.ndarray
    """
    relative_error = (estimates - d) / d
    return 100 * relative_error
1644c9a8069a3fd9f7299cb300006bd6178588fe
37,313
def get_short_name(full_path):
    """Return the short name of an operation or service — the last
    '/'-separated component of *full_path*."""
    *_, short_name = full_path.rsplit('/')
    return short_name
820a2e8444cecbd7f164563ecd8887dc122bd126
37,315
from typing import Any
from typing import OrderedDict


def getvalue(object: Any, key: Any) -> Any:
    """Access a dict or class instance attribute value in a unified way.

    Returns None when the key/attribute is absent.
    """
    # Fix: the original compared `type(object) == OrderedDict` against the
    # *typing* alias, which is never true, so OrderedDict instances fell
    # through to the getattr branch and always yielded None.
    # isinstance() covers dict and every dict subclass, OrderedDict included.
    if isinstance(object, dict):
        return object.get(key)
    # Class instance
    if hasattr(object, key):
        return getattr(object, key)
    return None
e9e278c184771efa3505d266b6b7f2b50a8217f0
37,316
def model_name_input():
    """Keep prompting until the user enters a valid model name.

    Returns "lightning" (fast, lower quality) or "thunder" (slow, higher
    quality). Loops instead of recursing, but behaves identically.
    """
    while True:
        model_name = input("What is the model you want to use? lightning fast but bad, thunder slow but good:\n")
        if model_name in ("lightning", "thunder"):
            return str(model_name)
        print("Try again, not a valid model\n")
25dc7665ffd3cb6b2e0265bf4d13fcd22b40fa38
37,318
def getFileObject(file, openFlags="wb"):
    """Common code for every Canvas.save() operation.

    Takes a filename string or a writable file-like object and returns a
    valid file object, opening the named file with *openFlags* if needed.

    :param file: filename string, or object with a ``write`` attribute
    :param openFlags: mode used when *file* is a filename (default "wb")
    :raises ValueError: if *file* is falsy or neither a string nor writable
    """
    if not file:
        raise ValueError('Invalid file argument to save')
    # Fix: six.string_types is simply `str` on Python 3 — the non-stdlib
    # `six` dependency is unnecessary here and has been removed.
    if isinstance(file, str):
        return open(file, openFlags)
    if hasattr(file, "write"):
        return file
    raise ValueError('Invalid file argument to save')
dc9280735f245c2992436a4638b8f072d5ef3b03
37,319
def generalReplacements(tex):
    """
    Replace the common Latex macros that take in no arguments and all
    Latex definitions that contain a backslash (which may have unde-
    fined behavior in the actual manpage). Some text may need to be
    processed separately if it is followed by a period, which will cause
    all text in the same line following the period to be not shown.

    :param tex: the LaTeX source text
    :return: the text with macros rewritten to manpage-safe forms
    """
    # NOTE: ordering matters throughout — the longer "...{}" and
    # period-suffixed forms must be rewritten before their bare prefixes,
    # otherwise the shorter replacement would fire first and corrupt them.
    tex = tex.replace("\openshmem{}", "OpenSHMEM")
    tex = tex.replace("\openshmem", "OpenSHMEM")
    # acronym macros (\ac / \acp = singular / plural)
    tex = tex.replace("\\acp{PE}", "PEs")
    tex = tex.replace("\\ac{PE}", "PE")
    tex = tex.replace("\\ac{MPI}", "MPI")
    tex = tex.replace("\\acp{AMO}", "AMOs")
    tex = tex.replace("\\ac{AMO}", "AMO")
    tex = tex.replace("\\ac{API}", "API")
    tex = tex.replace("\\acp{RMA}", "RMAs")
    tex = tex.replace("\\ac{RMA}", "RMA")
    tex = tex.replace("\\ac{MPMD}", "Multiple Program Multiple Data (MPMD)")
    # language-name macros
    tex = tex.replace("\\CorCppFor", " C/C++/Fortran")
    tex = tex.replace("\\CorCpp{}", " C/C++")
    tex = tex.replace("\\CorCpp", " C/C++")
    tex = tex.replace("\\Fortran{}", "Fortran")
    tex = tex.replace("\\Fortran", "Fortran")
    tex = tex.replace("\\Cstd", "C")
    tex = tex.replace("\\PUT{}", "PUT")
    tex = tex.replace("\\GET{}", "GET")
    tex = tex.replace("\\SIZE{}", "SIZE")
    # troff italic markup for terms; the "." variants emit .IR so the
    # following period renders correctly in the manpage.
    tex = tex.replace("\\activeset.", "\n.IR \"Active set\" .\n")
    tex = tex.replace("\\activeset{}", "\\activeset")
    tex = tex.replace("\\activeset", "\n.I \"Active set\"\n")
    # NOTE(review): this line can never match — the bare "\activeset"
    # replacement above already consumed every occurrence.
    tex = tex.replace("\\activeset ", "\n.I \"Active set\"\n")
    tex = tex.replace("\\dest.", "\n.IR \"dest\" .\n")
    tex = tex.replace("\\source.", "\n.IR \"source\" .\n")
    tex = tex.replace("\\dest{}", "\n.I \"dest\"\n")
    tex = tex.replace("\\source{}", "\n.I \"source\"\n")
    tex = tex.replace("\\TYPE{}", "TYPE")
    tex = tex.replace("\\TYPENAME{}", "TYPENAME")
    return tex
738a6a1c6c50d3cfeb708d5d6c78cc0866d17968
37,320
def learningRateSchedule(baseLR, iteration):
    """Return the learning rate for a given training iteration.

    The base rate is halved after 300k iterations, and halved again after
    400k and 500k.

    :param baseLR: base learning rate
    :param iteration: current training iteration
    :return: the learning rate to use
    """
    # Fix: the original had three separate branches (>200000, >100000 and
    # else) that all returned baseLR — dead code, collapsed here.
    if iteration > 500000:
        return baseLR / 8
    if iteration > 400000:
        return baseLR / 4
    if iteration > 300000:
        return baseLR / 2
    return baseLR
5b36184af23d8492526703dba9c8826171922928
37,321
def parse_test_id(test_id):
    """Parse a test ID into useful parts.

    Takes a test ID and parses out useful parts of it::

        > parse_test_id('foo.bar.baz.test_mod.MyTestClass.test_method')
        {
            'scope': 'foo',
            'type': 'bar',
            'accreditation': 'baz',
            'file': 'test_mod',
            'class': 'MyTestClass',
            'method': 'test_method',
            'class_path': 'foo.bar.baz.test_mod.MyTestClass'
        }

    Fix: the original docstring example showed ``class_path`` including
    the method name, but the code (correctly) joins everything *except*
    the method — the example above now matches the behaviour.

    Note: scope/type/accreditation are False rather than strings if your
    path structure is shorter than the suggested six-part one.
    """
    parts = test_id.split('.')
    full_path = len(parts) == 6
    return {
        'scope': full_path and parts[0],
        'type': full_path and parts[1],
        'accreditation': full_path and parts[2],
        'file': parts[-3],
        'class': parts[-2],
        'method': parts[-1],
        'class_path': '.'.join(parts[0:-1])
    }
facd326fb5e31b72369fa1fc0dc17506ce128af1
37,322
import os
import re


def is_vrr_file(fname):
    """Return True when *fname* is a sub file doing VRR/VRR contraction
    work (its base name, extension removed, contains "vrr")."""
    stem = os.path.basename(os.path.splitext(fname)[0])
    return re.search(r"vrr", stem) is not None
5ebd876f9ce2f0a46c7a9c6319d0b10f8b63a643
37,323
def cmip5_mip2realm_freq(mip):
    """
    Returns realm and frequency given the mip in CMIP5
    """
    # Mapping written from the DKRZ data; some entries might be missing
    # (e.g. '3hr' is unknown).
    mip_table = {
        'Amon': ['atmos', 'mon'],
        'Omon': ['ocean', 'mon'],
        'Lmon': ['land', 'mon'],
        'LImon': ['landIce', 'mon'],
        'OImon': ['seaIce', 'mon'],
        'aero': ['aerosol', 'mon'],
        'cfDay': ['atmos', 'day'],
        'cfMon': ['atmos', 'mon'],
        'day': ['atmos', 'day'],
        'fx': ['*', 'fx'],
    }
    try:
        return mip_table[mip]
    except KeyError:
        # NOTE(review): despite the message this does not exit — it prints
        # and implicitly returns None, exactly like the original.
        print("ERROR - CMIP5: can not map mip to realm. Exiting")
cc586825309fe695572bc03fa037353e9008b38f
37,324
def parse_cellType(file_name):
    """
    Parsing file_name and extracting cell-type.

    Input file must be EXPERIMENT_AREA_CELL-TYPE.bam, so bamtools creates
    EXPERIMENT_AREA_CELL-TYPE.REF_chrN.PEAK; the cell type is the last
    underscore-separated token before the first dot.

    :param file_name: name to parse
    :return: cell_type
    """
    stem = file_name.split('.')[0]
    return stem.rsplit('_')[-1]
872886d5f267452b105794e6befc9c88946350b0
37,325
def point_num_to_text(num_fita):
    """Transform a point's order number into left-zero-padded 3-char text."""
    # rjust pads on the left with '0' up to width 3 and leaves longer
    # strings untouched — exactly the original branch-by-length logic.
    as_text = str(int(num_fita))
    return as_text.rjust(3, "0")
5042a7c200c4987748f825213f08788b44b091c1
37,326
def merge_dictionaries(dictionaries):
    """
    Merge a sequence of dictionaries safely.

    This function merges dictionaries together, but ensures that there are
    not same keys which point to different values. That is,
    merge_dictionaries([{'alpha': True}, {'alpha': True}]) is OK, but
    merge_dictionaries([{'alpha': True}, {'alpha': False}]) will raise an
    exception (AssertionError).
    """
    res = {}
    for dct in dictionaries:
        # Fix: dict.iteritems() was removed in Python 3 — use items().
        for (k, v) in dct.items():
            if k in res:
                assert res[k] == v
            res[k] = v
    return res
b536f68d79e1e55d2d1fa8f2ed4e26c5452003ad
37,327
import os


def isDataset(directory):
    """Checks if a given directory contains a single dataset, i.e. at
    least one '.tif' entry and one '.json' entry."""
    entries = os.listdir(directory)
    has_tif = any('.tif' in entry for entry in entries)
    has_json = any('.json' in entry for entry in entries)
    return has_tif and has_json
5a79dad1d67f650164f51441b3f9638b1948ef6a
37,328
import argparse


def _make_argparser():
    """Set up the argparse.ArgumentParser.

    :returns: a parser for running gabbi tests from STDIN, with two
        optional positionals (target, prefix) and the test-runner flags.
    """
    parser = argparse.ArgumentParser(description='Run gabbi tests from STDIN')
    # Positional: the primary target — either a full URL or host[:port].
    parser.add_argument(
        'target', nargs='?', default='stub',
        help='A fully qualified URL (with optional path as prefix) '
             'to the primary target or a host and port, : separated. '
             'If using an IPV6 address for the host in either form, '
             'wrap it in \'[\' and \']\'.'
    )
    # Positional: mount prefix, only meaningful with the host[:port] form.
    parser.add_argument(
        'prefix', nargs='?', default=None,
        help='Path prefix where target app is mounted. Only used when '
             'target is of the form host[:port]'
    )
    parser.add_argument(
        '-x', '--failfast',
        action='store_true',
        help='Exit on first failure'
    )
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help='Produce no test runner output'
    )
    # May be given multiple times; handlers accumulate in a list.
    parser.add_argument(
        '-r', '--response-handler',
        nargs='?', default=None,
        dest='response_handlers',
        action='append',
        help='Custom response handler. Should be an import path of the '
             'form package.module or package.module:class.'
    )
    parser.add_argument(
        '-v', '--verbose',
        dest='verbosity',
        choices=['all', 'body', 'headers'],
        help='Turn on test verbosity for all tests run in this session.'
    )
    # store_false flags: the option name is the *negation* of the dest.
    parser.add_argument(
        '-k', '--insecure',
        dest='cert_validate',
        action='store_false',
        default=True,
        help='Turn off ssl certificate validation.'
    )
    parser.add_argument(
        '--unsafe-yaml',
        dest='safe_yaml',
        action='store_false',
        default=True,
        help='Turn on recognition of Python objects in addition to '
             'standard YAML tags.'
    )
    return parser
5f2052da687328a00cff49051c63d985a26a6c96
37,329
import asyncio


async def my_task(seconds):
    """Simulate a unit of work that takes *seconds* to complete.

    Prints a progress message, sleeps asynchronously, then reports done.
    """
    print('This task is taking {} seconds to complete'.format(seconds))
    await asyncio.sleep(seconds)
    return 'task finished'
25decc9efbbd47a99cfe698a5565a689f9cc5a00
37,331
def hash_to_dir(hash):
    """
    Transforms a given hash to a relative path and filename
    ex: '002badb952000339cdcf1b61a3205b221766bf49' ->
        '00/2badb952000339cdcf1b61a3205b221766bf49'

    :param hash: the hash to split
    :rtype: string
    """
    prefix, remainder = hash[:2], hash[2:]
    return prefix + '/' + remainder
c3800e89da7ab6319076e45cbd051a84a55ff9f7
37,332
def ReadBBoxPredictFile(file_path):
    """Parse a bbox-prediction file into {image_name: [bbox, ...]}.

    File format (confidence as a percentage, coordinates absolute)::

        image_name:<image_name.jpg>
        <class_name>,<confidence>%,<x1>,<y1>,<x2>,<y2>
        ...
        end

    Returns:
        imgs_bbox: dict mapping each image name to its list of bbox dicts
        ('label', 'conf', 'x1', 'y1', 'x2', 'y2'), sorted by descending
        confidence.
    """
    # Fix: the original keyed each finished bbox list by the *next* raw
    # header line (including the "image_name:"/"end" text and newline),
    # attributing every image's boxes to the wrong key; it also never
    # closed the file.
    imgs_bbox = {}
    img_bbox = []
    current_img = None
    with open(file_path, 'r') as f:
        for raw in f:
            line = raw.strip()
            if line.startswith('image_name:') or line == 'end':
                # Flush the boxes accumulated for the previous image.
                if current_img is not None and img_bbox:
                    img_bbox.sort(key=lambda b: b['conf'], reverse=True)
                    imgs_bbox[current_img] = img_bbox
                img_bbox = []
                current_img = None if line == 'end' else line.split(':')[-1]
            elif line:
                fields = line.split(',')
                img_bbox.append({
                    'label': fields[0],
                    'conf': float(fields[1].split('%')[0]),
                    'x1': int(fields[2]),
                    'y1': int(fields[3]),
                    'x2': int(fields[4]),
                    'y2': int(fields[5]),
                })
    return imgs_bbox
086865ca68bd3387f090ffd5cdedac659cd10bbf
37,333
def make_esc(esc_chars):
    """Function generator for escaping special characters.

    Returns a callable that backslash-escapes every character of its
    argument that appears in *esc_chars*.
    """
    def escape(s):
        out = []
        for ch in s:
            out.append('\\' + ch if ch in esc_chars else ch)
        return ''.join(out)
    return escape
fb14f397b3c5803ef135d70a861817374506db2a
37,335
def get_as_list(somestring):
    """
    Input : a string like this : 'a, g, f,w'
    Output : a list like this : ['a', 'g', 'f', 'w']
    """
    without_spaces = somestring.replace(' ', '')
    return without_spaces.split(',')
bb8ce558b4cf21e59691363818e59e40d725de88
37,336
def element_to_set_distance(v, s):
    """Returns shortest distance of some value to any element of a set."""
    return min(abs(v - member) for member in s)
af77a125773312fd815cbad639b81601c427cf6b
37,338
import re


def find(seq, pattern, matcher=re.match):
    """
    Search pattern in each element in sequence and return the first
    element that matches the pattern. (Like re.search but for lists.)
    Returns None when nothing matches.
    """
    matches = (elem for elem in seq if matcher(pattern, elem) is not None)
    return next(matches, None)
956c640d52ab2161cf4cf8f5b270511fb514d4a8
37,339
from urllib.request import urlopen


def fetch_words():
    """Fetch a list of words from a url.

    :return: the list of words on success; None (after printing the
        error) when the fetch fails.
    """
    # Fix: the original did `from sys import exception` — sys.exception is
    # a *function* (and 3.11+ only), so the import crashed on older
    # Pythons and `except exception` could never catch correctly.
    try:
        story_words = []
        with urlopen('http://sixty-north.com/c/t.txt') as story:
            for line in story:
                # the webservice returns byte strings
                story_words.extend(line.decode('utf8').split())
        return story_words
    except Exception as e:
        print(e)
28de4d8b3b3b5d2d736f33760f4da790aecb4b33
37,340
def _get_shading(idf): """Get the shading surfaces from the IDF.""" shading_types = ["SHADING:ZONE:DETAILED", "SHADING:SITE:DETAILED"] shading = [] for shading_type in shading_types: shading.extend(idf.idfobjects[shading_type]) return shading
48a212035dae232265a1bb7979d11a0cd1b70d41
37,341
import subprocess


def execute(command, environment=None):
    """Executes command and verifies returncode == 0 and returns stdout.

    :param command: The command or list of command arguments to execute using the Popen command.
    :type command: string or list
    :param environment: The environment dictionary to use for the Popen command.
    :type environment: dict
    :return: Returns the stdout output from the command executed.
    :rtype: List of strings produced by the Popen.communicate call used.
    """
    # check_output raises CalledProcessError on a non-zero exit; stderr is
    # folded into the captured output, and universal_newlines decodes the
    # bytes to text.
    return subprocess.check_output(command,
                                   env=environment,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)
a748818ff74c2f1dcc42366b9cbee2a13a821ac0
37,345
def compute_taw(fc, pwp, depth, fraction):
    """
    Compute total available water

    :param fc: Field capacity
    :param pwp: permanent wilting point
    :param depth: depth of soil in mm
    :param fraction: float value
    :return: a float value for TAW
    """
    available = fc - pwp
    return depth * fraction * available
d97a1e4cc918228fc7b0e457f1fac3ce2502f62e
37,346
def pprint_blockers(blockers):
    """Pretty print blockers into a sequence of strings.

    Results will be sorted by top-level project name. This means that if
    a project is blocking another project then the dependent project will
    be what is used in the sorting, not the project at the bottom of the
    dependency graph.
    """
    pprinted = []
    for chain in sorted(blockers, key=lambda chain: tuple(reversed(chain))):
        text = chain[0]
        if len(chain) > 1:
            text += ' (which is blocking '
            text += ', which is blocking '.join(chain[1:])
            text += ')'
        pprinted.append(text)
    return pprinted
13a716bc33249f3eb467a9c2254d7f6d07b8e522
37,348
def parse_list_arg(args):
    """
    Parse a list of newline-delimited arguments, returning a list of
    strings with leading and trailing whitespace stripped and empty
    entries dropped.

    Parameters
    ----------
    args : str
        the arguments

    Returns
    -------
    list[str]
        the parsed arguments
    """
    stripped = (chunk.strip() for chunk in args.split("\n"))
    return [chunk for chunk in stripped if chunk]
5df3a59b4ced466e4107f494b8ed0e2fc6de917c
37,349
def after_n_iterations(n):
    """Return a stop criterion that stops after `n` iterations.

    The returned function inspects the ``n_iter`` field of the climin
    info dictionary and reports True once it reaches ``n - 1``.

    Parameters
    ----------
    n : int
        Number of iterations to perform.

    Returns
    -------
    f : function
        Stopping criterion function.

    Examples
    --------
    >>> S.after_n_iterations(10)({'n_iter': 10})
    True
    >>> S.after_n_iterations(10)({'n_iter': 5})
    False
    >>> S.after_n_iterations(10)({'n_iter': 9})
    True
    """
    last_iteration = n - 1

    def stop(info):
        return info['n_iter'] >= last_iteration

    return stop
134da90fa5890055417a3f89d185f74b082906d1
37,350
def _encode_asn1_str(backend, data):
    """
    Create an ASN1_OCTET_STRING from a Python byte string.

    :param backend: the cryptography OpenSSL backend (provides _lib and
        openssl_assert)
    :param data: the bytes to copy into the new octet string
    :return: the new ASN1_OCTET_STRING handle
    """
    # Allocate the OpenSSL object, then copy `data` into it;
    # ASN1_OCTET_STRING_set returns 1 on success.
    # NOTE(review): the caller presumably owns freeing the returned
    # object (or hands it to a structure that does) — confirm.
    s = backend._lib.ASN1_OCTET_STRING_new()
    res = backend._lib.ASN1_OCTET_STRING_set(s, data, len(data))
    backend.openssl_assert(res == 1)
    return s
2b673060b2793c740b0111e8280fd92a9b291fec
37,353
import re


def ignore_template_file(filename):
    """Return a match when *filename* should be ignored in the template
    dir: dotfiles, '*.cache' files, and editor backups ending in '~'."""
    ignored = re.compile(r"^\..*|.*\.cache$|.*~$")
    return ignored.match(filename)
0a9402e968cd3cc8a2ca7839c7d7100206ee663f
37,354
def _dtype_itemsize(dtype):
    """ size in bytes """
    # Assumes *dtype* is a short type string like 'f8' or 'i4': a
    # one-character kind code followed by the byte count.
    # NOTE(review): byte-order-prefixed strings like '<f8' would raise
    # ValueError here — confirm callers never pass them.
    return int(dtype[1:])
31669d98ef8b6c99fe87750d2153ac7d2b5a0872
37,355
import os, re


def ReplaceDollarVariablesWithEnvironment(text):
    """Replaces all substrings of the type `$ENV{FOO}` with the value of
    `FOO` obtained from `os.environ`.

    Fix: the original docstring claimed the syntax was `${FOO}`, but the
    regex has always matched `$ENV{FOO}` — the docstring now matches the
    behaviour.

    :raises KeyError: if a referenced variable is not defined in the
        process environment.
    """
    r = re.compile(r"\$ENV\{([^}]+)\}")

    def repl(m):
        name = m.group(1)
        if name in os.environ:
            return os.environ[name]
        raise KeyError("'%s' is not defined in the process environment" % name)

    return re.sub(r, repl, text)
d326488ea67dba895bb3cdb47e9cf2dacff96dcb
37,356
def remove_duplicates(cascade_nodes, cascade_times):
    """Keep only the first retweet of each person.

    Some tweets have more than one retweet from the same person; this
    drops every occurrence of a node after its first, removing the
    matching entries of ``cascade_times`` as well.

    :param cascade_nodes: list of node ids (one per retweet)
    :param cascade_times: parallel list of retweet times
    :return: the filtered (cascade_nodes, cascade_times) pair
    """
    # Fix: the original counted and rebuilt both lists once per duplicate
    # value (O(n^2)); a single keep-first pass is equivalent and O(n).
    seen = set()
    kept_nodes = []
    kept_times = []
    for node, time in zip(cascade_nodes, cascade_times):
        if node in seen:
            continue
        seen.add(node)
        kept_nodes.append(node)
        kept_times.append(time)
    return kept_nodes, kept_times
a853ca8c14819778a4e7024ac829c415a21fa394
37,358
from typing import List


def dna_union(
    start,
    end,
    audio_length: int,
    do_not_align_segments: List[dict],
) -> List[dict]:
    """
    Return the DNA list to include [start,end] and exclude do_not_align_segments

    Given time range [start, end] to keep, and a list of
    do-not-align-segments to exclude, calculate the equivalent
    do-not-align-segment list to keeping only what's in [start, end], and
    removing both what's outside [start, end] and do_not_align_segments.

    Args:
        start (Optional[int]): the start time of the range to keep, None
            meaning 0, i.e., the beginning of the audio file
        end (Optional[int]): the end of the range to keep, None meaning
            the end of the audio file
        audio_length (int): the full length of the audio file
        do_not_align_segments (List[dict]): the original list of DNA segments

    Returns:
        List[dict]: the union of DNA lists [[0, start], [end, audio_length]]
        and do_not_align_segments

    NOTE(review): the returned list aliases the input segment dicts (they
    are not copied) — callers mutating them will see shared state.
    """
    current_list = do_not_align_segments
    if start:
        # Prepend a DNA block covering [0, start) and fold overlapping
        # segments into it, growing `start` as needed.
        new_list = []
        new_list.append({"begin": 0, "end": start})
        for seg in current_list:
            if seg["end"] <= start:
                pass  # dna segments that end before start are subsumed by [0,start)
            elif seg["begin"] <= start:
                # segment straddles `start`: extend the leading DNA block
                start = seg["end"]
                new_list[0]["end"] = start
            else:
                new_list.append(seg)
        current_list = new_list
    if end:
        # Append a DNA block covering [end, audio_length) and fold
        # overlapping segments into it, pulling `end` back as needed.
        new_list = []
        for seg in current_list:
            if seg["begin"] >= end:
                pass  # dna segments after end are subsumed by [end, audio_length)
            elif seg["end"] >= end:
                # segment straddles `end`: the DNA tail starts at its begin
                end = seg["begin"]
            else:
                new_list.append(seg)
        new_list.append({"begin": end, "end": audio_length})
        current_list = new_list
    return current_list
efabb0391187500cb157bae20b2720028676c50b
37,359
def reverseString(self, s):
    """
    :type s: List[str]
    :rtype: None Do not return anything, modify s in-place instead.
    """
    # Fix: the original abused a list comprehension for its insert/pop
    # side effects and returned a list of Nones despite the "do not
    # return anything" contract; list.reverse() is the in-place,
    # O(n) equivalent.
    s.reverse()
1cc04e1d7c7b9081b6edd97f3c440a8129357a66
37,360
def dict_updater(source: dict, dest: dict) -> dict:
    """
    Recursively merge *source* into a copy of *dest* and return the new
    dict. Values from *source* win, except where both sides hold dicts
    under the same key — those are merged recursively.
    """
    merged = dest.copy()
    for key, value in source.items():
        both_dicts = isinstance(value, dict) and key in dest
        merged[key] = dict_updater(value, dest[key]) if both_dicts else value
    return merged
cf2aa4aa238b4d7a8057fc8c29fd6755eaf02ff4
37,361
def function_star(list):
    """Call a function (first item in list) with the remaining items as
    positional arguments.

    :param list: sequence whose first element is a callable and whose
        remaining elements are its arguments
    :return: whatever the called function returns
    """
    # Fix: the original popped the function off the caller's list,
    # mutating the argument as a side effect; unpacking leaves the
    # caller's sequence untouched.
    fn, *args = list
    return fn(*args)
5ff784703b9238bf5d99dd4a0e1313ed8f97df88
37,362
def _pdf_url_to_filename(url: str) -> str: """ Convert a PDF URL like 'https://www.mass.gov/doc/weekly-inmate-count-4202020/download' into a filename like 'weekly-inmate-count-4202020.pdf' """ name_part = url[25:-9] return f"{name_part}.pdf"
45d4e6608813161227a19bbc29b611d6568c8d23
37,364
def get_freq_weights(label_freq):
    """
    Weight each label inversely to its frequency so under-represented
    classes match the over-represented ones:
    weight = total_count / (num_labels * label_count).
    """
    total_size = sum(label_freq.values())
    n_labels = len(label_freq)
    return {
        label: total_size / (n_labels * freq)
        for label, freq in label_freq.items()
    }
7e0275365eb014806a6cf7c8f69415f243301d35
37,365
def step(seq):
    """One iteration of the look-and-say sequence.

    >>> step('1')
    '11'
    >>> step('11')
    '21'
    >>> step('21')
    '1211'
    >>> step('1211')
    '111221'
    >>> step('111221')
    '312211'
    """
    pieces = []
    run_char = seq[0]
    run_len = 0
    for ch in seq:
        if ch == run_char:
            run_len += 1
        else:
            # run ended: emit "<count><char>" and start a new run
            pieces.append(str(run_len) + run_char)
            run_char = ch
            run_len = 1
    pieces.append(str(run_len) + run_char)
    return "".join(pieces)
c8f03d7470769e957b5d6866afc46a1a420bde77
37,366
def subtract_number(x, y):
    """Return the difference of the two numbers (x minus y)."""
    difference = x - y
    return difference
32868ec101c36f5fb7267361a6da4eb017e38a47
37,367
import subprocess


def git_init(directory):
    """Runs the git command to initialize the plugin folder as a git repo.

    :param directory: path used as the working directory for ``git init``
    :return: the subprocess.Popen handle for the spawned command — the
        command runs asynchronously and the caller is responsible for
        waiting on it.
    """
    return subprocess.Popen(["git", "init"], cwd=directory)
2d0d59614097cde5e63fd79fca62d415ff266f0a
37,368
def listSplit(data, pageSize, pageNum):
    """
    Pre-paginate custom data (to improve system performance).

    :param data: list of all items
    :param pageSize: number of items per page
    :param pageNum: current page number (1-based)
    :return: dict with the total item count ('dataSum'), total page count
        ('pageSum') and the current page's items ('currentData')
    """
    dataSum = len(data)  # total number of items
    # Fix: '/' yields a float in Python 3; a page count must be an integer.
    pageSum = (dataSum + pageSize - 1) // pageSize  # total number of pages
    startIndex = (pageNum - 1) * pageSize  # first index of this page
    endIndex = min(startIndex + pageSize, dataSum)  # one past the last index
    currentData = data[startIndex:endIndex]  # items on this page
    return {'dataSum': dataSum, 'pageSum': pageSum, 'currentData': currentData}
c8b5ff15d91c10b42bea2c27f07baefa6dc2a3b9
37,369
import glob
import os


def logAndExport(args, workflowName):
    """Set up logging: pick the next run-numbered log file name.

    Scans args.outdir for existing '<workflowName>_run-<N>.log' files and
    returns a name whose N is one higher than the newest run found.

    :param args: namespace providing ``outdir``
    :param workflowName: workflow used in the log file name
    :return: the new log file name (not a full path)
    """
    # Write snakemake_cmd to log file
    fnames = glob.glob(os.path.join(args.outdir, '{}_run-[0-9]*.log'.format(workflowName)))
    if len(fnames) == 0:
        n = 1 # no matching files, this is the first run
    else:
        # the most recently created file determines the latest run number
        fnames.sort(key=os.path.getctime)
        n = int(fnames[-1].split("-")[-1].split(".")[0]) + 1 # get new run number
    # append the new run number to the file name
    logfile_name = "{}_run-{}.log".format(workflowName, n)
    return logfile_name
0eccc3f01666583f183756ab46c0af6d0990778a
37,371
import os


def substitute_irfs(pntfile, event_class):
    """
    Substitute IRFs for event class.

    Reads *pntfile*, replaces the first '_50h' occurrence on each line
    with the event class suffix, and writes the result to
    '<pntfile-stem>_<name>.dat'.

    Parameters
    ----------
    pntfile : string
        Pointing file
    event_class : dict
        Event class dictionary with 'name' and 'suffix' keys

    Returns
    -------
    string
        The written file name
    """
    # Fix: use context managers so both files are closed even on error.
    with open(pntfile, 'r') as f:
        lines = f.readlines()

    # Build new filename
    filename = os.path.splitext(pntfile)[0] + '_%s.dat' % (event_class['name'])

    # Replace IRFs in all lines (first occurrence only, as before)
    replaced = [line.replace('_50h', '_%s' % event_class['suffix'], 1)
                for line in lines]

    with open(filename, 'w') as f:
        f.writelines(replaced)

    return filename
b0e4b7605b2ed8c00adf6ea417de1bfd0aafacb8
37,372
def nearest(array, pivot):
    """find the nearest value to a given one

    Args:
        array (np.array): sorted array with values
        pivot: value to find

    Returns:
        value with smallest distance
    """
    def distance(candidate):
        return abs(candidate - pivot)
    return min(array, key=distance)
75d468613a766425346ae980cef3ab0bd19d5e1c
37,373
import logging


def get_builder_log(name):
    """Get a logging object, in the right place in the hierarchy, for a
    given builder.

    :param name: Builder name, e.g. 'my_builder'
    :type name: str
    :returns: New logger
    :rtype: logging.Logger
    """
    qualified_name = "mg.builders." + name
    return logging.getLogger(qualified_name)
2a158c3cff310f005fd42f8b9ee9ad61d68e08be
37,374
import argparse


def arg_parser():
    """ Returns an initialized ArgumentParser object

    Parser for the Issuu data analytics CLI: --gui/--plt are mutually
    exclusive, as are -v/-q. Several "required" options are deliberately
    optional because they are not mandatory in GUI mode (checked
    manually).
    """
    parser = argparse.ArgumentParser(description='Issuu data analytics software')
    gui_group = parser.add_mutually_exclusive_group()
    verbosity_group = parser.add_mutually_exclusive_group()
    gui_group.add_argument('--gui', action='store_true',
                           help='Use the graphical user interface')
    verbosity_group.add_argument("-v", "--verbose", action="count",
                                 help="Increases verbosity, add 'v's for even more verbosity")
    verbosity_group.add_argument("-q", "--quiet", action="store_true",
                                 help="Do not output to stdout/stderr")
    parser.add_argument("-t", "--task_id", action='store',
                        #required=True, # Cannot since it is not mandatory when using gui -> Manual checking
                        help="Task to execute")
    parser.add_argument("-d", "--doc_uuid", action="store",
                        #required=True,
                        help="Document 'doc_uuid' to analyse")
    parser.add_argument("-f", "--input_file", action="store",# required=True,
                        help='Issuu compliant input data file')
    parser.add_argument("-o", "--output_file", action="store",
                        help='Output file to write to')
    parser.add_argument("-u", "--user_uuid", action="store",
                        help='Issuu compliant input data file')
    parser.add_argument("-s", "--sort", action="store",
                        choices=['freq_asc','freq_desc','biased'],
                        help='Also likes document sort algorithm')
    parser.add_argument("-x", action="store_true",
                        help="Display also_likes graph with no distortion /!\ Breaks future multibar plots... If not set, plots are fine but graph is distorted.")
    gui_group.add_argument("--plt", action="store_true",
                           help='Show PLOTS (without gui)')
    return parser
110cc7c083d4e1016b23fc6215cfd9ca07c3d14a
37,375
def replace_redundant_grammemes(tag_str):
    """ Replace 'loc1', 'gen1' and 'acc1' grammemes in ``tag_str`` """
    for old, new in (('loc1', 'loct'), ('gen1', 'gent'), ('acc1', 'accs')):
        tag_str = tag_str.replace(old, new)
    return tag_str
5469efb333a8c0174d4696d6559442f77a1fb6da
37,376
import csv


def load_from_csv(path='hackernews_comments_with_model.csv'):
    """Load a csv as a list of tuples, dropping the header row.

    Generalisation: the file name was hard-coded; it is now a parameter
    whose default is the original file, so existing callers are
    unaffected.

    :param path: csv file to read
    :return: list of row tuples, header excluded
    """
    with open(path, encoding='utf-8') as f:
        rows = [tuple(line) for line in csv.reader(f)]
    return rows[1:]
200af160669eb6c52faf9b1b8f0a029be82a9993
37,377
def seating_rule(max_neighbours):
    """Build a per-seat update rule closure.

    The returned ``rule(state, neighbours)`` implements:
    - an empty seat ('L') with zero occupied neighbours becomes occupied ('#');
    - an occupied seat ('#') with at least ``max_neighbours`` occupied
      neighbours becomes empty ('L');
    - anything else (including floor '.') is unchanged.
    """
    def rule(state, neighbours):
        occupied = 0
        for neighbour in neighbours:
            if neighbour == '#':
                occupied += 1
        if state == 'L' and occupied == 0:
            return '#'
        if state == '#' and occupied >= max_neighbours:
            return 'L'
        return state
    return rule
7011b060d8ccfb378ccf15225af081ae09454e7f
37,380
def f_query(fi_int, ff_int, fecha_init, fecha_init_month, fecha_end): """Build an Oracle PL-SQL query string against the Naranja database. Arguments 'fi_int' and 'ff_int' are the endpoints of the time interval for the RFM model; both enter the query as integers. 'fecha_init' represents the same date as 'fi_int' but in a different format (%d/%m/%Y). 'fecha_init_month' is a %m/%Y variant and 'fecha_end' the interval end as %d/%m/%Y. NOTE(review): values are interpolated directly via f-strings; this is safe only if every argument comes from trusted internal code -- consider bind variables if any value can originate from user input.""" query = (""" select o.dim_cuentas, o.dni, o.apertura, o.antiguedad, o.limite_credito, (p.renta)/4 rentabilidad, q.nivel_riesgo_trim, o.frecuencia, o.recencia, o.monto from (select f.dim_cuentas dim_cuentas, f.dni dni, g.apertura apertura, g.antiguedad antiguedad, f.tipo_cuenta tipo_cuenta, g.limite_credito limite_credito, f.frecuencia frecuencia, """ f"(to_date('{fecha_init}', 'DD/MM/YYYY') - to_date(substr(f.fecha,1,10))) RECENCIA, " """f.monto monto from (select d.dim_cuentas dim_cuentas, e.tipo_cuenta tipo_cuenta, e.dni dni, d.frecuencia frecuencia, d.fecha fecha, d.monto monto from dw.dim_cuentas e inner join (select c.dim_cuentas, count(c.dim_cuentas) as frecuencia, max(c.atr_fecha_presentacion) as fecha, sum(c.monto_parcial) as monto from ( select a.dim_cuentas, a.atr_fecha_presentacion, a.met_importe, case WHEN a.ATR_MONEDA = 'Dolares' THEN a.met_importe * b.valor else a.met_importe end monto_parcial from dw.fac_consumos_comercios a left join( select atr_periodo, avg(valor) valor from riesgocrediticio.cartera_precio_dolar group by atr_periodo) b on a.dim_tiempos = b.atr_periodo """ f'where a.DIM_TIEMPOS BETWEEN {ff_int} AND {fi_int} ' f"and a.atr_fecha_presentacion between to_date('{fecha_end}', 'DD/MM/YYYY') and to_date('{fecha_init}', 'DD/MM/YYYY') " """and (a.ATR_ESTADO_CUENTA_LISTIN = 'RE Recuperable (Amarilla)' or a.ATR_ESTADO_CUENTA_LISTIN IS NULL) and (a.DIM_RUBROS <> 69 and a.DIM_RUBROS <> 27 and a.DIM_RUBROS <> 115 and a.DIM_RUBROS <> -1) and a.ATR_DEBITO <> 'S') c group by c.dim_cuentas) d on d.dim_cuentas = e.dimension_key ) f inner join (SELECT a.dim_cuentas 
dim_cuentas, a.met_limite_tc limite_credito, """ f"b.fecha_ingreso apertura, round(months_between(to_date('{fecha_init_month}', 'MM/YYYY'), to_date(to_char(b.fecha_ingreso, 'DD/MM/YYYY'), 'DD/MM/YYYY')), 2) antiguedad " """FROM DW.FAC_CUENTAS_SALDOS a INNER JOIN DW.dim_CLIENTES b ON b.DIMENSION_KEY = a.DIM_CLIENTES WHERE b.FECHA_BAJA = TO_DATE('00010101000000','YYYYMMDDHH24MISS') AND (a.ATR_ESTADO_CUENTA_LISTIN NOT IN ('PV Inconsistencia','CB Código blanco','SB Inconsistencia','CI Cuenta inhabilitada') OR a.ATR_ESTADO_CUENTA_LISTIN IS NULL) AND a.ATR_ESTADO_APERTURA = '096 - APROBACION' """ f'AND a.dim_tiempos = {fi_int}) g on f.dim_cuentas = g.dim_cuentas ) o ' """ left join (SELECT A.ATR_DNI DNI, STATS_MODE(B.NIVEL_RIESGO) NIVEL_RIESGO_TRIM FROM DW.FAC_NIVEL_RIESGO A LEFT JOIN DW.DIM_NIVEL_RIESGO B ON B.ID = A.ATR_NIVEL_RIESGO_ID """ f'WHERE a.DIM_TIEMPOS BETWEEN {ff_int} AND {fi_int} ' """GROUP BY A.ATR_DNI ) q on q.dni = o.dni LEFT JOIN (select atr_dni dni, dim_cuentas, met_rent_markov_ajuste_inf renta from dw.fac_rentacum """ f'where dim_tiempos = {fi_int}) p on p.dim_cuentas = o.dim_cuentas ' """ """) return query
4144b89f660872956c29958cdd2300d04012486b
37,381
def crossProduct(p1, p2, p3):
    """
    Cross product implementation: (P2 - P1) X (P3 - P2)
    :param p1: Point #1
    :param p2: Point #2
    :param p3: Point #3
    :return: z-component of the cross product of the two edge vectors
    """
    ax, ay = p2[0] - p1[0], p2[1] - p1[1]
    bx, by = p3[0] - p2[0], p3[1] - p2[1]
    return ax * by - ay * bx
4faf8f1d63192913c6a3abf726e343e377e9622a
37,383
import os


def average_depth(list_of_dirs, relative=True):
    """
    Returns the average depth of a list of directories. e.g.

    >>> dicomdirs = ['./short/path/to/dicomdir', './a/longer/path/to/a/dicomdir', './an/even/longer/path/to/a/dicomdir']
    >>> print('Average depth:', average_depth(dicomdirs))
    # Average depth: 4.66666666667

    Bug fix: the previous revision applied ``os.path.relpath`` inside the
    depth computation unconditionally, so the ``relative`` flag had no
    effect.  Now, when ``relative`` is False, paths are counted exactly as
    given.

    Raises ZeroDivisionError on an empty list (as before).

    :param list_of_dirs: A list of directories to check (list).
    :param relative: True if user wants relative depth (bool).
    :return: The average depth of the directories (float).
    """
    if relative:
        list_of_dirs = [os.path.relpath(d) for d in list_of_dirs]
    depths = [d.count('/') for d in list_of_dirs]
    return sum(depths) / len(depths)
d63cd0bbd8f11efa8e3eba34f3d5ff09a44220a2
37,384
from datetime import datetime


def date_passed(value):
    """Check whether an embargo date has already passed.

    args:
        value (str): date in "YYYY-MM-DD" format
    returns:
        (bool): True when *value* is strictly before today's date
    """
    embargo = datetime.strptime(value, "%Y-%m-%d").date()
    today = datetime.now().date()
    return embargo < today
fa3b3a47265dd15c6a9a6ba1820a3e7e61bbe945
37,385
from typing import Any
import typing


def split_annotated(type_hint: Any) -> tuple[Any, tuple[Any, ...]]:
    """Split an ``Annotated[...]`` hint into (base type, annotations tuple).

    Non-annotated hints are returned unchanged with an empty annotations
    tuple.
    """
    if typing.get_origin(type_hint) is typing.Annotated:
        base, *extras = typing.get_args(type_hint)
        return base, tuple(extras)
    return type_hint, ()
2bdf16e6617cead8f652393457f70152d94e4c76
37,386
def original_syslog_servers(duthost, tbinfo):
    """A module level fixture to store original syslog servers info

    Reads the extended minigraph facts from *duthost* and returns a fresh
    list of the configured syslog servers.
    """
    mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
    return list(mg_facts['syslog_servers'])
f99447e0ae49c0871d0905d1f0fc41ca8b9155b2
37,387
import sys


def check_file(fname):
    """Check that *fname* can be opened for reading.

    Prints an error message and returns 1 when the file is unreadable;
    returns 0 on success.  The previous revision contained unreachable
    ``sys.exit()`` and ``f.close()`` statements after the returns; the
    handle is now closed deterministically via ``with``.
    """
    try:
        with open(fname, "rb"):
            return 0
    except IOError:
        print("ERROR: Could not read file", fname)
        return 1
ea70e7b84ee5eed13f4d1a5f7ed1dc975b419ae3
37,388
from typing import Dict
from typing import Any


def value_from_dot_notation(data: Dict[str, Any], path: str) -> Any:
    """Resolve a dot-delimited *path* against the nested dictionary *data*.

    Example: ``path="a.b"`` returns ``data["a"]["b"]``.  A missing key
    raises KeyError, just like direct indexing.
    """
    node = data
    for segment in path.split('.'):
        node = node[segment]
    return node
847f08c3132d17da9923d5a78a06f6f831be34c8
37,389
def snakecase_to_kebab_case(key: str) -> str:
    """Convert a snake_case name to a ``--kebab-case`` CLI flag."""
    kebab = key.lower().replace("_", "-")
    return "--" + kebab
5a6f32ac7457b7d88bb717fbc1539ddca82177ce
37,390
def formatCoreTime(core_time_ms):
    """Format core time given in milliseconds as core hours with two
    decimal places."""
    core_hours = core_time_ms / 1000 / 3600.0
    return f"{core_hours:.2f}"
948245393134e1b9069ed6caab67fb5b028c2212
37,391
def run_fsm_on_ss_for_inst(fsm, ss):
    """Run FSM with a sequence of CV symbolic states as inputs and get
    instructions.

    Applies ``fsm.add_symbolic_state_for_instruction`` to each entry of the
    'val' column of *ss* and returns whatever ``.apply`` produces.
    """
    handler = fsm.add_symbolic_state_for_instruction
    return ss['val'].apply(handler)
74367b23555c2167fbe0052c6e078ceb6d75aeaa
37,392
def create_module_params(module):
    """
    Reads the module parameters and returns a dict
    :return: dict
    """
    params = module.params
    return {
        # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
        'ReplicationSubnetGroupIdentifier': params.get('identifier').lower(),
        'ReplicationSubnetGroupDescription': params.get('description'),
        'SubnetIds': params.get('subnet_ids'),
    }
8a1affe7f61d70ffc2bc4621b04d9d80acd371b4
37,393
import re


def get_stats(tweet):
    """Split a scraped tweet into its text and engagement counters.

    The trailing ``| <replies> <retweets> <likes>`` stats section, when
    present, is parsed into ints; otherwise all counters default to 0.

    input: string
    output: list ``[tweet_cleaned, replies, retweets, likes]``
    """
    stats_pattern = re.compile("\|.([0-9]+).*([0-9]+).*([0-9]+).*$")
    parts = stats_pattern.split(tweet)
    cleaned = parts[0].strip()
    try:
        counters = [int(parts[i]) for i in (1, 2, 3)]
    except (TypeError, IndexError):
        # No tweet stats included in scraped tweet
        counters = [0, 0, 0]
    return [cleaned] + counters
58cd75eb49ab30ffeac3be8e408f82d4f9c08842
37,396
def getEnergy(tl, t, tr, l, r, dl, d, dr): """helper for getEnergyMap that returns the energy of a single pixel given its neighbors""" vertEnergy = tl + 2 * t + tr - dl - 2 * d - dr horizEnergy = tl + 2 * l + dl - tr - 2 * r - dr return (vertEnergy ** 2 + horizEnergy ** 2) ** 0.5
7f9fc3edb7c8987a2a7fac7a4145e0054fb6b8ee
37,399
def fibonacci_search(array, target):
    """Search a sorted ``array`` for ``target`` using Fibonacci search.

    Returns the index of ``target`` in ``array``, or -1 when absent.  The
    precomputed Fibonacci table must contain an entry not smaller than the
    number of elements in the lookup table.

    Bug fixes versus the previous revision: the caller's list is no longer
    mutated (padding now happens on a private copy), and an empty input
    returns -1 instead of raising IndexError.

    >>> fibonacci_search([0, 1, 2, 4, 5, 6, 41, 56, 66, 77, 88, 91, 244, 1000, 2000, 5000, 10000, 20000], 1000)
    13
    >>> fibonacci_search([0, 1, 2, 4, 5, 6, 41, 56, 66, 77, 88, 91, 244, 1000, 2000, 5000, 10000, 20000], 255)
    -1
    """
    if not array:  # guard: previous revision crashed on an empty list
        return -1
    fib = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987,
           1597, 2584, 4181]
    left = 0
    right = len(array) - 1
    # k: index of the first Fibonacci number that is >= the element count
    k = 0
    while right > fib[k] - 1:
        k += 1
    # Pad a *copy* up to fib[k] elements by repeating the last element,
    # instead of appending to the caller's list as before.
    padded = list(array) + [array[right]] * (fib[k] - 1 - right)
    while left <= right:
        if k < 2:
            mid = left
        else:
            mid = left + fib[k - 1] - 1
        if target < padded[mid]:
            right = mid - 1
            k -= 1
        elif target > padded[mid]:
            left = mid + 1
            k -= 2
        else:
            # mid may point into the padded tail; clamp to the real range
            if mid <= right:
                return mid
            return right
    return -1
123e299ba898f30abf3a557ac981d21b261f269f
37,400
def _module_map(tests, tests_module): """Returns a dictionary mapping test module names to a list of the test case classes in the module :param tests: List of test case classes :param tests_module: The module object for ``<test_project>.tests`` :return: Dictionary mapping test module names to a list of the test case classes in the module """ module_prefix = tests_module.__name__ + '.' module_map = {} for test in tests: module = test.__module__.replace(module_prefix, '') if module not in module_map: module_map[module] = [] module_map[module].append(test) return module_map
7abcfbb1c32a9e60fcfa2700f12a3909989c0050
37,402
def test_draft_name():
    """Provide test draft name."""
    draft_name = "test-draft"
    return draft_name
d3a1b3710df625bb1f04a2b97d10bd743b5d2514
37,403
import os


def get_filename(file_path):
    """Return the final path component (the filename) of *file_path*."""
    _, tail = os.path.split(file_path)
    return tail
06b8564304c3d53e1817a9c1a3477be22930b2f8
37,404
def calc_line_x(y, slope, intercept):
    """Calculate x value given y, slope, intercept (truncated to int)."""
    x = (y - intercept) / slope
    return int(x)
6597394f49ca74f70ca110cd83b258751ba3262d
37,405
def coinChangeDp1(coins: list, amount: int):
    """Bottom-up DP: minimum number of coins summing to *amount*.

    Returns -1 when the amount cannot be formed from *coins*; 0 when
    amount is 0.

    Fixes versus the previous revision: the leftover debug ``print(dp)``
    is removed, and the fragile dict-based table (with mismatched default
    sentinels) is replaced by a list with a single INF sentinel.  Results
    are unchanged for all inputs.
    """
    INF = amount + 1  # strictly more coins than any solution can use
    dp = [0] + [INF] * amount  # dp[i] = min coins to make amount i
    for i in range(1, amount + 1):
        for coin in coins:
            if 0 < coin <= i and dp[i - coin] + 1 < dp[i]:
                dp[i] = dp[i - coin] + 1
    return dp[amount] if dp[amount] != INF else -1
e9b1e6fbf7525b1cff255a3e4f8c7e8d6020df45
37,406
def read_bands(bands):
    """Read an energy-bands file, one band per line.

    Each line's whitespace-separated fields are joined with '-'.

    Fix: the previous revision left the file handle open (``open()``
    without a matching ``close``); it is now managed with ``with``.

    :param bands: path to the bands file
    :return: list of '-'-joined band strings, one per input line
    """
    with open(bands) as fh:
        return ["-".join(line.split()) for line in fh]
b116e218df30c804142f6db74f9f886e424bef54
37,407
def join_answers(answer_list):
    """Join together and sort a list of answer lists.

    Fix: the previous revision called ``answers.join(item)``, which raises
    AttributeError for lists and silently discards the result for strings,
    and it never sorted.  This implements the documented contract:
    concatenate every answer collection and return the sorted result.

    :param answer_list: iterable of answer collections
    :return: one sorted list containing every answer
    """
    merged = []
    for answers in answer_list:
        merged.extend(answers)
    merged.sort()
    return merged
aaef95d645569eb8584419c11bdba464e884a81b
37,408
import logging


def get_logger(name: str) -> logging.Logger:
    """
    Gets the appropriate logger for this backend name, namespaced
    under 'proxytest'.
    """
    return logging.getLogger(f'proxytest.{name}')
d0e0f9de13d9b603326b70a6acdbff7f3288b421
37,409
def parse_str(s):
    """Parser for a (stripped) string.

    :s: the input string to parse
    :returns: *s* without leading and trailing whitespace
    """
    stripped = s.strip()
    return stripped
f8f44a30384f634f6f07acd3de19bcb6c42541b6
37,411
def write_test_files(tmpdir):
    """Placeholder helper: performs no writes and returns *tmpdir*
    unchanged.

    Cleanup: removed the commented-out ``os.write`` fragment and the
    redundant ``pass`` from the previous revision; behavior is identical.

    :param tmpdir: directory path handed through to the caller
    :return: *tmpdir* unchanged
    """
    return tmpdir
87bc79a627d87a0d866b8dabbb0db4bddeedcf4e
37,412
def generate_config_expected_result(config) -> dict:
    """Generate expected result for config"""
    ui = config['ui_options']
    invisible = ui['invisible']
    read_only = config['read_only']
    default = config['default']

    expected = {
        'visible': not invisible,
        'editable': not read_only,
        "content": default,
    }

    # A required field with an empty default cannot be saved and raises alerts.
    unfilled_required = bool(config['required'] and not default)
    expected['alerts'] = unfilled_required
    # Saving is additionally blocked by read-only mode or invisibility.
    expected['save'] = not (unfilled_required or read_only or invisible)
    expected['visible_advanced'] = bool(ui['advanced'] and not invisible)
    return expected
51f0836e36eb06991d67429199e7a86ad213bcde
37,413
import os

try:
    import pwd
except ImportError:  # pwd is POSIX-only; it does not exist on Windows
    pwd = None


def find_owner(path):
    """Get the username of the owner of *path*.

    Fix: the previous revision imported ``pwd`` unconditionally, so on
    Windows the ImportError fired at import time and the ``pwd is None``
    guard below was unreachable dead code.  Guarding the import makes the
    documented OSError actually raise.

    :param path: filesystem path to inspect
    :return: owning username as reported by the password database
    :raises OSError: on platforms without the ``pwd`` module (Windows)
    """
    if pwd is None:
        raise OSError("find_owner cannot be called on Windows")
    return pwd.getpwuid(os.stat(os.path.abspath(path)).st_uid).pw_name
0cce1d08258f124c901fb559c1ea47a9cc580fcf
37,414
def doc2str(document):
    """Render a document as plain text.

    document: list of list [n_sent, n_word]; each inner list of words
    becomes one space-joined line, lines joined by newlines.
    """
    return "\n".join(" ".join(words) for words in document)
e40d82317cd5cb655e2a88babfd6957dc3e76f52
37,415
import re
import logging


def rule_caption(text, cls):
    """Reclassify *cls* as a caption when the first line of *text* starts
    with "Figure"/"Fig" or "Table"/"Tbl"/"Tab" followed by a number.

    Objective: If first word is "Figure" or "Table" followed by a number,
    assume it's a caption.
    """
    first_line = text.split("\n")[0]
    new_cls = cls

    figure_hits = re.findall('^(figure|fig)(?:\.)? (?:(\d+\w+(?:\.)?)|(\d+))',
                             first_line, flags=re.IGNORECASE|re.MULTILINE)
    if figure_hits:
        new_cls = "Figure Caption"
        logging.info(f"Figure caption detected. Used to be {cls}")

    table_hits = re.findall('^(table|tbl|tab)(?:\.)? (?:(\d+\w+(?:\.)?)|(\d+))',
                            first_line, flags=re.IGNORECASE|re.MULTILINE)
    if table_hits and cls != 'Table':
        new_cls = "Table Caption"
        logging.info(f"Table caption detected. Used to be {cls}")

    return new_cls
296ee0d8613ee5d2b4bc0e0d00692739ffb18543
37,416
def get_dict_fields(dict_obj):
    """Return the fields (keys) of a ``dict`` object as a list.

    Fix: the docstring promised a list but the previous revision returned a
    live ``dict_keys`` view; materialising it matches the documented
    contract and decouples the result from later mutation of the dict.
    Iteration order is the dict's insertion order, as before.
    """
    return list(dict_obj)
b4c8165ca8b883c6503c64d62e31ab99c932884b
37,418
import os


def merge_relative_path(dst_path, rel_path):
    """Merge a relative tar-file path onto a destination (which can be
    "gs://...").

    Raises ValueError when *rel_path* escapes upward ("..") or the merge
    would leave the destination prefix.
    """
    # Make rel_path relative and collapse ".", "..", "//" components —
    # these are valid directory names in filesystems like "gs://".
    normalized = os.path.normpath(rel_path.lstrip("/"))
    if normalized == ".":
        return dst_path
    # Reject any path that still tries to climb above the destination.
    if normalized.startswith(".."):
        raise ValueError("Relative path %r is invalid." % rel_path)
    result = os.path.join(dst_path, normalized)
    # Post-merge sanity check: the destination prefix must survive intact.
    if not result.startswith(dst_path):
        raise ValueError("Relative path %r is invalid. Failed to merge with %r." %
                         (rel_path, dst_path))
    return result
cc295313883dc636b5f9528004e76686f40de786
37,420
def _window_invalid(d, win):
    """Small helper used internally in this module.

    Returns True when the TimeWindow *win* lies strictly inside the time
    range of the data object *d* (d.t0 < win.start and d.endtime() > win.end).

    NOTE(review): the name and original docstring say True means the range
    is "invalid", with deliberately reversed logic — yet the condition as
    written is True exactly when the window fits inside the data, which
    reads like the *valid* case.  Confirm the intended polarity against
    the call sites before changing anything here.
    """
    if d.t0 < win.start and d.endtime() > win.end:
        return True
    else:
        return False
92dee76c7d90c9961763abdf7b0b914f78546045
37,421
def kernel_scale_factor(
    dimensionality: float, num_points: int, num_samples: int
) -> float:
    """Compute a scaling factor accounting for dimensionality as well as
    the number (or ratio) of samples: (num_points/num_samples)^(1/dim)."""
    sample_ratio = num_points / float(num_samples)
    exponent = 1.0 / dimensionality
    return sample_ratio ** exponent
9f2d5c5284ef90085ddb0db4fb4b4a35ed026ee0
37,422