content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _keytify_test_cases(test_cases):
    """Build a dictionary mapping test-case name to its duration.

    Keys have the form "<classname>/<name>"; cases with a missing or
    zero duration are omitted so lookups stay meaningful.
    """
    durations = {}
    for case in test_cases:
        raw_time = case.get('time')
        if raw_time is None or float(raw_time) == 0.0:
            continue
        durations["%s/%s" % (case.get('classname'), case.get('name'))] = float(raw_time)
    return durations
3adb4f94e617bc03722cb860f0bcd2c388d6ec1a
697,422
def vv_vel(vel, dt, force, force_previous, mass):
    """Update velocities in place using the velocity-Verlet scheme.

    Args:
        vel: np.array(N, dim) of current particle velocities (mutated in place)
        dt: timestep
        force: np.array(N, dim) of current net forces on each particle
        force_previous: np.array(N, dim) of forces at the previous timestep
        mass: particle mass (scalar)

    Returns:
        vel: the same array, updated.

    Notes:
        Explicit per-element loops are kept deliberately (the original
        notes say this is intended for numba @njit compilation — not
        visible here, so confirm before vectorizing).
    """
    n_particles, n_dims = vel.shape
    for p in range(n_particles):
        for d in range(n_dims):
            vel[p, d] += 0.5 * dt * (force[p, d] + force_previous[p, d]) / mass
    return vel
df56e09ad191b161d258f1decf2bdcaedf7bded9
697,423
import requests
import json


def getdetailedinfofromid(url, token, personid):
    """Query the WebEx Teams people API for all data about one user.

    @param url: base URL for the WebEx Teams API calls
    @param token: WebEx Teams token used to authorize the query
    @param personid: person ID of the end user to look up
    @return: dict of identification data for the user (avatar, created,
        displayName, emails, firstName, id, lastName, nickName, orgId,
        type) on success, or '' on request failure / non-200 response.
    """
    endpoint = url + "/v1/people?id=" + personid
    # Headers required by the WebEx Teams API.
    request_headers = {'Authorization': 'Bearer {}'.format(token),
                       'content-type': 'application/json'}
    try:
        resp = requests.get(endpoint, headers=request_headers)
    except requests.exceptions.RequestException as exc:
        print(exc)
        return ''
    if resp.status_code != 200:
        return ''
    message_dict = json.loads(resp.text)
    message_dict['statuscode'] = str(resp.status_code)
    return message_dict['items'][0]
c6bd54182cb8c7cc109ad68505fca5887a6f9fad
697,425
import pkg_resources


def sample_lmp_zones_raster_file():
    """Return the path to the bundled sample LMP zones 1 km raster file."""
    return pkg_resources.resource_filename('cerf', 'data/lmp_zones_1km.img')
fe3f0c1157a1230675b52cde146921a3f7268a7a
697,426
def map_nlist(nlist, fn):
    """Apply *fn* to every leaf of an arbitrarily nested list.

    https://stackoverflow.com/a/26133679

    :param nlist: input (possibly nested) list
    :param fn: function applied to each non-list element
    :return: new nested list with the same shape and mapped leaves
    """
    return [map_nlist(item, fn) if isinstance(item, list) else fn(item)
            for item in nlist]
5f808eff663eae9b38073804241295c6976237a3
697,427
def s3_upload_part_copy(
    s3_obj, bucketname, copy_source, object_key, part_number, upload_id
):
    """Boto3-client based ``upload_part_copy`` operation.

    Args:
        s3_obj (obj): MCG or OBC object exposing an ``s3_client``
        bucketname (str): Name of the destination bucket
        copy_source (str): "{bucket}/{key}" of the source object
        object_key (str): Unique identifier for the copied object
        part_number (int): Part number
        upload_id (str): Upload id

    Returns:
        dict: upload_part_copy response
    """
    client = s3_obj.s3_client
    return client.upload_part_copy(
        Bucket=bucketname,
        CopySource=copy_source,
        Key=object_key,
        PartNumber=part_number,
        UploadId=upload_id,
    )
e89fdcf85616eeeab6c8da93dbbd290ad992820b
697,428
import math
import torch


def sdeint_euler(f, g, dif_g, t, h, x0, mid_state=None):
    """Euler–Maruyama integration of an SDE from t0=0 to t1=t.

    Diagonal noise is assumed.

    Args:
        f: drift function of (t, X) returning a d-dimensional vector.
        g: diffusion function of (t, X); since G = g(t, X) is assumed
           diagonal, g returns just the diagonal (d-dimensional vector).
        dif_g: unused here (kept for interface compatibility).
        t: final time.
        h: requested step size (actual step may be slightly smaller).
        x0: initial value of X.
        mid_state: optional list collecting a detached copy of X at the
           start of every step.

    Returns:
        d-dimensional tensor with the integration result X(t).
    """
    n_steps = int(t / h) + 1
    step_size = t / n_steps
    sqrt_step = math.sqrt(step_size)
    # Reusable noise buffer; refreshed in place each step via normal_().
    noise = torch.randn_like(x0).to(x0)
    noise.requires_grad = False
    cur_t = 0
    x = x0
    for _ in range(n_steps):
        if mid_state is not None:
            mid_state.append(x.detach().clone())
        dW = sqrt_step * noise.normal_()
        x = x + f(cur_t, x) * step_size + g(cur_t, x) * dW
        cur_t += step_size
    return x
f9147add232a244cd94d91f95610ff37987c07c3
697,430
def _translate(expr, trexpr, newexpr=None):
    """Private translate helper, safe to call repeatedly in a loop.

    With *newexpr*, characters of *trexpr* are mapped positionally onto
    characters of *newexpr*; characters of *trexpr* beyond the length of
    *newexpr* are deleted.  Without *newexpr*, every character of
    *trexpr* is deleted.
    """
    expr = str(expr)
    trexpr = str(trexpr)
    if newexpr is None:
        # Deletion-only map.
        trmap = {ord(ch): None for ch in trexpr}
    else:
        repl = str(newexpr)
        trmap = {}
        for idx, ch in enumerate(trexpr):
            # Positional match; unmatched characters are deleted.
            trmap[ord(ch)] = ord(repl[idx]) if idx < len(repl) else None
    return expr.translate(trmap)
2d3f9e9f6c1ee9fed6995ef6e60f989ffb690c82
697,431
def max_element(l):
    """Return the maximum element of a list of numbers.

    Returns None for an empty list (preserving the original contract,
    unlike bare max() which would raise ValueError).
    """
    return max(l, default=None)
1596f33f0bb91839fbcaf2613892bf90fafd6afd
697,432
def mask_pipe_mw(text: str) -> str:
    """Mask occurrences of the pipe magic word ``{{!}}``.

    :param text: text to mask
    :return: text with every ``{{!}}`` replaced by the bot marker
    """
    masked = text.replace("{{!}}", "|***bot***=***param***|")
    return masked
546d7c4b71ce3403da7a33883dac2cd3171224e6
697,433
def to_scalar(var):
    """Convert a single-element tensor to a plain Python number."""
    flattened = var.view(-1)
    return flattened.item()
56170067e38773ce452268827ef87d7b1bab877a
697,434
def check_input(def_list):
    """Check that all defect structures in the list are truthy (not None).

    Args:
        def_list (list): List of defect structures.

    Returns:
        bool: True if every defect structure is present, False otherwise.
    """
    # The original manual loop is exactly the builtin all() contract
    # (including True for an empty list).
    return all(def_list)
0c1372688e4780dd44df575d470e0511366dbbc4
697,435
import os


def read_images():
    """Read every file below ./test_image/ into memory.

    Returns:
        (images_buffer, image_files): list of raw file contents (bytes)
        and the corresponding list of file paths, in os.walk order.
    """
    image_files = []
    for path, _, file_list in os.walk("./test_image/"):
        image_files.extend(os.path.join(path, name) for name in file_list)
    images_buffer = []
    for image_file in image_files:
        with open(image_file, "rb") as fp:
            images_buffer.append(fp.read())
    return images_buffer, image_files
c88466737fba4ecd33aa8d62847e83bc86a88be9
697,436
def sdm_particular_from_rref(A, ncols, pivots):
    """Get a particular solution from a sparse matrix A already in RREF.

    The last column (index ncols-1) is treated as the right-hand side.
    """
    particular = {}
    for row, col in enumerate(pivots):
        rhs = A[row].get(ncols - 1, None)
        if rhs is not None:
            particular[col] = rhs / A[row][col]
    return particular
9743bfd1c30564c859c3a135d3df092c50dead75
697,437
import os


def get_dispatch_cmake_arg(args):
    """Return the CMake argument selecting the Dispatch configuration
    used to build SwiftPM."""
    modules_dir = os.path.join(args.dispatch_build_dir, 'cmake/modules')
    return '-Ddispatch_DIR=' + modules_dir
622b031de03c55705b0cf529d0c638ab08a099f3
697,438
def load_model(path, version=None):
    """Load a model from disk.

    :param path: location of the model
    :param version: requested version (None == latest)
    :return: model

    NOTE(review): this is a stub — it always returns the constant 3
    regardless of path/version.
    """
    return 3
b41b7f49080b07d98f4e1ac71d44a861bcf52a03
697,439
from time import strftime, localtime from typing import Union def format_time(time_tick: Union[int, float]) -> str: """ 格式化时间戳为具体日期,例\n >>> format_time(123456789)\n >>> '1973-11-30 05:33:09'\n :param time_tick: 时间戳 :return: 具体日期 """ return strftime('%Y-%m-%d %H:%M:%S', localtime(time_tick))
8f3da87066013ce267b485d4924617c9615b455e
697,440
def pack(word, pattern):
    """Return a packed word given a spaced seed pattern.

    >>> pack('actgac', [True, False, True, True, False, True])
    'atgc'
    """
    return "".join(char for i, char in enumerate(word) if pattern[i])
9697a4ee5b9bbc3d7f4d22040196bc8856fd2b6d
697,441
import io
import requests


def upload_prefixes_list(url, filename, prefixes_list, headers):
    """Upload a targets list built from *prefixes_list*.

    Each prefix (an iterable of strings) becomes one comma-joined line.
    Returns True on HTTP 201, otherwise prints the response text and
    returns False.
    """
    buffer = io.StringIO()
    for prefix in prefixes_list:
        buffer.write(",".join(prefix) + "\n")
    buffer.seek(0)
    req = requests.post(
        url + "/targets/",
        files={"target_file": (filename, buffer)},
        headers=headers,
    )
    buffer.close()
    if req.status_code == 201:
        return True
    print(req.text)
    return False
065231b0a6e81edac30591977b31976d0d5c1f01
697,442
import os


def config_dir():
    """Get the config directory for per-user configurations.

    Honors $XDG_CONFIG_HOME, falling back to ~/.config.

    Returns
    -------
    config_dir : :class:`str`
        Path to the per-user config directory.
    """
    default = os.path.join(os.path.expanduser('~'), '.config')
    return os.environ.get('XDG_CONFIG_HOME', default)
c6c84c3fdce9f7f78f71a786b079a73aa154a741
697,443
import os


def exists(path, strict=False):
    """Case-sensitive variant of os.path.exists.

    With strict=True the final path component must match an actual
    directory entry byte-for-byte (relevant on case-insensitive
    filesystems).
    """
    if not strict:
        return os.path.exists(path)
    if not os.path.exists(path):
        return False
    dirpath, name = os.path.split(path)
    return name in os.listdir(dirpath)
92f0dc2f11cf421992e2c6ec6daad322995a7eb6
697,444
def read_timestamps(filename):
    """Read timestamps (in seconds) from the first column of a file.

    Commas and tabs are treated as field separators, blank lines and
    lines starting with '#' are skipped.

    Fix: the original opened the file and never closed it (resource
    leak); a with-statement now guarantees closure.
    """
    with open(filename) as fh:
        data = fh.read()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    data_list = [[v.strip() for v in line.split(" ") if v.strip() != ""]
                 for line in lines if len(line) > 0 and line[0] != "#"]
    return [float(v[0]) for v in data_list]
4ecc35c287eaa3c4240943b94b3761c769468903
697,445
def getVertexSortingLabel(vertex):
    """Return the value used to sort vertices before proposing candidate
    pairs in :meth:`__VF2_pairs`.

    The value is the vertex's precomputed ``sortingLabel`` (assumed to be
    set from its connectivity values).
    """
    return vertex.sortingLabel
265fc105637c5e9c301c7da3f1cd1818ec4f0d63
697,446
def agent_registration_details():
    """Data an agent shares in response to the 'register' request."""
    details = {
        "name": "PyTestAgent",
        "display_name": "Pytest agent",
        "description": "Just a trivial agent for testing purposes.",
        "default_options": {},
    }
    return details
9e187c2d42570b80d605ea4b2caf3175a6332c04
697,447
def get_shp_extent(gdf):
    """Return the extent tuple of the first row of ``gdf.bounds``.

    For a GeoDataFrame this is (minx, miny, maxx, maxy) of the first
    geometry.
    """
    first_bounds = gdf.bounds.values[0]
    return tuple(first_bounds)
d9343f94f349c1db5af033d78693d3b55407a3c8
697,448
def get_grid_data(df):
    """Keep only rows whose 'Longitude' is a multiple of 60 degrees and
    whose 'Year' is a multiple of 10.

    These are the lat/lon grid locations with USNO data for all eight
    twilight event types.
    """
    mask = (df['Longitude'] % 60 == 0) & (df['Year'] % 10 == 0)
    return df[mask]
4788356a9c0b14759a34c436f2f8267ea07043be
697,449
def factorial(value: int) -> int:
    """Calculate the factorial of the given value.

    n! = n * (n - 1) * ... * 2 * 1, with 0! == 1.

    Fixes: the original only stopped recursion at value == 1, so
    factorial(0) and any negative input recursed forever
    (RecursionError).  Now 0 is handled (0! == 1) and negative inputs
    raise ValueError.

    Parameters
    ----------
    value: int
        a non-negative integer

    Returns
    -------
    int
        factorial of the value (value!)

    Raises
    ------
    ValueError
        if value is negative
    """
    if value < 0:
        raise ValueError("factorial() not defined for negative values")
    if value <= 1:  # base case: 0! == 1! == 1
        return 1
    return value * factorial(value - 1)
642700f9ec42c3ab47130d599626c44b2de4165c
697,450
import ast


def _build_table(src):
    """Build a table of AST nodes grouped by node class.

    Args:
        src (str): source code

    Returns:
        dict: maps each ast.AST subclass to the list of its nodes in the
        parsed tree (walk order).
    """
    table = {}
    for node in ast.walk(ast.parse(src)):
        table.setdefault(node.__class__, []).append(node)
    return table
955fcec38e57a9657ffad183a7486ddfc5847bfc
697,451
import os
import shutil


def delete_after(filename):
    """Decorator factory ensuring *filename* is removed after the call.

    Works for both files and directories; deletion happens even when the
    wrapped function raises.
    """
    def decorator(function):
        def wrapper(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            finally:
                if os.path.isfile(filename):
                    os.remove(filename)
                if os.path.isdir(filename):
                    shutil.rmtree(filename)
        return wrapper
    return decorator
4b69a7bcfe65967aa0b4dca4a30793db594cfc15
697,452
import torch


def make_bow_vector(sentence, word_to_ix):
    """Turn a sentence (list of words) into a bag-of-words count vector.

    Uses the vocabulary mapping *word_to_ix*; the result is a
    FloatTensor of shape (1, len(word_to_ix)).
    """
    counts = torch.zeros(len(word_to_ix))
    for token in sentence:
        idx = word_to_ix[token]
        counts[idx] = counts[idx] + 1
    return counts.view(1, -1)
ae0df26878537ae232e70881d5e8a56fcea0c350
697,453
def matmul(a, b):
    """Multiply two matrices given as lists of rows.

    :param a: list of list of numbers (m x k)
    :param b: list of list of numbers (k x n)
    :return: resulting m x n matrix as list of lists
    """
    n_cols = len(b[0])
    product = []
    for row in a:
        product.append([
            sum(x * b_row[j] for x, b_row in zip(row, b))
            for j in range(n_cols)
        ])
    return product
504f53c1de68a28a2c23c479b53acc76f359f347
697,454
import requests


def _generate_swagger_spec():
    """Download the current Dialpad API swagger spec and return it as a dict.

    The raw spec needs some massaging before the swagger parser accepts
    it, so the downloaded JSON is patched in place while walking it.
    """
    def _patch_node(node):
        # Stringify int64-formatted string defaults, default 'required'
        # to False on operation parameters, and drop 'basePath'.
        if 'type' in node:
            if node['type'] == 'string' and node.get('format') == 'int64' and 'default' in node:
                node['default'] = str(node['default'])
        if 'operationId' in node and 'parameters' in node:
            for param in node['parameters']:
                param['required'] = param.get('required', False)
        if 'basePath' in node:
            del node['basePath']

    def _walk(spec):
        if isinstance(spec, dict):
            _patch_node(spec)
            for value in spec.values():
                _walk(value)
        elif isinstance(spec, list):
            for value in spec:
                _walk(value)
        return spec

    spec_json = requests.get('https://dialpad.com/static/openapi/apiv2openapi-en.json').json()
    return _walk(spec_json)
ebde3f0ce27ca4df8e5f1888f61910e3b91c6f3c
697,455
from typing import Dict
from typing import Any
from typing import List


def directory(module_locals: Dict[str, Any]) -> List[str]:
    """Return the module's public directory for the dir function.

    Parameters
    ----------
    module_locals
        The module's locals as produced by locals().

    Returns
    -------
    List of public object names.  Names starting with '_' are skipped;
    objects with no __dict__ are excluded; objects may opt out by
    setting '_include_in_dir' in their __dict__.
    """
    public = []
    for name, value in module_locals.items():
        if name.startswith('_'):
            continue
        try:
            include = value.__dict__['_include_in_dir']
        except KeyError:
            include = True       # has a __dict__ but no explicit flag
        except AttributeError:
            include = False      # object has no __dict__ at all
        if include:
            public.append(name)
    return public
37dc44ddc18a21c5aa6faba9887cae0aa6bcdd0b
697,456
def _comparable(klass):
    """Class decorator adding rich comparisons based on precedence_key.

    ``__lt__``/``__le__``/``__gt__``/``__ge__`` compare
    ``precedence_key``; ``__ne__`` compares ``tuple(self)``.  Each
    returns NotImplemented for instances of a different class.
    """
    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return tuple(self) != tuple(other)

    def __lt__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.precedence_key < other.precedence_key

    def __le__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.precedence_key <= other.precedence_key

    def __gt__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.precedence_key > other.precedence_key

    def __ge__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.precedence_key >= other.precedence_key

    for name, method in (('__lt__', __lt__), ('__gt__', __gt__),
                         ('__le__', __le__), ('__ge__', __ge__),
                         ('__ne__', __ne__)):
        setattr(klass, name, method)
    return klass
5cfeb87e225aa0f72c7489571c42c9f513c905a5
697,458
def SplitDataset_x_y(df, IndependentColumnName):
    """Split a dataframe into independent and dependent variable frames.

    Arguments:
    ----------
    - df: pandas dataframe
        Dataframe containing both independent and dependent variables.
    - IndependentColumnName: list
        Column name(s) of the independent variable(s).

    Return:
    ----------
    - x: pandas dataframe
        All columns of df except those in IndependentColumnName.
    - y: pandas dataframe
        Only the columns listed in IndependentColumnName.
    """
    y = df[IndependentColumnName]
    x = df.drop(IndependentColumnName, axis=1)
    return (x, y)
a2e4a6244fc5bcee401981ee54f20d179c09dc89
697,459
def dedup_commands(command_list):
    """Remove case-insensitive duplicates from a command list.

    The first occurrence of each command (in original casing) is kept,
    preserving order.
    """
    seen = set()
    deduped = []
    for command in command_list:
        folded = command.lower()
        if folded in seen:
            continue
        seen.add(folded)
        deduped.append(command)
    return deduped
604dbaf60352924eebc46c29c54d3a7bbbf352b1
697,460
from functools import reduce
import operator


def validate_nmea_checksum(sentence):
    """Validate an NMEA sentence's checksum per the standard.

    :param sentence: NMEA sentence including checksum
    :returns:
        - Boolean result (checksum correct)
        - raw NMEA data string, with the $Gx prefix and checksum
          suffix removed
    """
    sentence = sentence.strip('\n').strip('\r')
    nmeadata, cksum = sentence.split('*', 1)
    nmeadata = nmeadata.replace('$', '')
    computed = 0
    for ch in nmeadata:
        computed ^= ord(ch)
    expected = ("%0.2x" % computed).upper()
    return (cksum == expected), nmeadata[2:]
82f5a63943916e4323178063366a0f8c276ad64a
697,461
def get_sorter_by_args(model, args: list):
    """Build a list of SQLAlchemy order_by arguments from name strings.

    A leading '-' on a name requests descending order via .desc().
    """
    sorters = []
    for name in args:
        if name[0] == '-':
            sorters.append(getattr(model, name[1:]).desc())
        else:
            sorters.append(getattr(model, name))
    return sorters
d9572ad576d8980dd453835b80f36673f6ab4f16
697,462
import sys
import re


def re_group(regexp, text):
    """Search *text* for *regexp*; return the first group, or None.

    On Python 3 the input is assumed bytes-like and is decoded as ASCII
    with replacement before matching.
    """
    if sys.version < '3':
        match = re.search(regexp, text)
    else:
        # text is bytes-like
        match = re.search(regexp, text.decode('ascii', 'replace'))
    if match:
        return match.group(1)
1a9e1e3954fcecf56275befb35e31bdae419485b
697,463
def _draw_rectangle(data, obj, draw_options):
    """Return the PGFPlots code for a rectangle patch.

    Rectangles with an empty label are axis/legend artifacts that
    PGFPlots handles itself; they are skipped (returning (data, [])).
    Otherwise returns (data, code_string).
    """
    label = obj.get_label()
    if label == "":
        # Not a plot object; likely an axis/legend decoration.
        return data, []
    # Bar-chart rectangles default to "_nolegend_"; recover the real
    # label from the axes legend handles.
    # <http://stackoverflow.com/questions/35881290/how-to-get-the-label-on-bar-plot-stacked-bar-plot-in-matplotlib>
    handles, labels = obj.axes.get_legend_handles_labels()
    matching = [
        lbl for handle, lbl in zip(handles, labels) if obj in handle.get_children()
    ]
    if len(matching) == 1:
        label = matching[0]
    legend = ""
    if label != "_nolegend_" and label not in data["rectangle_legends"]:
        data["rectangle_legends"].add(label)
        legend = ("\\addlegendimage{ybar,ybar legend,%s};\n") % (",".join(draw_options))
    x0 = obj.get_x()
    y0 = obj.get_y()
    cont = (
        "%s\\draw[%s] (axis cs:%.15g,%.15g) "
        "rectangle (axis cs:%.15g,%.15g);\n"
    ) % (
        legend,
        ",".join(draw_options),
        x0,
        y0,
        x0 + obj.get_width(),
        y0 + obj.get_height(),
    )
    return data, cont
f3bd7c998f44109a6e6ec47a4e835bb5fdd8e1b8
697,464
def make_ordered_test():
    """Create a (decorator, comparator) pair tracking definition order."""
    order = {}

    def ordered_method(f):
        """Record f's position in definition order and return it."""
        order[f.__name__] = len(order)
        return f

    def compare_method(a, b):
        """Return -1 if name *a* was registered before *b*, else 1."""
        return -1 if order[a] < order[b] else 1

    return ordered_method, compare_method
fcc5f86e18b312ad94f3613bba2440e33c3e9706
697,465
def get_crash_arg() -> bytes:
    """Return the (pre-encoded) `password` argument sent to `sudo`.

    This payload is intended to crash the program and produce a core
    dump: 135 filler bytes followed by a 4-byte overwrite marker.

    WARNINGS:
    0. Don't delete this function or change its name/parameters - it is
       tested directly, without running main().

    Returns:
        The bytes of the password argument.
    """
    payload = b'A' * 135
    payload += b'B' * 4
    return payload
f25bf1c66c5387076830e77077b97d9844244428
697,466
def add_mappings(mappings):
    """Return a dict with additional es mappings.

    Currently a pass-through: no new index fields are added, so the
    input is returned unchanged.
    """
    return mappings
a358c5dc17568f20a8ccbfd3b8d40e1a9b630d1b
697,467
def processRecord(rec):
    """Parse one record into a (CAS number, [synonyms]) tuple.

    The record is split on 'FIELD '; 'Registry Number:' yields the CAS
    number, 'CA Index Name:' the primary name (lowercased, first in the
    synonym list), and 'Other Names:' a ';'-separated synonym list.
    Returns ('Nope', [...]) when no registry number is present.
    """
    cas = 'Nope'
    primary = ''
    raw_names = []
    for field in rec.split('FIELD '):
        if 'Registry Number:' in field:
            cas = field[field.find(':') + 1:field.find('\n')]
        if 'CA Index Name:' in field:
            primary = field[field.find(':') + 1:field.find('\n')].lower()
        if 'Other Names:' in field:
            raw_names = field[field.find(':') + 1:].split(';')
    synonyms = [primary]
    for name in raw_names:
        name = name.strip().lower()
        if len(name) > 0 and name not in synonyms:
            synonyms.append(name)
    return (cas, synonyms)
cf514412053ab7449887064c9fafa7e422c450ae
697,468
def row_indices(tbl):
    """Return the table's row index labels as a tuple."""
    return tuple(tbl.index)
9eec5da90a9563687c66c26f6ad134973170301d
697,469
def ParseLine(line, new_target):
    """Parse one line of a GCC-generated deps file.

    Each line holds an optional 'target:' prefix followed by
    space-separated dependencies; spaces inside filenames are escaped
    with a backslash.  Returns the list of filenames.
    """
    if new_target and ':' in line:
        line = line.split(':', 1)[1]
    line = line.strip().rstrip('\\')
    filenames = []
    while True:
        line = line.strip()
        # Locate the next unescaped space.
        pos = line.find(' ')
        while pos > 0 and line[pos - 1] == '\\':
            pos = line.find(' ', pos + 1)
        if pos == -1:
            filenames.append(line)
            return filenames
        filenames.append(line[:pos])
        line = line[pos + 1:]
ab0df773a9279f30b438ee7c325225a74574a588
697,470
def has_refseq(db_list):
    """Return the index of 'RefSeq' in *db_list*, or None if absent.

    :param db_list: list of db names (first elements of the tuples in a
        Swissprot.record.cross_references list)
    :return: int index or None
    """
    try:
        return db_list.index('RefSeq')
    except ValueError:
        return None
ffa175079e7e14e91fe9b8901c3d8cd60b200eea
697,472
def normalize_identity(my_string: str) -> str:
    """Return *my_string* if it is a non-empty str, otherwise ''.

    :param my_string: candidate string
    :return: my_string or ''
    """
    return my_string if my_string and isinstance(my_string, str) else ''
efee4dd778dbb34aeacd11b5865d3241b0fdfa0b
697,473
def __or(funcs, args):
    """Support list sugar for "or" of predicates (used inside `select`).

    Each predicate is applied to *args*; truthy results (expected to be
    lists) are concatenated into a single result list.
    """
    combined = []
    for predicate in funcs:
        hits = predicate(args)
        if hits:
            combined.extend(hits)
    return combined
b7f6a558812de56bc8f2348be3e0a9edcc6339e7
697,474
import platform


def get_exec_env_parameters():
    """Collect execution-environment info (OS, host, CPU, Python version).

    :return Dict[str, str]:
    """
    return {
        "OperatingSystem": platform.system(),
        "MachineHostName": platform.node(),
        "OSReleaseInfo": platform.release(),
        "CPU": platform.machine(),
        "PythonVersion": platform.python_version(),
    }
cb4c2d10dc33649312fd18fc6fb0fc32c00bbab6
697,475
import ipaddress


def prefix_to_network(prefix):
    """Convert an IP prefix to an (address, netmask) pair."""
    interface = ipaddress.ip_interface(prefix)  # parse into ipaddress object
    return interface.ip, interface.netmask
ac521405d5bdd90f082bc4d0e5f434b7a5b7f3f7
697,476
def deflatten(d, sep='.', maxdepth=-1):
    """Build a nested dict from a flat dict by splitting keys on *sep*.

    With ``sep=None`` the keys must already be tuples of path parts.
    ``maxdepth`` limits how many times each key is split (as in
    ``str.split``).  Conflicting keys (one key being a prefix path of
    another) raise AssertionError.

    >>> deflatten({'a': 1, 'c.a': 2, 'c.b': 3})
    {'a': 1, 'c': {'a': 2, 'b': 3}}
    >>> deflatten({('a', 'b'): 'd', ('a', 'c'): 'e'}, sep=None)
    {'a': {'b': 'd', 'c': 'e'}}
    >>> deflatten({'a.b.c': 1}, maxdepth=1)
    {'a': {'b.c': 1}}
    """
    nested = {}
    if sep is not None:
        d = {tuple(key.split(sep, maxdepth)): value for key, value in d.items()}
    for keys, value in d.items():
        node = nested
        for part in keys[:-1]:
            child = node.setdefault(part, {})
            assert isinstance(child, dict), (
                f'Conflicting keys! {keys}'
            )
            node = child
        assert keys[-1] not in node, f'Conflicting keys! {keys}'
        node[keys[-1]] = value
    return nested
dc507bd9c167c1b2c9cb89605a424d235c11fb82
697,477
import threading


def makethread(func):
    """Decorator that runs the wrapped function in a new Thread.

    Args:
        func (function): function to be run in its own thread.

    Returns:
        (function) wrapper that starts a Thread running *func* with the
        given arguments.

    Improvement: the wrapper now returns the started Thread (the
    original returned None), so callers can join() it; callers that
    ignored the return value are unaffected.
    """
    def _thread(*args, **kwargs):
        """Start *func* in a new thread with the given args/kwargs."""
        worker = threading.Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return _thread
3c3565143b817c57903771e5a5c5aa904b44a4d5
697,478
import click


def config_option(config_function):
    """Helper decorator that turns an option function into a cli option.

    The option name comes from the function's name, and the help text
    from its docstring plus an example produced by calling it.
    """
    def decorate(function):
        option_name = '--' + config_function.__name__
        help_text = (config_function.__doc__.strip()
                     + '. Example: "' + str(config_function()) + '"')
        return click.option(option_name, help=help_text)(function)
    return decorate
f6e8dd160bf6b1f38612cbf338140f6f94862644
697,479
import re


def strip_headers(pem_text):
    """Strip the headers off a FinCrypt key or message.

    :param pem_text: text of the key or message (string)
    :return: tuple (header, e.g. 'BEGIN FINCRYPT MESSAGE', base64 body);
        (None, None) if the text does not match the expected framing.
    """
    pattern = (
        r'(?:-+ (BEGIN FINCRYPT (?:PUBLIC |PRIVATE )?(?:KEY|MESSAGE)) -+\n)([a-zA-Z0-9\n\-_=]+[^\n])'
        r'(?:\n-+ END FINCRYPT (?:PUBLIC |PRIVATE )?(?:KEY|MESSAGE) -+)'
    )
    match = re.match(pattern, pem_text)
    if match is None:
        return None, None
    return match[1], match[2]
c00f3006f392bde2ed835905cdf0582e25049a9d
697,480
def _CreateAssetsList(path_tuples):
    """Return a newline-separated, sorted list of destination paths.

    *path_tuples* is an iterable of (source, destination) pairs; only
    destinations are listed, with a trailing newline.
    """
    destinations = sorted(pair[1] for pair in path_tuples)
    return '\n'.join(destinations) + '\n'
bb57f014e68d398eb89468a7c86c972f03e47b7d
697,481
def _read_json_success(response):
    """Read the `success` value from a JSON response, if applicable.

    Returns True or False; non-200 responses are always False.
    """
    if response.status_code != 200:
        return False
    try:
        # requests > 1.0.0: .json is a method
        return response.json().get('success', False)
    except AttributeError:
        # requests < 1.0.0 had a json attribute instead of a method.
        return response.json.get('success', False)
97f48f24cea4be5fb2d648907282ab8eb3ac340e
697,482
def discount_with_dones(rewards, dones, gamma):
    """Calculate discounted rewards, valid across episode terminations.

    From OpenAI baselines' A2C.

    Parameters
    ----------
    rewards: list
        rewards, the last value being the state value
    dones: list
        done flags as floats (1.0 resets the running return)
    gamma: float
        discount factor

    Returns
    -------
    list of discounted rewards
    """
    # Returns are accumulated over the reversed sequence: each step
    # computes r_t + gamma * R_{t+1}, zeroing R at terminal steps.
    discounted = []
    running = 0
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = reward + gamma * running * (1. - done)
        discounted.append(running)
    discounted.reverse()
    return discounted
2feef30d209795414029cfc79aef2ea8e65220af
697,484
def ema(series, n):
    """Exponentially weighted moving average over an n-period span.

    Formula: ema(x, n) = 2*x/(n+1) + (n-1)*ema(x, n).shift(1)/(n+1)

    Notes:
        1. n must be >= 1.
        2. Recent bars receive higher weight than older ones.

    Args:
        series (pandas.Series): input data series
        n (int): period

    Returns:
        pandas.Series: the EMA series
    """
    return series.ewm(span=n, adjust=False).mean()
fcdf4a33821ba20d7026f30f4229ab7f62f32eda
697,485
import io
import numpy


def adapt_array(array):
    """Serialize a numpy array to bytes suitable for a BLOB column.

    Uses numpy.save into an in-memory BytesIO stream and returns the
    captured bytes.
    """
    buffer = io.BytesIO()
    numpy.save(buffer, array)
    return buffer.getvalue()
2a9558bea44e124e846e8587f5280175be6911a6
697,487
def tile_square(F, data):
    """Tile an NxN matrix into N^2xN using framework ops *F*.

    :param F: array framework module providing expand_dims,
        broadcast_like and reshape
    :param data: NxN matrix
    :return: N^2xN matrix
    """
    expanded = F.expand_dims(data, axis=0)  # 1xNxN
    tiled = F.broadcast_like(expanded, data, lhs_axes=0, rhs_axes=0)  # NxNxN
    return F.reshape(tiled, (-3, 0))  # N^2 x N
adde9f14acf5568bfbfe869fbc1a8f7967702206
697,488
def count_letters(word):
    """Count characters in *word*, ignoring ',', '-', and spaces."""
    return sum(1 for ch in word if ch not in (',', '-', ' '))
5ea2539cd5a81bdef6b8b3fed1a90410d43de7e0
697,489
from typing import Callable
import sys


def verify(func: Callable):
    """Wrap *func* so a falsy result kills the process.

    :param func -> ``Callable``: the function whose response to verify.
    :returns ``Callable``: the 'wrapped' function; it returns func's
        result when truthy, otherwise calls sys.exit(1).
    """
    def wrapper(*args):
        outcome = func(*args)
        if outcome:
            return outcome
        sys.exit(1)
    return wrapper
1088b541f5de0ff0cb38e9decf192020f383e328
697,490
from typing import Any
from typing import Dict


def _get_feature_row(features: Any, index: int) -> Dict[str, Any]:
    """Return row *index* of the features table as a dict.

    *features* maps column names to positionally-indexable Series.
    """
    return {column: series.iloc[index] for column, series in features.items()}
8729d3f4c8adaa00fd219e2bb74d6ba138776793
697,491
def biquad(X, c, x1, y1, x2, y2, xy):
    """Biquadratic surface for curve fitting.

    z = x2*x**2 + y2*y**2 + xy*x*y + x1*x + y1*y + c, with X = (x, y).
    """
    x, y = X
    quadratic = x2 * x ** 2 + y2 * y ** 2 + xy * x * y
    linear = x1 * x + y1 * y
    return quadratic + linear + c
e2f9da8552d7def47f26425055611cfe0b7bea0a
697,492
def findimagenumber(filename):
    """Find the number for an image file.

    The basename (without extension) is OBSDATE (9 characters) followed
    by the image number.
    """
    stem = filename.split('/')[-1].split('.')[0]
    return int(stem[9:])
39219217644f264e089ba3adc94c1175f7c4a6dc
697,493
def get_quarter_from_month(month: int):
    """Return the calendar quarter (1-4) for a month (1-12).

    Raises KeyError for values outside 1-12, matching the original
    lookup-table behavior.
    """
    quarters = {m: (m + 2) // 3 for m in range(1, 13)}
    return quarters[month]
ef7f3a83c3b4b75823e67bca9ce3f62dd1cae5cf
697,494
import subprocess


def is_tmm():
    """Determine whether the TMOS dataplane microkernels are running.

    Counts /usr/bin/tmm processes via a shell pipeline; True when
    exactly one is found.
    """
    pipeline = "ps -ef|grep /usr/bin/tmm|grep -v grep|wc -l| tr -d ';\n'"
    proc = subprocess.Popen(pipeline, stdout=subprocess.PIPE, shell=True)
    tmm_running = int(proc.communicate()[0])
    return tmm_running == 1
0bbc1e811753b2381e7c1d94a4ffa9f87d213f0c
697,495
def _precision(tp, fp):
    """Precision tp/(tp+fp); defined as 1 when there are no false positives."""
    return 1 if fp == 0 else tp / (tp + fp)
9bf80bc0ce2b657b1c8bbc8fb57e64e7df2081c9
697,496
def inference(model, X):
    """Run model inference and return the predictions.

    Inputs
    ------
    model : ???
        Trained machine learning model (anything exposing ``predict``).
    X : np.array
        Data used for prediction.

    Returns
    -------
    preds : np.array
        Predictions from the model.
    """
    preds = model.predict(X)
    return preds
c9edd988933bff12fa4da14c9735f8f482e82ca8
697,497
from datetime import datetime


def now(frm: str = '%d/%m/%Y %H:%M:%S') -> str:
    """Return the current date and time as a string.

    Args:
        frm (str): strftime format for the date and time.

    Returns:
        str: the current date and time formatted with *frm*.
    """
    current = datetime.now()
    return current.strftime(frm)
e21851f65de1e7640dc7e34f8f3761129052e1c3
697,498
async def put_url(session, path, headers, body):
    """aiohttp helper: issue a PUT request carrying *body* and return the decoded JSON response."""
    async with session.put(url=path, headers=headers, data=body) as response:
        payload = await response.json()
    return payload
876add17ed4c76b7d7c43a811fef9d1d720559a5
697,500
def get_full_piece_id(piece):
    """
    Returns rdb piece id and append movement id if exists.

    :param piece: A piece as stored in dataset
    :return: A string containing either the piece ID (no movement information)
        like "123", or the piece ID followed by dash, followed by the movement
        ID like "123-46123"
    """
    base = str(piece["rdb_id_piece"])
    movement = str(piece.get("rdb_id_movement", ""))
    return f"{base}-{movement}" if movement else base
7d9c0946d26035cdbf945f6d854913388818e8f9
697,501
def maxgap(data, max_gap):
    """
    Arrange data into groups where successive elements differ by no more
    than *max_gap*.

    The input is a list of lists with the structure:
    [['id1', distance1], ['id2', distance2], ..., ['idn', distancen]]

    The output is a list of lists of identifiers, clustered together when
    the difference between successive (sorted) distances is at most max_gap:
    [[id1, id2], [id3, ...]]

    Example (max_gap=10):
    in:  [['id1',1], ['id2',-1], ['id3',2], ['id4',80], ['id5',81], ['id6',82]]
    out: [['id2','id1','id3'], ['id4','id5','id6']]

    Note: sorts *data* in place by distance (as the original did).

    Bug fix: the previous implementation bucketed values by
    ``distance // max_gap``, which splits adjacent values straddling a
    bucket boundary (e.g. 4 and 5 with max_gap=5) and mis-handles negative
    distances (-1//10 == -1 but 1//10 == 0), contradicting the documented
    contract. Clustering is now based on consecutive differences.
    """
    data.sort(key=lambda item: item[1])
    groups = []
    previous = None
    for identifier, distance in data:
        # Start a new group unless this distance is within max_gap of the
        # previous (sorted) distance.
        if groups and distance - previous <= max_gap:
            groups[-1].append(identifier)
        else:
            groups.append([identifier])
        previous = distance
    return groups
fba36520d40263eafe25556304a8cbbaf8f724fb
697,502
def get_slice(img, ori, slc):
    """
    Extract one 2D slice from a 3D numpy.ndarray image.

    :param img: np.array. input image to extract slice.
    :param ori: int. orientation: 1 sagittal (axis 0), 2 coronal (axis 1),
        3 axial (axis 2).
    :param slc: int. slice index along the chosen axis.
    :return: np.array. the extracted 2D slice.
    :raises Exception: if the orientation or slice index is invalid.
    """
    if ori not in (1, 2, 3) or not 0 <= slc < img.shape[ori - 1]:
        raise Exception('Invalid orientation or slice number')
    if ori == 1:
        return img[slc, :, :]
    if ori == 2:
        return img[:, slc, :]
    return img[:, :, slc]
dcf03eec0d16c68f55f701b2d312dbf4fa946ee7
697,503
def prune_multiple_copies_of_species(tree, g2s_map, s2g_map):
    """
    Removes all but one leaf with the same species label

    For each species, the surviving leaf is the gene copy listed first in
    ``s2g_map[species]``; it is relabeled with the species name. All other
    copies are contracted out of the tree. The tree is modified in place.

    Parameters
    ----------
    tree : treeswift tree object
    g2s_map : dictionary
        maps gene copy labels to species labels
    s2g_map : dictionary
        maps species label to gene copy labels

    Returns
    -------
    list
        ``[nLMX, c]`` where ``nLMX`` is the number of leaves kept (one per
        species present in the tree) and ``c`` is the number of species for
        which at least one duplicate copy was removed.
    """
    found_duplicate = set([])
    nLMX = 0  # leaves retained (one representative per species)
    c = 0     # species that had more than one copy
    for leaf in tree.traverse_leaves():
        gene = leaf.get_label()
        species = g2s_map[gene]
        all_genes = s2g_map[species]
        if gene == all_genes[0]:
            # Designated representative: relabel it with the species name.
            leaf.set_label(species)
            nLMX += 1
        else:
            # NOTE(review): contracting leaves while iterating
            # traverse_leaves() assumes treeswift tolerates tree mutation
            # during traversal -- confirm against the treeswift docs.
            leaf.contract()
            if not (species in found_duplicate):
                found_duplicate.add(species)
                c += 1
    # Contracting leaves can leave degree-2 internal nodes; remove them.
    tree.suppress_unifurcations()
    return [nLMX, c]
a7b5b5a0b8af0ea157d5bbf1bbbfb1337dc7ee69
697,504
def _digit_span_to_special_tag(span): """ :param str span: 需要替换的str :return: """ if span[0] == '0' and len(span) > 2: return '<NUM>' decimal_point_count = 0 # one might have more than one decimal pointers for idx, char in enumerate(span): if char == '.' or char == '﹒' or char == '·': decimal_point_count += 1 if span[-1] == '.' or span[-1] == '﹒' or span[ -1] == '·': # last digit being decimal point means this is not a number if decimal_point_count == 1: return span else: return '<UNKDGT>' if decimal_point_count == 1: return '<DEC>' elif decimal_point_count > 1: return '<UNKDGT>' else: return '<NUM>'
8dc210f4e3834abbdbf23b9dad01e65f12289c80
697,505
def choose_window_type(measure):
    """Choose the FIR window type closest to a target attenuation.

    Args:
        measure (float): target stop-band attenuation in dB.

    Returns:
        str: name of the window whose attenuation is below *measure* and
        closest to it.
    """
    reference = {
        'rectangular': -21,
        'barlett': -25,
        'hanning': -44,
        'hamming': -53,
        'blackman': -74
    }
    # Keep only windows strictly below the target, paired with their
    # absolute distance from it, then pick the closest (stable sort keeps
    # the original dict order on ties).
    candidates = [(name, abs(level - measure))
                  for name, level in reference.items() if level < measure]
    candidates.sort(key=lambda pair: pair[1])
    return candidates[0][0]
05cd22430f82fc08b3ab8fa5f4bad6a7866e3200
697,506
def get_link(task_id, tasks_df):
    """Look up a task by ID in *tasks_df* and return its link."""
    task = tasks_df.loc[int(task_id)]
    link = task['info']['link']
    return link
a24c467f298526d2176747960430f5e283384043
697,507
def known_face_sentence(known_face_name):
    """
    Describe a recognized person.

    Example: "Anuja in front of you"

    :param known_face_name: name of the known person in the frame
    :return: sentence describing the person
    """
    return f"{known_face_name} in front of you"
266da5bf129ba6844dbe0bd3cbcf9b4663100d1c
697,508
from typing import Union
from pathlib import Path


def ensure_path(path: Union[str, Path]) -> Path:
    """
    Ensure string is converted to a Path.

    This is a more restrictive version of spaCy's
    [ensure_path](https://github.com/explosion/spaCy/blob/ac05de2c6c708e33ebad6c901e674e1e8bdc0688/spacy/util.py#L358)

    # Parameters

    path : `Union[str, Path]`
        If string, it's converted to Path.

    # Returns

    `Path`
    """
    return Path(path) if isinstance(path, str) else path
1f70ab426f6347399da73e854c0c2be9eee7843c
697,510
def api_client_pass(application_pass, api_client):
    """Build an api_client for 'application_pass' with 404 retries disabled."""
    client = api_client(application_pass, disable_retry_status_list={404})
    return client
b17c539e0c8df55f21a62015d4bee3fd243f7f5f
697,511
def mock_view_function(request):
    """Stand-in view function used with a mock HTTP request in tests."""
    return "this is a mock view function"
b2b95ae346ff1774252eb73cb36f9a0ff86b1e9c
697,512
from datetime import datetime
import sys
import requests
from bs4 import BeautifulSoup
import json


def fetch_heathrow_data(iso_date_str, direction):
    """
    Returns python object with data from Heathrow API.

    Inputs:
        `iso_date_str` (str): YYYY-MM-DD format of the date for which you want data.
        `direction` (str): Specify whether you want "departures" or "arrivals"

    Outputs:
        `heathrow_data` (dict);
        Successful =
            * Python Dictionary with keys: ['header', 'flightSummaryList', 'references']
            * Payload is the list output['flightSummaryList']['flight']
            * Where each dictionary in the list represents a different flightID.
        Failure =
            * Wrong Date will sys.exit("message")
            * Network/HTTP failures raise requests exceptions (raise_for_status).
    """
    def check_date_format(iso_date_str):
        """Exit with a clear message if date is not an ISO (YYYY-MM-DD) date string."""
        try:
            datetime.strptime(iso_date_str, '%Y-%m-%d')
        except ValueError:
            # Cleanup: the exception object was previously bound but unused.
            sys.exit(f"Invalid Date: {iso_date_str}. Required Format is YYYY-MM-DD")

    def make_url(iso_date_str, direction):
        """Build the Heathrow InfoHub flight-list URL for the given day and direction."""
        return ('https://api-dp-prod.dp.heathrow.com/infohub/api/v1/flights/'
                + direction + '/inactive/' + iso_date_str + 'Z')

    def fetch_html_soup(url_to_scrape, headers=None):
        """Send a GET request to the specified URL.

        Returns:
            Fail: raises requests.HTTPError if the request fails
            Success: soup (bs4.BeautifulSoup): soup with the response content
        """
        r = requests.get(url_to_scrape, headers=headers)
        r.raise_for_status()  # raise if the GET returned an unsuccessful status code
        soup = BeautifulSoup(r.text, 'html.parser')
        return soup

    # Cleanup: removed the dead `soup_to_dict` helper, which was never called.
    check_date_format(iso_date_str)
    url = make_url(iso_date_str, direction)
    # The API requires a browser-like Origin header.
    headers = {"origin": "https://www.heathrow.com"}
    soup = fetch_html_soup(url, headers)
    # NOTE(review): round-tripping the JSON text through BeautifulSoup is
    # unusual but preserved from the original; str(soup) should be the raw
    # response body for a JSON payload.
    heathrow_data = json.loads(str(soup))
    return heathrow_data
59e5ad69268d835b21d3f4540729a46cf0f3fd0d
697,514
def set_meta(request):
    """
    Context processor returning meta information for non-AJAX requests.

    When the request carries an ``upy_context`` with a truthy ``PAGE``,
    expose ``PAGE`` and ``NODE`` to the template context; otherwise return
    an empty dict.
    """
    extras = {}
    if (not request.is_ajax()
            and hasattr(request, 'upy_context')
            and request.upy_context['PAGE']):
        extras['PAGE'] = request.upy_context['PAGE']
        extras['NODE'] = request.upy_context['NODE']
    return extras
3ebcb2aa687d6d8b6b74fb603acbbc55069622b5
697,515
import argparse


def parse_options():
    """Parse command-line options and publish them to module-level globals.

    Side effects: updates the module globals ``schema_file``, ``db_path``
    and ``db_file`` from the corresponding options when they are truthy.

    Returns:
        argparse.Namespace: the parsed options.
    """
    global schema_file, db_path, db_file

    parser = argparse.ArgumentParser(description="Generate SQLite3 database "
                                     "with all options and switches for all "
                                     "installed commands.")
    parser.add_argument("--from-help",
                        help="WARNING: Use this parameter only on "
                        "virtual machine, which could be lost. Try to run all "
                        "found commands with '--help' parameter to fetch all "
                        "options from the output. Please use this only if you "
                        "know what you are doing. ",
                        action="store_true")
    parser.add_argument("--os-name",
                        help="Name of the OS. Whole name will be "
                        "created by concatenating OS name and OS version.",
                        required=True)
    parser.add_argument("--os-version",
                        help="Version of OS. Whole name will be "
                        "created by concatenating OS name and OS version.",
                        required=True)
    parser.add_argument("--schema-file", default="./schema.sql",
                        help="File with database schema. Default file: "
                        "./schema.sql")
    parser.add_argument("--db-file", default="switch.sqlite3",
                        help="The name of the database file.")
    parser.add_argument("--output-db-dir", default="/tmp/switchTest",
                        help="Directory to write generated database to. "
                        "Default directory: /tmp/switchTest/")
    options = parser.parse_args()

    # Publish the (truthy) options to the module-level configuration names.
    if options.schema_file:
        schema_file = options.schema_file
    if options.output_db_dir:
        db_path = options.output_db_dir
    if options.db_file:
        db_file = options.db_file

    return options
187444d9407031ddea051403291e5a63fd3ee1d0
697,516
def tokenize_table_name(full_table_name):
    """Tokenize a BigQuery table_name.

    Splits a table name in the format of 'PROJECT_ID.DATASET_NAME.TABLE_NAME'
    to a tuple of three strings, in that order. PROJECT_ID may contain periods
    (for domain-scoped projects), so everything before the last two components
    is treated as the project.

    Args:
        full_table_name: BigQuery table name, as PROJECT_ID.DATASET_NAME.TABLE_NAME.

    Returns:
        A tuple of project_id, dataset_name, and table_name.

    Raises:
        ValueError: If full_table_name cannot be parsed.
    """
    parts = full_table_name.split('.')
    if len(parts) < 3:
        raise ValueError('Table name must be of the form '
                         'PROJECT_ID.DATASET_NAME.TABLE_NAME')
    project_id = '.'.join(parts[:-2])
    dataset_name = parts[-2]
    table_name = parts[-1]
    return (project_id, dataset_name, table_name)
a092f749e18cdec41b50a8f1b7e1e5e99cd8e2e3
697,517
def linear_annuity_mapping_fprime(underlying, alpha0, alpha1):
    """linear_annuity_mapping_fprime
    First derivative of the linear annuity mapping function.
    See :py:func:`linear_annuity_mapping_func`.
    The derivative is constant:

    .. math::
        \\alpha^{\\prime}(S) := \\alpha_{0}

    where :math:`S` is underlying and :math:`\\alpha_{0}` is alpha0.

    :param float underlying: unused (derivative is constant in the underlying).
    :param float alpha0: slope of the linear mapping.
    :param float alpha1: not used.
    :return: value of first derivative of linear annuity mapping function.
    :rtype: float.
    """
    return alpha0
ff57723cad7ade65644744dc30abb6db5c1e6b95
697,518
def third_bashforth(state, tendencies_list, timestep):
    """Return the new state using third-order Adams-Bashforth.

    tendencies_list should be a list of dictionaries whose values are
    tendencies in units/second (from oldest to newest), and timestep should
    be a timedelta object.
    """
    dt_seconds = timestep.total_seconds()
    new_state = {}
    for key in tendencies_list[0]:
        # AB3 coefficients: 23/12, -4/3, 5/12 on the newest three tendencies.
        increment = (23. / 12 * tendencies_list[-1][key]
                     - 4. / 3 * tendencies_list[-2][key]
                     + 5. / 12 * tendencies_list[-3][key])
        new_state[key] = state[key] + dt_seconds * increment
    return new_state
6b1a62b94c662a1b14eafa3be6953e73486b6cfd
697,519
def getAttrFromList(objList, attr):
    """\
    Given a list of objects in objList, each having the attribute (or
    mapping key) attr, return a list comprising the value of attr for each
    object in objList. Mappings are looked up with .get (missing keys yield
    None); other objects use getattr (missing attributes raise
    AttributeError).

    Return:
        A list of values.

    Parameters:
        * objList: The list of objects
        * attr: The attribute had in common
    """
    # Bug fix: the original imported OrderedDict from *typing* and compared
    # type(o) == OrderedDict; the typing alias never equals
    # collections.OrderedDict, so real OrderedDict instances fell through to
    # getattr and raised AttributeError. isinstance(o, dict) covers dict and
    # every dict subclass (including collections.OrderedDict).
    values = []
    for o in objList:
        if isinstance(o, dict):
            values.append(o.get(attr, None))
        else:
            values.append(getattr(o, attr))
    return values
bef6386afff15a0d6a78d92f02878a47171dd9d7
697,521
def environment_algorithm(db):
    """! This algorithm interprets the data obtained from the environment sensors.
    @param db: The master database
    @return: ["environment_sensor", {...}] where the payload maps each of
        'temperature', 'humidity' and 'gas' to {'value': ..., 'flag': ...},
        with flag "HIGH", "LOW" or None relative to the thresholds in db.
    """
    readings = db['latest']['environment_sensor']
    msg = ["environment_sensor", {}]

    def classify(value, high, low):
        # Flag a reading against its configured high/low thresholds.
        if value >= high:
            return "HIGH"
        if value <= low:
            return "LOW"
        return None

    temperature = int(readings['temperature'])
    msg[1]['temperature'] = {
        'value': temperature,
        'flag': classify(temperature, db['Temperature_High'], db['Temperature_Low']),
    }

    humidity = int(readings['humidity'])
    msg[1]['humidity'] = {
        'value': humidity,
        'flag': classify(humidity, db['Humidity_High'], db['Humidity_Low']),
    }

    # Gas is reported in ohms*1000; convert to the configured VOC scale.
    gas = round(int(readings['gas']) / 1000, 2)
    msg[1]['gas'] = {
        'value': gas,
        'flag': classify(gas, db['VOC_High'], db['VOC_Low']),
    }

    return msg
153d6e0e2cc4bb00535bf975d92926775ff36b32
697,522
def remove_control_char(pdf_content):
    """
    Remove control characters (newlines, tabs, escapes, ...) from PDF text;
    python-docx cannot write control characters.

    :param pdf_content: PDF file content
    :return: the content with control characters stripped
    """
    # str.translate with a codepoint -> None table drops every character
    # with an ASCII code below 32 in a single pass.
    control_table = {codepoint: None for codepoint in range(32)}
    return pdf_content.translate(control_table)
84dff48a5654b12f7446f77cbc4d132716d2018c
697,523
def confirm(question: str) -> bool:
    """Ask a confirmation to the user in the console.

    Args:
    - question: the question to ask the user.

    Returns: True if the user answered 'yes' or 'y' (any capitalization,
    surrounding whitespace ignored), False otherwise.
    """
    # Robustness fix: normalize the answer so "Y", "Yes" and " yes " are
    # accepted; the original required an exact lowercase match.
    answer = input(question + " Enter (y)es or (n)o: ").strip().lower()
    return answer in ("yes", "y")
bd6415658d4c7adf73d682fb151dc8aecc7eaf6d
697,524
def get_flag_add(a, b, ci):
    """
    Compute the 4-bit result and flags of a + b + ci.

    a, b: int
        must be 4-bit numbers
    ci: int
        must be a 1-bit number
    ret: tuple[int, int]
        ret[0] packs the flags: bit 3 is cy (carry out of bit 3),
        bit 2 is ov (signed overflow, computed as (A XNOR B) AND (A XOR R)).
        ret[1] is the 4-bit sum (a + b + ci) & 0xF.
    """
    total = a + b + ci
    # Carry: bit 4 of the raw sum, moved down to bit 3 of the flag word.
    carry_flag = (total >> 1) & 0x08
    # Overflow: operands had the same sign and the result's sign differs;
    # (A XNOR B) AND (A XOR R) on bit 3, moved down to bit 2.
    overflow_flag = (((~(a ^ b)) & (a ^ total)) >> 1) & 0x04
    return carry_flag | overflow_flag, total & 0xF
eebc7eefa24cb7cd92265e5859ce8b4018348242
697,525
def make_safe_label(label):
    """Avoid name clashes with GraphViz reserved words such as 'graph'.

    Each reserved word gets an 'X' appended (in a fixed pass order), then
    '.' is rewritten to '__' and '*' is removed.
    """
    unsafe_words = ("digraph", "graph", "cluster", "subgraph", "node")
    safe = label
    for word in unsafe_words:
        safe = safe.replace(word, word + "X")
    return safe.replace(".", "__").replace("*", "")
e122575956e492cbb450287a6b91174fdc59233a
697,526
def load_bands(src, bands, masked=False):
    """
    Load selected bands of a raster as an array.

    params:
    --------
    src : rasterio.DatasetReader object
    bands : list
        list of bands to load, e.g. [1,2,3]
    masked : bool (default: False)
        if True exclude nodata values

    return:
        tuple: (array, metadata) where metadata is the source profile
        updated for a GTiff with len(bands) bands
    """
    arr = src.read(bands, masked=masked)
    metadata = src.profile
    metadata.update(driver='GTiff', count=len(bands))
    return arr, metadata
e0c131ca93066387ae12f9bc6206626d4313cdf7
697,527
def is_in_tol(value, expected_value, tolerance):
    """Check whether value lies within expected_value +/- tolerance.

    Parameters
    ----------
    value : int, float
        Value of interest
    expected_value : int, float
        Expected value
    tolerance : int, float
        Allowed deviation from expected_value

    Returns
    ----------
    bool
        True if value is within expected_value +/- tolerance, exclusive
    """
    below_upper = value < expected_value + tolerance
    above_lower = value > expected_value - tolerance
    return below_upper and above_lower
b1cde33b7979995b86e3ecd4cd41330cfa31c447
697,529
def uncamel(name):
    """helloWorld => hello_world

    Note: a leading capital also gets a leading underscore
    (HelloWorld => _hello_world).
    """
    pieces = []
    for ch in name:
        if ch.isupper():
            pieces.append('_' + ch.lower())
        else:
            pieces.append(ch)
    return ''.join(pieces)
ab2496d24bff11f1ae24f6f480c5a8439fb24b04
697,530
def merge(left, right):
    """
    Merge two sorted arrays into a new sorted list of size
    len(left) + len(right).

    Ties are broken in favor of *right* (matching the original comparison
    ``left[0] < right[0]``).

    Performance fix: the original consumed the inputs with ``pop(0)``,
    which is O(n) per call and made the merge O(n^2) while also destroying
    the caller's lists. Index pointers keep the merge O(n) and leave the
    inputs untouched.
    """
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # Append whatever remains of the non-exhausted side.
    result.extend(left[i:])
    result.extend(right[j:])
    return result
9a962735cc157a135c4d65b4f18c5e55d8e182d2
697,531
import argparse


def parse_args():
    """Parse known command-line arguments for the loaded client.

    Unknown arguments are ignored (parse_known_args).

    Returns:
        argparse.Namespace: the recognized options.
    """
    parser = argparse.ArgumentParser("Loaded_client")
    parser.add_argument('--model-path', type=str, required=True,
                        help="path to a NN model.")
    parser.add_argument('--save-dir', type=str, default="",
                        help="path to save logs.")
    parser.add_argument("--history-len", type=int, default=10,
                        help="Feature history length.")
    parser.add_argument("--input-features", type=str,
                        default=["sent latency inflation", "latency ratio",
                                 "recv ratio"],
                        nargs=3,
                        help="Feature type.")
    known, _unknown = parser.parse_known_args()
    return known
57a80c25c8fe96e37ef48126966b80203525e3b2
697,532