Dataset columns — content: string (lengths 35 to 416k) · sha1: string (length 40) · id: int64 (0 to 710k)
def get_first_package_name(name):
    """
    Returns the first package name. From `a.b.c` it returns `a`.

    :param str name: Full module name
    :return: First package name.
    :rtype: str
    """
    return name.split(".")[0]
814b175356ef715af46f788dc9eac7e776884b30
698,298
def _parse_url_work_relation(response):
    """
    response is {'resource': 'https://imslp.org/wiki/7_Bagatelles,_Op.33_(Beethoven,_Ludwig_van)',
    'relations': [{'source-credit': '', 'target-credit': '',
    'type-id': '0cc8527e-ea40-40dd-b144-3b7588e759bf', 'type': 'download for free',
    'end': None, 'direction': 'forward', 'ended': False, 'begin': None,
    'target-type': 'work', 'work': {'title': '7 Bagatelles, op. 33',
    'attributes': [], 'languages': [], 'disambiguation': '', 'type': None,
    'iswcs': [], 'type-id': None, 'id': '94a19e47-2c1d-425b-b4f0-63d62d5bf788',
    'language': None}, 'attribute-values': {}, 'attribute-ids': {},
    'attributes': []}], 'id': '2d264d6e-5082-46a7-a60a-e2d02ab103e1'}
    """
    relations = response.get('relations', [])
    if relations:
        return relations[0]['work']['id']
4e36330be029846bc44c235dafb20e6dc9c5f58f
698,299
import toml
import yaml
import json


def mock_settings_file(request, monkeypatch, tmpdir, file_extension):
    """Temporarily write a settings file and return the filepath and the
    expected settings outcome."""
    ext = file_extension
    p = tmpdir.mkdir("sub").join("settings" + ext)
    expected_result = {"testgroup": {"testvar": 123}, "othergroup": {"blabla": 555}}
    if ext == ".toml":
        p.write(toml.dumps(expected_result))
    elif ext in [".yml", ".yaml"]:
        p.write("---\n" + yaml.dump(expected_result))
    elif ext == ".json":
        p.write(json.dumps(expected_result))
    else:  # pragma: nocover
        raise NotImplementedError("Invalid file extension: {}.".format(ext))
    return str(p), expected_result
4899ebbc9efbe31a931387d4a09dcb8c727eaf8d
698,303
import os


def find_all(filename, path):
    """Returns all files called <filename> in <path>."""
    result = []
    for root, dirs, files in os.walk(path):
        if filename in files:
            result.append(os.path.join(root, filename))
    return result
f8ebdb55b3c538bdc64d35526421e41f76d913dc
698,304
def MathSolution(N):
    """Use sum of squares formula and sum of integers formula for O(1) solution"""
    sumOfSquares = N * (N+1) * (2*N + 1) // 6
    squareOfSum = pow((N + 1) * N // 2, 2)
    diff = squareOfSum - sumOfSquares
    print("Math solution: ", diff)
    return diff
d30046150d95c3217bc446fc3c8e5f3a985f6c51
698,305
import requests


def get_page(link):
    """Fetch the given link and return the raw HTML content."""
    r = requests.get(link)
    r.raise_for_status()
    return r.content
913b96c8670e7d91da92aa00d53582d7eae33dbe
698,306
def replicate_idx(head):
    """Return a list of indices representing replicate groups"""
    h = head
    g = h.groupby(['Sample Name', 'Measurement day'])
    idx = [i[1].index for i in g]
    return idx
ac5b80de8d15ef283185b0e93d495516695aa936
698,308
def str2ascii(string: str) -> list:
    """Convert a string to a list of ascii-codes"""
    return [ord(i) for i in string]
a938b0c585e78a455721e9d17e8915b0769a025f
698,310
def rgbToHexColor(r, g, b):
    """Convert r, g, b to #RRGGBB."""
    return f'#{int(r):02X}{int(g):02X}{int(b):02X}'
5cff9abc67c235a4f0fdf258ea555f018d80d1ad
698,311
import pathlib


def within_dev_env():
    """detect whether we are within an ASMR dev environment."""
    return pathlib.Path('/.ASMR_DEV_ENV').exists()
86430b29846435b03460b2676d41cea0ba4063e4
698,312
from typing import Optional
import re


def anonymize_run_string(t: Optional[str]) -> Optional[str]:
    """
    Generalizes hex addresses, some sanitizer error numbers, and values in
    "()" brackets. Use with output, location or issue title.
    """
    if not t:
        return t
    re_hex_addr = re.compile(r"0[xX][0-9a-fA-F]{1,}")
    re_sanitizer_error_id = re.compile(r"^==(\d+)==([^=])", re.MULTILINE)
    re_register = re.compile(r" sp 0xADDRESS T\d+\)$", re.MULTILINE)
    re_sh_segfault = re.compile(
        r"^sh: line \d+:\s*\d+\s*(?:Segmentation fault|Aborted).*$", re.MULTILINE
    )
    re_bracketed_values = re.compile(r"(\(.*?\))", re.MULTILINE)
    re_multiple_spaces = re.compile(r"( {2,})")
    t = re.sub(re_hex_addr, "0xADDRESS", t)
    t = re.sub(re_register, " sp 0xADDRESS T<NUM>)", t)
    t = re.sub(re_sanitizer_error_id, r"==1==\2", t)
    t = re.sub(re_sh_segfault, "Segmentation fault", t)
    t = re.sub(re_bracketed_values, " ", t)
    t = re.sub(re_multiple_spaces, " ", t)
    return t
ff475c9fa6e4d56a8a00477c34e2e9451ac8713d
698,313
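A quick sanity check of the normalization above; the sample sanitizer line is illustrative, not from the source:

sample = "==1234==ERROR: AddressSanitizer: heap-use-after-free at 0x602000000010 (in main)"
print(anonymize_run_string(sample))
# ==1==ERROR: AddressSanitizer: heap-use-after-free at 0xADDRESS
# (the error id is generalized to 1, the hex address masked, and the
# parenthesized value dropped; a trailing space from the bracket removal remains)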
def filter_refgene_ensgene_exon(var_df_per_chrom, exon_class, refgene, ensgene):
    """Filter for a refgene function, ensembl function or both.

    Args:
        var_df_per_chrom (:obj:`DataFrame`): all variants in a chromosome
        exon_class (:obj:`str`): annovar EXON class to filter on (default None)
        refgene (:obj:`boolean`): if using RefSeq to define variant classes
        ensgene (:obj:`boolean`): if using ENSEMBL to define variant classes

    Returns:
        var_df_per_chrom (:obj:`DataFrame`): only variants in the desired
            `exon_class`

    Description:
        First prepends a ^ so that only the highest impact `exon_class` is
        considered as the de-facto class for filtering.
    """
    exon_class = "^" + exon_class
    if not refgene and not ensgene:
        print("Using RefGene for filtering")
        refgene = True
    if refgene:
        vars_refgene = var_df_per_chrom.exon_func_refgene.str.contains(
            exon_class, regex=True)
        var_df_per_chrom = var_df_per_chrom[vars_refgene]
    if ensgene:
        vars_ensgene = var_df_per_chrom.exon_func_ensgene.str.contains(
            exon_class, regex=True)
        var_df_per_chrom = var_df_per_chrom[vars_ensgene]
    return var_df_per_chrom
905db5cab873eabce8ee24872231f2dacabcec29
698,314
def _lin_f(p, x):
    """Basic linear regression 'model' for use with ODR.

    This is a function of 2 variables, slope and intercept.
    """
    return (p[0] * x) + p[1]
52bc228dd48ee7939fa60cd052989de70a44b197
698,315
def mock_user(request):
    """Define a mock user to be used when testing REST API services"""
    user = dict(
        id="test_id",
        name="User Name",
        description="I'm a test user",
        url="someurl",
    )
    return user
ff6eee62cd27328130bcd37c6164d3b4b17e2558
698,316
def get_type_as_string(instance: object) -> str:
    """
    >>> x='a'
    >>> get_type_as_string(x)
    'str'
    >>> x=1
    >>> get_type_as_string(x)
    'int'
    >>> import decimal
    >>> x=decimal.Decimal(1.00)
    >>> get_type_as_string(x)
    'Decimal'
    >>> x=[]
    >>> get_type_as_string(x)
    'list'
    """
    return type(instance).__name__
5fd2dd4362a98b2121f9ad2eb4e44ead83446327
698,317
import numbers


def update(default_obj, custom_obj):
    """ Recursively updates data objects, according to the custom settings """
    # if simple basic type, return the custom object
    if any((
        isinstance(custom_obj, numbers.Number),
        isinstance(custom_obj, str),
        isinstance(custom_obj, bytes)
    )):
        return custom_obj

    # if mapping type, update every value from the mapping
    try:
        for key, value in custom_obj.items():
            default_value = default_obj.get(key)
            default_obj.update(((key, update(default_value, value)),))
    except AttributeError:
        pass
    except Exception as exception:
        print('Warning: {}\n'.format(exception))

    return default_obj
26fe3d6d16b58420913143316d6d2ce3c5ade0e2
698,318
import math


def get_factors(number):
    """
    Returns all possible factors
    :param number:
    :return:
    """
    factors = set()
    # efficient way of finding all the factors of a number in Python
    for n in range(1, int(math.sqrt(number)) + 1):
        if number % n == 0:
            factors.add(n)
            # the factors of a number not including the number itself
            if n != 1:
                factors.add(number // n)
    return factors
2ee587a0459a4868993e56448da29dda40298519
698,319
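A couple of spot checks; note the n != 1 guard keeps the number itself out of the result:

print(get_factors(28))  # {1, 2, 4, 7, 14} -- 28 itself is excluded
print(get_factors(16))  # {1, 2, 4, 8}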
def resample(x, fs):
    """
    Downsample the input stream (or trace) to the target sampling rate `fs`.
    A low-pass filter just below the new Nyquist frequency is applied first
    (when needed) to avoid aliasing. The input is modified in place.

    Parameters
    ----------
    x : obspy.Stream or obspy.Trace
    fs : float
        Target sampling rate

    Returns
    -------
    x : obspy.Stream or obspy.Trace
    """
    nyquist_f = fs/2 - (fs/2)*0.01
    try:
        x.filter('lowpass', freq=nyquist_f, corners=4, zerophase=True)
    except ValueError:
        pass  # when fs > sampling_rate(x), filtering is not needed
    x.interpolate(sampling_rate=fs, method="weighted_average_slopes")
    return x
c72df4fca33e1fdbbd0ef76fbed476c1e61b145d
698,320
def is_property(class_to_check, name):
    """Determine if the specified name is a property on a class"""
    return hasattr(class_to_check, name) and isinstance(
        getattr(class_to_check, name), property
    )
0f7dce28f1e78e8b6b937a5a6536060886666371
698,321
def diag_val(request):
    """Diagonal value of matrix."""
    return request.param
d00bc2f9c8e676eb3cbc1c9769be2c8b584cb390
698,322
from string import ascii_uppercase


def get_cluster_label(cluster_id):
    """
    It assigns a cluster label according to the cluster id that is supplied.
    It follows the criterion from below:

    Cluster id | Cluster label
    0   --> A
    1   --> B
    2   --> C
    25  --> Z
    26  --> AA
    27  --> AB
    28  --> AC

    Parameters
    ----------
    cluster_id : int
        The id of the cluster that will be used to generate the label

    Returns
    -------
    cluster_label : str
        The cluster label according to the supplied id and the criterion
        mentioned above
    """
    cluster_label = ''
    current_index = cluster_id
    while current_index >= 0:
        if current_index < len(ascii_uppercase):
            cluster_label += ascii_uppercase[current_index]
        else:
            for letter in reversed(cluster_label):
                if letter != 'Z':
                    idx = ascii_uppercase.index(cluster_label[-1])
                    cluster_label = \
                        cluster_label[:-1] + ascii_uppercase[idx + 1]
                    break
            else:
                cluster_label = 'A' + cluster_label
        current_index -= 26
    return cluster_label
06210786dfb375dce31ec3c014cc5f545d019c50
698,323
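The labels follow spreadsheet-style column naming; a few checks against the table in the docstring:

print([get_cluster_label(i) for i in (0, 1, 25, 26, 27, 51, 52)])
# ['A', 'B', 'Z', 'AA', 'AB', 'AZ', 'BA']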
def get_cookie_from(response):
    """
    :param response: a requests.response object
    :rtype: string
    :return: cookie information as string
    """
    cookie = response.headers['Set-Cookie'].split(';')[0]
    return cookie
d5b34eecf092119cd537b84b8aed9877becff62d
698,324
def extract_keys(nested_dict):
    """
    This function is used in order to parse the .json output generated by
    executing conda search <package-name> for potential install candidates.

    Parameters
    ----------
    nested_dict : dictionary
        .json file

    Returns
    -------
    key_lst: list
        list containing only the keys (the potential installation candidates)
    """
    key_lst = []
    for key, value in nested_dict.items():
        key_lst.append(key)
        if isinstance(value, dict):
            # collect the keys of nested dictionaries as well
            key_lst.extend(extract_keys(value))
    return key_lst
04cb7066f9e2c593a650cb23152b177508f0dc48
698,325
import os


def list_all_files_recursively(src_directory):
    """
    Make a dictionary of all the files in the given directory recursively.

    Returns dictionary = root: { dirs: {} , files: {} }
    """
    cwd = os.getcwd()
    os.chdir(src_directory)
    files_tree = {}
    for root, dirs, files in os.walk(src_directory, topdown=True):
        files_tree[root] = {}
        files_tree[root]["dirs"] = dirs
        files_tree[root]["files"] = files
    os.chdir(cwd)
    return files_tree
e57bb0703032c9828bc0d7decd7edcbb706c4dfd
698,326
def get_all_recommendations(csv_file):
    """
    This function reads all recommendations for users using the result of the
    recommendation system stored in a csv file.

    :param csv_file: the DataFrame loaded from the csv file where the
        recommendations are stored
    :return: the dataframe of recommendations predicted for users
    """
    df_recommendations = csv_file
    df_recommendations.columns = ['user_id', 'recommendations']
    return df_recommendations
fe0578bb77c60deaf833a22fd9a1b9e946183f91
698,327
from bs4 import BeautifulSoup


def filter_bank_account_info(html: str) -> dict:
    """Filter the bank account info in the data of the HTML structure.

    Keyword arguments:
    `html: str` - Content in html structure.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Get all the labels to use as a key in the dictionary return.
    labels = [div.text for div in soup.find_all('div', 'output-subtitle')]
    # Get all the data to use as a value in the dictionary return.
    data_of_bank_account = [div.text for div in soup.find_all('div', 'output-txt')]
    # Join labels with bank account data and convert to a dictionary in the
    # key-value format respectively.
    data_dict = dict(zip(labels, data_of_bank_account))
    return data_dict
e4558d52895dbb01f57a26d3722597577367ba26
698,328
def is_prime(nb: int) -> bool:
    """Check if a number is a prime number or not

    :param nb: the number to check
    :return: True if prime, False otherwise
    """
    # 0, 1 and negative numbers are not prime
    if nb < 2:
        return False
    # even numbers greater than 2 are not prime
    if nb % 2 == 0 and nb > 2:
        return False
    # checking all numbers up to the square root of the number
    # full explanation:
    # https://stackoverflow.com/questions/18833759/python-prime-number-checker/18833870#18833870
    return all(nb % i for i in range(3, int(nb ** .5) + 1, 2))
4368fd20eadd4d773d5f5af06f8717ef633d48b8
698,329
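Spot checks for the predicate, including the small-number guard added above:

assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
assert not is_prime(0) and not is_prime(1)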
async def present(hub, ctx, name: str, image: str, **kwargs): """ Ensure a container is present .. code-block:: yaml new container: lxd.containers.present: - name: webserver01 - image: ubuntu1804 """ ret = { "result": False, "name": name, "comment": "", "changes": {}, } container = await hub.exec.lxd.containers.get(ctx, name) if name in container: ret["result"] = True ret["comment"] = 'Container "{}" already exists'.format(name) return ret if ctx["test"]: ret["result"] = None ret["comment"] = 'Container "{}" does not exist and will be created'.format( name ) return ret changes = await hub.exec.lxd.containers.create(ctx, name, image, wait=True) container = await hub.exec.lxd.containers.get(ctx, name) ret["result"] = True ret["comment"] = 'Container "{}" was created'.format(name) ret["changes"] = {"new": changes["status"]} return ret
911b27a3318bc273f790041f2c003f6e8f56645d
698,330
from typing import Sequence
from typing import Any
from functools import reduce
from operator import getitem


def get_in_dict(d: dict, keys: Sequence[str]) -> Any:
    """
    Retrieve nested key from dictionary

    >>> d = {'a': {'b': {'c': 3}}}
    >>> get_in_dict(d, ('a', 'b', 'c'))
    3
    """
    return reduce(getitem, keys, d)
d851c1d9360e90cbecbff401b98b76afb9b7e52f
698,331
import logging


def level_from_verbosity(verbosity=3, maxlevel=logging.CRITICAL):
    """Return the logging level corresponding to the given verbosity."""
    return max(1,  # 0 disables it, so we use the next lowest.
               min(maxlevel, maxlevel - verbosity * 10))
45064e04e27fa170257f37faa56de02ea544f811
698,332
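With the default maxlevel of logging.CRITICAL (50), each verbosity step lowers the threshold by 10, clamped to 1:

for v in range(6):
    print(v, level_from_verbosity(v))
# 0 50 (CRITICAL), 1 40 (ERROR), 2 30 (WARNING),
# 3 20 (INFO), 4 10 (DEBUG), 5 1 (everything)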
def add_tag(row, tag_key, capitalize=False):
    """
    Looks up the correct tag based on the row, and appends it to the list of tags

    :param row:
    :param tag_key: the tag key (string of tag) to add
    :param capitalize: True to initial cap the tag key
    :return: the updated row
    """
    if not tag_key:
        return row
    if 'tag' not in row:
        row['tag'] = []
    if capitalize:
        tag_key = tag_key.capitalize()
    if tag_key not in row['tag']:
        row['tag'].append(tag_key)
    return row
f8a84d4eeb82f516a301553528c3f66741879a7a
698,333
def get_host_fsids(node_metadata):
    """
    Return all the cluster FSIDs found for each socket in a host
    """
    all_fsids = []
    for socket_metadata in node_metadata['ceph']['sockets'].values():
        config = socket_metadata.get('config', {})
        if not config:
            continue
        fsid = config.get('fsid')
        if not fsid:
            continue
        all_fsids.append(fsid)
    return all_fsids
543cf63d563fe5993dae343cc662084f51147162
698,334
def nested_dict_traverse(path, d):
    """Uses path to traverse a nested dictionary and returns the value at
    the end of the path. Assumes path is valid; raises KeyError if it is not.

    Warning: if key is in multiple nest levels, this will only return one of
    those values."""
    val = d
    for p in path:
        val = val[p]
    return val
1f7e4744585ae134ab1f2eface9284756526ed65
698,335
def is_file_download_finish(file_name):
    """
    Determine whether a file has finished downloading: a file whose name
    ends with the .xltd suffix is still being downloaded.

    :param file_name:
    :return:
    """
    return not file_name.endswith('.xltd')
b782d358359c03f50efb6d791bd4403a4190aac0
698,336
def fix_zip5(zip5):
    """add leading zeros if they have been stripped by the scorecard db"""
    if len(zip5) == 4:
        return "0{0}".format(zip5)
    if len(zip5) == 3:
        return "00{0}".format(zip5)
    else:
        return zip5[:5]
ff781562c97c297dde7a05740436b5d668d7fbb8
698,337
def count_bit_differences(s1, s2):
    """number of positions at which the corresponding bits are different"""
    assert len(s1) == len(s2), 'len(s1) ({}) != len(s2) ({})'.format(len(s1), len(s2))
    # ~(~s1 ^ s2) is equivalent to s1 ^ s2: XNOR marks equal bits, so its
    # negation marks the differing ones (assumes bitarray-like operands).
    return (~(~s1 ^ s2)).count(True)
618a469cf6be2386b13950813102a18ed73ba68d
698,338
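The operands must support bitwise operators and .count(True); the bitarray package fits (an assumption, since the snippet does not name the type):

from bitarray import bitarray

a = bitarray('10110')
b = bitarray('10011')
print(count_bit_differences(a, b))  # 2 -- positions 2 and 4 differ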
def flag_callback():
    """JS callback for the flagging button

    Returns
    -------
    code : :obj:`str`
    """
    code = """
        //f_sources: Flagged data source
        //n_ax: number of figures available
        //uf_sources: unflagged data source
        for (let n=1; n<=n_ax; n++){
            for (let i=0; i<uf_sources.length; i++){
                if (cb_obj.active.includes(0)){
                    uf_sources[i].data[`y${n}`] = f_sources[i].data[`iy${n}`];
                    uf_sources[i].change.emit();
                }
                else{
                    uf_sources[i].data[`y${n}`] = f_sources[i].data[`y${n}`];
                    uf_sources[i].change.emit();
                }
            }
        }
        """
    return code
6fdc9abbfe103bdd40ce02e009f9c59be7b32c9f
698,339
import itertools


def list_flatten(_l):
    """Flatten a complex nested list of nested lists into a flat list"""
    return list(itertools.chain(
        *[list_flatten(j) if isinstance(j, list) else [j] for j in _l]
    ))
05d1e4018accfe850c07504f123d85949a2ced60
698,340
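Arbitrarily nested lists collapse to a single level:

print(list_flatten([1, [2, [3, 4]], [], [5]]))  # [1, 2, 3, 4, 5]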
def get_create_indexes_queries(graph_name, backend):
    """Format all SQLite CREATE INDEX statements with the name of the RDF
    graph to insert."""
    if backend == "sqlite" or backend == "sqlite-catalog":
        return [
            f"CREATE UNIQUE INDEX IF NOT EXISTS {graph_name}_spo_index ON {graph_name} (subject,predicate,object);",
            f"CREATE UNIQUE INDEX IF NOT EXISTS {graph_name}_osp_index ON {graph_name} (object,subject,predicate);",
            f"CREATE UNIQUE INDEX IF NOT EXISTS {graph_name}_pos_index ON {graph_name} (predicate,object,subject);"
        ]
    else:
        raise Exception(f"Unknown backend for SQLite: {backend}")
445a13ee13f7ae7dc3bd8d260ebf7deed6f0709c
698,341
import os


def complete_path(wdir, path):
    """returns the joined path"""
    return os.path.join(wdir, path)
7fab8fad99f10b267e58cbe05b885d6fbd44dc86
698,342
def handle_2_columns(datalist, return_list=False):
    """This function has the intent of changing:
    ('A8', '2') => ('A8', '', '2')
    ('A8', '', '2') => ('A8', '', '2')
    [('E2', '5')] => [('E2', '', '5')]
    [('G1', '', '5')] => [('G1', '', '5')]
    with the purpose of handling 2 column csv part file inputs,
    as at times when 2 column csv files are input it creates
    tuples of length 2 instead of 3
    """
    if isinstance(datalist, list):
        # if datalist is a list, extract the tuple
        datalist = datalist[0]
        return_list = True
    if len(datalist) == 2:
        # if tuple is of length 2, insert empty value at position 1
        datalist = (datalist[0], "", datalist[1])
    if return_list:
        # if a list was passed to the function, return output as a list
        datalist = [datalist]
    return datalist
57a9c1dbad9484a3513cebab68069983d3ae9f35
698,343
def format_button(recipient_id, button_text, buttons):
    """
    Ref: https://developers.facebook.com/docs/messenger-platform/send-api-reference/button-template
    """
    return {
        "recipient": {"id": recipient_id},
        "message": {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "button",
                    "text": button_text,
                    "buttons": buttons,
                }
            }
        }
    }
a05f6d08b87d0897a65b291c539b3cbd883c0799
698,344
from pathlib import Path
import os


def get_active_lab(eveng_directory: str):
    """get active lab from eve-ng directory

    Args:
        eveng_directory (str): path to eve-ng directory

    Returns:
        [str]: return lab path or None
    """
    # ensure directory exists
    Path(eveng_directory).mkdir(exist_ok=True)
    # path to active lab file
    active_lab_filepath = Path(eveng_directory) / "active"
    if active_lab_filepath.exists():
        return active_lab_filepath.read_text()
    return os.environ.get("EVE_NG_LAB_PATH")
921cfe1b9e6456a30ec19623e59fc0e5325193f0
698,345
import re


def remove_line_comment(old_str: str, comment_mode: str):
    """
    Strip an inline comment from a line of code.

    :param old_str: the original string
    :param comment_mode: the comment marker
    :return: the string with the comment removed
    """
    temp_str = new_str = old_str
    if comment_mode not in old_str:
        return new_str
    # mask out string literals so comment markers inside them are ignored
    for regex in ["\".+\"", "\'.+\'"]:
        for s in re.findall(regex, old_str):
            temp_str = temp_str.replace(s, "x" * len(s))
    # if the marker only occurred inside string literals, keep the line whole
    if comment_mode not in temp_str:
        return new_str
    new_str = old_str[0:temp_str.index(comment_mode)]
    return new_str
b5b53f909bf04da8688b7ef54d56815dfadbf993
698,346
import numpy


def ComputeGeodesicSphereMesh(radius=1.0, level=2):
    """Computes a geodesic sphere to a specified level. Returns the vertices
    and triangle indices"""
    GTS_M_ICOSAHEDRON_X = numpy.sqrt(numpy.sqrt(5)+1)/numpy.sqrt(2*numpy.sqrt(5))
    GTS_M_ICOSAHEDRON_Y = numpy.sqrt(2)/numpy.sqrt(5+numpy.sqrt(5))
    GTS_M_ICOSAHEDRON_Z = 0.0
    vertices = [numpy.array((+GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y)),
                numpy.array((+GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
                numpy.array((+GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X)),
                numpy.array((+GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X)),
                numpy.array((+GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
                numpy.array((+GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y)),
                numpy.array((-GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, +GTS_M_ICOSAHEDRON_X)),
                numpy.array((+GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y)),
                numpy.array((-GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
                numpy.array((-GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X)),
                numpy.array((-GTS_M_ICOSAHEDRON_X, -GTS_M_ICOSAHEDRON_Y, +GTS_M_ICOSAHEDRON_Z)),
                numpy.array((+GTS_M_ICOSAHEDRON_Z, -GTS_M_ICOSAHEDRON_X, +GTS_M_ICOSAHEDRON_Y))]
    triindices = [[0, 1, 2], [1, 3, 4], [3, 5, 6], [2, 4, 7], [6, 5, 8],
                  [2, 7, 9], [5, 0, 8], [9, 7, 10], [1, 0, 5], [10, 7, 11],
                  [3, 1, 5], [6, 10, 11], [3, 6, 11], [9, 10, 8], [4, 3, 11],
                  [6, 8, 10], [7, 4, 11], [2, 1, 4], [8, 0, 9], [0, 2, 9]]
    while level > 0:
        level -= 1
        newindices = []
        mapnewinds = dict()
        for tri in triindices:
            # for every tri, create 3 new vertices and 4 new triangles.
            v = [vertices[i] for i in tri]
            inds = []
            for j in range(3):
                key = (tri[j], tri[numpy.mod(j+1, 3)])
                if key in mapnewinds:
                    inds.append(mapnewinds[key])
                else:
                    mapnewinds[key] = mapnewinds[key[::-1]] = len(vertices)
                    inds.append(len(vertices))
                    vnew = v[j] + v[numpy.mod(j+1, 3)]
                    vertices.append(vnew/numpy.sqrt(sum(vnew**2)))
            newindices += [[tri[0], inds[0], inds[2]], [inds[0], tri[1], inds[1]],
                           [inds[2], inds[0], inds[1]], [inds[2], inds[1], tri[2]]]
        triindices = newindices
    return radius*numpy.array(vertices), triindices
d823f621b23d04c1f195968337649d03468366e0
698,347
def get_position_key(atom):
    """
    Get atom position as tuple of integers to be used as lookup key.

    Rounds the coordinates to 4 decimal places before multiplying by 50 to
    get unique integer-space coordinates, and avoid floating point errors.

    :param atom: Atom to get key for
    :type atom: :class:`nanome.structure.Atom`
    :return: Position key tuple
    :rtype: (int, int, int)
    """
    return tuple(map(lambda x: int(50 * round(x, 4)), atom.position))
b367fd851a0afa903c2d73548849cc8864233f56
698,348
def _str_eval_break(eval, act, ctxt):
    """Passes through [break] so that the writer can handle the formatting
    code."""
    return ["[break]"]
2da4059adb4305a3d7e2183c29d89c9539f3c63b
698,349
import math


def tuning_score_size(performance_space, peak):
    """
    Computes TS_s, the tuning score based on the size of the performance
    space compared to the achievable performance.
    """
    return math.sqrt(performance_space.top() / peak)
9e5386932fa164f0d33e65dd41d57d7a22ebe4d6
698,350
import sys


def get_month_from_string(month_string):
    """Convert string month to int month with Jan == 1"""
    months = {
        'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
        'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12,
    }
    if month_string not in months:
        sys.exit("Could not convert string month to int month")
    return months[month_string]
921e3bb7cf25bc1b5ccc759f6217886e3bd925d2
698,351
def available(func):
    """A decorator to indicate that a method on the adapter will be exposed
    to the database wrapper, and will be available at parse and run time.
    """
    func._is_available_ = True
    return func
a0c886bccb9bbbdacfe23c5659929b8be68c004e
698,352
def spaces_and_caps_to_snake(spaced_str: str) -> str:
    """Convert caps and spaces to snake."""
    underscored = spaced_str.strip().replace(' ', '_')
    return underscored.lower()
31796e696c2d4efa7821a0cdff09e04b3e7e8232
698,353
from typing import OrderedDict
from typing import Type
import collections


def kwtypes(**kwargs) -> OrderedDict[str, Type]:
    """
    This is a small helper function to convert the keyword arguments to an
    OrderedDict of types.

    .. code-block:: python

        kwtypes(a=int, b=str)
    """
    d = collections.OrderedDict()
    for k, v in kwargs.items():
        d[k] = v
    return d
8024e6940f84f2eab8d4c44924889624d75ca3bd
698,354
def sol_rad_from_sun_hours(daylight_hours, sunshine_hours, et_rad):
    """
    Calculate incoming solar (or shortwave) radiation, *Rs* (radiation hitting
    a horizontal plane after scattering by the atmosphere) from relative
    sunshine duration.

    If measured radiation data are not available this method is preferable to
    calculating solar radiation from temperature. If a monthly mean is
    required then divide the monthly number of sunshine hours by number of
    days in the month and ensure that *et_rad* and *daylight_hours* was
    calculated using the day of the year that corresponds to the middle of
    the month.

    Based on equations 34 and 35 in Allen et al (1998).

    :param daylight_hours: Number of daylight hours [hours]. Can be
        calculated using ``daylight_hours()``.
    :param sunshine_hours: Sunshine duration [hours].
    :param et_rad: Extraterrestrial radiation [MJ m-2 day-1]. Can be
        estimated using ``et_rad()``.
    :return: Incoming solar (or shortwave) radiation [MJ m-2 day-1]
    :rtype: float
    """
    # _check_day_hours(sunshine_hours, 'sun_hours')
    # _check_day_hours(daylight_hours, 'daylight_hours')

    # 0.5 and 0.25 are default values of regression constants (Angstrom values)
    # recommended by FAO when calibrated values are unavailable.
    return (0.5 * sunshine_hours / daylight_hours + 0.25) * et_rad
86b57eee15d486b4acfe87bf518e5077cfd38c97
698,355
def sort_libraries(libraries):
    """Sort libraries according to their necessary include order"""
    for i in range(len(libraries)):
        for j in range(i, len(libraries)):
            if libraries[i].depends_on(libraries[j]) or libraries[i].has_addon_for(libraries[j]):
                libraries[i], libraries[j] = libraries[j], libraries[i]
    return libraries
81187aaeb1879d8de4b23d0adae6547739f6a327
698,356
def get_all_inputs( manifest, only_inputs_with_default_values = False, return_inputs_to_update_states = True, return_inputs_to_calculate_outputs = True, return_inputs_to_reset_states = True ): """ Return a key-value structure containing the inputs that are needed to run the system the manifest is referring to. Please note, that some inputs that are defined might not appear in in case they are not used or needed in the system. The list manifest['io']['all_inputs_by_port_number'] contains the full list of system inputs. """ ret = {} def fill_in(ports): for i in range(len(ports['names'])): # introduce new structure s = {} k = ports['names'][i] s['cpptype'] = ports['cpptypes'][i] s['printf_pattern'] = ports['printf_patterns'][i] s['port_number'] = ports['port_numbers'][i] if 'properties' in ports: properties = ports['properties'][i] s['properties'] = properties if not 'default_value' in properties and only_inputs_with_default_values: break # fill in structure ret[k] = s if return_inputs_to_calculate_outputs: manifest_in_o = manifest['io']['inputs']['calculate_output'] fill_in(ports=manifest_in_o) if return_inputs_to_update_states: manifest_in_u = manifest['io']['inputs']['state_update'] fill_in(ports=manifest_in_u) if return_inputs_to_reset_states: manifest_in_r = manifest['io']['inputs']['reset'] fill_in(ports=manifest_in_r) return ret
97695df60c188ddf757159dc459debe939f6cb80
698,357
import torch


def get_diff_kernel_3x3():
    """Utility function that returns a 3x3 first-order difference (sobel-like) kernel"""
    return torch.tensor([
        [-0., 0., 0.],
        [-1., 0., 1.],
        [-0., 0., 0.],
    ])
6fdfe2837fc8bab120298aec4246e2a7f4f0378c
698,358
def hamming(g1, g2, cutoff=1):
    """
    Compare two genotypes and determine if they are within cutoff sequence
    differences of one another.

    Parameters
    ----------
    g1, g2 : strs
        two genotypes to compare (must be same length)
    cutoff : int
        max allowable sequence differences between them

    Returns
    -------
    neighbor : bool
        whether or not the genotypes are neighbors
    """
    if len(g1) != len(g2):
        err = "g1 and g2 must have the same length\n"
        raise ValueError(err)

    return sum([g1[i] != g2[i] for i in range(len(g1))]) <= cutoff
24a180c68545fc1cbd0887bf2bee38cbfe6034e0
698,359
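Usage on short genotype strings:

print(hamming("ACGT", "ACGA"))            # True -- 1 difference <= cutoff 1
print(hamming("ACGT", "TCGA"))            # False -- 2 differences
print(hamming("ACGT", "TCGA", cutoff=2))  # True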
def task_build():
    """Build the documentation.

    The documentation will be converted to HTML files to help double-check
    syntax and formatting on PyPI and on GitHub. Note that the HTML files
    will not be included in the distribution files.
    """
    return {
        'actions': [
            'rst2html.py CHANGES.rst CHANGES.html',
            'rst2html.py README.rst README.html',
        ],
        'verbosity': 2,
        'file_dep': ['CHANGES.rst', 'README.rst'],
        'targets': ['CHANGES.html', 'README.html'],
    }
f0d0d3d88abc326d78b3f7dcabc655df34d5eb7f
698,360
def __dtw_backtracking(steps, step_sizes_sigma, subseq, start=None):  # pragma: no cover
    """Backtrack optimal warping path.

    Uses the saved step sizes from the cost accumulation step to backtrack
    the index pairs for an optimal warping path.

    Parameters
    ----------
    steps : np.ndarray [shape=(N, M)]
        Step matrix, containing the indices of the used steps from the cost
        accumulation step.
    step_sizes_sigma : np.ndarray [shape=[n, 2]]
        Specifies allowed step sizes as used by the dtw.
    subseq : bool
        Enable subsequence DTW, e.g., for retrieval tasks.
    start : int
        Start column index for backtracking (only allowed for ``subseq=True``)

    Returns
    -------
    wp : list [shape=(N,)]
        Warping path with index pairs. Each list entry contains an index
        pair (n, m) as a tuple

    See Also
    --------
    dtw
    """
    if start is None:
        cur_idx = (steps.shape[0] - 1, steps.shape[1] - 1)
    else:
        cur_idx = (steps.shape[0] - 1, start)

    wp = []
    # Set starting point D(N, M) and append it to the path
    wp.append((cur_idx[0], cur_idx[1]))

    # Loop backwards.
    # Stop criteria:
    # Setting it to (0, 0) does not work for the subsequence dtw,
    # so we only ask to reach the first row of the matrix.
    while (subseq and cur_idx[0] > 0) or (not subseq and cur_idx != (0, 0)):
        cur_step_idx = steps[(cur_idx[0], cur_idx[1])]

        # save tuple with minimal acc. cost in path
        cur_idx = (
            cur_idx[0] - step_sizes_sigma[cur_step_idx][0],
            cur_idx[1] - step_sizes_sigma[cur_step_idx][1],
        )

        # If we run off the side of the cost matrix, break here
        if min(cur_idx) < 0:
            break

        # append to warping path
        wp.append((cur_idx[0], cur_idx[1]))

    return wp
e22a25f7d9e02aeb9413a7f9b39401f7628848e8
698,361
def isglobalelement(domains):
    """ Check whether all domains are negations."""
    for domain in domains.split(","):
        if domain and not domain.startswith("~"):
            return False
    return True
36fd185722c55ae6e3ecd4729bb873f122322c34
698,362
import numpy


def approximate_dependencies(lhs_set, rhs, df, accuracy, masks):
    """
    Checks whether the columns represented in lhs_set functionally determine
    the column rhs for the dataframe df.

    If lhs_set --> rhs, returns True. Otherwise returns False.

    In order to be a dependency:
    - the number of equivalence classes for tuples in columns in lhs_set is
      equal to the number of equivalence classes for tuples in columns in
      lhs_set+rhs
    - this holds in the data for at least accuracy % of rows
    - at least 15% of values are repeating (*to be added as custom argument*)
    """
    df_lhs_rhs = df.drop_duplicates(lhs_set + [rhs])
    df_lhs = df_lhs_rhs.drop_duplicates(lhs_set)
    # if df_lhs.shape[0] > df.shape[0] * rep_percent:
    #     return False
    limit = df.shape[0] * (1 - accuracy)
    if df_lhs_rhs.shape[0] - df_lhs.shape[0] > limit:
        return False

    # create new df that is the merge of df_one and df_two
    merged = df_lhs.merge(df_lhs_rhs, indicator=True, how='outer')
    # filter out the rows that were only on the right side (the rows that are
    # preventing the two dataframes from being equal)
    indicator = merged[merged['_merge'] == 'right_only']
    # find unique combinations of columns in lhs_set that characterize the
    # discrepancies (have 2+ different values in the rhs column)
    indicator = indicator.drop_duplicates(lhs_set)

    acc = 0
    for index, row in indicator.iterrows():
        mask = None
        for attr in lhs_set:
            m = masks.get_mask(attr, row[attr])
            if m is None:
                if df[attr].dtypes.name == 'datetime64[ns]':
                    m = df[attr] == row[attr]
                else:
                    m = df[attr].values == row[attr]
                masks.add_mask(attr, row[attr], m)
            if mask is None:
                mask = m
            else:
                mask = mask & m
        options = df[mask]
        _, unique_counts = numpy.unique(options[rhs].to_numpy(), return_counts=True)
        acc += unique_counts.sum() - unique_counts.max()
        if acc > limit:
            return False
    # idea: try using numpy arrays and taking intersections of sets for each column?
    return True
01d60ff073dca7bc6197dd9d405eba6372e9e33b
698,363
import torch


def extract_bboxes(mask):
    """Compute bounding boxes from masks.

    mask: [height, width]. Mask pixels are either 1 or 0.

    Returns: bbox array (y1, x1, y2, x2).
    """
    # Bounding box.
    horizontal_indicies = torch.where(torch.any(mask, dim=0))[0]
    vertical_indicies = torch.where(torch.any(mask, dim=1))[0]
    if horizontal_indicies.shape[0]:
        x1, x2 = horizontal_indicies[[0, -1]]
        y1, y2 = vertical_indicies[[0, -1]]
        # x2 and y2 should not be part of the box. Increment by 1.
        x2 += 1
        y2 += 1
    else:
        # No mask for this instance. Might happen due to
        # resizing or cropping. Set bbox to zeros
        x1, x2, y1, y2 = 0, 0, 0, 0
    boxes = torch.Tensor([x1, y1, x2, y2]).to(mask.device)
    return boxes
f634b6cfca16cf06da07f194c701920187f4d3e7
698,364
def strip_domain_address(ip_address):
    """Return the address or address/netmask from a route domain address.

    When an address is retrieved from the BIG-IP that has a route domain it
    contains a %<number> in it. We need to strip that out so we are just
    dealing with an IP address or IP address/mask.

    Examples:
        192.168.1.1%20 ==> 192.168.1.1
        192.168.1.1%20/24 ==> 192.168.1.1/24
    """
    mask_index = ip_address.find('/')
    if mask_index > 0:
        return ip_address[:mask_index].split('%')[0] + ip_address[mask_index:]
    else:
        return ip_address.split('%')[0]
adf566580249d00b660e108cabcf99892a44d32b
698,365
from datetime import datetime


def m_get_date_to_now_days(seq):
    """
    :param seq: date string
    :return: number of days between that date and now
    """
    try:
        seq = datetime.strptime(seq, "%Y-%m-%d")
    except ValueError:
        seq = datetime.strptime(seq, "%Y-%m-%d %H:%M:%S")
    seq = (datetime.now() - seq).days
    return seq
53b168c9d0e95d379b8639dea951367d55c5db4a
698,366
def metric_to_ips(d, min_depth, max_depth):
    """
    Args:
        d: metric depth in [min_depth, max_depth]
        min_depth: in meters
        max_depth: in meters
    Returns:
        depth normalized to [0, 1]
    """
    # d = d.clamp(min_depth, max_depth)
    return (max_depth * d - max_depth * min_depth) / ((max_depth - min_depth) * d)
20820265cf0500b47c308ee0730afc07c1d98906
698,367
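Boundary behavior of the mapping: min_depth goes to 0, max_depth to 1, with a nonlinear (inverse-depth-like) curve in between. The numbers here are arbitrary:

print(metric_to_ips(0.5, 0.5, 10.0))   # 0.0
print(metric_to_ips(10.0, 0.5, 10.0))  # 1.0
print(metric_to_ips(1.0, 0.5, 10.0))   # ~0.526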
from typing import List


def athlete_sort(k: int, arr: List[List[int]]) -> List[List[int]]:
    """
    >>> athlete_sort(1, [[10, 2, 5], [7, 1, 0], [9, 9, 9],
    ...                  [1, 23, 12], [6, 5, 9]])
    [[7, 1, 0], [10, 2, 5], [6, 5, 9], [9, 9, 9], [1, 23, 12]]
    """
    arr.sort(key=lambda x: x[k])
    return arr
1655882b7760a705afcbbeba55f5051eaf7d448f
698,368
def locProts(df, thresh=.75):
    """
    Removes entries with localization probability below threshold.

    @params
    @df :: dataframe to be filtered
    @thresh :: threshold of localization probability
    """
    if "Localization prob" not in df.columns:
        print("This dataframe has no 'Localization prob' column!")
        return True
    print(f"{df.shape[0]} entries in dataframe.")
    df = df[df["Localization prob"] >= thresh]
    print(f"{df.shape[0]} entries in dataframe with localization prob >= {thresh*100}%.")
    return df
c90f8f1e24a291dd2dd66293d3e74f2de1eb2314
698,369
from glob import glob
from numpy import sort
import six


def _ensure_mfdataset_filenames(fname):
    """Checks if grib or nemsio data

    Parameters
    ----------
    fname : string or list of strings
        Description of parameter `fname`.

    Returns
    -------
    type
        Description of returned object.
    """
    if isinstance(fname, six.string_types):
        names = sort(glob(fname))
    else:
        names = sort(fname)
    gribs = [True for i in names if 'grb2' in i or 'grib2' in i]
    grib = False
    if len(gribs) >= 1:
        grib = True
    return names, grib
1764cf93a766bfd646578bdc5d5cb83585072d4f
698,371
def get_meeting_aggregates(meeting_data):
    """
    Aggregates the attendance counts and lines said for users across a
    meeting category
    """
    meeting_aggregate = {}
    for meeting_name in meeting_data.keys():
        meeting_users = {}
        for meeting in meeting_data[meeting_name]:
            for user_tuple in meeting:
                if user_tuple[0] not in meeting_users.keys():
                    meeting_users[user_tuple[0]] = {
                        'attendance_count': 1,
                        'lines_said': int(user_tuple[1]),
                    }
                else:
                    meeting_users[user_tuple[0]]["attendance_count"] += 1
                    meeting_users[user_tuple[0]]["lines_said"] += int(user_tuple[1])
        meeting_aggregate[meeting_name] = meeting_users
    return meeting_aggregate
b135016b05fc20ae0fa9dfc40d11cc51ffc7a8d0
698,372
def list_to_group_count(input_list):
    """ List to item occurrences count dictionary """
    group_count = {}
    for input_item in input_list:
        if input_item in group_count:
            group_count[input_item] = group_count[input_item] + 1
        else:
            group_count[input_item] = 1
    return group_count
bc6714d2d0b8872e29089eb0d142517c4ef63154
698,373
def maior_numero_lista(lista_numeros, posicao_n):
    """
    Determines the largest element of a list, given the position posicao_n of
    an element in the list, by comparing it against the rest.
    """
    if posicao_n == 0:
        return lista_numeros[0]
    maior_elemento = maior_numero_lista(lista_numeros, posicao_n - 1)
    if lista_numeros[posicao_n] > maior_elemento:
        return lista_numeros[posicao_n]
    else:
        return maior_elemento
25ff12d0853972ada32b6d184e98fa6395ba18a7
698,374
def apply_to_audio(f, clip, *a, **k):
    """
    This decorator applies the function f to the clip, and also to the audio
    of the resulting clip if it has one.
    """
    newclip = f(clip, *a, **k)
    if hasattr(newclip, 'audio') and (newclip.audio is not None):
        newclip.audio = f(newclip.audio, *a, **k)
    return newclip
88cdc94e7c999b148b8e87e9f7715992d4598a9f
698,375
from typing import Any


def affix(orig: Any, ch: str) -> str:
    """Ensure a given character is prepended to some original value."""
    if isinstance(orig, (list, tuple,)):
        orig = '.'.join(map(str, orig))
    return '%s%s' % (ch, str(orig).lstrip(ch))
f5f01b7fa0e82944646dc755cfa0770cec8c72c9
698,376
import yaml


def get_project_id(dataset: str) -> str:
    """Returns the GCP project ID associated with the given dataset."""
    with open(f'../stack/Pulumi.{dataset}.yaml', encoding='utf-8') as f:
        return yaml.safe_load(f)['config']['gcp:project']
d951e8d2a6c96df9aa2dc15499c36f93123f8038
698,377
import re
import argparse


def __validate_address(address):
    """
    If address looks like a valid e-mail address, return it. Otherwise raise
    ArgumentTypeError.

    Args:
        address(string): email address to send to
    """
    if re.match(r'^([^@\s]+)@((?:[-a-z0-9]+\.)+[a-z]{2,})$', address):
        return address
    raise argparse.ArgumentTypeError('Invalid e-mail address: %s' % address)
ebc09945e737a7d1c7c85d34dd0bf1b01dcaa587
698,378
def score_state(num_answer, ans_one):
    """Takes in the player's answer, and ans_one (explained in question);
    checks if the player's choice is correct
    """
    # assert num_answer in [1, 2]
    # choice 1 is correct exactly when ans_one is True, choice 2 when it is False
    correct = (num_answer == 1) == bool(ans_one)
    if correct:
        print('Correct! choice ' + str(num_answer) + ' is real!')
    else:
        print('Incorrect!: choice ' + str(num_answer) + ' is fake!')
    return correct
123a0c5f40930fe5a6dc65220c39097a55120c19
698,379
def is_anagram_v3(s, t):
    """
    NOTE: THIS SOLUTION DOES NOT WORK FOR ALL CASES
    e.g: s = 'ac', t = 'bb', or aa vs bb
    """
    val = 0
    for i in range(len(s)):
        val ^= ord(s[i])
        print(f"val: {val}")
        val ^= ord(t[i])
        print(f"val: {val}")
    return val == 0
121c4fd1841d6372fd5a03af3ae6f74e3dfb7b25
698,380
import os


def get_path(path='.'):
    """
    :type path: str
    :param path: relative path from this file
    """
    return os.path.abspath(
        os.path.join(
            os.path.dirname(__file__),
            path,
        ),
    )
76f3074421d3241e1cc8235587696ea0ee6edd77
698,381
def arg_name2cli_name(arg_name: str) -> str:
    """
    Convert a snake_case argument into a --kabob-case argument.
    """
    return "--" + arg_name.lower().replace("_", "-")
541601ae07b09acdd2d33487517159cd3cbd6125
698,382
import os
import torch


def load_checkpoint(path, model, optimizer, scheduler, verbose=None):
    """Loads intermediate state of the training procedure from file.

    Parameters
    ----------
    path : str
        Path to saved parameters.
    model : nn.Module
        Pytorch model.
    optimizer : Pytorch Optimizer
        Model optimizer.
    scheduler : Pytorch LRScheduler
        Optimizer's learning rate scheduler.
    verbose : printing class, default -- None
        Class with method .print_string(string) to print string.

    Returns
    -------
    model : nn.Module
        Updated trained model.
    optimizer : Pytorch Optimizer
        Updated model optimizer.
    scheduler : Pytorch LRScheduler
        Updated optimizer's learning rate scheduler.
    iteration : int >= 0 [scalar]
        Number of the current iteration.
    lr : float > 0 [scalar]
        Updated optimizer's learning rate.
    """
    assert os.path.isfile(path)
    param_dict = torch.load(path, map_location='cpu')
    if model is None:
        model = param_dict['model_class']
    model.load_state_dict(param_dict['model'])
    optimizer.load_state_dict(param_dict['optimizer'])
    lr = param_dict['lr']
    if (param_dict['scheduler'] is None) or (scheduler is None):
        scheduler = None
    else:
        scheduler.load_state_dict(param_dict['scheduler'])
    iteration = param_dict['iter']
    if verbose is not None:
        verbose.print_string("Loaded checkpoint {} at iteration {}"
                             .format(path, iteration))
    return model, optimizer, scheduler, iteration, lr
6ada8093fccbfe9645cbbc91d25a388e15218bd4
698,383
def _label_check(label_string: str, search_label: str):
    """ Mask labels with Booleans that appear in row """
    for label in label_string.split(' '):
        if search_label == label:
            return True
    return False
084d23321133dfcd9d8f819e8209165d1cac2c15
698,384
def flip_nested_dict(nested_dict):
    """Flip nested dictionary inside out."""
    flipped = dict()
    for key, subdict in nested_dict.items():
        for k, v in subdict.items():
            flipped[k] = flipped.get(k, dict())
            flipped[k][key] = v
    return flipped
c1ddbe48a158ed4a96db3b33deb2f423afd304fe
698,386
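Inner and outer keys swap places:

scores = {'alice': {'math': 90, 'art': 80}, 'bob': {'math': 70}}
print(flip_nested_dict(scores))
# {'math': {'alice': 90, 'bob': 70}, 'art': {'alice': 80}}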
def get_users(client):
    """Return the collection containing users metadata.

    Parameters
    ----------
    client --> pymongo.mongo_client.MongoClient class, MongoDB client

    Returns
    -------
    metadatafs['users'] --> pymongo.collection.Collection, reference to
        collection users
    """
    # get the MongoDB database "metadatafs" and its collection called "users"
    metadatafs = client['metadatafs']
    return metadatafs['users']
bd2f0b60781e27a23f015f7a7573821a0e2b8ef0
698,387
def read_raw_values(datasize, datastore, test):
    """read the raw data for a given test"""
    in_file_name = str(datasize) + '/' + datastore + '_' + test + '.csv'
    with open(in_file_name) as in_file:
        return [int(line.strip()) for line in in_file]
1d724b873ee0ccf50d2027df79a2d32472e7942a
698,388
def TrimMu(Stot, oldmu, thresh, norm=True):
    """
    Remove mu measurements below the threshold and adjust mu measurements
    above the threshold to unity if we seek P(mu > thresh).
    """
    trim = oldmu > thresh
    if norm:
        oldmu[:] = 0
        oldmu[trim] = 1
    else:
        oldmu = oldmu[trim]
        Stot = Stot[trim]
    return Stot, oldmu
2a986599a6116b104b839d8b664f0caff57b1aee
698,389
import numpy


def eval_h(x, lagrange, obj_factor, out):
    """Hessian of the Lagrangian L = obj_factor * f + <lagrange, g>,
    where <.,.> denotes the inner product.
    """
    out[-1] = 0.0
    out[:-2:2] = obj_factor * (2.0 + 400.0 * (3.0 * x[:-1] * x[:-1] - x[1:]))
    out[:-4:2] -= lagrange * (2.0 + x[:-2]) * numpy.exp(x[:-2] - x[1:-1])
    out[2::2] += obj_factor * 200.0
    out[2:-2:2] += lagrange * (
        18.0 * x[1:-1]
        - 2.0 * numpy.sin(x[1:-1] - x[2:]) * numpy.sin(x[1:-1] + x[2:])
        + 2.0 * numpy.cos(x[1:-1] - x[2:]) * numpy.cos(x[1:-1] + x[2:])
        - x[:-2] * numpy.exp(x[:-2] - x[1:-1])
    )
    out[4::2] += lagrange * (
        -2.0 * numpy.sin(x[1:-1] - x[2:]) * numpy.sin(x[1:-1] + x[2:])
        - 2.0 * numpy.cos(x[1:-1] - x[2:]) * numpy.cos(x[1:-1] + x[2:])
    )
    out[1::2] = obj_factor * (-400.0 * x[:-1])
    out[1:-2:2] += lagrange * (1.0 + x[:-2]) * numpy.exp(x[:-2] - x[1:-1])
    return out
5864ec66b160a98a75c62cdafc859a87e7e7795f
698,390
import collections


def mkparts(sequence, indices=None):
    """
    Make some parts from sequence by indices

    :param sequence: indexable object
    :param indices: index list
    :return: (seq_part1, seq_part2, ...)
    """
    indices = indices or [1]
    result_list = collections.deque()
    start = 0
    seq_len = len(sequence)
    for end in indices:
        if end < 0:
            end = seq_len + end
        if end < start:
            raise ValueError("end index is less than start index")
        result_list.append(sequence[start:end])
        start = end
    result_list.append(sequence[start:])
    return tuple(result_list)
710841ca3861a33382b6243579eec07f866eb965
698,391
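Splitting at the given cut points, with negative indices counted from the end:

print(mkparts([1, 2, 3, 4, 5], [2, 4]))  # ([1, 2], [3, 4], [5])
print(mkparts("abcdef", [-2]))           # ('abcd', 'ef')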
def log_minor_tick_formatter(y: int, pos: float) -> str:
    """Provide reasonable minor tick formatting for a log y axis.

    Provides ticks on the 2, 3, and 5 for every decade.

    Args:
        y: Tick value.
        pos: Tick position.

    Returns:
        Formatted label.
    """
    ret_val = ""
    # The positions of major ticks appear to be skipped, so the numbering
    # starts at 2. Thus, to label the 2, 3, and 5 ticks, we need to return
    # the label for the 0th, 1st, and 3rd labels.
    values_to_plot = [0, 1, 3]
    # The values 2 - 9 are available for the minor ticks, so we take the
    # position mod 8 to ensure that we are repeating the same labels over
    # multiple decades.
    if (pos % 8) in values_to_plot:
        # "g" auto formats to a reasonable presentation for most numbers.
        ret_val = f"{y:g}"
    return ret_val
a378bd94ceab8698a45403544a728c6c29b402c9
698,392
def s_and_were_or_just_was(parameter):
    """a utility function for grammatical correctness [and DRY, i.e. Don't
    Repeat Yourself]"""
    return ("s were" if parameter != 1 else " was")
e4d1c03565e89b9b8ecba0137e1b0b9e3bcafa40
698,393
from typing import Union
from pathlib import Path
from typing import Any
from typing import TextIO


def fopen(filepath: Union[str, Path], *args: Any, **kwargs: Any) -> TextIO:
    """Open file.

    Args:
        filepath (Union[str, Path]): filepath

    Returns:
        TextIOWrapper: buffered text stream.
    """
    if isinstance(filepath, str):
        return open(filepath, *args, **kwargs)
    elif isinstance(filepath, Path):
        return filepath.open(*args, **kwargs)
    raise TypeError("filepath must be a str or pathlib.Path")
fcf4ccf53c99a390ceaa9b27f86a7e3f4cff3af1
698,394
def merge(line):
    """
    Helper function that merges a single row or column in 2048.

    line: list like [8, 0, 16, 0, 16, 8]
    returns: merged list [8, 32, 8, 0, 0, 0]

    Loop over each position and index in the list:
    if two equal numbers are separated by 0s, double the number at the
    earlier index and replace the number at the current position with 0;
    if a 0 precedes a number, swap them forward until the number hits a
    nonzero entry.
    """
    prev_index = zero = zero_index = 0
    used_index = []
    for index, current_num in enumerate(line):
        # if the current number equals the last nonzero number, neither is 0,
        # the two indices differ, and the earlier one was not merged yet
        if (current_num == line[prev_index] and current_num != 0
                and prev_index != index and prev_index not in used_index):
            used_index.append(prev_index)
            line[prev_index] += current_num
            current_num = line[index] = 0
            zero = 1
        # shift nonzeros forward and track zero continuity
        if current_num == 0:
            # a 0: set the continuous-zero flag and remember the latest zero index
            zero = 1
            zero_index = index
        elif current_num != 0 and zero == 1:
            # a nonzero while the zero flag is on
            zero = 0
            while line[zero_index] == 0:
                # shift the number forward until it hits a nonzero
                zero_index -= 1
                if zero_index < 0:
                    break
            zero_index += 1
            line[zero_index] = current_num
            line[index] = 0
            # the current position is now 0, so set the zero flag
            zero = 1
            prev_index = zero_index
        else:
            # a nonzero with the zero flag off
            prev_index = index
    del prev_index, zero, zero_index, used_index
    return line
ad43cd74f991490a2fd1ce18a68ae360534c9fd7
698,396
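The merge keeps the row length, padding with zeros on the right (the docstring example, run end to end):

print(merge([8, 0, 16, 0, 16, 8]))  # [8, 32, 8, 0, 0, 0]
print(merge([2, 2, 2, 2]))          # [4, 4, 0, 0]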
import signal


def signal_number_to_name(signum):
    """
    Given an OS signal number, returns a signal name. If the signal number is
    unknown, returns ``'UNKNOWN'``.
    """
    # Since these numbers and names are platform specific, we use the
    # builtin signal module and build a reverse mapping.
    signal_to_name_map = dict((k, v) for v, k in signal.__dict__.items()
                              if v.startswith('SIG'))
    return signal_to_name_map.get(signum, 'UNKNOWN')
cc17db79f6e47b25e9ec5b9c33fb8d7bf7c0ad26
698,397
def _toledo8217StatusParse(status):
    """ Parse a scale's status, returning a `(weight, weight_info)` pair. """
    weight, weight_info = None, None
    stat = status[status.index(b'?') + 1]
    if stat == 0:
        weight_info = 'ok'
    else:
        weight_info = []
        if stat & 1:
            weight_info.append('moving')
        if stat & 1 << 1:
            weight_info.append('over_capacity')
        if stat & 1 << 2:
            weight_info.append('negative')
            weight = 0.0
        if stat & 1 << 3:
            weight_info.append('outside_zero_capture_range')
        if stat & 1 << 4:
            weight_info.append('center_of_zero')
        if stat & 1 << 5:
            weight_info.append('net_weight')
    return weight, weight_info
83d8132398e740c71bdf3ac6f65f323f8c8ad65b
698,398
import os
import glob


def list_all_files(src_directory):
    """Make a list of all the files in the given directory. Returns list."""
    cwd = os.getcwd()
    os.chdir(src_directory)
    files = []
    for file in glob.glob("*"):
        files.append(file)
    os.chdir(cwd)
    return files
1e2653dc3f7b8a0a093281004ac17d5a25ced744
698,400
def generate_bm(df, experiment_keys):
    """
    Given the flattened DataFrame containing all the experiments, extract the
    experiments corresponding to the dictionary experiment_keys.

    :param df: pandas DataFrame containing the flattened df
    :param experiment_keys: dictionary with all the keys.
    :returns df_copy: the reduced DataFrame.
    """
    df_copy = df.copy()
    for (k_, v_) in experiment_keys.items():
        df_copy = df_copy[df_copy[k_] == v_]
    return df_copy
43f91e734419c76157920fe486db48e1c17ebdc5
698,401
def labeledFcn(fcn, paramNames):
    """Wraps a function with its parameter names (in-place).

    :type fcn: callable Python object
    :param fcn: function to wrap

    :type paramNames: list of strings
    :param paramNames: parameters to attach to the function

    :rtype: callable Python object
    :return: the original function, modified in-place by adding
        ``paramNames`` as an attribute
    """
    fcn.paramNames = paramNames
    return fcn
e8885cf0cf4db4e84069ec4f0164748b0ab52ae3
698,402
import os


def get_geotiff_size_bytes(tif_path):
    """Reads file size in bytes from GeoTIFF at tif_path

    Args:
        tif_path (str): Path to local GeoTIFF file

    Returns:
        Size of the GeoTIFF file in bytes
    """
    return os.path.getsize(tif_path)
0095eec3091cd44cae58eecd3d9a5e879d934349
698,403
def get_min_max(input_list: list) -> tuple:
    """
    Return a tuple(min, max) out of a list of unsorted integers.

    Args:
        input_list(list): list of integers containing one or more integers
    """
    print("->get_min_max: input_list= " + str(input_list))
    if len(input_list) == 0:
        return None, None
    if len(input_list) == 1:
        return input_list[0], input_list[0]
    if input_list[0] is None or input_list[1] is None:
        print("input_list must not contain None values!")
        return None, None

    if input_list[0] < input_list[1]:
        min_value = input_list[0]
        max_value = input_list[1]
    else:
        min_value = input_list[1]
        max_value = input_list[0]
    # print("init: min_value="+str(min_value)+", max_value= "+str(max_value))

    for i in range(2, len(input_list)):
        current_value = input_list[i]
        if current_value is None:
            print("input_list must not contain None values!")
            return None, None
        # print("i="+str(i)+", current_value="+str(current_value))
        if current_value < min_value:
            min_value = current_value
        if current_value > max_value:
            max_value = current_value
        # print("min_value=" + str(min_value) + ", max_value= " + str(max_value))

    result = min_value, max_value
    print("result= " + str(result))
    return result
26db872de57ed6c94825e089607a4bb81390b423
698,404
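Single-pass min/max in action (the function also prints its own trace):

print(get_min_max([4, 9, -3, 7]))  # (-3, 9)
print(get_min_max([5]))            # (5, 5)
print(get_min_max([]))             # (None, None)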
import torch


def value_td_residuals(
    rewards: torch.Tensor,
    values: torch.Tensor,
    next_values: torch.Tensor,
    discount: float,
) -> torch.Tensor:
    """Compute TD residual of state value function.

    All tensors must be one dimensional. This is valid only for one
    trajectory.

    Parameters
    ----------
    rewards:
        The one step reward.
    values:
        The estimated values at the current step. Note that if the last step
        is terminal, the associated next value should be zero.
    next_values:
        The estimated values at the next step.
    discount:
        The discount rate.
    """
    return rewards + (discount * next_values) - values
723ed0f0ce651cc7da5ad9c7950972d104198888
698,405
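A one-trajectory sketch with made-up numbers, checking r + discount * v' - v elementwise; the terminal step carries a zero next value:

import torch

rewards = torch.tensor([1.0, 0.0, 2.0])
values = torch.tensor([0.5, 0.4, 0.3])
next_values = torch.tensor([0.4, 0.3, 0.0])  # last step is terminal
print(value_td_residuals(rewards, values, next_values, discount=0.99))
# tensor([ 0.8960, -0.1030,  1.7000])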
import traceback
import sys


def exception_format():
    """Convert exception info into a string suitable for display."""
    return "".join(traceback.format_exception(
        sys.exc_info()[0],
        sys.exc_info()[1],
        sys.exc_info()[2]
    ))
4bb7f74140bf5e6023850b2ad27809ded1c6fc09
698,406