Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
def _to_bool(value):
    """Simplified version of the bool filter.

    Avoids having a dependency on Ansible in unit tests.
    """
    if value == 'yes':
        return True
    if value == 'no':
        return False
    return bool(value)
6e0da8b211ce638e19764a966c417841fafe2a5b
13,102
import math


def tan(x):
    """tan(x)

    Return the tangent of x."""
    x = complex(x, 0)
    sr = math.sin(x.real)
    cr = math.cos(x.real)
    shi = math.sinh(x.imag)
    chi = math.cosh(x.imag)
    rs = sr * chi
    is_ = cr * shi
    rc = cr * chi
    ic = -sr * shi
    d = rc * rc + ic * ic
    real = (rs * rc + is_ * ic) / d
    imag = (is_ * rc - rs * ic) / d
    return complex(real, imag)
f2aedcff8f7fbce85169dab263163aa461367a53
13,103
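A quick sanity check for the complex tan snippet above (illustrative, not part of the dataset): for a real argument the hyperbolic terms vanish, so the result should match the standard library.

import cmath
assert abs(tan(1.0) - cmath.tan(1.0)) < 1e-12  # real input agrees with cmath.tan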
import random


def get_best_move(board, scores):
    """Function that computes the best move for the machine given the
    actual board state."""
    max_score = max(scores[row][col] for row, col in board.get_empty_squares())
    candidates = [(row, col) for row, col in board.get_empty_squares()
                  if scores[row][col] == max_score]
    return random.choice(candidates)
afabd281ad1547b118cab10ccc912bdf4956f9c4
13,104
def vapour_pressure_deficit(es, ea):
    """VPD - Vapour Pressure Deficit

    :param es: saturation vapour pressure
    :param ea: actual vapour pressure, calculated by vp_from_rhmin_rhmax()
    :return: the vapour pressure deficit, es - ea
    """
    return es - ea
cb1d22a236081b3de3f080f8f2e2acb64543620f
13,105
import pathlib
import json


def load_json(filepath):
    """Loads the metrics in a dictionary.

    :param filepath: Path to the JSON file (str or pathlib.Path); the
        ".json" suffix is appended if missing
    :return: A dict with the metrics
    """
    if isinstance(filepath, pathlib.Path):
        filepath = str(filepath)
    if ".json" not in filepath:
        filepath = f"{filepath}.json"
    with open(filepath, "rb") as json_file:
        metrics_dict = json.loads(json_file.read())
    return metrics_dict
1dd8ff7822228bc0173a66df626902a723338d55
13,109
from typing import Callable
import random


def choose(option1: Callable = lambda: None, option2: Callable = lambda: None):
    """Randomly run either option 1 or option 2.

    :param option1: a possible function to run
    :param option2: another possible function to run
    :return: the result of the function
    """
    if random.getrandbits(1):
        return option1()
    else:
        return option2()
0f0ecbc945de9f6d5698cd86103265eccf5708e6
13,110
def transform_response_to_context_format(data: dict, keys: list) -> dict:
    """Transform API response data to suitable XSOAR context data.

    Remove the 'x-ms' prefix and replace '-' with '_' for more readable and
    conventional variable names.

    Args:
        data (dict): Data to exchange.
        keys (list): Keys to filter.

    Returns:
        dict: Processed data.
    """
    return {key.replace('x-ms-', '').replace('-', '_').lower(): value
            for key, value in data.items() if key in keys}
34f1a613654deb71581bcd33757b8741840ae44f
13,111
def per_field_value(values, fields):
    """This normalises all patches relative to their field.

    'fields' is a map of field numbers to a list of patches in that field.
    """
    field_list = list(fields.values())
    # We calculate the maximum and minimum values for each field.
    maxs = [max(values[index][patch] for index in values
                for patch in field) for field in field_list]
    mins = [min(values[index][patch] for index in values
                for patch in field) for field in field_list]
    new_values = {}
    for field_id, field in enumerate(field_list):
        f_max = maxs[field_id]
        f_min = mins[field_id]
        for index in values:
            if index not in new_values:
                new_values[index] = {}
            for patch in field:
                try:
                    scaled_value = float(values[index][patch] - f_min) / \
                        (f_max - f_min)
                except ZeroDivisionError:
                    scaled_value = 0
                new_values[index][patch] = scaled_value
    return new_values
b51e344463d4d75f21ab8fc3a58d6201249fc2dc
13,112
import torch


def one_hot(y, K, smooth_eps=0):  # pylint: disable=invalid-name
    """One-hot encodes a tensor with optional label smoothing.

    Args:
        y: A tensor containing the ground-truth labels of shape (N,), i.e.
            one label for each element in the batch.
        K: The number of classes.
        smooth_eps: Label smoothing factor in [0, 1] range.

    Returns:
        A one-hot encoded tensor.
    """
    assert 0 <= smooth_eps <= 1
    assert y.ndim == 1, "Label tensor must be rank 1."
    y_hot = torch.eye(K)[y] * (1 - smooth_eps) + (smooth_eps / (K - 1))
    return y_hot.to(y.device)
ee47f9c778d875834c49c098ded4936edb104887
13,113
import re


def wrap_with_span(string, arg):
    """Wraps all instances of a string with a span element."""
    words = arg.split(' ')
    for word in words:
        if word[-1].lower() == 's':
            word = word[:-1]
        pattern = re.compile(r'\b({0}[\w\d]*)\b'.format(word), flags=re.I)
        for match in re.findall(pattern, string):
            string = re.sub(r'{0}'.format(match),
                            '<span>{0}</span>'.format(match), string)
            break
    return string.replace('&amp;#x', '&#x')
9390f0f12d673c9a809760c1e7ff614d4fc079ef
13,114
def import_name(modulename, name=None):
    """Import identifier ``name`` from module ``modulename``.

    If ``name`` is omitted, ``modulename`` must contain the name after the
    module path, delimited by a colon.

    Parameters:
        modulename (str): Fully qualified module name, e.g. ``x.y.z``.
        name (str): Name to import from ``modulename``.

    Returns:
        object: Requested object.
    """
    if name is None:
        modulename, name = modulename.rsplit(':', 1)
    module = __import__(modulename, globals(), {}, [name])
    return getattr(module, name)
a320063e878db935f8a2409c7487617e2c9f1802
13,117
import argparse


def get_opts():
    """Parse and return the command-line options (server, port, hostname)."""
    parser = argparse.ArgumentParser(
        description='Get certificate information from server and validate it')
    parser.add_argument('--server', '-s', help='Server to connect to')
    parser.add_argument(
        '--port', '-p', help='Port to connect to', default=443, type=int)
    parser.add_argument('hostname', help='Certificate hostname')
    return parser.parse_args()
6ac6b243d8757828339f9ed0d7e9c59059c659c2
13,118
def sort_012(arr):
    """Given an input array consisting of only 0, 1, and 2, sort the array
    in a single traversal.

    Args:
        arr(list): List to be sorted
    """
    if arr is None:
        return arr
    zero_index = 0
    current_index = 0
    two_index = len(arr) - 1
    while current_index < len(arr) and current_index <= two_index:
        if arr[current_index] == 0:
            arr[current_index] = arr[zero_index]
            arr[zero_index] = 0
            zero_index += 1
            current_index += 1
        elif arr[current_index] == 2:
            arr[current_index] = arr[two_index]
            arr[two_index] = 2
            two_index -= 1
        else:
            current_index += 1
    return arr
d1fa4ee67c9d6b62928b5ea1e2fc1daf011db29d
13,119
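A usage sketch for the sort_012 snippet above with illustrative values; the single pass moves 0s to the front and 2s to the back (Dutch national flag):

assert sort_012([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]
assert sort_012([]) == []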
def stop_process(client, pid):
    """Stop the process with the given pid over SSH."""
    stop_result = ""
    command = 'kill -9 "%s" '
    # Execute the bash command
    stdin, stdout, stderr = client.exec_command(command % pid)
    kill_status = stdout.read().decode()
    error = stderr.read().decode()
    if error or kill_status:
        error = error.split(':')[3].strip()
        stop_result = "Stop failed: " + str(error)
    else:
        stop_result = "Stop succeeded"
    return stop_result
454bb8763017119efe360cb5f7c07b9a353ba345
13,120
def calculate():
    """Perform a super important calculation."""
    return 12
b6aa643b9f6e24f579fdc273c2dec512692cdc4b
13,123
def N_(msg):
    """Designate a string to be found by gettext but not to be translated."""
    return msg
e82c1f4219e53be05e863bfd636bec932ddd7f5c
13,125
import json


def _get_needed_packages(json_file, test=None):
    """Returns a dict with needed packages based on a JSON file.

    If a test is specified it will return the dict just for that test.
    """
    needed_packages = {}
    with open(json_file) as f:
        test_packages = json.load(f)
    for key, value in test_packages.items():
        needed_packages[key] = value
    if test:
        if test in needed_packages:
            needed_packages = needed_packages[test]
        else:
            needed_packages = {}
    return needed_packages
b929e57beda05372b03936598db275e87a318962
13,126
def get_factory_log_recipients(entry):
    """Read the log recipients specified by the Factory in its configuration.

    Args:
        entry: dict-like object representing the entry configuration

    Returns:
        list: list containing the URLs of the log servers, empty if none present
    """
    entry_attrs = entry.get_child_list("attrs")
    for attr in entry_attrs:
        if attr["name"] == "LOG_RECIPIENTS_FACTORY":
            return attr["value"].split()
    return []
f3e776ea9b8102247b5c7e817762523467d9f953
13,129
import csv


def get_conditions_bids(infile):
    """Read onsets, durations and amplitudes from a 3-column format file
    (e.g. events.tsv in BIDS format)."""
    spec = []
    if infile.endswith('.tsv'):
        # read tsv file
        with open(infile) as f:
            reader = csv.reader(f, delimiter='\t')
            rows = [row for row in reader]
        header = rows[0]
        # skip the header row when collecting trial types
        trial_types = set(row[header.index('trial_type')] for row in rows[1:])
        # create a dict for each trial type
        for event in trial_types:
            event_dict = {
                'trial_type': event,
                'onsets': [float(row[header.index('onset')])
                           for row in rows[1:]
                           if row[header.index('trial_type')] == event],
                'durations': [float(row[header.index('duration')])
                              for row in rows[1:]
                              if row[header.index('trial_type')] == event],
            }
            spec.append(event_dict)
    return spec
3bd0546115fa635abbcc7829911fdb476ad1c33e
13,131
def preprocess(raw):
    """Basic text formatting, e.g. BOM at start of file."""
    ## 1. Remove byte order marks if necessary
    if raw[0] == '\ufeff':
        raw = raw[1:]
    # if raw[0] == '\xef':
    #     raw = raw[1:]
    # if raw[0] == '\xbb':
    #     raw = raw[1:]
    # if raw[0] == '\xbf':
    #     raw = raw[1:]
    return raw
dfeb75152196ff2ab751ee206aab03eed1f28502
13,133
def calc_num_pixels(num_pixels, stride):
    """Converts the current number of pixels to the number there will be
    given a specific stride."""
    return 1 + (num_pixels - 1) // stride
b6ae056339913c496017251709381c19f551a074
13,134
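A worked check of the stride arithmetic above (illustrative values): with 224 input pixels and stride 2, 1 + (224 - 1) // 2 = 112.

assert calc_num_pixels(224, 2) == 112
assert calc_num_pixels(7, 3) == 3  # 1 + 6 // 3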
def readme():
    """Returns the contents of the README without the header image."""
    header = '======\nGimbal\n======\n'
    with open('README.rst', 'r') as f:
        f.readline()
        return header + f.read()
892100a63287917a35850258c677ee53ee2883a0
13,137
import os


def load_images_map(images_dir):
    """From all face images, produce an easy lookup table.

    Format: {frame_index1: set(bbs_tuple1, bbs_tuple2, etc...)}
    """
    _, _, files = next(os.walk(images_dir))
    # facerec image file format: <movie_id>:<frame_i>:x1_y1_x2_y2.jpeg
    image_map = {}
    for name in files:
        name, ext = os.path.splitext(os.path.basename(name))
        if ext != ".jpeg":
            continue
        _, frame_str, box_str = name.split(":")
        frame_i = int(frame_str)
        bbox = tuple([int(p) for p in box_str.split("_")])
        if frame_i not in image_map:
            image_map[frame_i] = set()
        image_map[frame_i].add(bbox)
    return image_map
691a5b3a03af4dcd14ce08ba33326b6d33d1f927
13,140
def rewrite_arcs(label_map, nfa):
    """Rewrite the label arcs in an NFA according to the input remapping."""
    states = [[(label_map[label], tostate) for (label, tostate) in arcs]
              for arcs in nfa[2]]
    return (nfa[0], nfa[1], states, nfa[3], nfa[4])
2bd9911a5c65ce7711848746614a3a6ceb37f8d2
13,141
import copy


def merge_params(base_params, partial_params=None):
    """Merge a partial change to the base configuration.

    Parameters
    ----------
    base_params
        The base parameters
    partial_params
        The partial parameters

    Returns
    -------
    final_params
        The final parameters
    """
    if partial_params is None:
        return base_params
    elif base_params is None:
        return partial_params
    else:
        if not isinstance(partial_params, dict):
            return partial_params
        assert isinstance(base_params, dict)
        final_params = copy.deepcopy(base_params)
        for key in partial_params:
            if key in base_params:
                final_params[key] = merge_params(base_params[key],
                                                 partial_params[key])
            else:
                final_params[key] = partial_params[key]
        return final_params
4c52d492358106b7c5e6df0c1e099d45043a7935
13,142
def create_dicts_same_nodes(my_set, neighbors_dict, node, dict_out, dict_in):
    """A function to create useful dictionaries to represent connections
    between nodes that have the same type, i.e. between nodes that are in
    the embedding and between nodes that aren't in the embedding. It depends
    on the input.

    :param my_set: Set of the nodes that aren't currently in the embedding
        OR set of the nodes that are currently in the embedding
    :param neighbors_dict: Dictionary of all nodes and neighbors (both
        incoming and outgoing)
    :param node: Current node
    :param dict_out: explained below
    :param dict_in: explained below
    :return: There are 4 possibilities (2 versions, 2 to every version):
        A) 1. dict_node_node_out: key == nodes not in embedding, value ==
              set of outgoing nodes not in embedding (i.e. there is a
              directed edge (i, j) when i is the key node and j isn't in
              the embedding)
           2. dict_node_node_in: key == nodes not in embedding, value ==
              set of incoming nodes not in embedding (i.e. there is a
              directed edge (j, i) when i is the key node and j isn't in
              the embedding)
        B) 1. dict_enode_enode_out: key == nodes in embedding, value == set
              of outgoing nodes in embedding (i.e. there is a directed edge
              (i, j) when i is the key node and j is in the embedding)
           2. dict_enode_enode_in: key == nodes in embedding, value == set
              of incoming nodes in embedding (i.e. there is a directed edge
              (j, i) when i is the key node and j is in the embedding)
    """
    set1 = neighbors_dict[node].intersection(my_set)
    count_1 = 0
    count_2 = 0
    if len(set1) > 0:
        count_1 += 1
        dict_out.update({node: set1})
        neigh = list(set1)
        for j in range(len(neigh)):
            if dict_in.get(neigh[j]) is None:
                dict_in.update({neigh[j]: set([node])})
            else:
                dict_in[neigh[j]].update(set([node]))
    else:
        count_2 += 1
    return dict_out, dict_in
c4ef94964f2944dcbddd79f76182a4bcc64633e7
13,143
def flatten(list_of_lists):
    """Single level flattening of a list."""
    return sum((list(sublist) for sublist in list_of_lists), [])
d7a2b9b75a1bd920f50d78cc725f8e551d6bb2f5
13,144
def maximum() -> int:
    """Returns 9."""
    return 9
dfd9a240bdaf985f89ca1b90c2b6e01f5426b2b0
13,146
def config():
    """Configuration file for edith usage."""
    config = {
        "edith": {  # don't change
            "version": 1.0  # don't change
        },
        "config": {  # change this
            "host": "Raspberry Pi Zero WH",  # change host to machine running program
            "online server": {  # online server configuration
                "host": "false",  # change to true or false according to needs
                "port": 8080  # change port
            },
            "go commands": "true"  # change (true or false) according to needs.
        }
    }
    return config
3c4adf4407311428be2c97fdc61dd636de63d760
13,147
import os


def _has_extension(file_name, extension):
    """Check if a file has an extension."""
    return os.path.splitext(file_name)[1] == extension
0b8a0979bd3aeaa65509c3b817ca48507775ac4d
13,148
def _is_schar(self):
    """Test whether this is a parameter family (Schar)."""
    return len(self.sch_par) == 1
8d1229a45fab73a9d7f1560806c6e3497bbb1bcf
13,149
def read_format(info):
    """Take an info dict and return it in a readable format."""
    info_name = info["name"]
    info_desc = info["description"]
    info_coun = info["country"]
    return f"{info_name}, a {info_desc}, from {info_coun}"
1804b25b8d1d01b2525887e9f4ba75d031cdf515
13,154
def score_function(nom_cosine, w):
    """The final score is the normalized cosine distance times the weight term.

    :param nom_cosine: normalized cosine distance
    :param w: weight term
    :return:
    """
    return nom_cosine * w
99468bca131a42148b0bf012f5f3c61215716b57
13,155
def load_motion_masks(motion_root):
    """Load motion masks from disk.

    Args:
        motion_root (Path): Points to a directory which contains a
            subdirectory for each sequence, which in turn contains a .png
            for each frame in the sequence.

    Returns:
        motion_masks (dict): Map sequence to dict mapping frame name to
            motion mask path.
    """
    motion_mask_paths = {}
    for sequence_path in motion_root.iterdir():
        if not sequence_path.is_dir():
            continue
        sequence = sequence_path.stem
        motion_mask_paths[sequence] = {}
        for motion_path in sequence_path.glob('{}*.png'.format(sequence)):
            # Pavel's ICCV 2017 method outputs an extra set of soft masks that
            # start with 'raw_' or 'input_'; ignore them by starting the glob
            # with the sequence name.
            if (motion_path.stem.startswith('raw_')
                    or motion_path.stem.startswith('input_')):
                continue
            motion_mask_paths[sequence][motion_path.stem] = motion_path
    return motion_mask_paths
babe7c83b7f2c7a1cade2d4fcb3de8461d88ba00
13,156
def parse_genetic_models(models_info, case_id):
    """Parse the genetic models entry of a vcf.

    Args:
        models_info(str): The raw vcf information
        case_id(str)

    Returns:
        genetic_models(list)
    """
    genetic_models = []
    if models_info:
        for family_info in models_info.split(","):
            splitted_info = family_info.split(":")
            if splitted_info[0] == case_id:
                genetic_models = splitted_info[1].split("|")
    return genetic_models
9d8d94d9008e2f287a875aa9bd15330c82bbf8b5
13,157
def read_labels(labels_file):
    """Get the labels from a names file."""
    with open(labels_file, 'r') as f:
        lines = f.readlines()
    # strip newlines so that blank lines are actually filtered out
    return [lab.strip() for lab in lines if len(lab.strip()) > 0]
a1b4ce5c0b5c613db5ec0d7d619910210d88106e
13,158
def welcome():
    """List all available api routes."""
    return (
        f"Welcome to Climate App<br/>"
        f"Available Routes:<br/>"
        f"Precipitation Data: /api/v1.0/precipitation<br/>"
        f"Stations List: /api/v1.0/stations<br/>"
        f"Temperature Observation for the most active station: /api/v1.0/tobs<br/>"
        f"Enter a Starting Date: /api/v1.0/yyyy-mm-dd to get the TMIN, TAVG and TMAX "
        f"for all dates greater and equal to the Starting Date<br/>"
        f"Enter a Starting and Ending Date: /api/v1.0/yyyy-mm-dd/yyyy-mm-dd to get "
        f"the TMIN, TAVG and TMAX for the dates between the Start and End dates inclusives"
    )
3fead56e744b1399b20ab24da4d2c1c9b901379e
13,159
from bs4 import BeautifulSoup


def get_string_from_tag(row_with_tag):
    """Given a row with a tag, return the string from it.

    Ex. Given <a href="http://www.example.com/page/index.html">Example Page</a>,
    return the string "Example Page".
    """
    row = BeautifulSoup(str(row_with_tag), 'html.parser')
    return u''.join([text for text in row.findAll(text=True)])
7c154fa4752ae59a2a33c45aaded394db2cb42f3
13,160
import re


def slug_sub(match):
    """Assigns id-less headers a slug that is derived from their titles.

    Slugs are generated by lower-casing the titles, stripping all
    punctuation, and converting spaces to hyphens (-).
    """
    level = match.group(1)
    title = match.group(2)
    slug = title.lower()
    slug = re.sub(r'<.+?>|[^\w-]', ' ', slug)
    slug = re.sub(r'[ \t]+', ' ', slug).strip()
    slug = slug.replace(' ', '-')
    if slug:
        return '<{0} id="{1}">{2}</{0}>'.format(level, slug, title)
    return match.group(0)
58c40e4ca23bd76a0e25d746aeb4ccdbed6640d7
13,162
def validateIsNotEmpty(data: str, fieldName: str):
    """Validate that data is not empty."""
    if data == '' or data is None:
        return "{} must not be empty".format(fieldName)
    return None
26bec771bb881e32ded022acc57e62b81b2991f9
13,163
def get_message_after(response, index):
    """Returns next message of search response after index or None."""
    try:
        return response[index + 1].object
    except IndexError:
        return None
0208fd1f4397e77636df11b62054e86eb84fb682
13,164
import time
import json


def result_running_callback(request, context):  # pylint: disable=unused-argument
    """Callback function returning 'running' JSON."""
    job_result_running = {
        'status': 'running',
    }
    time.sleep(1)
    return json.dumps(job_result_running)
cb38b44b86cdcc96706dbf40a764d8d2059de449
13,165
import os
import zipfile
import xml.etree.ElementTree as ET


def get_files_list(folderlist):
    """Gets a list of files from the arg folder and checks for errors.

    Args:
        folderlist: the list of folders where the Lattes CV files are found.
            The Lattes CV files are downloaded as .zip files containing a
            .xml file.
    """
    if not isinstance(folderlist, list):
        folderlist = [folderlist]
    goodlist = []
    badlist = []
    for cfolder in folderlist:
        folder = os.path.normpath(cfolder)
        fileslist = [os.path.join(folder, x) for x in os.listdir(folder)]
        good_dummy = [x for x in fileslist if x.endswith('.zip')]
        bad_dummy = [x for x in fileslist if not x.endswith('.zip')]
        goodlist += good_dummy
        badlist += bad_dummy
    # Test each xml for parsing capabilities. Iterate over a copy so that
    # removing bad files from goodlist does not skip entries.
    for filename in goodlist[:]:
        try:
            archive = zipfile.ZipFile(filename, 'r')
            if archive.namelist()[0][-3:].lower() == 'xml':
                cvfile = archive.open(archive.namelist()[0], 'r')
                ET.parse(cvfile)
            else:
                print('Error: file ' + archive.namelist()[0] +
                      ' is not a xml file.')
        except Exception:
            print('XML parsing error in file ' + filename)
            goodlist.remove(filename)
            badlist.append(filename)
    return [goodlist, badlist]
d8191746b2545111617b086f5f06a1fd2d4d2572
13,166
import struct


def get_system_bits():
    """Return 32 for 32-bit systems and 64 for 64-bit."""
    return struct.calcsize("P") * 8
252d5acab4dec512df238031f89c072129485959
13,168
def format_knot_hash(value, format_spec):
    """
    >>> format_knot_hash([64, 7, 255], '02x')
    '4007ff'

    >>> format_knot_hash(bytearray.fromhex('A0C20170'), '08b')
    '10100000110000100000000101110000'
    """
    return "".join(format(h, format_spec) for h in value)
2a12f299807bcc6606a00092faecf1b34ec32170
13,171
def compute_intersection_length(A, B):
    """Compute the intersection length of two tuples.

    Args:
        A: a (speaker, start, end) tuple of type (string, float, float)
        B: a (speaker, start, end) tuple of type (string, float, float)

    Returns:
        a float number of the intersection between `A` and `B`
    """
    max_start = max(A[1], B[1])
    min_end = min(A[2], B[2])
    return max(0.0, min_end - max_start)
6cf038e1febbc1a7aa19eb104f5628ff2f935174
13,173
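An illustrative call of compute_intersection_length above; the speaker fields do not matter, only the [start, end] spans:

A = ('spk1', 0.0, 5.0)
B = ('spk2', 3.0, 8.0)
assert compute_intersection_length(A, B) == 2.0  # overlap of [3.0, 5.0]
assert compute_intersection_length(A, ('spk3', 6.0, 7.0)) == 0.0  # disjoint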
import socket


def bindsocket(port, host=''):
    """Creates a socket assigned to the IP address (host, port).

    Parameters
    ----------
    port : int
        port assigned to the socket.
    host : str, optional
        host assigned to the socket. The default is ''.

    Returns
    -------
    tcpsock : socket
        socket bound to (host, port).
    """
    tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    tcpsock.bind((host, port))
    return tcpsock
48be29952a3d35af0ec0886e3ca332f9280384f1
13,175
import re


def _yesno(message, default='no', suffix=' '):
    """Modified from github.com/tylerdave/prompter to reduce dependencies.

    All credit to the creator."""
    if default == 'yes':
        yesno_prompt = '[Y/n]'
    elif default == 'no':
        yesno_prompt = '[y/N]'
    else:
        raise ValueError("default must be 'yes' or 'no'.")
    if message != '':
        prompt_text = "{0} {1}{2}".format(message, yesno_prompt, suffix)
    else:
        prompt_text = "{0}{1}".format(yesno_prompt, suffix)
    while True:
        response = input(prompt_text).strip()
        if response == '':
            return True
        else:
            if re.match('^(y)(es)?$', response, re.IGNORECASE):
                if default == 'yes':
                    return True
                else:
                    return False
            elif re.match('^(n)(o)?$', response, re.IGNORECASE):
                if default == 'no':
                    return True
                else:
                    return False
f66bdf51fd79deb9ccba52f426a691361c9c6993
13,176
import json
import logging


def output(post, quiet=False, use_test_node=False, diagnostic=False,
           urls_only=False) -> int:
    """Prints out the post and extracts the custom_json."""
    data = json.loads(post.get("json"))
    if diagnostic:
        logging.info(
            f"Diagnostic - {post.get('timestamp')} "
            f"- {data.get('server_account')} - {post.get('trx_id')} - {data.get('message')}"
        )
        logging.info(json.dumps(data, indent=2))
    if quiet:
        if data.get("num_urls"):
            return data.get("num_urls")
        else:
            return 1
    if urls_only:
        if data.get("url"):
            print(data.get("url"))
            return 1
        elif data.get("urls"):
            for url in data.get("urls"):
                print(url)
            return data.get("num_urls")
    data["required_posting_auths"] = post.get("required_posting_auths")
    data["trx_id"] = post.get("trx_id")
    data["timestamp"] = post.get("timestamp")
    count = 0
    if use_test_node:
        data["test_node"] = True
    if data.get("url"):
        logging.info(
            f"Feed Updated - {data.get('timestamp')} - {data.get('trx_id')} "
            f"- {data.get('url')}"
        )
        count = 1
    elif data.get("urls"):
        for url in data.get("urls"):
            count += 1
            logging.info(
                f"Feed Updated - {data.get('timestamp')} - {data.get('trx_id')} - {url}"
            )
    return count
31f3e977d47d44ed6b9ec45d171cbe9cfecb7ed9
13,178
import os


def get_filepath(date, data_source='data'):
    """Return the filepath of a given file.

    Args:
        date (string): string containing the date in the format
            {day}{abbreviated month}, e.g. 7Nov

    Returns:
        string
    """
    return os.path.join(data_source, f'data_seating_{date}.csv')
27456a7a49c56d11d76ce63c5e043fcc94cdac27
13,179
import six


def _UnitsByMagnitude(units, type_abbr):
    """Returns a list of the units in scales sorted by magnitude."""
    scale_items = sorted(six.iteritems(units),
                         key=lambda value: (value[1], value[0]))
    return [key + type_abbr for key, _ in scale_items if key]
972a17b51901a133444ddb77989c4ebc372fc35e
13,180
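A hypothetical scale dict showing how _UnitsByMagnitude above sorts and filters (the empty-string base unit is dropped by the `if key` test):

units = {'': 1, 'K': 2 ** 10, 'M': 2 ** 20}
assert _UnitsByMagnitude(units, 'B') == ['KB', 'MB']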
import functools
import traceback


def check_workchain_step(func):
    """Decorator for workchain steps that logs (and re-raises) errors
    occurring within that step.
    """
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as e:
            self.report(
                '{} in {}: {}.\nTraceback:\n{}'.format(
                    type(e).__name__, func.__name__, e,
                    traceback.format_exc())
            )
            raise e
    return inner
d6e5834b233075fbb1097a84f7e3010c3f292194
13,183
def _construct_GPL_url(accession):
    """Example URL:
    ftp://ftp.ncbi.nlm.nih.gov/geo/platforms/GPLnnn/GPL570/annot/GPL570.annot.gz
    """
    number_digits = len(accession) - 3  # 'GPL' is of length 3.
    if number_digits < 4:
        folder = accession[:3] + 'nnn'  # e.g. GPLnnn.
    elif 3 < number_digits < 5:
        folder = accession[:4] + 'nnn'  # e.g. GPL6nnn.
    else:
        folder = accession[:5] + 'nnn'  # e.g. GPL19nnn.
    url = '/'.join(['ftp://ftp.ncbi.nlm.nih.gov/geo/platforms',
                    folder, accession, 'annot', accession + '.annot.gz'])
    return url
36527d413cc005abe5055eab46e4d7fe93f4aa8e
13,184
def get_get_upload_details_retry_predicate(resp):
    """Triggers retry if upload details came back without last status."""
    return not resp.last_status
ecadf182751e55c2c3cbd85f2a7343adc34821e5
13,185
def get_setuptools_package_version(setuptools_version: str) -> str:
    """Generate the right setuptools command for the pip command.

    :param setuptools_version: Setuptools version obtained from
    :return: A string formatted for the pip install command
        (e.g. setuptools==58.0.0)
    """
    setuptools_version = setuptools_version.lower().strip()
    # tox.ini: setuptools_version = setuptools==19.0
    if setuptools_version.startswith("setuptools"):
        return setuptools_version
    # tox.ini: setuptools_version = 19.0
    return f"setuptools=={setuptools_version}"
99554292253752545d1bbf82edfef92a925b1746
13,186
import os
import re


def base_disk_for_block_device(partition: str) -> str:
    """Returns the base disk of a disk partition.

    :param partition: Path to the partition to get the base disk of
    :return: Path to base disk of given partition
    """
    # Follow link(s); partition should now be in format '/dev/sda5'
    partition = os.path.realpath(partition)
    # Remove trailing number
    partition = re.sub('[0-9]+$', '', partition)
    return partition
4734d67dbc958637d088be17b25b0ee8e63fa20d
13,188
import torch


def log1pMSELoss(log_predicted_counts, true_counts):
    """A MSE loss on the log(x+1) of the inputs.

    This loss will accept tensors of predicted counts and a vector of true
    counts and return the MSE on the log of the labels. The squared error is
    calculated for each position in the tensor and then averaged, regardless
    of the shape.

    Note: The predicted counts are in log space but the true counts are in
    the original count space.

    Parameters
    ----------
    log_predicted_counts: torch.tensor, shape=(n, ...)
        A tensor of log predicted counts where the first axis is the number
        of examples. Important: these values are already in log space.
    true_counts: torch.tensor, shape=(n, ...)
        A tensor of the true counts where the first axis is the number of
        examples.

    Returns
    -------
    loss: torch.tensor, shape=(n, 1)
        The MSE loss on the log of the two inputs, averaged over all
        examples and all other dimensions.
    """
    log_true = torch.log(true_counts + 1)
    return torch.mean(torch.square(log_true - log_predicted_counts), dim=-1)
ba7d244885303fa6755c4c25f1f991afae6d10ed
13,189
import logging


def check_connection(ssh_conn):
    """Check if the connection is still available.

    Return (bool): True if it's still alive, False otherwise.
    """
    try:
        ssh_conn.exec_command("ls", timeout=5)
        return True
    except Exception as e:
        logging.error(
            "unable to execute a simple command on remote connection. "
            "Error: {}".format(e.__str__())
        )
        return False
f443d6788eb4a79db7011f0ca9dc4ae15cdf6145
13,190
import re


def parse_show_ip_bgp_route_map(raw_result):
    """Parse the 'show ip bgp route-map' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show ip bgp route-map command in a
     dictionary of the form:

     ::

        {
            '1': {
                'action': 'deny',
                'set_parameters': '',
                'as_path_exclude': '20 30 40',
                'match_parameters': '',
                'prefix_list': 'List2',
                'ipv6_prefix_list': 'List2-6'
            },
            '2': {
                'action': 'permit',
                'set_parameters': '',
                'match_parameters': '',
                'prefix_list': None,
            },
            '3': {
                'action': 'permit',
                'set_parameters': '',
                'match_parameters': '',
                'prefix_list': 'List1',
            }
        }
    """
    rmap_re = (
        r'Entry\s(?P<entry_number>\d+):\n'
        r'\s+action\s:\s(?P<action>\w+) \n'
        r'\s+Set\sparameters\s:(?P<set_parameters>[\S]*)\n'
        r'(\s+as_path_exclude\s:\s(?P<as_path_exclude>[\d ]+))?'
        r'\s+Match\sparameters\s:(?P<match_parameters>[\S]*)\n?'
        r'(\s+prefix_list\s:\s(?P<prefix_list>[\w-]+) \n?)?'
        r'(\s+ipv6_prefix_list\s:\s(?P<ipv6_prefix_list>[\w_\-]+) \n?)?'
    )
    result = {}
    for output in re.finditer(rmap_re, raw_result):
        entry = output.groupdict()
        result[entry['entry_number']] = entry
        if result[entry['entry_number']]['prefix_list'] is None:
            del result[entry['entry_number']]['prefix_list']
        if result[entry['entry_number']]['ipv6_prefix_list'] is None:
            del result[entry['entry_number']]['ipv6_prefix_list']
        if result[entry['entry_number']]['as_path_exclude'] is None:
            del result[entry['entry_number']]['as_path_exclude']
        del result[entry['entry_number']]['entry_number']
    assert result
    return result
48aecdc76da27fbbc991c30405d1efef82bcbfc3
13,191
import torch


def sample_image():
    """Sample torch image of correct shape."""
    return torch.zeros((3, 300, 300))
fc62ae5720fc1237822faad81c8924026f5b42a7
13,192
def generate_layer(layer_def, query_tokens, extent, empty_zoom):
    """If empty_zoom is True, adds an extra sql column with a constant value.
    Otherwise, if it is an integer, tests if the geometry of this layer
    covers the whole tile and outputs true/false. Otherwise no extra column
    is added.
    """
    layer = layer_def["layer"]
    buffer = layer['buffer_size']
    query = layer["datasource"]["query"].format(**query_tokens)
    if query.startswith("("):
        # Remove the first and last parenthesis and "AS t"
        query = query[1:query.rfind(")")]
    query = query.replace(
        "geometry",
        f"ST_AsMVTGeom(geometry, !bbox!, {extent}, {buffer}, true) AS mvtgeometry")
    if isinstance(empty_zoom, bool):
        is_empty = "FALSE AS IsEmpty, " if empty_zoom else ""
    else:
        # Test that geometry covers the whole tile
        zero = 0
        wkt_polygon = f"POLYGON(({zero} {extent},{zero} {zero},{extent} {zero},{extent} {extent},{zero} {extent}))"
        is_empty = f"""\
CASE z(!scale_denominator!) <= {empty_zoom} \
WHEN TRUE THEN FALSE \
ELSE ST_WITHIN(ST_GeomFromText('{wkt_polygon}', 3857), ST_UNION(mvtgeometry)) \
END AS IsEmpty, """
    # Combine all layer's features into a single MVT blob representing one
    # layer, only if the MVT geometry is not NULL.
    # Skip the whole layer if there is nothing in it.
    return f"""\
SELECT {is_empty}ST_AsMVT(tile, '{layer['id']}', {extent}, 'mvtgeometry') as mvtl \
FROM ({query} WHERE ST_AsMVTGeom(geometry, !bbox!, {extent}, {buffer}, true) IS NOT NULL) AS tile \
HAVING COUNT(*) > 0"""
6afc16d5d84775f329ceda1334c714e72b341e7a
13,193
def __create_brightness_temparature_levels(df):
    """
    Args:
        df (pandas.DataFrame):

    Returns:
        dict: a dictionary of DataFrame objects
    """
    levels = {}
    levels[1] = df.query("value > 330")
    levels[2] = df.query("value <= 330 and value > 320")
    levels[3] = df.query("value <= 320 and value > 310")
    levels[4] = df.query("value <= 310 and value > 300")
    levels[5] = df.query("value <= 300 and value > 295")
    levels[6] = df.query("value <= 295 and value > 290")
    levels[7] = df.query("value <= 290 and value > 285")
    levels[8] = df.query("value <= 285 and value > 280")
    levels[9] = df.query("value <= 280 and value > 270")
    levels[10] = df.query("value <= 270 and value > 260")
    levels[11] = df.query("value <= 260 and value > 250")
    levels[12] = df.query("value <= 250 and value > 240")
    levels[13] = df.query("value <= 240 and value > 230")
    levels[14] = df.query("value <= 230 and value > 220")
    levels[15] = df.query("value <= 220 and value > 210")
    levels[16] = df.query("value <= 210 and value > 200")
    levels[17] = df.query("value <= 200 and value > 180")
    levels[18] = df.query("value <= 180")
    return levels
6d85267219026c978720d0ba4dadc6ac58bb5836
13,194
def arch2abi(arch):
    """Map arch to abi."""
    # pylint: disable=too-many-return-statements
    if "rv32e" in arch:
        if "d" in arch:
            return "ilp32ed"
        if "f" in arch:
            return "ilp32ef"
        return "ilp32e"
    if "rv32i" in arch:
        if "d" in arch:
            return "ilp32d"
        if "f" in arch:
            return "ilp32f"
        return "ilp32"
    if "rv64i" in arch:
        if "d" in arch:
            return "lp64d"
        if "f" in arch:
            return "lp64f"
        return "lp64"
    raise Exception("Unknown arch %s" % arch)
a315f04f5c45588953a15cc0fbfda8e3b9621621
13,195
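Two illustrative mappings for arch2abi above (note the extension check is a plain substring test on the arch string):

assert arch2abi('rv32imac') == 'ilp32'
assert arch2abi('rv64imafdc') == 'lp64d'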
def find_nearest_date(items, pivot):
    """This function will return the datetime in items which is the closest
    to the date pivot.

    See https://stackoverflow.com/questions/32237862/find-the-closest-date-to-a-given-date

    Parameters
    ----------
    items : list
        List containing datetimes
    pivot : datetime.datetime
        Datetime to be found

    Returns
    -------
    datetime.datetime
    """
    return min(items, key=lambda x: abs(x - pivot))
7b719357c92210729857957e5b4ed8aee4a1f466
13,196
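A small example for find_nearest_date above with made-up dates:

from datetime import datetime

dates = [datetime(2021, 1, 1), datetime(2021, 6, 1), datetime(2021, 12, 31)]
assert find_nearest_date(dates, datetime(2021, 5, 20)) == datetime(2021, 6, 1)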
def get_time_tied_leading_trailing(event, previous_score, last_goal_time):
    """Calculate time of previous score state according to current event
    time and time of last goal scored.
    """
    if previous_score['home'] == previous_score['road']:
        return 'tied', event['time'] - last_goal_time
    elif previous_score['home'] > previous_score['road']:
        return 'home_leading', event['time'] - last_goal_time
    elif previous_score['home'] < previous_score['road']:
        return 'road_leading', event['time'] - last_goal_time
d53172e0beb9ab3155f02f3feca3680d25e5bcd0
13,197
import os


def has_freesurfer():
    """Aux function."""
    return 'FREESURFER_HOME' in os.environ
4cc6137ac5bcf0b6fdc24797967147266851948d
13,199
def J_int_yt(yt, membrane_geometry):
    """Jacobian using the yt = 1. - rt coordinate.

    Note that the coordinate definition for y is not consistent between
    [1] and [2].
    """
    J = 1. - yt
    if membrane_geometry == 'FMM' or membrane_geometry == 'FMS':
        J = 1.
    return J
45e8ea2ef74c7bfd2dfa96543a8bfcb47531d6d4
13,201
def get_primer_direction(primer, delimiter='_', left='left', right='right'):
    """Check if the primer is on the left or the right side of the amplicon."""
    primer = delimiter.join([str(item) for item in primer])
    if left in primer.lower():
        return 'LEFT'
    elif right in primer.lower():
        return 'RIGHT'
    print('Cannot determine LEFT or RIGHT primer side')
3aed4e1d3397befa2ae713c5cae1a0c018abb5ae
13,202
import os


def exists(name):
    """Returns whether the given file or folder exists."""
    return os.path.exists(name)
d3f3ff1971a12b74355623feb5954356d0811735
13,204
import os


def is_package(folder):
    """Check whether a folder is a Python package.

    :param str folder: path of the folder
    :return: True if it is a package, False otherwise
    :rtype: bool

    .. code-block:: python

        >> is_package('../pydoc')
        'True'
    """
    return os.path.isdir(folder) and '__init__.py' in os.listdir(folder)
4cc46b31df07d36d478192294fb8c50c53f716f8
13,205
def sample_from_simplex(rng, dim):
    """Uniformly samples a probability vector from a simplex of dimension dim."""
    alpha = [1] * dim
    return rng.dirichlet(alpha)
d034c4502634678874f89e17f2fde28eb8c28e0b
13,206
import subprocess


def sh(command, bg=False, **kwargs):
    """Execute a local command."""
    kwargs['shell'] = True
    if bg:
        return subprocess.Popen(command, **kwargs)
    else:
        subprocess.check_call(command, **kwargs)
f4d4b562e77738a4a371587926b32a8c3df8422d
13,207
import math


def getDistance(p1, p2):
    """Return the distance between p1 and p2."""
    return math.sqrt(sum([(p1[i] - p2[i]) ** 2
                          for i in range(max(len(p1), len(p2)))]))
28d8156ad1eb5557a3fb3fa8bc7a94a66d06db3e
13,208
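A quick check of getDistance above using the 3-4-5 triangle (both points must have the same dimensionality, or the index lookup raises):

assert getDistance((0, 0), (3, 4)) == 5.0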
def format_condition_value(conditions):
    """
    @summary:
        ['111', '222'] -> ['111', '222']
        ['111', '222\n333'] -> ['111', '222', '333']
        ['', '222\n', ' 333 '] -> ['222', '333']
    @param conditions:
    @return:
    """
    formatted = []
    for val in conditions:
        formatted += [item for item in val.strip().split('\n') if item]
    return formatted
b79269ab375c1bec9724f29528c5a42d3bda3a72
13,210
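The docstring cases of format_condition_value above, executed as assertions (illustrative):

assert format_condition_value(['111', '222']) == ['111', '222']
assert format_condition_value(['111', '222\n333']) == ['111', '222', '333']
assert format_condition_value(['', '222\n', ' 333 ']) == ['222', '333']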
def links_and_nodes():
    """Test network of sources (S1-S3), junctions (J1-J6), basins/storages
    (BR, BI, BF) and outfalls (OF, INF); the topology is given by the edge
    list below.
    """
    s = [
        ('S1', {"load1": 6, "load2": 10, "volume": 12}),
        ('S2', {"load1": 8, "load2": 10, "volume": 13}),
        ('S3', {"load1": 5, "load2": 10, "volume": 10}),
    ]
    l = [
        ('S1', 'J3', {'id': "^S1", "volume": 12}),
        ('S2', 'BR', {'id': "^S2", "volume": 13}),
        ('J3', 'BR', {'id': "C3", "volume": 12}),
        ('BR', 'J2', {'id': "w2", "volume": 2}),
        ('BR', 'J2', {'id': "TR-BR", "volume": 13}),
        ('BR', 'INF-OF', {'id': "INF-1", "volume": 10}),
        ('J2', 'BF', {'id': "C2", "volume": 15}),
        ('BF', 1, {'id': "w1", "volume": 2}),
        ('BF', 1, {'id': "TR-BF", "volume": 13}),
        (1, 'OF', {'id': "C1", "volume": 16.8}),
        ('J4', 'J3', {'id': "C4", "volume": 0}),
        ('J4', 'J6', {'id': "C7", "volume": 0}),
        ('S3', 'J6', {'id': "^S3", "volume": 10}),
        ('J6', 'BI', {'id': 2, "volume": 10}),
        ('BI', 'J5', {'id': "w3", "volume": 1.8}),
        ('J5', 1, {'id': "C5", "volume": 1.8}),
    ]
    return l, s
b8a154738407fee77fbd2d111443a42753ff749c
13,212
import numbers


def is_numeric(N):
    """Determine if `N` is numeric.

    `N` may be a single value or a collection.
    """
    def is_num(n):
        return isinstance(n, numbers.Number) and (not isinstance(n, bool))

    if '__iter__' in dir(N):
        return False not in [is_num(n) for n in N]
    else:
        return is_num(N)
4381d9ad7a3ee5f2f689b78f54b663e8349c58ff
13,213
import os


def search(pathe, folder):
    """Search for folders inside the directories that contain the images.

    :pathe: directory in which to search
    :folder: name of the folder to find

    Returns an array with all the matches found.
    """
    directorios = [x[0] if x[0].split('\\')[-1] == folder else ''
                   for x in os.walk(pathe)]
    while '' in directorios:
        directorios.remove('')
    return directorios
12b7fe6125959b4abda605dd9bb633c783c8057c
13,215
import argparse


def build_parser():
    """Build argument parser."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "input_json",
        type=str,
        help="json of generated sequences",
    )
    parser.add_argument("reference_file", type=str,
                        help="Reference file in csv form")
    parser.add_argument("outdir", type=str,
                        help="Directory to write output files")
    parser.add_argument(
        "--mode",
        type=str,
        choices=["edit", "blosum"],
        default="blosum",
        help="Select best match based on min edit or best alignment",
    )
    return parser
21b83044e749c041b0d2a32630489823db8a8895
13,216
def file_is_pdf(filename: str) -> bool:
    """Check if `filename` has the .pdf extension.

    Args:
        filename (str): Any filename, including its path.

    Returns:
        True if `filename` ends with .pdf, false otherwise.
    """
    return filename.endswith(".pdf")
ba9a540a1f0edd33e48010a001de4493bdff80d9
13,217
import glob
import os


def isempty(path):
    """Determine whether the given directory is empty."""
    flist = glob.glob(os.path.join(path, '*'))
    return flist == []
6f18b4f3956d25af20ee43c6177d76ac001db699
13,218
from typing import Callable
from typing import Iterator
from typing import Optional


def mk_omit(skip_func: Callable) -> Callable:
    """The skip function is either minleq or maxgeq."""
    def omit_(pot: int, seqs: Iterator[Iterator[int]]) -> Iterator[Optional[int]]:
        """Given an iterator of iterators, call skip_func. If the returned
        value is true, skip it. Otherwise, yield the value.
        """
        for seq in seqs:
            m = skip_func(seq, pot)
            if m is True:
                for i in omit_(pot, seqs):
                    yield i
            else:
                yield m
                for i in omit_(m, seqs):
                    yield i
    return omit_
ce1aafd0a67dd1acfbc9fc7b966fd14a76b1e85d
13,219
def generateClosures(transactions, generators):
    """Generate the closures of the generators.

    Parameters
    ----------
    transactions : list of sets
        The list of transactions
    generators : list of lists
        The list of generator itemsets whose closures need to be computed

    Returns
    -------
    list of sets
        The list of closures mapped from the generators
    """
    # The indices of transactions where generators occur
    generators_trans_indices = [[] for _ in range(len(generators))]
    for trans_index, transaction in enumerate(transactions):
        for generator_index, generator in enumerate(generators):
            if all(_item in transaction for _item in generator):
                generators_trans_indices[generator_index].append(trans_index)
    generators_closures = []
    for generator_trans_indices in generators_trans_indices:
        if generator_trans_indices:
            closure = transactions[generator_trans_indices[0]].copy()
        else:
            closure = set()
        for trans_index in generator_trans_indices[1:]:
            closure.intersection_update(transactions[trans_index])
        generators_closures.append(closure)
    return generators_closures
452f12fec6dff0e3c8903344976fcb44728fa563
13,220
import torch


def torch_hilbert(x_real, n_fft=None):
    """Obtain the imaginary counterpart to a real signal such that there are
    no negative frequency components when represented as a complex signal.
    This is done by using the Hilbert transform. We end up with an analytic
    signal and return only the imaginary part. Most importantly, this
    procedure is fully differentiable. Adapted from the SciPy signal.hilbert
    function.

    Parameters
    ----------
    x_real : Tensor (F x T)
        Real counterpart of an analytic signal,
        F - number of independent signals
        T - number of time steps (samples)
    n_fft : int
        Number of Fourier components

    Returns
    ----------
    x_imag : Tensor (F x T)
        Imaginary counterpart of an analytic signal,
        F - number of independent signals
        T - number of time steps (samples)
    """
    # Default to the length of the input signal
    if n_fft is None:
        n_fft = x_real.size(-1)
    # Create the transfer function for an analytic signal
    h = torch.zeros(n_fft).to(x_real.device)
    if n_fft % 2 == 0:
        h[0] = h[n_fft // 2] = 1
        h[1: n_fft // 2] = 2
    else:
        h[0] = 1
        h[1: (n_fft + 1) // 2] = 2
    # Take the Fourier transform of the real part
    Xf = torch.fft.fft(x_real, n=n_fft, dim=-1)
    # Apply the transfer function to the Fourier transform
    Xfh = Xf * h.unsqueeze(-2)
    # Take the inverse Fourier Transform to obtain the analytic signal
    x_alyt = torch.fft.ifft(Xfh, dim=-1)
    # Take the imaginary part of the analytic signal to obtain the Hilbert transform
    x_imag = x_alyt.imag
    return x_imag
33bb230c78beb84ca5569c5fca8ebb61a36fd7c5
13,221
import math


def calcCirclePos(robotIdx, numRobots, radius=3, center=(0, 0)):
    """Helper function to distribute robot positions on a circle."""
    gamma = 2 * math.pi / numRobots
    x = radius * math.cos(gamma * robotIdx) + center[0]
    y = radius * math.sin(gamma * robotIdx) + center[1]
    phi = gamma * robotIdx - math.pi
    return (x, y, phi)
393c044dcb4016c34b8efe2a7acd185b45c52467
13,224
def open_file_handler(file_name):
    """Function returning an opened file handler.

    :param file_name:
    :return:
    """
    return open(file_name, mode='r', encoding='utf8')
9f78ba9e11060cff515405ffc87d48a585fdc2ed
13,226
import re


def is_valid_vlive_url(url: str) -> bool:
    """Uses a regex to check if the given url is a valid 'vlive.tv/video/'
    address."""
    # VLIVE videos are only identified by numbers in the url (unlike
    # Youtube IDs, for example)
    vlive_url_regex = r"(vlive\.tv\/video\/[0-9]*)"
    if not re.search(vlive_url_regex, url):
        return False
    return True
35c741ea512cad5edd791ebbb8acef99b19d84d3
13,227
from typing import Tuple
import argparse


def loadbuild_cli() -> Tuple[bool, bool]:
    """Convenience CLI args for rebuilding data using `load_or_build`."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--rebuild', action='store_true')
    parser.add_argument('--rebuild-down', action='store_true')
    parser.add_argument('--rebuild-all', action='store_true')
    args = parser.parse_args()
    rebuild = args.rebuild
    rebuild_down = args.rebuild_down
    rebuild_all = args.rebuild_all
    if rebuild_all:
        rebuild = True
        rebuild_down = True
    return rebuild, rebuild_down
a99515b4fdadb733fe398f64b919b531787dade1
13,228
def insertion_sort(arr):
    """Performs insertion sort of the given ints to sort."""
    if len(arr) < 2:
        return arr
    i = 1
    while i < len(arr):
        curr_val = arr[i]
        j = i - 1
        # Start with curr val and compare to all previous values
        while j >= 0:
            if arr[j] > curr_val:
                # Move larger value to the right
                arr[j + 1] = arr[j]
                j = j - 1
            else:
                break
        # Once curr_val is larger than arr[j], place it to the right of arr[j]
        arr[j + 1] = curr_val
        i += 1
    return arr
0e8da20d1e5e958f8df73dc5e9e83a7bdf6d235b
13,232
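A usage sketch for insertion_sort above (it sorts in place and returns the list):

data = [5, 2, 4, 6, 1, 3]
assert insertion_sort(data) == [1, 2, 3, 4, 5, 6]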
def default_resolution():
    """Note that this and the number of ts objects created below are fine
    tuned for these tests to create just over a week of data, and changing
    them might cause the tests to fail. See docstrings of tests for info."""
    return 20
eacfdecb06665c52930f959ea9ee0adfde2775c3
13,233
import torch


def embedding_similarity(
    batch: torch.Tensor,
    similarity: str = 'cosine',
    reduction: str = 'none',
    zero_diagonal: bool = True
) -> torch.Tensor:
    """Computes representation similarity.

    Example:
        >>> from pytorch_lightning.metrics.functional import embedding_similarity
        >>> embeddings = torch.tensor([[1., 2., 3., 4.], [1., 2., 3., 4.], [4., 5., 6., 7.]])
        >>> embedding_similarity(embeddings)
        tensor([[0.0000, 1.0000, 0.9759],
                [1.0000, 0.0000, 0.9759],
                [0.9759, 0.9759, 0.0000]])

    Args:
        batch: (batch, dim)
        similarity: 'dot' or 'cosine'
        reduction: 'none', 'sum', 'mean' (all along dim -1)
        zero_diagonal: if True, the diagonals are set to zero

    Return:
        A square matrix (batch, batch) with the similarity scores between
        all elements. If sum or mean are used, then returns (b, 1) with the
        reduced value for each row.
    """
    if similarity == 'cosine':
        norm = torch.norm(batch, p=2, dim=1)
        batch = batch / norm.unsqueeze(1)
    sqr_mtx = batch.mm(batch.transpose(1, 0))
    if zero_diagonal:
        sqr_mtx = sqr_mtx.fill_diagonal_(0)
    if reduction == 'mean':
        sqr_mtx = sqr_mtx.mean(dim=-1)
    if reduction == 'sum':
        sqr_mtx = sqr_mtx.sum(dim=-1)
    return sqr_mtx
dda82b8fa4dc4f3760a5c7ec329c77b980f3860c
13,234
from typing import List
from typing import Dict
from typing import Any
import logging
import json


def entity2tag(token_list: List[str], entities: List[Dict[str, Any]]):
    """Convert entities from entity format to tag format. If annotations
    overlap, the later entity is automatically ignored and dropped.
    Processes a single sample; batch processing is not supported.

    Args:
        token_list(List[str]): list of tokens of the tokenized text
        entities(List[Dict[str, Any]]): the entities of the text.

    return:
        List[List[str], List[str]]: data in tag format

    Examples:
        >>> token_list = '胡静静在水利局工作。'  # character level
        >>> token_list = ['胡', '静', '静', '在', '水', '利', '局', '工', '作', '。']  # character or word level
        >>> ner_entities = [{'text': '胡静静', 'offset': [0, 3], 'type': 'Person'},
                            {'text': '水利局', 'offset': [4, 7], 'type': 'Orgnization'}]
        >>> print(jio.ner.entity2tag(token_list, ner_entities))
        ['B-Person', 'I-Person', 'E-Person', 'O', 'B-Orgnization',
         'I-Orgnization', 'E-Orgnization', 'O', 'O', 'O']
    """
    tags = ['O' for i in range(len(token_list))]
    flag = 0  # tracks the end of the last entity, to detect overlaps
    entities = sorted(entities, key=lambda i: i['offset'][0])
    for idx, entity in enumerate(entities):
        if entity['offset'][1] < flag:
            # Overlapping annotation: drop this entity
            if 1 < idx + 1 < len(entities):
                logging.warning(
                    'The entity {} is overlapped with {}.'.format(
                        json.dumps(entity, ensure_ascii=False),
                        json.dumps(entities[idx - 1], ensure_ascii=False)))
        else:
            if entity['offset'][1] - entity['offset'][0] == 1:
                tags[entity['offset'][0]] = 'S-' + entity['type']
            else:
                tags[entity['offset'][0]] = 'B-' + entity['type']
                if entity['offset'][1] - entity['offset'][0] > 2:
                    for j in range(entity['offset'][0] + 1,
                                   entity['offset'][1] - 1):
                        tags[j] = 'I-' + entity['type']
                tags[entity['offset'][1] - 1] = 'E-' + entity['type']
            flag = entity['offset'][1]
    return tags
79c0d228da642013dfcbfa59aa2e317185e413c2
13,235
import sys


def loadingFinder(load):
    """Normalize a loading-type string and return 'static' or 'dynamic'."""
    _load = str(load).lower()
    _load = _load.replace(' ', '')
    _load = _load.replace('-', '')
    _load = _load.replace('_', '')
    _load = _load.strip()
    #
    _static = ['static']
    _dynamic = ['dynamic']
    #
    if _load in _static:
        _type = 'static'
    elif _load in _dynamic:
        _type = 'dynamic'
    else:
        print('error: loading type {} not available'.format(load))
        sys.exit('error: loading type {} not available'.format(load))
    #
    return _type
7bf6cc738932f1b04dc0d7532302f7402437eaf1
13,236
def fib(n):
    """Compute the nth number of the Fibonacci series.

    Implementation focusing on running fib once, with memoization.
    """
    memoire = [0] * 100

    def rec_fib(n):
        if n == 1 or n == 2:
            memoire[n - 1] = 1
            return 1
        else:
            if memoire[n - 1] == 0:
                # Memoization makes this dramatically faster:
                # without memoire = 3.22995209694 s
                # with memoire    = 0.156451940536 s
                # (memoization is about 20.6 times faster)
                memoire[n - 1] = rec_fib(n - 1) + rec_fib(n - 2)
            return memoire[n - 1]

    return rec_fib(n)
dfeae222af315c55a1010ce73857a8925f20d837
13,238
def fixName(name):
    """PMML tag name substitutions to avoid conflicts with Python syntax."""
    out = name.replace("-", "_")
    if out == "True":
        out = "AlwaysTrue"
    elif out == "False":
        out = "AlwaysFalse"
    elif out == "from":
        out = "isfrom"
    return out
e5bb85465f8a599f45ff12bf48247beab123472e
13,240
import time


def microseconds():
    """Get the time in microseconds."""
    return time.time() * 1000000.0
82d9258a8b0f68295b9ba2c86a9165309c19ae90
13,241
def _compute_input_padding(size, bcount, bsize, boffset, bstride):
    """Computes the padding for the operation.

    :param size: `[SZH, SZW]` list-like of ints, size of image
    :param bcount: `[BCH, BCW]` list of ints
    :param bsize:
    :param boffset:
    :param bstride:
    :returns: `pad_h, pad_w` for _pad_inputs function, possibly negative.
    :rtype:
    """
    pad_h = [boffset[0],
             boffset[0] + bstride[0] * bcount[0] + bsize[0] - size[0]]
    pad_w = [boffset[1],
             boffset[1] + bstride[1] * bcount[1] + bsize[1] - size[1]]
    return pad_h, pad_w
bce6ed06406f0d259d029c745e3c9f5ff015959e
13,242
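A worked example for _compute_input_padding above with illustrative block parameters: 2 blocks of size 3 at stride 2 starting at offset 0 need the 5-pixel image padded by 2 at the bottom/right.

pad_h, pad_w = _compute_input_padding(
    size=[5, 5], bcount=[2, 2], bsize=[3, 3], boffset=[0, 0], bstride=[2, 2])
assert pad_h == [0, 2] and pad_w == [0, 2]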
from typing import Union
import string


def _strip_unwanted_chars(price: Union[int, str]) -> str:
    """Returns price text with all unnecessary chars stripped (nonnumeric etc).

    Examples:
        "100" should return "100"
        "100 yen" should return "100"
        "10,000" should return "10000"

    Args:
        price: The raw value of the price data.

    Returns:
        String that represents the price with currency and other unnecessary
        punctuation removed.
    """
    return ''.join(char for char in str(price) if char in string.digits)
18679bbd7e53fcea7cadd3343fc82a3dd875f8af
13,243
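The docstring cases of _strip_unwanted_chars above as runnable assertions:

assert _strip_unwanted_chars('100') == '100'
assert _strip_unwanted_chars('100 yen') == '100'
assert _strip_unwanted_chars('10,000') == '10000'
assert _strip_unwanted_chars(100) == '100'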
def isDocx(filename):
    """Check that the uploaded file is a .docx file."""
    return '.' in filename and filename.rsplit('.', 1)[1] in {"docx"}
547236d821e0ecb3bb59a2ea00623adbf7d5c6df
13,245
import warnings def update(data): """Update the data in place to remove deprecated properties. Args: data (dict): dictionary to be updated Returns: True if data was changed, False otherwise """ updated = False if 'include' in data: msg = ("included configuration files should be updated manually" " [files={0}]") warnings.warn(msg.format(', '.join(data['include']))) # Spack 0.19 drops support for `spack:concretization` in favor of # `spack:concretizer:unify`. Here we provide an upgrade path that changes the former # into the latter, or warns when there's an ambiguity. Note that Spack 0.17 is not # forward compatible with `spack:concretizer:unify`. if 'concretization' in data: has_unify = 'unify' in data.get('concretizer', {}) to_unify = {'together': True, 'separately': False} unify = to_unify[data['concretization']] if has_unify and data['concretizer']['unify'] != unify: warnings.warn( 'The following configuration conflicts: ' '`spack:concretization:{}` and `spack:concretizer:unify:{}`' '. Please update manually.'.format( data['concretization'], data['concretizer']['unify'])) else: data.update({'concretizer': {'unify': unify}}) data.pop('concretization') updated = True return updated
2e604cde4455bb1ab784651798fb3be0cd3733db
13,247
from typing import List


def calc_high_actual_pd(actual_temps_dict: dict,
                        final_high_rcp_list: list) -> List[float]:
    """Return a list of percentage differences of high RCP values to actual
    temperature values."""
    actual_temps_list = list(actual_temps_dict.values())
    high_rcp_pd = []
    for index in range(0, len(final_high_rcp_list)):
        difference = abs(final_high_rcp_list[index] - actual_temps_list[index])
        percentage_difference = round(
            ((difference / actual_temps_list[index]) * 100), 1)
        high_rcp_pd.append(percentage_difference)
    return high_rcp_pd
6aee2dbf9ab62dab7c5b4671a1c1ce64ad4ad2eb
13,249