Columns:
  content: string, lengths 35 to 416k
  sha1: string, length 40
  id: int64, values 0 to 710k
import subprocess
import re


def compiler_version():
    """ Return the version of the installed solc. """
    version_info = subprocess.check_output(['solc', '--version'])
    match = re.search(b'^Version: ([0-9a-z.-]+)/', version_info, re.MULTILINE)
    if match:
        return match.group(1)
951f3bf271740a49f5a381089e981587ebb1200f
18,122
def get_payer_channel(channelidentifiers_to_channels, transfer_pair):
    """ Returns the payer channel of a given transfer pair. """
    payer_channel_identifier = transfer_pair.payer_transfer.balance_proof.channel_address
    assert payer_channel_identifier in channelidentifiers_to_channels
    payer_channel = channelidentifiers_to_channels[payer_channel_identifier]
    return payer_channel
42a0f99a65888dbe8b19d248f5d78d2b8b5f0fd1
18,123
def forward2reverse(dna):
    """Converts an oligonucleotide(k-mer) to its reverse complement sequence.
    All ambiguous bases are treated as Ns.
    """
    translation_dict = {"A": "T", "T": "A", "C": "G", "G": "C", "N": "N",
                        "K": "N", "M": "N", "R": "N", "Y": "N", "S": "N",
                        "W": "N", "B": "N", "V": "N", "H": "N", "D": "N",
                        "X": "N"}
    letters = list(dna)
    letters = [translation_dict[base] for base in letters]
    return ''.join(letters)[::-1]
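
# Example (illustrative): the reverse complement swaps each base and
# reverses the string; ambiguous bases collapse to N.
assert forward2reverse("ATGC") == "GCAT"
assert forward2reverse("ANK") == "NNT"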
81d9e1eeebc2f446ada6e88be6f4332510a8e5e4
18,124
from typing import List
import struct


def pack_byte(data: List[int]):
    """ Pack a list of short integers. """
    return struct.pack(f"{len(data)}h", *data)
1f9951e0e1052528e4c6882fdd7fd94b9ac2c9a7
18,125
def fibonacci_two(n):
    """Compute the Fibonacci sequence, variant 2 (naive recursion)."""
    return n if n < 2 else fibonacci_two(n - 2) + fibonacci_two(n - 1)
b858b565b64f97138e4e2ddc976f3f17f573c0df
18,126
def count_rect_area(matrix):
    """
    Maximal-square dynamic programming.
    https://leetcode-cn.com/problems/maximal-square/solution/zui-da-zheng-fang-xing-by-leetcode-solution/
    :param matrix: 2-D list of '0'/'1' characters
    :return: side length of the largest all-'1' square
    """
    if not matrix:
        return 0
    n = len(matrix)
    m = len(matrix[0])
    # the DP table needs an extra padding row and column, since the loops
    # below index up to ans[n][m]
    ans = [[0] * (m + 1) for _ in range(n + 1)]
    side = 0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if matrix[i - 1][j - 1] == '1':
                if i == 1 or j == 1:
                    ans[i][j] = 1
                else:
                    ans[i][j] = min(ans[i - 1][j], ans[i][j - 1], ans[i - 1][j - 1]) + 1
                side = max(side, ans[i][j])
    return side
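
# Example (illustrative): the largest all-'1' square below has side 2.
grid = [["1", "0", "1", "0"],
        ["1", "1", "1", "1"],
        ["1", "1", "1", "0"]]
assert count_rect_area(grid) == 2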
f476637354864823a3e3f8c0821f8a91012b5e2e
18,128
def norm(str):
    """Normalize string for checking"""
    return ' '.join(str.strip().split()).lower()
a2dacc1009acbc9615c9391458fac4a1793d154e
18,129
import inspect


def iscoroutinefunction(obj):
    """
    This is probably in the library elsewhere but returns bool
    based on if the function is a coro
    """
    if inspect.iscoroutinefunction(obj):
        return True
    if hasattr(obj, '__call__') and inspect.iscoroutinefunction(obj.__call__):
        return True
    return False
ccbb3216896990a197fe8059f6212c5bf8168a14
18,130
from typing import List
import pathlib


def parts_to_path(parts: List[str]) -> str:
    """Convert list of path parts into a path string

    e.g j.sals.fs.parts_to_path(["home","rafy"]) -> 'home/rafy'

    Args:
        parts (List[str]): path parts

    Returns:
        str: joined path parts
    """
    path = pathlib.Path(parts[0])
    for p in parts[1:]:
        path = path.joinpath(p)
    return str(path)
71fcc68b91bbfa868fec8f0e70dbc8434417664f
18,131
import argparse


def parse_arguments(args_to_parse):
    """ Parse CLI arguments """
    descr = 'Apply pre-processing to generate the Swahili document classification dataset'
    parser = argparse.ArgumentParser(description=descr)

    general = parser.add_argument_group('General settings')
    general.add_argument('name', type=str,
                         help="The name of the results directory - used for saving and loading.")
    general.add_argument(
        '--dataset-name', type=str, default='z-news', choices=['hsc', 'z-news'],
        help="Select which raw dataset to use: Helsinki Swahili Corpus or Zenodo Swahili News",
    )
    general.add_argument(
        '--output-dataset', type=str, default='dataset.csv',
        help="The name of the final processed dataset",
    )
    general.add_argument(
        '--output-json-labels', type=str, default='labels.json',
        help="JSON file in which to save the label name to index mapping",
    )
    general.add_argument(
        '--train-ratio', type=float, default=0.8,
        help="Ratio of the nodes to reserve for training",
    )
    general.add_argument("--seed", type=int, default=12321,
                         help='Random seed for reproducibility')

    return parser.parse_args(args_to_parse)
8d44a90ad58805b010ba4c662d48707e315de583
18,132
import json


def my_json_dumps(data):
    """my_json_dumps JSON formatter

    Arguments:
        data {str} -- Data to JSON beautify

    Returns:
        str -- Data beautified
    """
    return json.dumps(data, indent=2, sort_keys=True)
cc9b9424b79642d3dbd6eaec2dd685ecaed3b239
18,133
import random


def _replace_mlm_tokens(tokens, candidate_pred_positions, num_mlm_preds, vocab):
    """Defined in :numref:`sec_bert-dataset`"""
    # Make a new copy of the tokens for the masked-language-model input,
    # where the input may contain replaced '<mask>' or random tokens
    mlm_input_tokens = [token for token in tokens]
    pred_positions_and_labels = []
    # Shuffle, then use the order to pick 15% of random tokens for
    # prediction in the masked language model task
    random.shuffle(candidate_pred_positions)
    for mlm_pred_position in candidate_pred_positions:
        if len(pred_positions_and_labels) >= num_mlm_preds:
            break
        masked_token = None
        # 80% of the time: replace the word with the '<mask>' token
        if random.random() < 0.8:
            masked_token = '<mask>'
        else:
            # 10% of the time: keep the word unchanged
            if random.random() < 0.5:
                masked_token = tokens[mlm_pred_position]
            # 10% of the time: replace the word with a random word
            else:
                masked_token = random.randint(0, len(vocab) - 1)
        mlm_input_tokens[mlm_pred_position] = masked_token
        pred_positions_and_labels.append(
            (mlm_pred_position, tokens[mlm_pred_position]))
    return mlm_input_tokens, pred_positions_and_labels
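
# Example call (illustrative; a plain list stands in for the d2l-style
# Vocab object, since only len(vocab) is used here):
tokens = ['<cls>', 'the', 'cat', 'sat', '<sep>']
masked, positions_and_labels = _replace_mlm_tokens(
    tokens, candidate_pred_positions=[1, 2, 3], num_mlm_preds=1,
    vocab=['the', 'cat', 'sat'])
# positions_and_labels holds (position, original_token) pairs to predict.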
3bf7c4c1db7f577f7899d3a7156d6cfd97a192cd
18,134
import json

from jinja2 import Template


def request_body(fixtures, api_request, data):
    """Set request body."""
    tpl = Template(data).render(**fixtures)
    body = api_request["kwargs"]["body"] = json.loads(tpl)
    return body
544d124fd8fb0c584575ed11b514c0eccb447d96
18,136
def get_preprocess_filenames(pipelines, vocab_file, dataset_file=None):
    """
    Gets the appropriate vocabulary file path to load.

    Parameters
    ----------
    pipelines : list
        List of preprocessing pipelines.
    vocab_file : str
        Path of vocabulary file.
    dataset_file : str, optional
        Path of dataset file.

    Returns
    -------
    vocab_to_load : str
        Path of vocabulary file to load.
    dataset_to_load : str, optional
        Path of dataset to load.
    """
    # Check for a missing/empty pipeline list before sorting; calling
    # sort() on None would raise an AttributeError.
    if pipelines is None or len(pipelines) == 0:
        return None
    pipelines.sort()
    dataset_to_load = dataset_file[:-4] if dataset_file else ''
    vocab_to_load = vocab_file[:-4]
    for pipeline in pipelines:
        dataset_to_load += '_' + pipeline
        vocab_to_load += '_' + pipeline
    dataset_to_load += '.csv'
    vocab_to_load += '.txt'
    if dataset_file:
        return vocab_to_load, dataset_to_load
    else:
        return vocab_to_load
727eda55b1e2de37d5fd00569c4efa849e27e1a9
18,137
def config_emptysection(config, section):
    """
    Create empty configuration section.

    :param config: Configuration dictionary.
    :type config: configparser.ConfigParser
    :param section: Section of ini file to return.
    :type section: str
    """
    if not config.has_section(section):
        config[section] = {}
    return config
015dc3d549bae0b162c4dcf96545af3dd5c3d2fd
18,138
def _is_utf8(filepath):
    """ Check whether a file is UTF-8 encoded. """
    with open(filepath, 'rb') as f:
        content = f.read()
    try:
        content.decode('utf-8')
    except UnicodeDecodeError:
        return False
    return True
7e92aa296122368dd4aaa2af08b9d31ac0e674f9
18,140
import click


def command_line_verbose_options(f):
    """ Decorator for specifying verbose and json output """
    f = click.option(
        "--verbose",
        is_flag=True,
        default=False,
        help="Show all URLs and metadata.",
    )(f)
    f = click.option(
        "--json-output",
        is_flag=True,
        default=False,
        help="Return the raw json content from the API.",
    )(f)
    return f
7f9222a981e6c5a1322848c2ec080a559594b653
18,141
import os


def _determine_files_dir(rd):
    """Determine the appropriate files directory for a recipe"""
    recipedir = rd.getVar('FILE_DIRNAME')
    for entry in rd.getVar('FILESPATH').split(':'):
        relpth = os.path.relpath(entry, recipedir)
        if os.sep not in relpth:
            # One (or zero) levels below only, so we don't put anything in
            # machine-specific directories
            if os.path.isdir(entry):
                return entry
    return os.path.join(recipedir, rd.getVar('BPN'))
40e2c0f92a24641bf2e2afed5f9ed637ce33898d
18,142
import math


def get_cross_points(a, b, c, image_height):
    """ solve the quadratic equation x = ay^2 + by + c

    Parameters
    ----------
    a: the first coefficient of quadratic equation
    b: the second coefficient of quadratic equation
    c: the third coefficient of quadratic equation
    image_height: the height of image

    Return
    ----------
    (number of cross_points, cross_points)
    """
    assert a
    d = b**2 - 4*a*c
    if d < 0:
        return (0, None)
    elif d == 0:
        y = -b / (2 * a)
        if (y < image_height) and (y >= 0):
            # one repeated root inside the image counts as one cross point
            return (1, y)
        else:
            return (0, None)
    else:
        y1 = (-b + math.sqrt(d)) / (2 * a)
        y2 = (-b - math.sqrt(d)) / (2 * a)
        if (y1 < image_height) and (y1 >= 0) and (y2 < image_height) and (y2 >= 0):
            return (2, (y1, y2))
        elif (y1 < image_height) and (y1 >= 0):
            return (1, y1)
        elif (y2 < image_height) and (y2 >= 0):
            return (1, y2)
        else:
            return (0, None)
d27c37195631083742c006f443e4d56e6bd21d64
18,143
def insert_nested_value(dictionary, path, value):
    """
    Given a `dictionary`, walks the `path` (creating sub-dicts as needed)
    and inserts the `value` at the bottom.

    Modifies `dictionary`, and also returns `dictionary` as a convenience.

    For example:

    >>> insert_nested_value({}, ['foo', 'bar'], 'baz')
    {"foo": {"bar": "baz"}}
    """
    # This is a pointer into our data structure allowing us to walk
    # down levels.
    breadcrumb = dictionary
    while path:
        # Remove the head of our list
        key = path.pop(0)
        # If our path is now empty, we've reached the end.
        if not path:
            breadcrumb[key] = value
        # Otherwise, we need to dig deeper, adding a layer of dict
        # if not already there.
        else:
            if key not in breadcrumb:
                breadcrumb[key] = {}
            breadcrumb = breadcrumb[key]
    return dictionary
1d2c66f2d4e05b2553bd7feb616e1ddc7c24936e
18,147
def initialize_program():
    """return a dict representation of a MILP program"""
    return {
        'variables': {},
        'constants': {},
        'constraints': {
            'A_eq': [],
            'b_eq': [],
            'A_lt': [],
            'b_lt': [],
        },
        'cost_function': {},
    }
b855dc0bd92e00e4afe58c8bb6b56b00f1495619
18,148
def tlvs_by_type(tlvs, tlv_type):
    """Return list of TLVs with matching type."""
    return [tlv for tlv in tlvs if tlv.tlv_type == tlv_type]
5adda249aef14dc5523dc0a8ec84bbe599a266f7
18,149
import pickle


def read_pickled_data(pickle_id):
    """
    Loads pickled data based on ID string.

    Parameters
    ----------
    pickle_id : str
        An ID string made of the month of the request and a hash value
        generated from the filter settings.

    Returns
    -------
    data : list
        A list containing one or more files
    """
    data = []
    with open(pickle_id + ".pickle", "rb") as f:
        for _ in range(pickle.load(f)):
            data.append(pickle.load(f))
    return data
e7f851f968880087209abf89fb23523c2653b298
18,150
import os


def walkdir(folder):
    """Walk through each file in a directory"""
    results = []
    for dirpath, dirs, files in os.walk(folder):
        for filename in files:
            # yield os.path.abspath(os.path.join(dirpath, filename))
            results.append(os.path.abspath(os.path.join(dirpath, filename)))
    return results
c16f020df646cba983e9cf773389a96b50d5cabf
18,151
def violations(norms, env):
    """ Returns a dict with key as object_id and value as a list of all
    possible violations of norms on that object """
    assert False, "Obsolete function violations should not be used"
    from verify_action_4 import check_pro_or_per, check_per, check_obl
    violations = {}
    my_dict = {x: norms[x] for x in range(1, len(norms))}
    obl_norms = {norm_no: norm for (norm_no, norm) in my_dict.items() if norm[0] == "Obl"}
    pro_norms = {norm_no: norm for (norm_no, norm) in my_dict.items() if norm[0] == "Pro"}
    per_norms = {norm_no: norm for (norm_no, norm) in my_dict.items() if norm[0] == "Per"}
    for obj in env[0]:
        viol_zones = list(env[3].keys())
        ob_viol = []
        # Pickup stage: check if pickup is prohibited with no permission
        pro_flag, pro_zone, key = check_pro_or_per(obj, "pickup", pro_norms)
        per_flag, per_zone, key = check_pro_or_per(obj, "pickup", per_norms)
        if pro_flag == 1:  # prohibition exists
            if per_flag == 0:
                ob_viol = ob_viol + [(("pickup", obj.obj_id), ("putdown", obj.obj_id, zone))
                                     for zone in viol_zones]
            else:
                if pro_zone != per_zone:
                    ob_viol = ob_viol + [(("pickup", obj.obj_id), ("putdown", obj.obj_id, zone))
                                         for zone in viol_zones]
        # Putdown stage: check for obligation and permission
        obl_flag, obl_zone, key = check_obl(obj, obl_norms)
        per_flag, per_zone, key = check_pro_or_per(obj, "putdown", per_norms)
        if obl_flag == 1:
            viol_zones.remove(int(obl_zone))
            if per_flag == 0:
                ob_viol = ob_viol + [(("pickup", obj.obj_id), ("putdown", obj.obj_id, zone))
                                     for zone in viol_zones]
            else:
                if per_zone != obl_zone:
                    viol_zones.remove(int(per_zone))
                    ob_viol = ob_viol + [(("pickup", obj.obj_id), ("putdown", obj.obj_id, zone))
                                         for zone in viol_zones]
        # Check for prohibition and permission
        pro_flag, pro_zone, key = check_pro_or_per(obj, "putdown", pro_norms)
        per_flag, per_zone, key = check_pro_or_per(obj, "putdown", per_norms)
        viol_zones = list(env[3].keys())
        if pro_flag == 1:  # prohibition exists
            if per_flag == 0:
                ob_viol.append((("pickup", obj.obj_id), ("putdown", obj.obj_id, int(pro_zone))))
            else:
                if pro_zone != per_zone:
                    ob_viol.append((("pickup", obj.obj_id), ("putdown", obj.obj_id, int(pro_zone))))
        if len(ob_viol) > 0:
            violations[obj.obj_id] = ob_viol
    return violations
2780103ff61eabef49f736d339014a4ac7dd1b64
18,153
import random


def random_real():
    """ Returns a random real +/- from [-90, 90] for testing. """
    return random.uniform(-90, 90)
2b48df3e6ccb252a262f4b5eee063183cc60ce72
18,154
def GAMMA_ASSEMBLY(X_L, X_U, D, M):
    """
    This function calculates the light absorption coefficient.

    Input:
    X_L   | Lower limit design variables                        | Py list[D]
    X_U   | Upper limit design variables                        | Py list[D]
    D     | Problem dimension                                   | Integer
    M     | Exponent value in distance                          | Float

    Output:
    GAMMA | Light absorption coefficient 1 / (X_U - X_L) ** M   | Py list[D]
    """
    GAMMA = []
    for I_COUNT in range(D):
        # use the bounds of the current dimension, not always the first one
        DISTANCE = X_U[I_COUNT] - X_L[I_COUNT]
        GAMMA.append(1 / DISTANCE ** M)
    return GAMMA
32fcf3ce72e74a6d91419ce19f29859aa5fa3ac0
18,156
import torch


def get_dihedral(pos, dihedral_index):
    """
    Args:
        pos: (N, 3)
        dihedral_index: (4, A)
    """
    n1, ctr1, ctr2, n2 = dihedral_index  # (A, )
    v_ctr = pos[ctr2] - pos[ctr1]  # (A, 3)
    v1 = pos[n1] - pos[ctr1]
    v2 = pos[n2] - pos[ctr2]
    n1 = torch.cross(v_ctr, v1, dim=-1)  # Normal vectors of the two planes
    n2 = torch.cross(v_ctr, v2, dim=-1)
    inner_prod = torch.sum(n1 * n2, dim=1, keepdim=True)  # (A, 1)
    length_prod = torch.norm(n1, dim=-1, keepdim=True) * torch.norm(n2, dim=-1, keepdim=True)
    dihedral = torch.acos(inner_prod / length_prod)
    return dihedral
2f9cc39bd7bea7a04860b7cc2f497d0910246b83
18,157
def extract_last_modified_user(gfile):
    """
    Extractor for the gfile metadata
    https://developers.google.com/drive/v2/reference/files#resource-representations
    :param gfile:
    :return:
    """
    if 'lastModifyingUser' not in gfile:
        return ''
    user = gfile['lastModifyingUser']
    email = ''
    if 'emailAddress' in user:
        email = user['emailAddress']
    name = ''
    if 'displayName' in user:
        name = user['displayName']
    return (email, name)
39ecd81da5103219d98acd750c201b3ee1f1cd2b
18,158
def get_mode_count(df):
    """
    Computes the mode and the count of the mode from an array.

    Args:
        df - data frame with ONE column

    Returns:
        df_mode - mode of the column
        df_mode_count - count for that mode
    """
    # calculate the mode and its count from the input data frame (with one column)
    df_value_counts = df.value_counts()
    df_mode = df_value_counts.index[0]
    df_mode_count = df_value_counts.iloc[0]
    return df_mode, df_mode_count
8712b6a351c6afdb328e94a633652af86a1f4eba
18,159
import json


def get_settings(settings_path):
    """ Opens the json-file with settings and loads and returns them """
    with open(settings_path) as infile:
        settings = json.load(infile)
    return settings
a4e2cfc2c63ea2c3422f42c200d1d715338f0898
18,161
def decode_dict(d, *keys):
    """Decode all keys and values."""
    decoded = {}
    for key in keys:
        if key in d:
            decoded[key.decode()] = d[key].decode()
    return decoded
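
# Example (illustrative; byte-string keys/values such as a Redis reply):
raw = {b"host": b"localhost", b"port": b"6379"}
assert decode_dict(raw, b"host", b"port") == {"host": "localhost", "port": "6379"}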
59bfe8a63ef5cb64446b2c7823218b3dae533032
18,162
def to_list(*args):
    """
    Input: args - variable number of integers represented as strings,
           e.g. to_list("15353", "025")
    Output: lst - a Python list of lists of digits,
            e.g. [[1,5,3,5,3],[0,2,5]]
    """
    lst = []
    for string in args:
        lst.append([int(digit) for digit in string])
    return lst
9cf65eccc5ec42ee91c15b2485662716c85aacab
18,163
import requests


def get_user_key(api_key: str, username: str, password: str) -> str:
    """Login the user and returns his ``user_key``

    This key can be cached since it is only invalidated by the next (api) login.

    Arguments:
        api_key {str} -- the api_key of the application.
        username {str} -- the username of the user that wants to login.
        password {str} -- the password of the user that wants to login.

    Returns:
        user_key {str} -- The ``user_key`` of the user that logged in.

    Raises:
        ValueError: Bad API request, use POST request, not GET
        ValueError: Bad API request, invalid api_dev_key
        ValueError: Bad API request, invalid login
        ValueError: Bad API request, account not active
        ValueError: Bad API request, invalid POST parameters
    """
    r = requests.post("https://pastebin.com/api/api_login.php",
                      data={
                          "api_dev_key": api_key,
                          "api_user_name": username,
                          "api_user_password": password
                      })
    try:
        r.raise_for_status()
    except requests.HTTPError:
        raise ValueError(r.text)
    return r.text
92dd0cedfd5364378d5381b0a64844e7f789f62d
18,164
import toml


def readToml(filename):
    """Read a single TOML configuration file, or return an empty dict
    if it doesn't exist.
    """
    try:
        with open(filename, encoding='utf-8') as fp:
            return toml.load(fp)
    except OSError:
        return {}
    except toml.TomlDecodeError as err:
        raise RuntimeError(
            f'could not read NICOS config file at {filename!r},'
            f' please make sure it is valid TOML: {err}') from None
b5d0761015cd1fbeb94bfb771a7ac30fb2c35d3d
18,165
def get_all_storage_units(req):
    """Return only the list of Storage Units from the request"""
    storage_units = []
    for dist_grp in req.distribute_groups:
        storage_units += dist_grp.storage_units
    return storage_units
82b4403f8a63b309f35a08879f9585932272c3ff
18,166
def compute_gradient(X, Y, Theta):
    """
    Computes the cost gradient

    X     = m*n
    Y     = m*1
    Theta = n*1

    gradient = (1/m) * X_transpose * (X*Theta - Y)
    """
    (m, n) = X.shape
    return (1.0 / m) * (X.T) * (X * Theta - Y)
6348e2099a8ae6e32f6150bcaf1124edadc1c798
18,167
def get_doc_id(element_tree):
    """
    returns the document ID of a SaltXML document.

    :param element_tree: an ElementTree that represents a complete SaltXML document
    :type element_tree: ``lxml.etree._ElementTree``
    """
    id_element = element_tree.xpath('labels[@name="id"]')[0]
    return id_element.attrib['valueString']
64e3e2abda9a0182866cc34b2f510a6c6dffe05b
18,168
def cleanDotNodes(graph):
    """Remove unnecessary data from node list"""
    nodes = graph.get_node_list()
    tmp = [node for node in nodes if node.name.startswith('"')]
    for node in nodes:
        node.name = node.name.replace('"', '')
    # There are some strange issues with what nodes are returned
    # across different operating systems and versions of pydot, pyparsing,
    # and Graphviz. This is just a quick fix. Need to investigate this further
    if tmp:
        return tmp
    else:
        return nodes
b5861c60b6054f5e9701e2acc94cc6a8a0621fb8
18,169
def jwt_get_user_secret_key(user):
    """
    Override how the JWT secret is generated.
    :param user:
    :return:
    """
    return str(user.secret)
97bc4313e405657805ef4a82fb020baef04863d0
18,170
def convertMinuteDecimalToDregrees(toconvert):
    """ Convert decimal minutes to degrees """
    converted = []
    for toc in toconvert:
        converted.append(float(toc) / 60)
    return converted
7c9f839ccc80f1d2a660ffc51fd05e91304e6683
18,171
def marquage_ligne_a(matrix):
    """a- Returns a list of indices of the rows containing no boxed ('E') zero"""
    ligne_marque_a = list()
    for y, y_elt in enumerate(matrix):
        if 'E' not in y_elt:
            ligne_marque_a.append(y)
    return ligne_marque_a
da91effb7562f559008c8e72f2a87c4f701d094c
18,173
def cli(ctx, value):
    """Get a specific canned key

    Output:

        A dictionary containing the canned key description
    """
    return ctx.gi.cannedkeys.show_key(value)
b13c15e104c9c129b2efcc158c34b79f623fbb50
18,174
def get_subtrees(tree_mem):
    """
    Break the tree membership information into which subtrees of a given
    size are present; preparatory step for saving computational steps.

    Parameters
    ----------
    tree_mem : dictionary
        Contains the group information with the leaves as keys and the list
        of groups as values

    Returns
    -------
    subtree_sizes : dictionary
        Contains the subtree information: keys represent the size of the
        subtree, values are a tuple of leaf names, rank_id, g_id
    """
    # obtain the maximum rank size
    keys = list(tree_mem.keys())
    max_rank = len(tree_mem[keys[0]])
    subtree_sizes = dict()
    # for each rank
    for i in range(0, max_rank):
        # blank the dictionary for the current subtree
        subtree = dict()
        # for each of the leaves in the tree membership dictionary, rearrange
        # the information so that the key is the g_id and the value is a list
        # of all the leaves that are in that subtree
        for key in tree_mem.keys():
            tree_id = tree_mem[key][i]
            if tree_id not in subtree.keys():
                subtree[tree_id] = [key]
            else:
                s_tree = list(subtree[tree_id])
                s_tree.append(key)
                subtree[tree_id] = s_tree
        # add to the dictionary of subtrees with the size of the subtree as
        # a key; the values are tuples that contain a list of the leaves and
        # the rank and group ids
        for key in subtree:
            size = len(subtree[key])
            if size not in subtree_sizes.keys():
                subtree_sizes[size] = [(subtree[key], i, key)]
            else:
                temp = list(subtree_sizes[size])
                temp.append((subtree[key], i, key))
                subtree_sizes[size] = temp
    return subtree_sizes
fdca6e8fb8c82b896748a2599ab5a25eac377108
18,175
from datetime import datetime


def create_filename():
    """I wanted a properly unique and readable date and time as the file name."""
    global time_stamp
    time_stamp = datetime.now().strftime('%d-%b-%Y-%H.%M-%Ss')
    return time_stamp
9d2706bc745fc636bffb98a862ec07d76d8714d5
18,176
def _deep_merge_dicts(defaults: dict, updates: dict) -> dict:
    """Merges two dicts recursively, with updates taking precedence.

    Note that this modifies defaults in place.
    """
    for k, u in updates.items():
        if k not in defaults:
            defaults[k] = u
        else:
            v = defaults[k]
            if isinstance(u, dict) and isinstance(v, dict):
                defaults[k] = _deep_merge_dicts(v, u)
            else:
                defaults[k] = u
    return defaults
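
# Example (illustrative): nested keys from `updates` win, other keys survive.
base = {"a": 1, "nested": {"x": 1, "y": 2}}
assert _deep_merge_dicts(base, {"nested": {"y": 3}, "b": 2}) == \
    {"a": 1, "nested": {"x": 1, "y": 3}, "b": 2}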
90ea8c5e81998a51005338c0d67cb6a07a4291f2
18,178
def thread_helper(extractor):
    """!
    @brief Supports multiprocess color extraction operations.

    @param extractor The Extractor object for which to run extraction process.

    @return The Extractor object.
    """
    extractor.run()
    return extractor
e032f0eb81897eba39c0773cf0f7805f66e8f052
18,179
import shutil


def test_cmtk_install() -> int:
    """
    Tries to determine if CMTK is installed and whether individual binaries
    are directly accessible or are accessible via the cmtk script call

    :return: -1: No cmtk install detected, 0: Direct call, 1: call via cmtk script
    """
    if shutil.which("warp") is not None:
        return 0
    if shutil.which("cmtk") is not None:
        return 1
    return -1
1be574fc3b5f9f7bd41056591e5681057ae0ebd4
18,180
def compute_image_shape(width: int, height: int, fmt: str) -> tuple:
    """Compute numpy array shape for a given image.

    The output image shape is 2-dim for grayscale, and 3-dim for color images:

    * ``shape = (height, width)`` for FITS images with one grayscale channel
    * ``shape = (height, width, 3)`` for JPG images with three RGB channels
    * ``shape = (height, width, 4)`` for PNG images with four RGBA channels

    Parameters
    ----------
    width, height : int
        Width and height of the image
    fmt : {'fits', 'jpg', 'png'}
        Image format

    Returns
    -------
    shape : tuple
        Numpy array shape
    """
    if fmt == 'fits':
        return height, width
    elif fmt == 'jpg':
        return height, width, 3
    elif fmt == 'png':
        return height, width, 4
    else:
        raise ValueError(f'Invalid format: {fmt}')
f154b3e97ef8f700517985a520104ed26b041a4a
18,181
def unpack_payload(func):
    """Decorator to run admin shell functions.

    Unpacks payload metadata and passes it as kwargs to the actual callback.
    """
    def wrapper(resource, event, trigger, payload):
        # Use only with MetadataEventPayload
        if payload:
            func(resource, event, trigger, **payload.metadata)
        else:
            func(resource, event, trigger)
    return wrapper
dbb8fe5c89ef14a30c92e7df3c76af310db56ae4
18,182
def summarize_file(file_path):
    """Summarizes the provided file by some basic measurements.

    Returns:
        A tuple containing how many (bytes, words, lines) it contains,
        and what is the maximum character count in one line.
    """
    if file_path is None:
        return
    bytes_ = 0
    words = 0
    # start from zero so an N-line file reports N lines
    lines = 0
    max_line_width = 0
    with open(file_path) as file:
        for line in file:
            if len(line) > max_line_width:
                max_line_width = len(line)
            lines += 1
            words += len(line.split())
            bytes_ += len(line.encode())
    return (bytes_, words, lines, max_line_width)
63b56a976f00cf86fc384abdc304b97690f32554
18,183
def compute_products(of_adjacent_digits, in_number):
    """Compute the products of all N-sized groups of adjacent digits in a
    predefined number."""
    # Convert said number to a string.
    numberToString = str(in_number)
    # Register the list of digit group products.
    products = []
    # Build said groups.
    for i in range(len(numberToString) - of_adjacent_digits + 1):
        digit_group = numberToString[i:of_adjacent_digits + i]
        # Register the digit product of the current group.
        product = 1
        for ii in digit_group:
            product = product * int(ii)
        products.append(product)
    return products
56ef92514f2c24a707a3fd174d8bede0d0454ef9
18,184
import os


def get_rapidtide_root():
    """
    Returns the path to the base rapidtide directory, terminated with separator.
    Based on function by Yaroslav Halchenko used in Neurosynth Python package.
    """
    thisdir, thisfile = os.path.split(os.path.join(os.path.realpath(__file__)))
    return os.path.join(thisdir, '..') + os.path.sep
2abfb4078e0fc37a3ce1b3772da129190cf15501
18,185
def romanToInt(s):
    """Calculates value of Roman numeral string

    Args:
        s (string): String of Roman numeral characters being analyzed

    Returns:
        int: integer value of Roman numeral string
    """
    sum = 0
    i = 0
    while i < len(s):
        if s[i] == 'M':
            sum += 1000
        elif s[i] == 'D':
            sum += 500
        elif s[i] == 'L':
            sum += 50
        elif s[i] == 'V':
            sum += 5
        elif s[i] == 'C':
            if i < len(s) - 1:
                if s[i + 1] == 'M':
                    sum += 900
                    i += 1
                elif s[i + 1] == 'D':
                    sum += 400
                    i += 1
                else:
                    sum += 100
            else:
                sum += 100
        elif s[i] == 'X':
            if i < len(s) - 1:
                if s[i + 1] == 'C':
                    sum += 90
                    i += 1
                elif s[i + 1] == 'L':
                    sum += 40
                    i += 1
                else:
                    sum += 10
            else:
                sum += 10
        elif s[i] == 'I':
            if i < len(s) - 1:
                if s[i + 1] == 'X':
                    sum += 9
                    i += 1
                elif s[i + 1] == 'V':
                    sum += 4
                    i += 1
                else:
                    sum += 1
            else:
                sum += 1
        else:
            return "Something went wrong"
        i += 1
    return sum
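
# Example (illustrative): subtractive pairs are handled for I, X and C.
assert romanToInt("MCMXCIV") == 1994
assert romanToInt("XIV") == 14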
71e6fdb08c2d705dbd88764e5ee1441ee6397d14
18,186
import os


def expanded_abspath(p):
    """Return absolute path with user ``~`` expanded for path `p`."""
    return os.path.abspath(os.path.expanduser(p))
a8aa36f73499c960d2cdda99dea5ea9d584afc3e
18,187
def get_cart_location(env, screen_width):
    """
    Returns the position of the middle of the cart
    :param env:
    :param screen_width:
    :return:
    """
    world_width = env.x_threshold * 2
    scale = screen_width / world_width
    return int(env.state[0] * scale + screen_width / 2.0)
2b245964e1ce8b70a7964766a13a14e7759e48bf
18,188
import os


def find_cache_base_dir(cache_base_dir=None):
    """
    Finds the base cache directory for caching operations

    Arguments
    ---------
    cache_base_dir: string, optional
        Defaults to None. If None, then the cache directory is searched in
        the environment variable 'SCATTERING_CACHE'. If the latter does not
        exist (so returns None), then the default base cache directory is:
        "~/scattering_cache"

    Returns
    -------
    cache_base_dir: string
        The path to the cache base directory.
    """
    if cache_base_dir is None:
        scattering_cache = os.environ.get('SCATTERING_CACHE')
        if scattering_cache is None:
            return os.path.join(os.path.expanduser("~"), "scattering_cache")
        else:
            return scattering_cache
    else:
        return cache_base_dir
3a581e578f7e37144314b3582c18ea966ca58f06
18,190
def is_happy(n):
    """
    Determines whether the number n is a "happy number".  Int -> Bool

    Notes:
        Uses str() to split digits (slower, less efficient)
        Uses a set to contain the sequence generated from n
    """
    n_sequence = set()
    while n != 1:
        s = str(n)
        n = sum(pow(int(x), 2) for x in s)
        if n in n_sequence:
            return False
        n_sequence.add(n)
    return True
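
# Example (illustrative): 19 -> 82 -> 68 -> 100 -> 1, so 19 is happy,
# while 4 enters the well-known 4 -> 16 -> ... -> 4 cycle.
assert is_happy(19) is True
assert is_happy(4) is False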
30068b5e6e6f0a4aa1fbcb91fc062b663be5a0c1
18,191
def _get_ec2_range(column_name, min_value, max_value, quotes='"'):
    """ Return a clause that restricts a column to a value range. """
    if min_value and max_value:
        return('and %s>=%s and %s<=%s' % (column_name,
                                          '%s%s%s' % (quotes, min_value, quotes),
                                          column_name,
                                          '%s%s%s' % (quotes, max_value, quotes)))
    if min_value:
        return('and %s>=%s' % (column_name, '%s%s%s' % (quotes, min_value, quotes)))
    if max_value:
        return('and %s<=%s' % (column_name, '%s%s%s' % (quotes, max_value, quotes)))
271dc11dbfd0149f2a44f31b580a677cef67bdd8
18,192
import numpy as np


def estimate_order_of_convergence(x, y):
    """Computes an estimate of the order of convergence in the
    least-square sense.

    This assumes that the :math:`(x, y)` pair follows a law of the form

    .. math::

        y = m x^p

    and estimates the constant :math:`m` and power :math:`p`.
    """
    assert x.size == y.size
    if x.size <= 1:
        raise RuntimeError("need at least two values to estimate order")
    c = np.polyfit(np.log10(x), np.log10(y), 1)
    return 10 ** c[-1], c[-2]
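
# Example (illustrative): data following y = 3 * x**2 exactly should
# recover the constant m ~ 3 and the power p ~ 2.
x = np.array([1.0, 2.0, 4.0, 8.0])
m, p = estimate_order_of_convergence(x, 3.0 * x**2)
assert abs(m - 3.0) < 1e-6 and abs(p - 2.0) < 1e-6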
e1e1d72006d133f58e6ae1b204217b394ac57e19
18,194
import math


def cosine_lr_scheduler(
    lr: float,
    n_warmup: int = 0,
    warmup_init_lr: float = -1,
    min_lr: float = 0.0,
    t_mult: float = 1.0,
    lr_period_updates: float = -1,
    lr_shrink: float = 0.1,
    max_update: int = -1,
):
    """Cosine annealing learning rate scheduler with warmup and step decay
    for LambdaLR.

    Based on fairseq.optim.lr_scheduler.cosine_lr_scheduler.

    Args:
        lr (float): (Maximum) learning rate.
        n_warmup (int): Number of warmup steps with a linear lr increase.
            Default is 0.
        warmup_init_lr (float): Initial learning rate during warmup phase.
            Default is `lr`.
        min_lr (float): Minimum learning rate during cosine annealing.
            Default is 0.
        t_mult (float): Factor to grow the length of each period. Default is 1.
        lr_period_updates (float): Initial number of updates per period.
        lr_shrink (float): Shrink factor for each period. Default 0.1.
        max_update (int): Number of maximum updates (epochs). If specified,
            will result in 1 period over all updates.
    """
    max_lr_base = lr
    min_lr_base = min_lr
    warmup_end_lr = max_lr_base
    warmup_init_lr = min_lr if warmup_init_lr < 0 else warmup_init_lr
    period = lr_period_updates
    if period <= 0:
        assert max_update > 0, "Either lr_period_updates or max_update must be set."
        period = max_update - n_warmup
    if n_warmup > 0:
        step_lr = (warmup_end_lr - warmup_init_lr) / n_warmup
    else:
        step_lr = 1
    lr_shrink_base = lr_shrink

    def step(epoch: int) -> float:
        if epoch < n_warmup:
            return (warmup_init_lr + epoch * step_lr) / max_lr_base
        cur_updates = epoch - n_warmup
        if t_mult != 1:
            i = math.floor(math.log(1 - cur_updates / period * (1 - t_mult), t_mult))
            t_i = t_mult**i * period
            t_cur = cur_updates - (1 - t_mult**i) / (1 - t_mult) * period
        else:
            i = math.floor(cur_updates / period)
            t_i = period
            t_cur = cur_updates - (period * i)
        lr_shrink = lr_shrink_base**i
        min_lr = min_lr_base * lr_shrink
        max_lr = max_lr_base * lr_shrink
        return (
            min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_cur / t_i))
        ) / max_lr_base

    return step
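
# Example wiring (illustrative; assumes PyTorch is available): `step`
# returns a multiplier of the base lr, which is what LambdaLR expects.
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=1.0)
sched = torch.optim.lr_scheduler.LambdaLR(
    opt, lr_lambda=cosine_lr_scheduler(lr=1.0, n_warmup=5, max_update=100))
opt.step()
sched.step()  # advances the warmup/cosine schedule by one epoch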
7438511ad48cf30fa706e5c4fa4d234919a78234
18,196
from typing import Tuple


def decomress_coords(coords_data: bytes) -> Tuple[int, int]:
    """Recover a cell's coordinates from byte data."""
    cell_x, cell_y = coords_data
    # the subtraction of one mirrors what `compress_coords` does
    return cell_x - 1, cell_y - 1
cb91d0dbc13076f50d9359867469ea293c227383
18,197
def bytestr_to_int(s: bytes) -> int:
    """converts bytes to integer"""
    i = 0
    for char in s:
        i <<= 8
        i |= char
    return i
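
# Example (illustrative): bytes are interpreted big-endian.
assert bytestr_to_int(b"\x01\x00") == 256
assert bytestr_to_int(b"\xff") == 255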
c9a7fcd0ff347c8ac99bf812cfffd9a61dcfd785
18,199
def sanitize(string):
    """ Turns '-' into '_' for accumulo table names """
    return string.replace('-', '_')
2a86fff76b6d504be7981877612c4b1965d61e4e
18,200
def handle_node_attribute(node, tag_name, attribute_name):
    """Return the contents of a tag based on its given name inside of a given node."""
    element = node.getElementsByTagName(tag_name)
    attr = element.item(0).getAttribute(attribute_name)
    return attr
8bbde7ecf335ce37b1fc55e3472aa07af6b7806a
18,201
def select_trees(gdf, subplot):
    """
    Args:
        gdf: a geopandas dataframe
        subplot: a shapely box

    Returns:
        selected_trees: pandas dataframe of trees
    """
    selected_trees = gdf[gdf.intersects(subplot)]
    return selected_trees
e0c4acea2622c839fb4afe124aae0a33af012e0f
18,203
def getpoint(acad, point, inputstring="Click on Drawing to get point"):
    """
    acad: AutoCAD application object
    point: base point
    inputstring: prompt shown to the user (optional)
    """
    return acad.doc.Utility.GetPoint(point, inputstring)
921de5fa8511d43b2a9bc33122d2ee7bd1025a57
18,205
def get_pending_registration_ip_set(ip_from_dns_set, ip_from_target_group_set):
    """
    Get a set of IPs that are pending registration.

    Pending-registration IPs meet all the following conditions:
      1. IPs that are currently in the DNS
      2. Those IPs must not have been registered yet

    :param ip_from_target_group_set: a set of IPs that are currently registered with a target group
    :param ip_from_dns_set: a set of IPs that are in the DNS
    """
    pending_registration_ip_set = ip_from_dns_set - ip_from_target_group_set
    return pending_registration_ip_set
0bc0c23093bf9881421c5ef2b1307b099ed10384
18,206
def echo(message):
    """
    :param message: the message to echo
    :returns: the given message
    """
    return message
d6193277965c15ce7d3768c5c92bc3dee9aeaa1e
18,207
def get_identical_attributes(features, exclude=None):
    """Return a dictionary of all key-value pairs that are identical
    for all |SegmentChains| in `features`

    Parameters
    ----------
    features : list
        list of |SegmentChains|

    exclude : set
        attributes to exclude from identity criteria

    Returns
    -------
    dict
        Dictionary of all key-value pairs that have identical values in all
        the `attr` dictionaries of all the features in `features`
    """
    exclude = [] if exclude is None else exclude
    common_keys = set(features[0].attr.keys())
    for feature in features:
        common_keys &= set(feature.attr.keys())
    common_keys -= set(exclude)
    dtmp = {K: features[0].attr[K] for K in common_keys
            if all([X.attr[K] == features[0].attr[K] for X in features])}
    return dtmp
efe7e913c7bb5d4f69f50241a999fb85fdd72800
18,208
def redis_prime_logic__style_2_domain(redis_client, dbDomain, redis_timeouts):
    """
    returns the certificate

    :param redis_client:
    :param dbDomain: A :class:`model.objects.Domain`
    :param redis_timeouts:

    REDIS KEY PREFIXES:
        d2: domain
    """
    dbCertificateSigned = None
    if dbDomain.certificate_signed_id__latest_multi:
        dbCertificateSigned = dbDomain.certificate_signed__latest_multi
    elif dbDomain.certificate_signed_id__latest_single:
        dbCertificateSigned = dbDomain.certificate_signed__latest_single
    else:
        raise ValueError("this domain is not active: `%s`" % dbDomain.domain_name)
    # the domain will hold the fullchain and private key
    key_redis = "d2:%s" % dbDomain.domain_name
    value_redis = {
        "f": "%s" % dbCertificateSigned.cert_fullchain_pem,
        "p": "%s" % dbCertificateSigned.private_key.key_pem,
    }
    redis_client.hmset(key_redis, value_redis)
    return dbCertificateSigned
aa293ece422a35051f68ee452c35ee4b2b3d98bb
18,212
def compute_avg_over_multiple_runs(number_episodes, number_runs,
                                   y_all_reward, y_all_cum_reward, y_all_timesteps):
    """
    Compute the average reward, cumulative reward and timesteps over
    multiple runs (different dates).
    """
    y_final_reward = []
    y_final_cum_reward = []
    y_final_timesteps = []
    for array_index in range(0, number_episodes):
        sum_r = 0
        sum_cr = 0
        sum_t = 0
        count = 0
        for date_index in range(0, number_runs):
            # compute average
            sum_r += y_all_reward[date_index][array_index]
            sum_cr += y_all_cum_reward[date_index][array_index]
            sum_t += y_all_timesteps[date_index][array_index]
            count += 1
        y_final_reward.append(sum_r / float(count))
        y_final_cum_reward.append(sum_cr / float(count))
        y_final_timesteps.append(sum_t / float(count))
    return y_final_reward, y_final_cum_reward, y_final_timesteps
a991f1f71ada7852a6ed94d7764d8112c6015cd1
18,213
import sys
import traceback


def get_raising_file_and_line(tb=None):
    """Get the file and line number where an exception happened.

    :param tb: the traceback (uses the most recent exception if not given)
    :return: a tuple of the filename and line number
    :rtype: (str, int)

    .. deprecated:: 7.0
        Use Python's built-in logging system, with the ``logger.exception``
        method. This method makes sure to log the exception with the traceback
        and the relevant information (filename, line number, etc.).
    """
    if not tb:
        tb = sys.exc_info()[2]
    filename, lineno, _context, _line = traceback.extract_tb(tb)[-1]
    return filename, lineno
2bb531572a58045fc57688c989e0c7003644fc36
18,214
def db36(s):
    """Convert a Redis base36 ID to an integer, stripping any prefix
    present beforehand."""
    if s[:3] in 't1_t2_t3_t4_t5_':
        s = s[3:]
    return int(s, 36)
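
# Example (illustrative): reddit-style fullnames drop the type prefix.
assert db36("t3_z9") == 1269   # int("z9", 36)
assert db36("100") == 1296     # 36 ** 2, no prefix to strip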
0946bb125b17afec7803adf3654af7250047f9d6
18,215
from typing import Optional

import torch


def _conj(z, out: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Element-wise complex conjugate of a Tensor with complex entries
    described through their real and imaginary parts.

    Can work in place in case `out` is `z`.
    """
    if out is None or out.shape != z.shape:
        out = torch.zeros_like(z)
    out[..., 0] = z[..., 0]
    out[..., 1] = -z[..., 1]
    return out
015e9fc6d44f079ec157d659cc68684bef1b9e87
18,216
def split_touched(files):
    """Splits files that are touched vs files that are read."""
    tracked = []
    touched = []
    for f in files:
        if f.size:
            tracked.append(f)
        else:
            touched.append(f)
    return tracked, touched
574772b7cec285ca9463bbebeab90502933f35a4
18,218
def fromstringtolist(strlist, map_function=int):
    """
    Read a list written as a string.

    map_function: function to convert from string to the suited element type
    """
    if strlist in ("None", None):
        return None
    list_elems = strlist[1:-1].split(",")
    toreturn = [map_function(elem) for elem in list_elems]
    return toreturn
23ec31783c66fdb05420a3e233102111a400a53f
18,219
import numpy


def spectral_function(eig_p, eig_m, eigv_p, eigv_m, rdm, i, j, window,
                      num_omega, mu=0.0, eta=0.05):
    """ """
    om_min, om_max = window

    def a_omega(omega, rdm_i, eigs, eigv, diag=False):
        spec = 0.0
        for nu in range(len(eigs)):
            nume = numpy.dot(eigv[:, nu].conj(), rdm_i)
            denom = (omega + mu - eigs[nu])**2.0 + eta**2.0
            if diag:
                nume = abs(nume)**2
            spec += eta/numpy.pi * nume / denom
        return spec

    Ah = numpy.zeros(num_omega)
    Ap = numpy.zeros(num_omega)
    num_eig = len(eig_p)
    I = numpy.eye(rdm.shape[0])
    omegas = numpy.linspace(om_min, om_max, num_omega)
    rdm = rdm / rdm.trace()
    if i == j:
        gh = rdm[:, i]
        gp = (2.0*I - rdm)[i, :]
        # norm_h = numpy.sum(numpy.abs(numpy.dot(eigv_m.T, gh))**2.0, axis=0)
        # norm_p = numpy.sum(numpy.abs(numpy.dot(eigv_p.T, gp))**2.0, axis=0)
        # norm = norm_h + norm_p
        for io, om in enumerate(omegas):
            Ah[io] = a_omega(om, gh, -eig_m, eigv_m, diag=True)
            Ap[io] = a_omega(om, gp, eig_p, eigv_p, diag=True)
        # Normalise.
        # norm = scipy.integrate.trapz(Ap+Ah, omegas)
    return omegas, Ah, Ap
0fd1f4b41b18c9947d2b9e01246207a2ad2a16a5
18,220
import os
import pathlib


def parse_config_path(fpath):
    """
    Parse a path in config files.

    (1) If the path does not contain '{CORE}' or '{DATA}', it is a
        non-default path, so return unchanged.
    (2) If the path contains '{CORE}', it is a default that is going into the
        platform package directory structure. (Why? These are the only
        directories that we know exist, other than the current directory.)
        (a) The term '{CORE}' is replaced with the directory of this file
            (assumed to be the base of the package), or '{DATA}' with
            '{CORE}/data'.
        (b) All directory separators ("/", "\\") are replaced with the
            correct path separator for the OS.

    :param fpath: str
    :return: str
    """
    if not (('{CORE}' in fpath) or ('{DATA}' in fpath) or ('{PARENT}' in fpath)):
        return fpath
    # Default path; go to work. Make all separators the same.
    fpath = fpath.replace('\\', '/')
    fpath_s = fpath.split('/')
    new_path = os.path.sep.join(fpath_s)
    new_path = new_path.replace('{PARENT}',
                                str(pathlib.Path(os.path.dirname(__file__)).parent))
    new_path = new_path.replace('{DATA}', os.path.join('{CORE}', 'data'))
    new_path = new_path.replace('{CORE}', os.path.dirname(__file__))
    return new_path
7b4121fbccfee17a9e6dc0c511bd80bc4276128a
18,221
def str_to_c_string(string):
    """Converts a Python bytes to a C++ string literal.

    >>> str_to_c_string(b'abc\x8c')
    '"abc\\\\x8c"'
    """
    # str.removeprefix requires Python 3.9+
    return repr(string).replace("'", '"').removeprefix('b')
5316e61282d3ce3a807764588904529291453a37
18,222
def iuo(z1, z2):
    """
    intersection over union
    :param z1: polygon
    :param z2: polygon
    returns area(z1 intersection z2) / area(z1 union z2)
    """
    # assumes shapely geometries: the validity flag is `is_valid`, and the
    # ratio must be taken over the areas of the resulting geometries
    assert z1.is_valid
    assert z2.is_valid
    return z1.intersection(z2).area / z1.union(z2).area
c7f682833a82c16fe0959fbed16980188b45dade
18,223
def round_and_clip_image(image):
    """
    Given a dictionary, ensure that the values in the 'pixels' list are all
    integers in the range [0, 255].

    All values should be converted to integers using Python's `round` function.

    Any locations with values higher than 255 in the input should have value
    255 in the output; and any locations with values lower than 0 in the input
    should have value 0 in the output.
    """
    result = image.copy()
    s = len(result['pixels'])
    for i in range(s):
        x = round(result['pixels'][i])
        if x < 0:
            x = 0
        elif x > 255:
            x = 255
        result['pixels'][i] = x
    return result
2c50a431be17fb203ffcc1136e91e2ffa97a7337
18,224
def bokeh_args(app_path, args):
    """Translate from forest to bokeh serve command."""
    opts = ["bokeh", "serve", app_path]
    if args.dev:
        opts += ["--dev"]
    if args.show:
        opts += ["--show"]
    if args.port:
        opts += ["--port", str(args.port)]
    if args.allow_websocket_origin:
        opts += ["--allow-websocket-origin", str(args.allow_websocket_origin)]
    extra = []
    if args.config_file is not None:
        extra += ["--config-file", str(args.config_file)]
    if args.database is not None:
        extra += ["--database", str(args.database)]
    if args.directory is not None:
        extra += ["--directory", str(args.directory)]
    if args.file_type != "unified_model":
        extra += ["--file-type", str(args.file_type)]
    if len(args.files) > 0:
        extra += args.files
    if len(extra) > 0:
        opts += ["--args"] + extra
    return opts
c957528c9b00e8e2ebf2b041ba27b8d4d6a84f2a
18,225
import requests


def getURLChain(targetURL):
    """For the given URL, return the chain of URLs following any redirects"""
    ok = False
    chain = []
    try:
        r = requests.head(targetURL, allow_redirects=True)
        ok = r.status_code == requests.codes.ok  # pylint: disable=no-member
        if ok:
            for resp in r.history:
                chain.append(resp.url)
    except (requests.exceptions.RequestException,
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.URLRequired,
            requests.exceptions.TooManyRedirects,
            requests.exceptions.Timeout):
        ok = False
    return (ok, chain)
ecdfab9aff08035e8f830d67ba1380782ae82e6d
18,226
def setDefaultAlgoParams():
    """This module creates a default template of the algorithm parameters.

    Returns
    -------
    algoParam : dict
        a dictionary with keys and values (default values are in bold):

        * **PROBLEM: 'binary'**, 'regression'
        * **REG: 'elasticnet'**, 'group'
        * **LOSS: 'logistic'** (classification), 'sq_hinge' (classification),
          'leastsq' (classification/regression), 'huber' (regression),
          'pseudo_huber' (regression)
        * **LAMBDA: 1.0** (Regularization parameter)
        * **ALPHA: 0.0** (Elastic-Net parameter)
        * **MU: 0.1** ((Pseudo)-Huber threshold parameter)
        * **SCALE: 'Uniform'**, 'Normalize', 'None'
        * **RHO: 1.0** (ADMM augmented Lagrangian penalty term)
        * **RHO_INITIAL: 0**, 1 (0 = Constant (RHO = LAMBDA), 1 = Goldstein (pending))
        * **RHO_ADAPTIVE_FLAG: False**, True
        * **MAX_ITER: 100** (Max. iterations for ADMM updates)
        * **PRIM_TOL: 1e-4** (Error tolerance of relative primal residual)
        * **DUAL_TOL: 1e-4** (Error tolerance of relative dual residual)
        * **MAX_INNER_ITER: 10** (Max. iterations for internal Newton updates)
        * **INNER_TOL: 1e-6** (Relative error tolerance of internal iterations)
        * **VERBOSE: 0** (no print), 1 (print)
    """
    algoParam = {
        'PROBLEM': 'binary',         # 'binary' (default), 'regression', multiclass (pending)
        'REG': 'elasticnet',         # 'elasticnet' (default), 'group', 'scad' (non-convex, pending)
        'LOSS': 'logistic',          # Classification: 'logistic' (default), 'sq_hinge', 'leastsq'
                                     # Regression: 'leastsq', 'huber', 'pseudo_huber'
        'LAMBDA': 1.0,               # Regularization parameter 'lambda'
        'ALPHA': 0.0,                # Elastic-Net: (Alpha)*norm(w,1) + (1-Alpha)/2 * norm(w,2)^2
        'SCALE': 'Uniform',          # 'Uniform' (default), 'Normalize', 'None'
        'MAX_ITER': 100,             # Max outer iterations
        'MAX_INNER_ITER': 10,        # Max inner Newton iterations: logistic, huber, pseudo-huber
        'RHO': 1.0,                  # RHO for ADMM
        'RHO_INITIAL': 0,            # 0 = Constant (RHO), 1 = Goldstein
        'RHO_ADAPTIVE_FLAG': False,  # True = adaptive RHO per step, False = non-adaptive
        'PRIM_TOL': 1e-4,            # Relative tolerance, primal residual
        'DUAL_TOL': 1e-4,            # Relative tolerance, dual residual
        'INNER_TOL': 1e-6,           # Inner Newton update tolerance level
        'N': 0,                      # Number of samples. Has to be set.
        'D': 0,                      # Number of dimensions. Has to be set.
        'K': 1,                      # Number of classes (binary = 1, multiclass > 1 (pending))
        'EIG_VALS_FLAG': 0,          # 0 = SCIPY (exact), 1 = APPROX (pending)
        'MU': 0.1,                   # Threshold for Huber loss
        'MU_MAX': 1.0,               # Huber: start from an easier problem
        'VERBOSE': 0,                # 0 = no prints, 1 = prints
    }
    return algoParam
a5973bd28d7d96936937919112d3f36db1b04cb4
18,227
def dict_get_path(data, path, default=None):
    """
    Returns the value inside a nested structure of data located at the
    period-delimited path.

    When traversing a list, as long as that list contains objects of type
    dict, items in that list will have their "name" and "type" values tested
    against the current key in the path.

    Args:
        data (dict or list): data to traverse
        path (str): '.' delimited string

    Kwargs:
        default: value to return if path does not exist
    """
    keys = path.split(".")
    for k in keys:
        if type(data) == list:
            found = False
            for item in data:
                name = item.get("name", item.get("type"))
                if name == k:
                    found = True
                    data = item
                    break
            if not found:
                return default
        elif type(data) == dict:
            if k in data:
                data = data[k]
            else:
                return default
        else:
            return default
    return data
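
# Example (illustrative): list entries are matched on their "name" field.
data = {"jobs": [{"name": "build", "steps": 3}]}
assert dict_get_path(data, "jobs.build.steps") == 3
assert dict_get_path(data, "jobs.test", default=0) == 0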
00cc29d35f23ebff77c8d66ac95c863b70240f17
18,229
def generate_deletes_of_word(word):
    """does not include the original word"""
    deletes = {}
    for i in range(len(word)):
        deletes[word[:i] + word[i + 1:]] = [word]
    return deletes
6526d1481dc319cabec4e2db34d97c2cf42b0856
18,230
def raises_keyerr(k, m):
    """
    Determine whether a mapping is missing a particular key.

    This helper is useful for explicitly routing execution through __getitem__
    rather than using the __contains__ implementation.

    :param object k: the key to check for status as missing
    :param Mapping m: the key-value collection to query
    :return bool: whether the requested key is missing from the given mapping,
        with "missing" determined by KeyError encounter during __getitem__
    """
    try:
        m[k]
    except KeyError:
        return True
    else:
        return False
5244f18065f89c6a4c22e11a3b08b7e9628bd616
18,231
def struct(T):
    """ Identifies a pure-data class that may be passed to the API """
    return T
7bfc4826c013b1e3208ed930f1926b8debcca1dd
18,233
def bold(content):
    """Corresponds to ``**content**`` in the markup.

    :param content: HTML that will go inside the tags.

    >>> 'i said be ' + bold('careful')
    'i said be <b>careful</b>'
    """
    return '<b>' + content + '</b>'
6ee44da053fe21932b7db5003fc1c40526fa9ec9
18,234
from typing import Union
import os
import sys


def _get_constant(key: str) -> Union[str, list]:
    """
    Returns a string value for a constant used elsewhere in the code.
    All constants are defined here.

    Args:
        key (str): Key for the constant.

    Returns:
        (str, list): The constant.
    """
    d_constants = {
        # Files, folders, OS.
        "CURR_DIR": str(os.path.dirname(os.path.realpath(__file__))),
        "TEMPLATES_FOLDER": str(os.path.dirname(os.path.realpath(__file__))) + "/config_templates",
        "PLATFORM": sys.platform,
        "DOCKER_LOG_FOLDER": "docker_build_logs",
        "DOCKER_LOG_FILE_TAG": "_docker_build_log_",
        "REG_FILE_NAME": ".registry.json",
        "CLOUDFORMATION_FILE_NAME": ".cloudformation.yml",
        # AWS prefix names.
        "S3_STORE_PREF": "mldeployStore",
        # Registry key names.
        "CLOUDFORMATION_LOCATION_KEY": "cloudformation_template",
        "DEPLOY_STATUS_KEY": "deployment_status",
        "DOCKER_IMAGE_KEY": "docker-image",
        "PROJ_FOLDER_KEY": "location",
        "SALT_KEY": "salt",
        "STACK_NAME_KEY": "stack_name",
        "STACK_ID_KEY": "stack_id",
        # Standard values in registry.
        "STATUS_DEPLOYED": "Deployed",
        "STATUS_NOT_DEPLOYED": "Not deployed",
        # Docker image constructions.
        "DEFAULT_PROJECT_MODULES": ["boto3"],
        "APP_DIR_ON_IMAGE": "app",
        # User messages.
        "MSG_PREFIX": "\033[1;36;40m MLDeploy Message:: \033[m",
        "FAIL_PREFIX": "\033[1;31;40m MLDeploy Failure:: \033[m",
        "ACTION_PREFIX": "\033[1;33;40m MLDeploy Action Required:: \033[m",
    }
    return d_constants[key]
40c004a814b55a59e8814bb104a21f0d1c16db6b
18,235
def FUNCTION_TO_REGRESS_TEMPLATE(x, a, b, c):
    """
    A simple function to perform regression on.

    Args:
        x: A list of integers.
        a, b, c: Coefficients
    """
    return (x[0] * a / 2) + pow(x[1], b) + c
46f53a3d2a7e6729f51439a355991103dcbc03c2
18,236
def replace_substring(text: str, replace_mapping_dict: dict):
    """Return a string replaced with the passed mapping dict

    Args:
        text (str): text with unreplaced string
        replace_mapping_dict (dict): mapping dict to replace text

    Returns:
        str: text with all the substrings replaced
    """
    for search_string, replace_string in replace_mapping_dict.items():
        text = text.replace(search_string, replace_string)
    return text
12ca0addd4417813682100abe28462b88f58933e
18,237
# guard the import so the None check below is reachable on platforms
# without multiprocessing
try:
    import multiprocessing
except ImportError:
    multiprocessing = None


def cpu_count():
    """ Return the number of CPUs. """
    if multiprocessing is None:
        return 1
    return multiprocessing.cpu_count()
6b0d6647216e6b7cf8e0bb2c5ada6e557ef45e80
18,239
def basefilename(context):
    """ Returns filename minus the extension """
    return context.template_name.split('.')[0]
f077ffd509bce97d989a9cf6f68e1a0f379c6bab
18,240
import getpass
import sys


def prompt(prompt: str, secure: bool = False):
    """
    Reads a line of input from the user

    params:
        prompt: input prompt
        secure: read with getpass

    returns: the string entered
    """
    result = ""
    try:
        if secure:
            result = getpass.getpass(prompt)
        else:
            result = input(prompt)
    except KeyboardInterrupt:
        sys.exit(1)
    else:
        return result
0c0dab08e428912a8903da56c1c4c6f2544fad7a
18,241
from numpy.random import normal


def noise(arr, mean, sigma):
    """ Returns a normally distributed noise array of arr's shape,
    with the given mean and sigma. """
    result = normal(mean, sigma, arr.shape)
    return result
54bfbd64cf5fb5920987327fa536381905aabbde
18,242
def is_core(protein, mutation, elaspic_core_data):
    """
    Given protein and mutation, check if that protein.mutation is in
    elaspic_core_data.

    Parameters
    ----------
    protein : <str>
        The UniProt/SWISSPROT protein name. E.g. "Q9UPU5".
    mutation : <str>
        The position of the mutation. E.g. "I342V".
    elaspic_core_data : <DataFrame>
        The ELASPIC results file that contains only the `core` type entries.

    Returns
    -------
    is_core_bool : <bool>
        A boolean value that indicates whether the given protein.mutation
        is a `core` type.
    """
    core_search_data = elaspic_core_data[
        (elaspic_core_data["UniProt_ID"] == protein) &
        (elaspic_core_data["Mutation"] == mutation)]
    is_core_bool = True if len(core_search_data) else False
    return is_core_bool
51698e861f7c35e3a1d2e428ed71d178d470e3e1
18,243
def get_dropdown_value(soup, var_id):
    """Get the current value from a dropdown list.

    Use when you see a dropdown in this HTML format:

        <select id="var-id" name="var-name">
            <option value="false"> -- Option#1 -- </option>
            ...
            <option value="true" selected="selected"> -- Option#2 -- </option>
        </select>

    Args:
        soup (soup): soup pagetext that will be searched.
        var_id (string): the id of a var, used to find its value.

    Returns:
        (string): The text of the dropdown value.
    """
    try:
        dropdown = soup.find("select", {"id": var_id})
        # pick the option flagged as selected, not simply the first option
        dropdown_value = dropdown.find("option", selected=True).text
        return dropdown_value
    except AttributeError:
        print('\nERROR: <' + var_id + '> not found!\nPagesoup:\n\n', soup)
        raise LookupError
d26ca8233b0cabb174a202800b063db810b902a1
18,244
def rgb2hex(r, g, b, normalized=False):
    """ Converts RGB to HEX color """
    # Check if RGB triplet is normalized to unity
    if normalized:
        r, g, b = r * 255.0, g * 255.0, b * 255.0
    # cast to int: the %X format specifier rejects floats
    return '#%02X%02X%02X' % (int(r), int(g), int(b))
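
# Example (illustrative): accepts integer or normalized float triplets.
assert rgb2hex(255, 0, 128) == "#FF0080"
assert rgb2hex(1.0, 0.0, 0.5, normalized=True) == "#FF007F"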
f05fd63b90ee946e011565194f93f4015a8e2cf1
18,245