content — string (lengths 35 to 416k)
sha1 — string (length 40)
id — int64 (0 to 710k)
import os


def resolve(path):
    """Convert a path by expanding the tilde and environment variables."""
    return os.path.expandvars(os.path.expanduser(path))
5c47e7de63a65908f25438ed244a765b90ac788f
697,198
import os


def get_size(filepath):
    """Get file size and return string with appropriate unit"""
    size_bytes = os.path.getsize(filepath)
    if size_bytes < 1024:
        size_bytes = round(size_bytes, 2)
        size = f'{size_bytes} B'
    else:
        size_kilobytes = size_bytes / 1024
        if size_kilobytes < 1024:
            size_kilobytes = round(size_kilobytes, 2)
            size = f'{size_kilobytes} KB'
        else:
            size_megabytes = size_kilobytes / 1024
            if size_megabytes < 1024:
                size_megabytes = round(size_megabytes, 2)
                size = f'{size_megabytes} MB'
            else:
                size_gigabytes = size_megabytes / 1024
                if size_gigabytes < 1024:
                    size_gigabytes = round(size_gigabytes, 2)
                    size = f'{size_gigabytes} GB'
                else:
                    size = "Wow, that's huge."
    return size
6036b0ecec6276be781f73682bce0024ebaeaec4
697,199
def is_viable_non_dupe(text: str, comparison) -> bool:
    """text must be longer than 2 ('""'), not 'NULL' and not in comparison.

    :param text: String to be tested.
    :param comparison: Dictionary or set to search for text in.
    :return: bool
    """
    return 2 < len(text) and text != 'NULL' and text not in comparison
da575dffde2f13e849949350aabe73f40802f14a
697,200
def total_profit(attributes):
    """Round the total profit under the assumption that all inventory was sold."""
    return round((attributes['sell_price'] - attributes['cost_price']) * attributes['inventory'])
f2b7f9f7d19738ad4476ef7d3454c4aa34616faf
697,201
def _proc(tr, sampling_rate=10):
    """Basic processing including downsampling, detrend, and demean.

    :param tr: raw trace
    :param sampling_rate: target sampling rate
    :return tr: trace after processing
    """
    # deep copy
    tr2 = tr.copy()
    tr2.interpolate(sampling_rate)
    tr2.detrend(type="linear")
    tr2.detrend(type="demean")
    return tr2
99dab5d384b9db80b257c8bd0068f96cbc82c532
697,202
def get_neighbors(point):
    """Given a 2D point (represented as a Point object), returns a list of the
    four points that neighbor it in the four coordinate directions. Uses the
    "copy" method to avoid modifying the original point."""
    p1 = point.copy()
    p2 = point.copy()
    p3 = point.copy()
    p4 = point.copy()
    p1.setX(point.getX() - 1)
    p2.setX(point.getX() + 1)
    p3.setY(point.getY() - 1)
    p4.setY(point.getY() + 1)
    points = [p1, p2, p3, p4]
    return points
67df39fe0d6fd61fed70f1521b13754d1230ac9e
697,203
def get_user_from_email(github, email):
    """Returns a user for that email or None."""
    users = list(github.search_users(f'type:user {email} in:email'))
    if len(users) == 0:
        return None
    elif len(users) == 1:
        return users[0]
    else:
        raise RuntimeError(f'{email} associated with {len(users)} users.')
8016081e8f9ff737369fdb957055c243372485a6
697,204
def fixDelex(filename, data, data2, idx, idx_acts):
    """Given system dialogue acts fix automatic delexicalization."""
    try:
        turn = data2[filename.strip('.json')][str(idx_acts)]
    except:
        return data

    if not isinstance(turn, str):  # and not isinstance(turn, unicode):
        for k, act in turn.items():
            if 'Attraction' in k:
                if 'restaurant_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("restaurant", "attraction")
                if 'hotel_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("hotel", "attraction")
            if 'Hotel' in k:
                if 'attraction_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("attraction", "hotel")
                if 'restaurant_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("restaurant", "hotel")
            if 'Restaurant' in k:
                if 'attraction_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("attraction", "restaurant")
                if 'hotel_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("hotel", "restaurant")
    return data
cd9fc035165f931183d28d456d4b879eb98c552a
697,205
from typing import Dict


def read_config(return_config: str, config: Dict):
    """Read the config dictionary and load all of the parameters.

    Depending on whether return_config is 'dashboard' or 'logger', returns the
    information needed to create the dashboard or the logger. If any of the
    logger or dashboard options are not included in the config file, they are
    returned as None; the constructor handles default values in that case.

    :param return_config: Which information to return. It should only be
        'logger' or 'dashboard' respectively.
    :param config: The dictionary that needs to be read.
    """
    lg_parameters = []
    dash_plots = []
    refresh = None
    ips = None
    load_directory = None
    save_directory = None
    for key in config.keys():
        # check if the key is options and load the specified settings.
        if key == 'options':
            if 'refresh_rate' in config[key]:
                refresh = config[key]['refresh_rate']
            if 'allowed_ip' in config[key]:
                ips = config[key]['allowed_ip']
            if 'load_and_save' in config[key]:
                load_directory = config[key]['load_and_save']
                save_directory = config[key]['load_and_save']
            else:
                if 'save_directory' in config[key]:
                    save_directory = config[key]['save_directory']
                if 'load_directory' in config[key]:
                    load_directory = config[key]['load_directory']
        elif key == 'plots':
            for plot in config[key].keys():
                # check what information it needs
                if return_config == 'logger':
                    for params in config[key][plot].keys():
                        # default configs. If they exist in config they will get
                        # overwritten. Used for constructor.
                        server_param = 'localhost'
                        port_param = 5555
                        interval_param = 1
                        # check if the optional options exist in the dictionary
                        # and overwrite them if they do.
                        if 'server' in config[key][plot][params]:
                            server_param = config[key][plot][params]['server']
                        if 'port' in config[key][plot][params]:
                            port_param = config[key][plot][params]['port']
                        if 'options' in config[key][plot][params]:
                            if 'interval' in config[key][plot][params]['options']:
                                interval_param = config[key][plot][params]['options']['interval']
                        # a tuple with the specified parameters for the logger
                        name = params
                        source_type = config[key][plot][params]['source_type']
                        parameter_path = config[key][plot][params]['parameter_path']
                        # append the tuple with the information for the parameters constructor
                        lg_parameters.append((name, source_type, parameter_path,
                                              server_param, port_param, interval_param))
                elif return_config == 'dashboard':
                    name_list = []
                    for params in config[key][plot].keys():
                        # append the names of the parameters
                        name_list.append(params)
                    # append a tuple with the plot and a list of the parameters.
                    dash_plots.append((plot, name_list))
    # return the correct information for each object
    if return_config == 'logger':
        return lg_parameters, refresh, save_directory
    elif return_config == 'dashboard':
        return dash_plots, refresh, load_directory, ips
cc91ebe633f01259fa3344f93698ab806ee9d9fc
697,206
def check_pair_sum_divisible(arr, k):
    """Check if an array can be divided into pairs whose sum is divisible by k."""
    rem_freq_map = {}
    for elem in arr:
        rem = elem % k
        rem_freq_map[rem] = rem_freq_map.get(rem, 0) + 1
    for rem, freq in rem_freq_map.items():
        if rem == 0 or rem * 2 == k:
            if freq & 1:
                return False
        elif freq != rem_freq_map.get(k - rem, 0):
            return False
    return True
536833113554e50f61a78bc988662edaf190b2b3
697,207
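A couple of doctest-style calls for the sample above (editor's illustration; inputs chosen by hand, not part of the dataset row). Pairs (9, 3) and (7, 5) both sum to multiples of 6, while [1, 1] cannot form such a pair:

>>> check_pair_sum_divisible([9, 7, 5, 3], 6)
True
>>> check_pair_sum_divisible([1, 1], 3)
False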
def shouldIgnore(s):
    """Should we ignore the key s?

    (Right now, returns True if s takes the form "__xxxxx...", i.e. is
    prefixed by two '_' chars.)
    """
    return len(s) > 1 and s[:2] == "__"
07a260a0236444f568c3047ddb3988509f2e7c77
697,208
def total_yngve_depth(yngve_tree_root):
    """Return the total depth of the Yngve tree of the sentence.

    Args:
        yngve_tree_root (obj): The root node

    Returns:
        int: The total depth of the Yngve tree
    """
    tot_score = 0
    for leaf in yngve_tree_root.leaves:
        tot_score += leaf.score
    return tot_score
9220147ed529bac780b7cb5e60ab2364af47f9f6
697,209
def is_function(f):
    """Is it a function?

    :param f: function
    :return: boolean
    """
    return hasattr(f, '__call__')
c330b81c3b09a0ba8e475e322df5f90d89e39e21
697,210
def inRects(R, x, y):
    """inRects returns True if (x, y) is in any of the rectangles in R."""
    return any(x0 <= x < x1 and y0 <= y < y1 for x0, y0, x1, y1 in R)
f2e4a5c5d60e4f37ee1a8ec9d54a050b3e12b022
697,211
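A short usage sketch (editor's illustration with made-up rectangles). Note the half-open comparison x0 <= x < x1, so a point on the right or bottom edge is outside:

>>> inRects([(0, 0, 10, 10)], 5, 5)
True
>>> inRects([(0, 0, 10, 10)], 10, 10)
False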
def cal_linear(iaql_lo, iaql_hi, bp_lo, bp_hi, cp):
    """Linearly rescale cp from the range [bp_lo, bp_hi] to [iaql_lo, iaql_hi]."""
    return (iaql_hi - iaql_lo) * (cp - bp_lo) / (bp_hi - bp_lo) + iaql_lo
b1aba33f4ee2c42b62de66a710c327b4ffc7838a
697,212
def _gen_runtime_script(inputs, user_data):
    """Generate the runtime script.

    Args:
        inputs:
        user_data:

    Returns:
        The MEC runtime script string.
    """
    mec_runtime_script = ''
    if 'ak' in inputs and 'sk' in inputs:
        mec_runtime_script = 'echo \'ak=$ak$\\nsk=$sk$\\n\' >> /root/init.txt\n'
        if '$ak$' not in user_data['str_replace']['params']:
            user_data['str_replace']['params']['$ak$'] = {
                'get_input': 'ak'
            }
        if '$sk$' not in user_data['str_replace']['params']:
            user_data['str_replace']['params']['$sk$'] = {
                'get_input': 'sk'
            }
    return mec_runtime_script
cac5421552df6b58a1a3849e5173884e3eb09416
697,213
def load_corpus(corpus_path, proc_mode=0):
    """Load the corpus from disk."""
    corpus_text = ""
    with open(corpus_path, 'r') as corpusFile:
        corpus_text = corpusFile.read()
    return corpus_text
045ed4aa97a6e685edefaf03d8f45cd5afb51526
697,214
def _dict_to_tuple(d):
    """Recursively convert a dictionary to a tuple of key-value tuples.

    Only intended for use as a helper function inside memoize!!
    May break when keys can't be sorted, but that is not an expected use-case.
    """
    if isinstance(d, dict):
        return tuple([(k, _dict_to_tuple(d[k])) for k in sorted(d.keys())])
    else:
        return d
4a7137b7aeefc8d9137ec5d1177d0635ef5b5627
697,215
import token


def decompose_name(node):
    """
    NOTE: Per the lib2to3 grammar:

        dotted_name: NAME ('.' NAME)*

    This means that dotted_name can be either dotted or not dotted, i.e. it's
    a generalized form of NAME. So this function will cover both cases.

    Given a dotted_name node this will return a tuple of the form
    (pkg, name, full_string) where all are str, e.g.:

        a.b.c => (a.b, c, a.b.c)
        b.c   => (b, c, b.c)
        c     => (None, c, c)

    otherwise it will return None for each field.
    """
    if node.type == token.NAME:
        # node is just a name, no dots
        return '', node.value, node.value
    if node.children:
        # Right-most node will be the name, i.e. a.b.c = ['a', '.', 'b', '.', 'c']
        name_node = node.children[-1]
        package_nodes = node.children[:-2]
        name = str(name_node).strip()
        package = ''.join(str(n).strip() for n in package_nodes)
        full = ''.join(str(n).strip() for n in node.children)
        return package, name, full
    return None, None, None
2be377913f6dd2a13335d29b0f932de6ebe35c12
697,216
# collections.Counter is instantiable; typing.Counter (as originally imported
# here) raises TypeError when called in modern Python.
from collections import Counter


def composition(mol):
    """Molecular composition in dict format (ex. Glucose {'C': 6, 'H': 12, 'O': 6})."""
    mol.require("Valence")
    c = Counter()
    for _, a in mol.atoms_iter():
        c += a.composition()
    return c
6d677e7390fe815570080dd051edc747ef5a9393
697,217
from pathlib import Path
from typing import Callable
from typing import List


def find_paths(path: Path, pattern: str, filter: Callable[[Path], bool]) -> List[Path]:
    """Glob pattern relative to path and filter results by predicate."""
    return [x for x in sorted(path.glob(pattern), key=str) if filter(x)]
cce368e6dc3b97f715b3f82a5cc5837942d600b9
697,218
def concatenate(dic):
    """Merge a dict of DataFrames into one frame, joining on the index (outer join)."""
    keys = list(dic)
    d = dic[keys[0]].merge(dic[keys[1]], left_index=True, right_index=True,
                           how='outer', suffixes=('.' + keys[0], '.' + keys[1]))
    for k in keys[2:]:
        d = d.merge(dic[k], left_index=True, right_index=True,
                    how='outer', suffixes=('', '.' + k))
    return d
0e12100664c868c6f2eec9cd9133c9823238b418
697,219
def abv(og, fg, from_carbonation=0):
    """Work out alcohol content from fermentation data
    (optionally including carbonation).
    """
    value = (float(og) - float(fg)) / 1.938 + float(from_carbonation)
    return float(value)
d3cf1e0c645d07bf98c70f1b087f0f1213e4bf21
697,221
def is_winning(p, state):
    """Return True if player p has a winning line under tic-tac-toe (morpion) rules."""
    return state[0][0] == state[0][1] == state[0][2] == p or \
           state[1][0] == state[1][1] == state[1][2] == p or \
           state[2][0] == state[2][1] == state[2][2] == p or \
           state[0][0] == state[1][0] == state[2][0] == p or \
           state[0][1] == state[1][1] == state[2][1] == p or \
           state[0][2] == state[1][2] == state[2][2] == p or \
           state[0][0] == state[1][1] == state[2][2] == p or \
           state[0][2] == state[1][1] == state[2][0] == p
3dafa09f34f48499eb661f577fea5473f57a3b0f
697,223
import os


def list_notebooks(path='ipynb', skip=''):
    """All notebooks in the directory notebooks/path, or in the package itself"""
    if path == 'ipynb':
        return list_notebooks('ipynb_julia', skip=skip) + \
               list_notebooks('ipynb_py', skip=skip) + \
               list_notebooks('ipynb_R', skip=skip)
    nb_path = os.path.dirname(os.path.abspath(__file__))
    if path.startswith('.'):
        nb_path = os.path.join(nb_path, path)
    else:
        nb_path = os.path.join(nb_path, 'notebooks', path)
    notebooks = [os.path.join(nb_path, nb_file) for nb_file in os.listdir(nb_path)
                 if not skip or skip not in nb_file]
    assert notebooks
    return notebooks
bb585d58950adf65e2ae2a47ae52a40c530a9eba
697,224
def find_remote_addr(req):
    """Determine the correct IP address of the requester."""
    if req.headers.get('CF-Connecting-IP'):
        return req.headers.get('CF-Connecting-IP')
    if req.headers.get('X-Forwarded-For'):
        return req.headers.get('X-Forwarded-For')
    return req.remote_addr
ddef91a116fb47a12acd71417b70af88484fd780
697,225
def linkage_to_leaves(Z, data_size):
    """Convert a linkage-format matrix to a dictionary whose keys are node
    numbers and whose values are arrays of the leaves under each node.

    Parameters
    ----------
    Z : 2-d numpy matrix, shape (n_merges, 4), float
        The linkage matrix, as generated by scipy.cluster.hierarchy.linkage.
    data_size : integer
        The size of the original dataset; the same as the total number of
        leaves in the linkage tree (dendrogram).

    Return
    ------
    node_to_leaves : dictionary, (node number, list of leaves), (integer, list(integer))
        A dictionary that maps an integer node in the linkage tree
        (dendrogram) to its corresponding leaf nodes.
    """
    node_id = data_size
    node_to_leaves = {}
    for merge in Z[:, :2].astype(int):
        # Both node ids < data_size: the two merging nodes are leaves
        if merge.max() < data_size:
            node_to_leaves[node_id] = []
            node_to_leaves[node_id].extend(merge)
        # One of the merging nodes (the one with the small node index) is a leaf node
        elif merge.min() < data_size:
            s, l = merge.min(), merge.max()
            node_to_leaves[node_id] = []
            node_to_leaves[node_id].extend(node_to_leaves[l])
            node_to_leaves[node_id].append(s)
        # Both nodes are internal nodes
        else:
            node_to_leaves[node_id] = []
            node_to_leaves[node_id].extend(node_to_leaves[merge[0]])
            node_to_leaves[node_id].extend(node_to_leaves[merge[1]])
        node_id = node_id + 1
    return node_to_leaves
0b713962f4d1f594e196a77bc1c955c4a4b57505
697,226
import re


def remove_comments(data):
    """Return the supplied VHDL file data string, *data*, with all comments removed."""
    return re.sub(r'--[^\n]*', '', data)
832e3a66de83cffa764e04a1baef017763af519d
697,227
import argparse


def get_user_args():
    """Parse command-line arguments and return a dictionary with the user input.

    Returns:
        A dict containing the user input (currently just the hosts CSV file name).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--INPUT", help="name of hosts CSV file", required=True)
    args = parser.parse_args()
    params = {key: value for key, value in args._get_kwargs()}
    return params
acd65fbb912c10699c38f9a976817ae62a6569a9
697,228
def probes_to_genes(df, probe_to_gene):
    """Converts probe level dataframe to gene level dataframe."""
    get_gene = lambda probe: probe_to_gene.get(probe)
    grouped = df.groupby(by=get_gene, axis=0)
    gene_df = grouped.mean()
    return gene_df
348dc91ae5c49904cdfab8c52b2d421220b493e1
697,229
def target_to_bits(target):
    """Creates a compact target representation for a given target.

    Args:
        target (Bignum): The long-form target to make compact.

    Returns:
        ct (int): Compact target
    """
    # Get bit length
    nbits = target.bit_length()
    # Round up to next 8-bits
    nbits = ((nbits + 7) & ~0x7)
    exponent = (int(nbits / 8) & 0xff)
    coefficient = (target >> (nbits - 24)) & 0xffffff
    if coefficient & 0x800000:
        coefficient >>= 8
        exponent += 1
    return (exponent << 24) | coefficient
8a1d996d653388d49d96d51409029f7d8cde283f
697,230
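A worked example for the compact-target encoder above (editor's illustration, not part of the dataset row): Bitcoin's difficulty-1 target, 0xffff * 256**26, should round-trip to the familiar compact value 0x1d00ffff — the coefficient 0xffff00 has its sign bit set, so the function shifts it to 0xffff and bumps the exponent from 28 to 29 (0x1d):

>>> hex(target_to_bits(0xffff * 256 ** 26))
'0x1d00ffff'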
# collections.ChainMap is instantiable; typing.ChainMap (as originally
# imported here) raises TypeError when called in modern Python.
from collections import ChainMap


def chainmap_context_factory(parent_context=None):
    """
    A ``ChainMap`` context, to avoid copying any data and yet preserve
    strict one-way inheritance (just like with dict copying)
    """
    if parent_context is None:
        # initial context
        return ChainMap()
    else:
        # inherit context
        if not isinstance(parent_context, ChainMap):
            # if a dict context was previously used, then convert
            # (without modifying the original dict)
            parent_context = ChainMap(parent_context)
        return parent_context.new_child()
ff9446d9547dcd042589c402048527a0644fb2c3
697,231
def get_rectangle_edges(x, y, width, height):
    """Return the 4 edges of a rectangle as a list.

    Edges are in clockwise order, starting from the top.

    Each edge is returned as ``(start_point, end_point)`` and each point
    as ``(x, y)`` coordinates.
    """
    # In clockwise order, starting on top left
    corners = [
        (x, y),
        (x + width, y),
        (x + width, y + height),
        (x, y + height)]
    # clockwise order, starting on top right
    shifted_corners = corners[1:] + corners[:1]
    return zip(corners, shifted_corners)
e0738bd8d742eb9f9ae076e2e89031cd8eb74796
697,232
def get_committee_indices(spec, state, duplicates=False):
    """
    This utility function allows the caller to ensure there are or are not
    duplicate validator indices in the returned committee based on the
    boolean ``duplicates``.
    """
    state = state.copy()
    current_epoch = spec.get_current_epoch(state)
    randao_index = (current_epoch + 1) % spec.EPOCHS_PER_HISTORICAL_VECTOR
    while True:
        committee = spec.get_next_sync_committee_indices(state)
        if duplicates:
            if len(committee) != len(set(committee)):
                return committee
        else:
            if len(committee) == len(set(committee)):
                return committee
        state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
f53ffdf01382c45b9f89419a8b4419c66ffbf8ea
697,233
import math


def comb(n, r):
    """Combinations of n objects by r, namely picking r among n possible.

    comb(n, r) = n! / (r! (n-r)!)
    """
    # Integer division keeps the result exact (the factorials divide evenly);
    # true division would return a float and lose precision for large n.
    return math.factorial(n) // (math.factorial(r) * math.factorial(n - r))
61944512cec3555bc15efb73e1ca90510c86caa9
697,234
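A quick sanity check of the integer-division version above (editor's illustration; with // the result is an exact int rather than a float):

>>> comb(5, 2)
10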
def schema_to_dtypes(schema):
    """Converts a schema to a Pandas dtypes dictionary.

    Args:
        schema (list): A schema to get the dtypes from.
    """
    dtypes = {}
    for item in schema:
        name = item['name']
        type = item['type']
        if type == 'STRING':
            dtypes[name] = 'str'
        elif type == 'INTEGER':
            dtypes[name] = 'float64'
        elif type == 'NUMERIC':
            dtypes[name] = 'float64'
        elif type == 'DATETIME':
            dtypes[name] = 'str'
        elif type == 'DATE':
            dtypes[name] = 'str'
    return dtypes
b3f2d41b3c6fad5f197da48b236e4ef3801614f7
697,235
def set_monthly_base_periods_defaults(double_link):
    """Set defaults: a single link on January, or a double link on months 1 and 12."""
    base_periods = [1]
    if double_link:
        base_periods.append(12)
    return base_periods
15fc7d30064b6888a6e1df3ef60cf25fa8f101fb
697,236
import itertools


def part2(data):
    """
    >>> part2([[5, 9, 2, 8], [9, 4, 7, 3], [3, 8, 6, 5]])
    9
    >>> part2(read_input())
    280
    """
    checksum = 0
    for row in data:
        for (a, b) in itertools.permutations(row, 2):
            if a % b == 0:
                checksum += a // b
                break
    return checksum
95fa69b80b064dabf21057791417c769fda3c14e
697,237
import re


def safe_dag_id(s: str) -> str:
    """Remove invalid characters for dag_id"""
    return re.sub('[^0-9a-zA-Z_]+', '_', s)
950dd59baaea5b0a94b1d5f67e8699c8779dae15
697,239
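One illustrative call (editor's example, not from the dataset): every run of characters outside [0-9a-zA-Z_] collapses to a single underscore:

>>> safe_dag_id("my dag/run.1")
'my_dag_run_1'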
import yaml


def unicode_representer(self, value):
    """Represents unicode strings as regular strings."""
    return yaml.ScalarNode(tag='tag:yaml.org,2002:str', value=value)
7eaa92e17be6fa707cb84cdea877ee451dc48694
697,240
import argparse


def parse_arguments():
    """Argument parser"""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '-file1',
        help='File containing sequences',
        required=True
    )
    parser.add_argument(
        '-file2',
        help='File containing reverse end sequences',
        required=True
    )
    parser.add_argument(
        '-barcodes',
        help='File containing barcodes',
        required=True
    )
    parser.add_argument(
        '-bc_pos',
        help='Start and end index of barcodes (default: 7 14)',
        type=int,
        nargs=2,
        default=[7, 14]
    )
    parser.add_argument(
        '-prefix',
        help='Filename prefixes',
        required=True
    )
    parser.add_argument(
        '-r2bc',
        help='Flag that barcode is on read 2',
        action='store_true'
    )
    parser.add_argument(
        '-count_sum',
        help='Flag for printing count summary',
        action='store_true'
    )
    args = parser.parse_args()
    return args
c441b3f165b59e66598a6ef8935ec94a73aa88ed
697,241
def rnn_args_from_config(rnn_config):
    """
    Takes a Config object corresponding to RNN settings
    (for example `config.algo.rnn` in BCConfig) and extracts
    rnn kwargs for instantiating rnn networks.
    """
    return dict(
        rnn_hidden_dim=rnn_config.hidden_dim,
        rnn_num_layers=rnn_config.num_layers,
        rnn_type=rnn_config.rnn_type,
        rnn_kwargs=dict(rnn_config.kwargs),
    )
54cf542122036510c70fe7a53a47dd724880a912
697,242
def gldas_variables():
    """List of the plottable variables from the GLDAS 2.1 datasets used"""
    return {
        'Air Temperature': 'Tair_f_inst',
        'Surface Albedo': 'Albedo_inst',
        'Surface Temperature': 'AvgSurfT_inst',
        'Canopy Water Amount': 'CanopInt_inst',
        'Evaporation Flux From Canopy': 'ECanop_tavg',
        'Evaporation Flux From Soil': 'ESoil_tavg',
        'Water Evaporation Flux': 'Evap_tavg',
        'Surface Downwelling Longwave Flux In Air': 'LWdown_f_tavg',
        'Surface Net Downward Longwave Flux': 'Lwnet_tavg',
        'Potential Evaporation Flux': 'PotEvap_tavg',
        'Surface Air Pressure': 'Psurf_f_inst',
        'Specific Humidity': 'Qair_f_inst',
        'Downward Heat Flux In Soil': 'Qg_tavg',
        'Surface Upward Sensible Heat Flux': 'Qh_tavg',
        'Surface Upward Latent Heat Flux': 'Qle_tavg',
        'Surface Runoff Amount': 'Qs_acc',
        'Subsurface Runoff Amount': 'Qsb_acc',
        'Surface Snow Melt Amount': 'Qsm_acc',
        'Precipitation Flux': 'Rainf_f_tavg',
        'Rainfall Flux': 'Rainf_tavg',
        'Root Zone Soil Moisture': 'RootMoist_inst',
        'Surface Snow Amount': 'SWE_inst',
        'Soil Temperature': 'SoilTMP0_10cm_inst',
        'Surface Downwelling Shortwave Flux In Air': 'SWdown_f_tavg',
        'Surface Snow Thickness': 'SnowDepth_inst',
        'Snowfall Flux': 'Snowf_tavg',
        'Surface Net Downward Shortwave Flux': 'Swnet_tavg',
        'Transpiration Flux From Veg': 'Tveg_tavg',
        'Wind Speed': 'Wind_f_inst',
    }
5fc8e24d7e58e0d7a892775b8be34d5fc1322f58
697,243
from typing import Any
from typing import Tuple
from typing import Dict
import json


def flask_http_response(status: int, data: Any) -> Tuple[str, int, Dict[str, str]]:
    """Create a tuple for flask to return

    Args:
        status: integer http status to use
        data: json dumpable data for the return body
    """
    return json.dumps(data, separators=(",", ":")), status, {"Content-Type": "application/json"}
33584b4c66f08174ca7ad93e1b11193707a65892
697,244
import itertools


def generate_configs(params, description_skeleton):
    """Generate all combinations of the given parameters.

    The format of the returned value is appropriate for the @performance
    decorator.

    :param description_skeleton: skeleton of the config description; it will
        be filled with parameter values
    :param params: dictionary of parameters in the form
        {"param_name": [val1, val2, val3]}
    """
    keys = params.keys()
    configs = {}
    combinations = itertools.product(*params.values())
    for i, combination in enumerate(combinations):
        conf_name = 'config{}'.format(i)
        configs[conf_name] = dict()
        new_params = dict(zip(keys, combination))
        description = description_skeleton.format(**new_params)
        for key, value in new_params.items():
            new_params[key] = {'value': value}
        configs[conf_name].update({
            'parameters': new_params,
            'description': description
        })
    return configs
027180b38cbac1769136ed99f901328a84d8b655
697,245
def max4(x):
    """
    >>> max4(20)
    20.0
    """
    return max(1, 2.0, x, 14)
95a78f9ae81ba5f04d6abd09a7a2cd5f248e0e12
697,246
import requests


def get_response_commits(commits_url, my_token):
    """Wrapper around the function `requests.get()`.

    If my_token is supplied by me (i.e. it's not an empty string), then use
    authentication. Otherwise, go ahead without authentication.

    Args:
        commits_url: URL with JSON metadata for commits.
        my_token: your own GitHub personal access token for authentication.
            Authenticated requests get a higher hourly API rate limit.

    Returns:
        Response object for no. of commits by the GitHub user on the API.
    """
    if my_token:
        response_commits = requests.get(commits_url,
                                        headers={'Authorization': 'token %s' % my_token})
    else:
        response_commits = requests.get(commits_url)
    return response_commits
7d8350540944aef18631b9122e1180d21f9a6f8a
697,247
def _validate_image_segment_id(the_sicd):
    """Validate the image segment id.

    Parameters
    ----------
    the_sicd : sarpy.io.complex.sicd_elements.SICD.SICDType

    Returns
    -------
    bool
    """
    if the_sicd.ImageFormation is None or the_sicd.RadarCollection is None:
        return False

    # get the segment identifier
    seg_id = the_sicd.ImageFormation.SegmentIdentifier
    # get the segment list
    try:
        seg_list = the_sicd.RadarCollection.Area.Plane.SegmentList
    except AttributeError:
        seg_list = None

    if seg_id is None:
        if seg_list is not None:
            the_sicd.log_validity_error(
                'ImageFormation.SegmentIdentifier is not populated, but\n'
                'RadarCollection.Area.Plane.SegmentList is populated.\n'
                'ImageFormation.SegmentIdentifier should be set to identify the appropriate segment.')
            return False
        return True

    if seg_list is None:
        the_sicd.log_validity_error(
            'ImageFormation.SegmentIdentifier is populated as {},\n'
            'but RadarCollection.Area.Plane.SegmentList is not populated.'.format(seg_id))
        return False

    # let's double check that seg_id is sensibly populated
    the_ids = [entry.Identifier for entry in seg_list]
    if seg_id not in the_ids:
        the_sicd.log_validity_error(
            'ImageFormation.SegmentIdentifier is populated as {},\n'
            'but this is not one of the possible identifiers in the\n'
            'RadarCollection.Area.Plane.SegmentList definition {}.\n'
            'ImageFormation.SegmentIdentifier should be set to identify the '
            'appropriate segment.'.format(seg_id, the_ids))
        return False

    return True
3cf356f0ff94f4550eb756abda6fce100bb9c1c8
697,248
def get_locale_parts(locale):
    """Split a locale into three parts, for language, script, and region."""
    parts = locale.split('_')
    if len(parts) == 1:
        return (parts[0], None, None)
    elif len(parts) == 2:
        if len(parts[1]) == 4:
            # parts[1] is a script
            return (parts[0], parts[1], None)
        else:
            return (parts[0], None, parts[1])
    else:
        assert len(parts) == 3
        return tuple(parts)
6a2a5f8600470ff13323482c478dcb1494b410a7
697,249
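Two illustrative calls (editor's examples): a four-letter middle part is treated as a script, a shorter one as a region:

>>> get_locale_parts('zh_Hant_TW')
('zh', 'Hant', 'TW')
>>> get_locale_parts('en_US')
('en', None, 'US')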
import re


def strip_markdown_directives(line):
    """Strips markdown directives from a line."""
    line = line.strip()
    if line.startswith("<") and line.endswith(">"):
        # Let's assume it's inline HTML and skip it
        return ""
    # Remove URLs (assume remote starts with http and local ends with html)
    line = re.sub(r'\[(.+?)]\(http[^\)]+\)', r'\1', line)
    line = re.sub(r'\[(.+?)]\(.+?html\)', r'\1', line)
    line = re.sub(r'<http:.+?>', r'', line)
    return line
8a1ad9076d058ecabbce0720d3c960601c14e3de
697,250
import random


def pick_random_worker_set(worker_sets):
    """Pick random set of workers"""
    return random.choice(worker_sets)
86725e54659e0577ef5ff548e0b997261f876c75
697,251
import logging


def filter_event_tags(tags, device):
    """Drop unknown tags not listed in device's event-log-tags file."""
    device.wait()
    supported_tags = set()
    for l in device.shell(
            ['cat', '/system/etc/event-log-tags'])[0].splitlines():
        tokens = l.split(' ')
        if len(tokens) >= 2:
            supported_tags.add(tokens[1])
    filtered = []
    for tag in tags:
        if tag in supported_tags:
            filtered.append(tag)
        else:
            logging.warning('Unknown tag \'%s\'. Ignoring...', tag)
    return filtered
dba8fcdf92e41e5b348548c840b67a0ba3b13db7
697,252
import json


def is_valid_json(stuff):
    """Checks if a string is valid json."""
    try:
        json.loads(stuff)
    # json.loads raises ValueError (JSONDecodeError) for bad JSON and
    # TypeError for non-string input; catch those rather than everything.
    except (TypeError, ValueError):
        return False
    else:
        return True
f949b00d31fe682c1974e93619a79a573802a2fe
697,253
def package_installed(module, package_name):
    """Determine if the package is already installed."""
    cmd = ['pacman', '-Q', package_name]
    exit_code, _, _ = module.run_command(cmd, check_rc=False)
    return exit_code == 0
5382a160e3c55d23bcf20b6bd8d360b11cd71410
697,254
def set_ipu_model_options(opts, compile_ipu_code=True):
    """Set the IPU Model options.

    Args:
        opts: The IpuOptions configuration protobuf to modify.
        compile_ipu_code: Whether or not to actually compile real IPU code for
            modelling.

    Returns:
        The IpuOptions configuration protobuf, with IPU model options set.
    """
    opts.ipu_model_config.compile_ipu_code = compile_ipu_code
    return opts
d5e9577fb9ebad81b6fedb1988561197dbd3028e
697,255
def _ExtractResNetThroughput(output):
    """Extract throughput from Horovod output.

    Args:
        output: Horovod output

    Returns:
        A tuple of:
            Average throughput in images per second (float)
            Unit of the throughput metric (str)
    """
    # Start from last line and iterate backwards.
    avg_throughput = 0
    for line in output.splitlines()[::-1]:
        if 'train_throughput' in line:
            split_line = line.split()
            avg_throughput = float(split_line[-1])
            break
    return round(avg_throughput, 1), 'images/second'
671d745b0f73e9a84fa9a8b55f45054c711329c0
697,256
from typing import Any


def map_index(x: Any, column_map: dict) -> str:
    """Makes column list index human-readable."""
    return f'{column_map[x]}'
ab7c371cbcb9949e66a9a8adcc153c2e85646d01
697,257
def cuda_collate_fn(batch):
    """don't need to zip the tensor"""
    return batch[0]
14c9f98b0f4073757cd9f880d73aae984119bf70
697,258
from math import factorial


def fatorial(num, show=False):
    """Calculate the factorial of a number.

    :param num: Number to be calculated
    :param show: (optional) Whether or not to show the calculation
    :return: The factorial of the number
    """
    if show:
        cont = num
        fat = 1
        while cont >= 1:
            if cont != 1:
                print(cont, end=' x ')
                fat *= cont
            else:
                print(cont, end=' = ')
            cont -= 1
    else:
        fat = factorial(num)
    return fat
74705b909376cdb8abdd366bc1ed0dc654981a4b
697,259
def is_decoy(psm, prefix=None):
    """Given a PSM dict, return :py:const:`True` if it is marked as decoy,
    and :py:const:`False` otherwise.

    Parameters
    ----------
    psm : dict
        A dict, as yielded by :py:func:`read`.
    prefix : ignored

    Returns
    -------
    out : bool
    """
    return psm['PeptideHit'][0]['target_decoy'] == 'decoy'
b901eeb4fc0938d64b06bce0972fadcdf9bd05d5
697,260
def complaint_image_download(self, media_url):
    """Download a customer complaint image.

    :param media_url: image download URL, e.g.
        'https://api.mch.weixin.qq.com/v3/merchant-service/images/xxxxx'
    """
    path = media_url[len(self._core._gate_way):] if media_url.startswith(self._core._gate_way) else media_url
    return self._core.request(path, skip_verify=True)
345db60477b122109ba5a9e16077483581b5d107
697,261
import math


def calc_mean_and_sd(values_list):
    """Calculates arithmetic mean and SD of provided data.

    Used to aggregate variable values of iterations/repetitions of an
    experiment. Thanks to the database check, all values are available and
    valid. Handwritten function to allow usage of the specific storage format
    used in this database. Textbook implementation following ref [Press2002]
    (identical to standard statistics definition).

    :param values_list: list of ints or floats
    :return: mean (float), sd (float), n (int)
    """
    n = 0
    total = 0.0
    for value in values_list:
        n += 1
        total += float(value)
    if n == 0:
        return 0.0, 0.0, 0
    if n == 1:
        return total, 0.0, 1
    mean = total / float(n)
    variance = 0.0
    for value in values_list:
        delta = float(value) - mean
        variance += delta * delta
    variance /= float(n - 1)
    return mean, math.sqrt(variance), n
46ff39c31812f9df7e3a68da758095607db8656e
697,262
def clsName2Ind(lbls, cls):
    """Convert a class name to a (1-based) index."""
    if cls in lbls:
        return lbls.index(cls) + 1
    else:
        raise ValueError('unknown class')
15676bb297e42562a02b7e9af0c8d9de6fb890df
697,263
def site_facility(hybrid_plant_size_MW, hybrid_construction_months, num_turbines):
    """Use empirical data to estimate the cost of site facilities and security.

    Site facilities include:
        Building design and construction
        Drilling and installing a water well, including piping
        Electric power for a water well
        Septic tank and drain field

    Site security includes:
        Constructing and reinstating the compound
        Constructing and reinstating the batch plant site
        Setting up and removing the site offices for the contractor, turbine
            supplier, and owner
        Restroom facilities
        Electrical and telephone hook-up
        Monthly office costs
        Signage for project information, safety and directions
        Cattle guards and gates
        Number of access roads

    In main.py, a csv is loaded into a Pandas dataframe. The columns of the
    dataframe must be:

        Size Min (MW): Minimum power output for a plant that needs a certain
            size of building.
        Size Max (MW): Maximum power output of a plant that needs a certain
            size of building.
        Building Area (sq. ft.): The area of the building needed to provide
            O & M support to plants with power output between "Size Min (MW)"
            and "Size Max (MW)".

    Returns
    -------
    float
        Site facility cost.
    """
    if hybrid_plant_size_MW > 15:
        building_area_sq_ft = float(4000)
        construction_building_cost = building_area_sq_ft * 125 + 176125
        ps = hybrid_plant_size_MW
        ct = hybrid_construction_months
        nt = num_turbines
        if nt < 30:
            nr = 1
            acs = 30000
        elif nt < 100:
            nr = round(0.05 * nt)
            acs = 240000
        else:
            nr = round(0.05 * nt)
            acs = 390000
        compound_security_cost = 9825 * nr + 29850 * ct + acs + 60 * ps + 62400
        site_facility_cost = construction_building_cost + compound_security_cost
    else:
        site_facility_cost = 0
    return site_facility_cost
c34056318103d6353add03f05925a2fffc69d51e
697,265
import six


def load_from_hparams_overrides(params, params_source, hparams_overrides):
    """Given a dictionary of hyperparameters and a list of overrides, merge them.

    Args:
        params: Python dict containing a base hyperparameters set.
        params_source: Python dictionary to record source of hyperparameters.
        hparams_overrides: Python list of strings. This is a set of k=v
            overrides for the hyperparameters in `params`; if `k=v1` in
            `params` but `k=v2` in `hparams_overrides`, the second value wins
            and the value for `k` is `v2`.

    Returns:
        The merged Python dict of hyperparameters and the updated source dict.
    """
    if params is None:
        raise ValueError(
            'Input dictionary is empty. It is expected to be loaded with default '
            'values')
    if not isinstance(params, dict):
        raise ValueError(
            'The base hyperparameters set must be a Python dict, was: {}'.format(
                type(params)))
    if hparams_overrides is None:
        return params, params_source
    if isinstance(hparams_overrides, six.string_types):
        hparams_overrides = [hparams_overrides]
    if not isinstance(hparams_overrides, list):
        raise ValueError(
            'Expected that hparams_overrides would be `None`, a single string, or a'
            ' list of strings, was: {}'.format(type(hparams_overrides)))

    for kv_pair in hparams_overrides:
        if not isinstance(kv_pair, six.string_types):
            raise ValueError(
                'Expected that hparams_overrides would contain Python list of strings,'
                ' but encountered an item: {}'.format(type(kv_pair)))
        key, value = kv_pair.split('=')
        parser = type(params[key])
        if parser is bool:
            params[key] = value not in ('0', 'False', 'false')
        else:
            params[key] = parser(value)
        params_source[key] = 'Command-line `hparams` flag'

    return params, params_source
2365a3ce67d4855662912bbd865e1e731648e7b1
697,266
def flatten_report_for_csv(report):
    """Flattens the data structure returned by `watson.report()` for a csv export.

    Dates are formatted in a way that Excel (default csv module dialect) can
    handle them (i.e. YYYY-MM-DD HH:mm:ss).

    The result is a list of dictionaries where each element can contain two
    different things:

    1. The total `time` spent in a project during the report interval.
       In this case, the `tag` value will be empty.
    2. The partial `time` spent in a tag and project during the report
       interval. In this case, the `tag` value will contain a tag associated
       with the project.

    The sum of all elements where `tag` is empty corresponds to the total
    time of the report.
    """
    result = []
    datetime_from = report['timespan']['from'].format('YYYY-MM-DD HH:mm:ss')
    datetime_to = report['timespan']['to'].format('YYYY-MM-DD HH:mm:ss')
    for project in report['projects']:
        result.append({
            'from': datetime_from,
            'to': datetime_to,
            'project': project['name'],
            'tag': '',
            'time': project['time']
        })
        for tag in project['tags']:
            result.append({
                'from': datetime_from,
                'to': datetime_to,
                'project': project['name'],
                'tag': tag['name'],
                'time': tag['time']
            })
    return result
a7af0385bb846aa7d88e5e5ef939c40712ea5f2e
697,267
def countAddendCombinations(num, addends):
    """Count how many combinations of addends can add to the value of 'num'.

    Parameters: 'num' must be positive. 'addends' must be sorted.
    """
    if len(addends) == 0:
        return 0
    elif num == 0:
        return 1
    elif num < addends[-1]:
        return countAddendCombinations(num, addends[:-1])
    else:
        lastRemoved = countAddendCombinations(num, addends[:-1])
        lastSubtracted = countAddendCombinations(num - addends[-1], addends)
        return lastRemoved + lastSubtracted
1816351b3b94b44c29bef2761f8f2ea778d4ab6b
697,268
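A worked example for the counting recursion above (editor's illustration): with addends [1, 2], the value 4 has exactly three combinations — 1+1+1+1, 1+1+2, and 2+2:

>>> countAddendCombinations(4, [1, 2])
3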
import threading
import warnings


def pool(klass):
    """Thread-safe pool of objects not currently in use, generates new object
    when empty. Use as a decorator. Decorated classes must have init() method
    to prepare them for reuse."""
    lock = threading.Lock()
    pool = set()
    orig_new = klass.__new__
    orig_init = klass.__init__

    def __new__(cls, *args, **kwargs):
        "Get object from pool, generating a new one if empty"
        with lock:
            if pool:
                obj = pool.pop()
                obj._used = True
                return obj
            return orig_new(cls, *args, **kwargs)
    klass.__new__ = __new__

    def __init__(self, *args, **kwargs):
        if hasattr(self, '_used'):
            self.init()
            del self._used
            return
        orig_init(self, *args, **kwargs)
    klass.__init__ = __init__

    def release(self):
        """Release for reuse"""
        if self in pool:
            warnings.warn(RuntimeWarning('Attempting double-release of ' + klass.__name__))
        else:
            pool.add(self)
    klass.release = release
    return klass
edc979b5aa5508d1cf950d26b92f8a3fa34efc20
697,269
import re


def postfinance_preprocess_notice(payment_notice):
    """Remove spaces from potential invoice numbers."""
    return re.sub(
        r"\b([0-9]{4}\s*-\s*[0-9]{4}\s*-\s*[0-9]{4})\b",
        lambda match: re.sub(r"\s+", "", match.group(0)),
        payment_notice,
    )
ce806fda4c63030b21d3e1747d31349b3757dadb
697,270
def get_verbose_name(address_object):
    """Return the settlement name in the format: г. Москва

    :param address_object: a FiasAddressObject instance
    """
    return u'{0}. {1}'.format(address_object.short_name, address_object.formal_name)
c94b4003071003c95b4f2edb450f50680cff5055
697,271
import os
import fnmatch


def find_all_matching(path, pattern):
    """Utility function that works like 'find' in bash."""
    if not os.path.exists(path):
        raise RuntimeError("Invalid path '{0}'".format(path))
    result = []
    for root, _, files in os.walk(path):
        for thisfile in files:
            if fnmatch.fnmatch(thisfile, pattern):
                result.append(os.path.join(root, thisfile))
    return result
c6f6cadd0558c05573e4a8cdefa801369c8c5b6e
697,273
# The stdlib re module does not support \p{...} properties; the docstring asks
# for the third-party regex module imported under the name re.
import regex as re


def reDiac():
    """Generate regex pattern to locate diacritics.

    Requires the regex module imported as re.
    Returns a compiled regex pattern.
    """
    unicodeBlockList = [
        r'\p{InCombining_Diacritical_Marks_for_Symbols}',
        r'\p{InSuperscripts_and_Subscripts}',
        r'\p{InCombining_Diacritical_Marks}',
        r'\p{InSpacing_Modifier_Letters}',
        # a missing comma in the original silently concatenated the next
        # two entries into one (invalid) alternative
        r'\p{InCombining_Diacritical_Marks_Extended}',
        r'\p{InCombining_Diacritical_Marks_Supplement}']
    additionalChars = [r'ᴸ', r'ᵇ', r':', r'<', r'←', r'=', r"'", r"‚", r"ᵊ"]
    pattern = r'(' + r'|'.join(unicodeBlockList + additionalChars) + r')'
    pattern = re.compile(pattern)
    return pattern
8c35844dc0d4d6dfa399efae3d2c58b885258348
697,274
import ipaddress


def _evaluate_ip_address(ip_address):
    """Evaluate supplied IPv4 address.

    Returns the supplied IPv4 address if valid and specified without a
    netmask, or returns the subnet broadcast address if the supplied IPv4
    address is specified with a netmask such as '192.168.1.5/24' or
    '192.168.1.5/255.255.255.0'.

    Parameters
    ----------
    ip_address : str
        Supplied IP address.

    Returns
    -------
    str
        Valid IPv4 address.

    Raises
    ------
    ValueError
        If `ip_address` does not contain a valid IPv4 address.
    """
    ip = ip_address.strip()
    try:
        ip = str(ipaddress.IPv4Address(ip))
    except ipaddress.AddressValueError:
        try:
            ip = str(ipaddress.IPv4Network(ip, strict=False).broadcast_address)
        except Exception as e:
            raise ValueError(f"[Error] Invalid IP address: {ip_address}") from e
    return ip
67b45d1ba169c9880c68d800efb944ff05f2c51b
697,275
from typing import Callable


def get_linear_anneal_func(
    start_value: float, end_value: float, start_step: int, end_step: int
) -> Callable:
    """Create a linear annealing function.

    Parameters
    ----------
    start_value : float
        Initial value for linear annealing.
    end_value : float
        Terminal value for linear annealing.
    start_step : int
        Step to start linear annealing.
    end_step : int
        Step to end linear annealing.

    Returns
    -------
    linear_anneal_func : Callable
        A function that returns annealed value given a step index.
    """
    def linear_anneal_func(step):
        if step <= start_step:
            return start_value
        if step >= end_step:
            return end_value
        # Formula for line when two points are known:
        #           y1 - y0
        # y - y0 = --------- (x - x0)
        #           x1 - x0
        return (end_value - start_value) / (end_step - start_step) * (
            step - start_step
        ) + start_value

    return linear_anneal_func
affe767318be5b07dfcf23d1a3e2e58e67750611
697,276
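A small usage sketch for the annealing factory above (editor's illustration; the values are chosen so the arithmetic is exact in floating point). Steps before the start clamp to the start value, steps past the end clamp to the end value:

>>> anneal = get_linear_anneal_func(0.0, 100.0, start_step=0, end_step=10)
>>> anneal(5)
50.0
>>> anneal(20)
100.0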
import time


def get_run_id():
    """Generate a unique identifier from the current Unix timestamp.

    :return: run id string
    """
    run_id = str(int(time.time()))
    return run_id
dfcfe2df2a94d707adfe31e2f582554f66e1f97f
697,277
import os


def testdata():
    """Returns testing dataset used in tests."""
    return os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "testdata", "testdata")
58e8dd63239209d278e6b98100221be69f7116ae
697,278
def bump_version(base: str, index: int = -1) -> str:
    """
    Increment one of the numerical positions of a version.

    :param base: Version core, such as 0.1.0.
        Do not include pre-release identifiers.
    :param index: Numerical position to increment. Default: -1.
        This follows Python indexing rules, so positive numbers start from
        the left side and count up from 0, while negative numbers start from
        the right side and count down from -1.
    :return: Bumped version.
    """
    bases = [int(x) for x in base.split(".")]
    bases[index] += 1

    limit = 0 if index < 0 else len(bases)
    i = index + 1
    while i < limit:
        bases[i] = 0
        i += 1

    return ".".join(str(x) for x in bases)
48d5c85c106e87733702f33dfcdd7c654478949e
697,279
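Two illustrative calls (editor's examples, not from the dataset). Bumping a positive index zeroes everything to its right:

>>> bump_version("0.1.1")
'0.1.2'
>>> bump_version("0.1.9", index=1)
'0.2.0'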
import torch


def ssim(prediction: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
    """
    Function computes the structural similarity.
    Source: https://github.com/ChristophReich1996/CellFlowNet

    :param prediction: (torch.Tensor) Prediction
    :param label: (torch.Tensor) Label
    :return: (torch.Tensor) SSIM value
    """
    assert prediction.numel() == label.numel(), \
        'Prediction tensor and label tensor must have the same number of elements'
    # Calc means and vars
    prediction_mean = prediction.mean()
    prediction_var = prediction.var()
    label_mean = label.mean()
    label_var = label.var()
    # Calc correlation coefficient
    correlation_coefficient = (1 / label.numel()) * torch.sum(
        (prediction - prediction_mean) * (label - label_mean))
    return ((2.0 * prediction_mean * label_mean) * (2.0 * correlation_coefficient)) / \
           ((prediction_mean ** 2 + label_mean ** 2) * (prediction_var + label_var))
2cdf069823f195f9730b13d00d48a25c13bc4884
697,280
def validate_pin_input(value):
    """Validate that the GPIO PIN is prefixed with a D."""
    try:
        int(value)
        return f"D{value}"
    except ValueError:
        return value.upper()
da5d27a57911819a99375d97f22e503067ee301c
697,281
def searchInsert(nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    if target in nums:
        return nums.index(target)
    else:
        for i in range(len(nums)):
            if target < nums[i]:
                return i
            if target > nums[i]:
                if i == len(nums) - 1:
                    return i + 1
                continue
fb79487e2f92229f609425638517618cdad5e3e4
697,283
def cards_remaining(card_list):
    """Return the number of cards that have not been matched yet."""
    num_remaining = 0
    for c in card_list:
        if c.is_unsolved():
            num_remaining += 1
    return num_remaining
b99e5fc8c1fe32e0b8ccb36548e094b9bf0f39ab
697,284
def get_network_id(networks, name):
    """Get network id based on name provided."""
    for network in networks:
        if network["Name"] == name:
            return network["Id"]
12e53ade2d661587a674435d8c57160d740aa48c
697,286
def get_table(connect, market, code, ktype):
    """note: market: 'DAY' | 'MIN' | 'MIN5'"""
    cur = connect.cursor()
    schema = "{market}_{ktype}".format(market=market, ktype=ktype).lower()
    cur.execute("SELECT 1 FROM information_schema.SCHEMATA where SCHEMA_NAME='{}'".format(schema))
    a = cur.fetchone()
    if not a:
        cur.execute("CREATE SCHEMA `{}`".format(schema))
        connect.commit()
    tablename = code.lower()
    cur.execute("SELECT 1 FROM information_schema.tables "
                "where table_schema='{schema}' and table_name='{name}'"
                .format(schema=schema, name=tablename))
    a = cur.fetchone()
    if not a:
        sql = """
            CREATE TABLE `{schema}`.`{name}` (
                `date` BIGINT(20) UNSIGNED NOT NULL,
                `open` DOUBLE UNSIGNED NOT NULL,
                `high` DOUBLE UNSIGNED NOT NULL,
                `low` DOUBLE UNSIGNED NOT NULL,
                `close` DOUBLE UNSIGNED NOT NULL,
                `amount` DOUBLE UNSIGNED NOT NULL,
                `count` DOUBLE UNSIGNED NOT NULL,
                PRIMARY KEY (`date`)
            )
            COLLATE='utf8_general_ci'
            ENGINE=MyISAM
            ;
        """.format(schema=schema, name=tablename)
        cur.execute(sql)
        connect.commit()
    cur.close()
    return "`{schema}`.`{name}`".format(schema=schema, name=tablename)
93cbeee43f2660693ff827f3f2442f7d97fe6565
697,287
import torch


def normal_transform_pixel(shape):
    """Compute the normalization matrix from image size in pixels to [-1, 1]."""
    tr_mat = torch.tensor([[1.0, 0.0, 0.0, -1.0],
                           [0.0, 1.0, 0.0, -1.0],
                           [0.0, 0.0, 1.0, -1.0],
                           [0.0, 0.0, 0.0, 1.0]])
    for idx in range(len(shape)):
        tr_mat[idx, idx] = tr_mat[idx, idx] * 2.0 / (shape[idx] - 1.0)
    tr_mat = tr_mat.unsqueeze(0)
    return tr_mat
8f178255108e565d9156cb01a8da5b88cd46c73a
697,288
def remove_config_prefix(config, prefix, skip=None):
    """Iterate over keys in dict and remove given prefix.

    Arguments
    ---------
    config : dict
        The configuration data.
    prefix : str
        The prefix to remove.
    skip : List[str], optional
        A list of keys which should not be altered.

    Returns
    -------
    ret : dict
        The transformed configuration.
    """
    if skip is None:
        skip = []

    def helper(key):
        return key.split(f"{prefix}.")[-1]

    return {helper(key): value for key, value in config.items()
            if f"{prefix}." in key and key not in skip}
0c557e673a42d93772bf357250435d1ca203d072
697,289
import tempfile
import sys
import subprocess
import shlex
import os


def survivor(samples, distance, ignore_type, minlength):
    """Execute SURVIVOR merge, with parameters:

    - samples.fofn (truth and test)
    - distance between calls (args.distance)
    - number of callers to support call (1)
    - require variants to have the same type (args.ignore_type)
    - require variants to be on the same strand (no)
    - estimate distance between calls (no)
    - specify minimal size of SV event (args.minlength)
    """
    fhf, fofn_f = tempfile.mkstemp()
    fhv, vcf_out = tempfile.mkstemp()
    with open(fofn_f, 'w') as fofn:
        for s in samples:
            fofn.write(s + "\n")
    survivor_cmd = f"SURVIVOR merge {fofn_f} {distance} 1 {ignore_type} -1 -1 {minlength} {vcf_out}"
    sys.stderr.write("Executing SURVIVOR...\n")
    subprocess.call(shlex.split(survivor_cmd), stdout=subprocess.DEVNULL)
    os.close(fhf)
    os.close(fhv)
    return vcf_out
3a16c331cda610ecd9cf2d9321d88c86f5532c6b
697,290
def lengthOfLongestSubstring(s):
    """
    :type s: str
    :rtype: int
    """
    # Sliding window: last_seen maps each character to its most recent index,
    # and the window [start, i] always contains unique characters.
    last_seen = {}
    start = 0
    max_len = 0
    for i, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= start:
            # Repeated character inside the window: move the window start
            # just past its previous occurrence.
            start = last_seen[ch] + 1
        last_seen[ch] = i
        max_len = max(max_len, i - start + 1)
    return max_len
43f8915567aa93e6aadb7b00b57d0a8d7e41fe9a
697,292
def format_result(input, params, offset=True):
    """Format result to an epiviz-compatible format.

    Args:
        input: input dataframe
        params: request parameters
        offset: defaults to True

    Returns:
        formatted JSON response
    """
    # measurement = params.get("measurement")[0]
    # input_json = []
    # for item in input_data:
    #     input_json.append({"chr": item[0], "start": item[1], "end": item[2], measurement: item[3]})
    # input = pandas.read_json(ujson.dumps(input_json), orient="records")
    # input = input.drop_duplicates()
    input.start = input.start.astype("float")
    input.end = input.end.astype("float")
    # input[measurement] = input[measurement].astype("float")
    # input["chr"] = params.get("seqName")
    # input = bin_rows(input)
    # input = pandas.DataFrame(input_data, columns=["start", "end", measurement])
    globalStartIndex = None
    data = {
        "rows": {
            "globalStartIndex": globalStartIndex,
            "useOffset": offset,
            "values": {
                "id": None,
                "chr": [],
                "strand": [],
                "metadata": {}
            }
        },
        "values": {
            "globalStartIndex": globalStartIndex,
            "values": {}
        }
    }
    if len(input) > 0:
        globalStartIndex = input["start"].values.min()
        if offset:
            minStart = input["start"].iloc[0]
            minEnd = input["end"].iloc[0]
            input["start"] = input["start"].diff()
            input["end"] = input["end"].diff()
            input["start"].iloc[0] = minStart
            input["end"].iloc[0] = minEnd
        col_names = input.columns.values.tolist()
        row_names = ["chr", "start", "end", "strand", "id"]
        data = {
            "rows": {
                "globalStartIndex": globalStartIndex,
                "useOffset": offset,
                "values": {
                    "id": None,
                    "chr": [],
                    "strand": [],
                    "metadata": {}
                }
            },
            "values": {
                "globalStartIndex": globalStartIndex,
                "values": {}
            }
        }
        for col in col_names:
            if params.get("measurement") is not None and col in params.get("measurement"):
                data["values"]["values"][col] = input[col].values.tolist()
            elif col in row_names:
                data["rows"]["values"][col] = input[col].values.tolist()
            else:
                data["rows"]["values"]["metadata"][col] = input[col].values.tolist()
    else:
        data["rows"]["values"]["start"] = []
        data["rows"]["values"]["end"] = []
        if params.get("metadata") is not None:
            for met in params.get("metadata"):
                data["rows"]["values"]["metadata"][met] = []
        # else:
        #     data["rows"]["values"]["metadata"] = None
    data["rows"]["values"]["id"] = None
    if params.get("datasource") != "genes":
        data["rows"]["values"]["strand"] = None
    return data
d304817cbeb993eef52e86a28187978f1a7887f0
697,293
def are_entangled(q1, q2):
    """Check for entanglement between two Qubits.

    :param q1: Qubit type object
    :param q2: Qubit type object
    :return: boolean based on entanglement
    """
    return q1.is_entangled() == id(q2)
49315c4a92d09180761047ec7b5ec93ff761afab
697,294
def read_messages(msg_file):
    """(file open for reading) -> list of str

    Precondition: msg_file is a message file that is already open for reading
    and contains one message per line.

    Read and return the contents of the file as a list of messages, in the
    order in which they appear in the file. Strip the newline from each line.
    """
    new_message = []
    for line in msg_file:
        # The returned messages should strip the newline from each line.
        new_message.append(line.strip())
    return new_message
220cda7e055f9c3b6a483500fe3aeef7d9752b70
697,295
def get_all_objs(content, vimtype):
    """Return all objects for a given VIM type.

    :param content:
    :param vimtype:
    :return:
    """
    obj = {}
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    for managed_object_ref in container.view:
        obj.update({managed_object_ref: managed_object_ref.name})
    return obj
50c2ef6345761da0faf1fe33d5cb32e4883d6875
697,296
def doi_conflict_check(agr_data, value):
    """Check whether a database reference has a conflict from DOI.

    Always passes here; the conflict gets checked during cross_reference
    resolution.

    :param agr_data:
    :param value:
    :return:
    """
    return 'Success'
a54b0839fe1a85226591bb2e4a35c75d1732f1bb
697,297
from typing import Dict
from typing import List


def avoid_body(my_head: Dict[str, int], body: List[dict]):
    """
    my_head: Dictionary of x/y coordinates of the Battlesnake head.
            e.g. {"x": 0, "y": 0}
    body: List of dictionaries of x/y coordinates for every segment of a
            Battlesnake.
            e.g. [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}]

    return: The set of moves that would collide with the body.
    """
    bad_moves = set()
    right = {"x": my_head["x"] + 1, "y": my_head["y"]}
    left = {"x": my_head["x"] - 1, "y": my_head["y"]}
    up = {"x": my_head["x"], "y": my_head["y"] + 1}
    down = {"x": my_head["x"], "y": my_head["y"] - 1}
    for part in body:
        print(f"{part} <> {right}")
        if part["x"] == right["x"] and part["y"] == right["y"]:
            print("ADD RIGHT")
            bad_moves.add("right")
        print(f"{part} <> {left}")
        if part["x"] == left["x"] and part["y"] == left["y"]:
            print("ADD LEFT")
            bad_moves.add("left")
        print(f"{part} <> {up}")
        if part["x"] == up["x"] and part["y"] == up["y"]:
            print("ADD UP")
            bad_moves.add("up")
        print(f"{part} <> {down}")
        if part["x"] == down["x"] and part["y"] == down["y"]:
            print("ADD DOWN")
            bad_moves.add("down")
    return bad_moves
3d7a151b694c6a6915f9c7c508cf7cd0e2e4d6f0
697,298
import numpy


def to_rad(angle_grad: float) -> float:
    """Convert the given angle in degrees to radians."""
    return angle_grad / 180 * numpy.pi
81b6fcf352c9c509e746a32a36f133904d9c2938
697,299
def AfficherDuree(duree):
    """Return a string representing 'duree' (a duration in milliseconds)."""
    # Integer division keeps the printed values whole numbers under Python 3.
    dureeSeconde = duree // 1000
    ms = duree % 1000
    mn = dureeSeconde // 60
    sec = dureeSeconde % 60
    chaine = ''
    if mn > 0:
        chaine += repr(mn) + ' min ' + repr(sec) + ' s '
    elif sec > 0:
        chaine += repr(sec) + ' s '
    chaine += repr(ms) + ' ms'
    return chaine
096179837e9551685d81269df9aa7c9bcb89acdf
697,300
def get_area(a, b):
    """Calculate area of rectangle with sides a and b."""
    return a * b
2907400c82c018634daf51091b509ab65258c269
697,301
import argparse


def parse(argv):
    """Parses command-line arguments"""
    parser = argparse.ArgumentParser()
    parser.add_argument("project", default="fableproject.json",
                        help="JSON project file")
    return parser.parse_args(argv)
441f33828f3234d5aa48ef60d03f385ff3876f90
697,302
def macro_with_both(name, number=3, *args, **kwargs):
    """Oh wow this macro has both. Not much else to say.

    Args:
        name: The name of the test rule.
        number: Some number used for important things
        *args: Other arguments to include
        **kwargs: Other attributes to include

    Returns:
        An empty list.
    """
    _ignore = [name, number, args, kwargs]
    return []
b1d1fce22662830de6ea1a9c3eee677c48c65a23
697,303
def output_filter_enabled(session, Type='Boolean', RepCap='', AttrID=1150055,
                          buffsize=0, action=['Get', '']):
    """[Output Filtering Enabled <?>]

    Enables output filtering selected by the FilterBandwidth property.

    Set: RepCap = <channel# (1-2)>
    """
    return session, Type, RepCap, AttrID, buffsize, action
23745d5a8f6b9727e33a0189c39466ef216fc081
697,304
def increment_duplicates(arr):
    """Increments duplicates in an array until there are no duplicates.

    Uses a hash set to keep track of which values have been seen in the
    array. Runs in O(n^2) time worst case (e.g. all elements are equal),
    O(n) time best case (elements are already unique).
    """
    seen = set()
    for i in range(len(arr)):
        while arr[i] in seen:
            arr[i] += 1
        seen.add(arr[i])
    return arr
33a456c75b7ca00ddb535a6726af3fe1944efc21
697,305
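A final usage sketch (editor's illustration): each duplicate is bumped upward past every value already seen, so the second 2 becomes 4 and the second 1 climbs to 5:

>>> increment_duplicates([3, 2, 1, 2, 1, 7])
[3, 2, 1, 4, 5, 7]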