Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
def rerooting(X, Topo, P, N, unit, merge, adj_bu, adj_td, adj_fin, root=0):
    """Rerooting (all-directions tree DP).

    # ref: https://qiita.com/Kiri8128/items/a011c90d25911bdb3ed3
    # ref: https://atcoder.jp/contests/dp/submissions/19898045

    Args:
        X (list): children lists of the tree
        Topo (list): topological order of the vertices
        P (list): parent of each vertex
        N (int): number of vertices
        unit ([type]): identity element for `merge`
        merge ([type]): function used to merge two dp values.
        adj_bu ([type]): function applied after merging (bottom-up). Roughly the g(i)
            part when the DP transition can be written as
            f(i) = sum(merge(j) for j := children of i) + g(i).
        adj_td ([type]): function applied after merging (top-down). The g(i) part when
            the DP transition can be written as f(i) = merge(j := parent of i) + g(i).
        adj_fin ([type]): function applied at the very end
        root (int, optional): root vertex. Defaults to 0.

    Returns:
        list: result of the rerooting (all-directions) tree DP.

    ex: simple case, but a problem where an implementation without prefix/suffix
        accumulation does not work.
        https://atcoder.jp/contests/dp/tasks/dp_v

        N, MOD = R()
        XY = [R1() for _ in range(N-1)]
        children, topo_sort, parent, partial_size = topological_sorted_tree(XY, N)
        dp = rerooting(
            children, topo_sort, parent, N, unit=1,
            merge=lambda a, b: a * b % MOD,
            adj_bu=lambda a, i: a + 1,
            adj_td=lambda a, i, p: a + 1,
            adj_fin=lambda a, i: a
        )
        print(*dp)

    ex2: case where the number of children is needed when updating a vertex
        https://atcoder.jp/contests/abc160/tasks/abc160_f
        (almost the same problem: https://atcoder.jp/contests/tdpc/tasks/tdpc_tree)

        N = r_int()
        XY = [R1() for _ in range(N-1)]
        MOD = 10**9+7
        children, topo_sort, parent, partial_size = topological_sorted_tree(XY, N)
        comb = ModComb(N, MOD)
        # Accumulate, via the DP, the fraction of valid orderings, and finally
        # multiply by the factorial to get the total number of combinations.
        # Quite mind-bending.
        # For example, in the N=6 tree with edges 1-2, 2-3, 2-4, 2-5, 2-6, when looking
        # at the update at vertex 2, among the orderings of the part below 2 the good
        # patterns are those where 2 comes first and 5-6 appear in the right order.
        # Since "5-6 in the right order" was already accounted for in the update at 5,
        # the only new factor to consider is multiplying by 1/5. That is adj_bu.
        # Stacking up the dp yields the probability of a good pattern among all
        # orderings; multiplying by the total count (N factorial) gives the answer.
        dp = rerooting(
            children, topo_sort, parent, N, unit=1,
            merge=lambda a, b: a * b % MOD,
            adj_bu=lambda a, i: a * comb.inv(partial_size[i]) % MOD,
            adj_td=lambda a, i, p: a * comb.inv(N-partial_size[i]) % MOD,
            adj_fin=lambda a, i: a * comb.fact[N-1] % MOD
        )
        print(*dp)

    ex3: longest distance in a tree (diameter)
        https://atcoder.jp/contests/arc022/tasks/arc022_3

        N = r_int()
        XY = [R1() for _ in range(N-1)]
        children, topo_sort, parent, partial_size = topological_sorted_tree(XY, N)
        dp = rerooting(
            children, topo_sort, parent, N, unit=0,
            merge=lambda a, b: max(a, b),
            adj_bu=lambda a, i: a + 1,
            adj_td=lambda a, i, p: a + 1,
            adj_fin=lambda a, i: a
        )
        mdp = max(dp)
        ans = []
        for i, d in enumerate(dp):
            if d == mdp:
                ans.append(i+1)
                break
        edges = [list() for _ in range(N)]
        for x, y in XY:
            edges[x].append(y)
            edges[y].append(x)
        arrived = [0]*N
        arrived[i] = 1
        tasks = deque([i])
        while tasks:
            task = tasks.popleft()
            for node in edges[task]:
                if not arrived[node]:
                    arrived[node] = 1
                    tasks.append(node)
        ans.append(task+1)
        print(*ans)
    """
    # Bottom-up part
    ACC = [unit] * N   # ACC[i]: aggregation at vertex i from every direction except its parent (i.e. from the leaf side), before adjustment
    res_BU = [0] * N   # res_BU[i]: ACC[i] with the bottom-up adjustment applied
    for i in Topo[:0:-1]:  # the root itself is not computed here
        p = P[i]  # parent node
        res_BU[i] = adj_bu(ACC[i], i)      # build res_BU[i] with the bottom-up function
        ACC[p] = merge(ACC[p], res_BU[i])  # aggregate into the parent
    res_BU[Topo[0]] = adj_fin(ACC[Topo[0]], Topo[0])
    res = [i for i in res_BU]

    # Top-down part
    AL = [unit] * N  # accum from left
    TD = [unit] * N  # aggregation from the root direction
    for i in Topo:
        # DP from the left (results stored in AL)
        ac = TD[i]  # ac: accumulator, cumulative merge from the left, including the parent
        for j in X[i]:
            AL[j] = ac
            ac = merge(ac, res_BU[j])
        # DP from the right (accumulated in ac)
        ac = unit  # the right side excludes the parent, so combined with the left side we get the accumulation over everything except the target child
        for j in X[i][::-1]:
            TD[j] = adj_td(merge(AL[j], ac), j, i)  # store in TD[j] the aggregation toward the root
            ac = merge(ac, res_BU[j])               # update ac (the DP from the right)
            res[j] = adj_fin(merge(ACC[j], TD[j]), j)
    return res
b2ffc778b3839a85a53a0ac8a6f82909840218dd
26,228
import tempfile
import os


def gen_color_file():
    """
    Generate a color file for gdaldem.
    The rows of the file should be [value R G B].
    """
    fp, temp_file = tempfile.mkstemp(suffix='.txt')
    max_ph = 100
    min_ph = 0
    range_ph = max_ph - min_ph
    # Manually define a viridis palette
    # which could be nicer
    rs = ['68', '72', '72', '69', '63', '57', '50', '45', '40', '35',
          '31', '32', '41', '60', '86', '116', '148', '184', '220', '253']
    gs = ['1', '21', '38', '55', '71', '85', '100', '113', '125', '138',
          '150', '163', '175', '188', '198', '208', '216', '222', '227', '231']
    bs = ['84', '104', '119', '129', '136', '140', '142', '142', '142', '141',
          '139', '134', '127', '117', '103', '85', '64', '41', '24', '37']
    with open(temp_file, 'w') as f:
        for i, c in enumerate(rs[:-1]):
            f.write(str(int(min_ph + (i + 1) * range_ph / len(rs))) + ' ' +
                    c + ' ' + gs[i] + ' ' + bs[i] + '\n')
        f.write(str(int(max_ph - range_ph / len(rs))) + ' ' +
                rs[-1] + ' ' + gs[-1] + ' ' + bs[-1] + '\n')
    os.close(fp)
    return temp_file
eff2fb623d3a6410428faf69dee4cebd6b031296
26,229
def get(x, key, default=None):
    """
    Get the value associated with the provided key from a `dict`-like object

    :param x: dictlike
        Any object with `__contains__()` or `__iter__()` and `__getitem__()`
        methods which accept `key`
    :param key: The key to extract from the provided `dict`-like object
    :param default: A default value to return if the provided key is not present
    :return: Either the value associated with the key or the default if the
        key is missing. If the object does not implement the required methods,
        an error will be thrown.
    """
    return x[key] if key in x else default
7e54520fe5a0ec8e56bfa0e55fbca6060282b838
26,230
def read_message(connection, timeout=0.1):
    """Attempt to read a single message from the given
    multiprocessing.Connection object.

    :param connection: A multiprocessing.Connection like object that supports
        poll(<timeout>) and recv() methods.
    :param timeout: The timeout (in seconds) while waiting for messages.
    :return: The message received from the connection.
    """
    if connection.poll(timeout):
        try:
            return connection.recv()
        except EOFError:
            pass
5890f6ad1dc7ecee37952df270f5810b149a1e16
26,231
def calc_csr(sigma_veff, sigma_v, pga, rd, gwl, depth):
    """
    Cyclic stress ratio from CPT, Eq 2.2.

    Note: `gwl` and `depth` are accepted but unused in this formulation.
    """
    return 0.65 * (sigma_v / sigma_veff) * rd * pga
becedff4526031f5047e68a0a2d51476bf56ca9b
26,232
import numpy


def matrixReduction(setHor, setVer, arrayToReduce):
    """
    Return, in a new array, the values from arrayToReduce for
    - columns from setHor
    - lines from setVer
    """
    listTemp = []
    for i in range(len(setVer)):
        listTemp.append(arrayToReduce[setVer[i].index, :])
    arrayTemp = numpy.array(listTemp)
    listTemp = []
    for i in range(len(setHor)):
        listTemp.append(arrayTemp[:, setHor[i].index])
    result = numpy.transpose(numpy.array(listTemp))
    return result
850035a42da9e0f377d1e10a7a513b0ec52039a3
26,234
def sentiment_score(x):
    """
    SECTION : sentiment
    DESCRIPTION : Return the text label for a sentiment score
    """
    if x == 0:
        return "공포"   # fear
    elif x == 1:
        return "놀람"   # surprise
    elif x == 2:
        return "분노"   # anger
    elif x == 3:
        return "슬픔"   # sadness
    elif x == 4:
        return "중립"   # neutral
    elif x == 5:
        return "행복"   # happiness
    else:
        return "혐오"   # disgust
947b22def8d30285e9ab316c99d07c847d3d5a6d
26,238
import logging


def get_log_level(level):
    """
    :param level: expressed in string (upper or lower case) or integer
    :return: integer representation of the log level or None
    """
    if type(level) is int:
        return level
    # This could be a string storing a number.
    try:
        return int(level)
    except ValueError:
        pass
    # Look up the name in the logging module.
    try:
        value = getattr(logging, level.upper())
        if type(value) is int:
            return value
        else:
            return None
    except AttributeError:
        return None
45ee3abfe8b4a32cef0ab91d5fa3e9ff85d5156f
26,239
def gen_dots(val):
    """Generate dots from real data.

    val = dict (x:y)
    return ox, oy lists
    """
    oy = []
    ox = []
    for x in sorted(val.keys()):
        ox.append(int(x[:-1]))
        if val[x][0] != 0:
            oy.append(1.0/val[x][0])
        else:
            oy.append(0)
    return ox, oy
cbd38754f696cd39b21fea1ae307f62819f4f7ee
26,240
def make_lock_uri(s3_tmp_uri, emr_job_flow_id, step_num):
    """Generate the URI to lock the job flow ``emr_job_flow_id``"""
    return s3_tmp_uri + 'locks/' + emr_job_flow_id + '/' + str(step_num)
3cff1dccca5659713ac9f85ad29a21fa401c058e
26,241
def bgr2rgb(img):
    """Converts an RGB image to BGR and vice versa"""
    return img[..., ::-1]
ece41c92d036ccf28b27d18019f0e0fc1b6d315b
26,243
def print_attention(iteration=1):
    """get attention in the command line

    @param iteration (integer) the number of attention characters needed
    """
    base = "## ## "
    # Note: the original loop did `base += base`, which doubles the string and
    # yields 2**iteration copies; repeating `iteration` times matches the docstring.
    return base * iteration
b5fc6adb44f0029545c7f6224bebd1c9f5b1ba69
26,244
import os


def call_skyview(field, survey, pos, fov, coord, proj='Car', pix=500):
    """Call Skyview to download data from a survey based on input parameters

    Args:
        field (str): name of the field, used in naming the output file
        survey (str): name of survey, from https://skyview.gsfc.nasa.gov/current/cgi/survey.pl
        pos (float,float): position coordinates as a tuple
        fov (float): FOV in degrees
        coord (str): coordinate system (e.g. Galactic, J2000, B1950)
        proj (str): projection of image. (e.g. Car, Sin)
        pix (int): pixel dimensions of image (e.g. 500)

    Returns:
        str: name of resulting fits file

    Examples:
        >>> call_skyview('pks1657-298', 'dss', (255.291,-29.911), 5, 'J2000')
        'skyview_pks1657-298_dss.fits'
        >>> call_skyview('B0329+54', 'nvss', (144.99497,-01.22029), 0.5, 'Gal')
        'skyview_B0329+54_nvss.fits'
    """
    # Assign position coordinates
    x, y = pos
    # Construct name of the resulting file
    fitsname = "Skyview_{field}_{survey}.fits".format(**locals())
    # Construct and send the command
    cmd = ('java -jar /home/ricci/Documents/work/Tools/VO_tools/skyview.jar '
           'coordinates={coord} projection={proj} position={x:.4f},{y:.4f} '
           'size={fov},{fov} pixels={pix},{pix} survey="{survey}" '
           'output="results/{fitsname}"').format(**locals())
    print(cmd)
    print('Executing command...')
    os.system(cmd)
    print('... done!')
    return fitsname
77112840bc01b3f7d9d00f573a563aa3fb2a9b8d
26,245
def del_sensitive_content(content, sensitive_keys):
    """
    Strip sensitive information from anything that is sent out by email:
    recursively walk the JSON and replace sensitive values with ****.
    """
    SENSITIVE_CONTENT = '******'
    if isinstance(content, dict):
        for dict_key, dict_value in content.items():
            if dict_key in sensitive_keys:
                content[dict_key] = SENSITIVE_CONTENT
            else:
                content[dict_key] = del_sensitive_content(dict_value, sensitive_keys)
    elif isinstance(content, list):
        for index, i_content in enumerate(content):
            content[index] = del_sensitive_content(i_content, sensitive_keys)
    else:
        for key in sensitive_keys:
            if key in str(content):
                content = SENSITIVE_CONTENT
    return content
da550ffe83fa34928be2b66828ff9f2d6fe854cf
26,246
def _get_filters_settings(agg):
    """
    Get the settings for a filters aggregation

    :param agg: the filter aggregation json data
    :return: dict of {setting_name: setting_value}
    """
    filter_settings = dict(filters=dict())
    settings = agg['settings']
    filters = settings['filters']
    for _filter in filters:
        query_string = {"query_string": {"query": _filter['query'],
                                         "analyze_wildcard": True}}
        filter_settings["filters"][_filter['query']] = query_string
    return filter_settings
91160853c4300dc23452c5d774f4ec314bd7f7b2
26,247
def find_min(nums):
    """
    Find minimum element in rotated sorted array
    :param nums: given array
    :type nums: list[int]
    :return: minimum element
    :rtype: int
    """
    left, right = 0, len(nums) - 1
    while left + 1 < right:
        mid = (left + right) // 2
        if nums[mid] < nums[right]:
            right = mid
        elif nums[mid] > nums[right]:
            left = mid
        else:
            # we cannot determine which side is the sorted subarray
            # when nums[mid] == nums[right]
            # so just move the right pointer one step back
            right -= 1
    if nums[left] < nums[right]:
        return nums[left]
    else:
        return nums[right]
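# Minimal sanity checks (hypothetical inputs), covering the distinct-value case
# and the duplicate case handled by the `right -= 1` branch:
assert find_min([4, 5, 6, 7, 0, 1, 2]) == 0
assert find_min([2, 2, 2, 0, 1]) == 0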
7b5c48e599e74c396617b3981d7b2e83061da05c
26,249
def parse_processing_parents(processings, parent_keys):
    """Return a dictionary relating each processing identifier to its parent.

    Parameters
    ----------
    processings : dict
        A dictionary of processing data, whose keys are processing identifiers
        and values are dictionaries containing corresponding processing data.
        This sort of dictionary is generated by reading the JSON file
        containing processing/artifact metadata derived from the processing
        network/tree on Qiita.
    parent_keys : ordered iterable of str
        An ordered collection of strings that are keys that will be
        sequentially used to find parent processing identifiers in the
        processing data of the `processings` argument.

    Returns
    -------
    dict
        Dictionary whose keys are processing identifiers and values are the
        identifiers of a parent processing.
    """
    processing_parents = {}
    for proc_id, proc_data in processings.items():
        for key in parent_keys:
            try:
                parent_id = proc_data[key]
            except KeyError:
                # Processings can also have no parents, in which case the for
                # loop will simply exhaust all keys.
                pass
            else:
                processing_parents[proc_id] = parent_id
                break
    return processing_parents
7a7940371648f7236ba73c519cebe8610c0a7b8e
26,250
def chunk_string(string, length):
    """
    Split a string into chunks of [length] characters, for easy human
    readability.

    Source: https://stackoverflow.com/a/18854817
    """
    return (string[0 + i:length + i] for i in range(0, len(string), length))
48d3c406ae9577cf3eb72c44398290eaca40821e
26,252
import os


def hascache():
    """Is the VIPY_CACHE environment variable set?"""
    return 'VIPY_CACHE' in os.environ
9821a5497c0dd077a2fbe25bd4aa758ff41958de
26,254
import math


def depth(n):
    """Tree depth (distance from root), used to calculate node spacing."""
    return int(math.log(n + 1, 2))
80f49914786f2ba322d7a9321cc75ee4fdb1f01a
26,255
def check_password(password):
    """
    Check whether a password is valid.

    :param password: its length must be 8 to 16
    :return:
    """
    pwd_len = len(password)
    if pwd_len < 8:
        return False
    elif pwd_len > 16:
        return False
    else:
        return True
d43f1cd72701fa7b6dd2939b3c0a9a7b92e036ad
26,259
from typing import Iterable
from typing import Callable
from typing import Generator


def argmap(functions: Iterable[Callable], args: Iterable) -> Generator:
    """Maps the same argument(s) to multiple functions.

    >>> inc = lambda x:x+1
    >>> dec = lambda x:x-1
    >>> list(argmap([inc, dec],[1]))
    [2, 0]

    you can even map multiple arguments

    >>> add = lambda a,b: a+b
    >>> sub = lambda a,b: a-b
    >>> list(argmap([add, sub], [2, 1]))  # two arguments
    [3, 1]

    Added in version: 0.1.0
    """
    return (f(*args) for f in functions)
56b5d28ecd6daeef8e78df0c8e774ee4aedafe09
26,260
import os


def SafeEnvironment(hosttype):
    """
    Returns a minimal environment dictionary suitable for ensuring build
    isolation. Basically we want to make sure the path doesn't point to things
    which may be currently installed on the build machine.
    """
    env = os.environ.copy()
    path = []
    if hosttype.startswith('windows'):
        if env.get('SYSTEMROOT'):
            path.append(os.path.join(env['SYSTEMROOT'], 'system32'))
            path.append(env['SYSTEMROOT'])
            # make and cygwin sh are both case sensitive and python
            # automagically capitalizes the keys in os.environ. Put
            # them back.
            env['SystemRoot'] = env['SYSTEMROOT']
            del env['SYSTEMROOT']
        env['ComSpec'] = env['COMSPEC']
        del env['COMSPEC']
    env['PATH'] = os.pathsep.join(path)
    return env
8369f9b1a30889dc27db6428f2fe563fecc3f8eb
26,261
def dot_t(inputa, inputb):
    """Minkowski dot product for four-vectors (arrays of shape (n, 4))"""
    return (inputa[:, 0]*inputb[:, 0] - inputa[:, 1]*inputb[:, 1]
            - inputa[:, 2]*inputb[:, 2] - inputa[:, 3]*inputb[:, 3])
5983a3da541d2884832154dac4a8fb11a583fa3e
26,262
from typing import List
from typing import Tuple


def pfd_prob_pervalue(R: List[Tuple[str]]):
    """
    R is a sorted relation (list of tuples) where the last column is the
    dependent attribute.

    det_count is determinant count (Vx)
    tup_count is attribute value count (Vx,Va)
    max_tup_count is most-frequent tuple count (Vx,Va)
    """
    R = sorted(R)
    assert R
    row = R[0]
    det, det_count, max_tup_count = row[:-1], 1, 1
    val, tup_count = row[-1], 1
    ndistinct, total_prob = 1, 0.0
    for row in R[1:]:
        if row[:-1] == det:
            # same determinant
            det_count += 1
            if row[-1] == val:
                # same tuple
                tup_count += 1
                if max_tup_count < tup_count:
                    max_tup_count = tup_count
            else:
                # new tuple
                val, tup_count = row[-1], 1
        else:
            # new determinant = new tuple
            total_prob += max_tup_count / det_count
            ndistinct += 1
            det, det_count, max_tup_count = row[:-1], 1, 1
            val, tup_count = row[-1], 1
    total_prob += max_tup_count / det_count
    return total_prob / ndistinct
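# A worked example on a hypothetical relation: for determinant 'a' the most
# frequent dependent value 'x' covers 2 of 3 rows, while 'b' is fully
# determined, so the per-value probability is (2/3 + 1/1) / 2 = 5/6.
R = [('a', 'x'), ('a', 'x'), ('a', 'y'), ('b', 'z')]
assert abs(pfd_prob_pervalue(R) - 5/6) < 1e-9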
935535db5023a4c2a927dd53475cbc329086f245
26,263
def SummarizeWindow(states):
    """Collapse the set or list of values in a particular temporal activity
    window into a single summary. Valid values are ambiguous, inactive,
    activation, inhibition.
    """
    # Verify that all states are recognized
    validStates = set(["ambiguous", "inactive", "activation", "inhibition"])
    for state in states:
        if state not in validStates:
            raise RuntimeError("Invalid temporal activity state: %s" % state)
    # If any are ambiguous, the entire window is ambiguous
    if "ambiguous" in states:
        return "ambiguous"
    # If all are activation or inhibition, return that state
    if all([s == "activation" for s in states]):
        return "activation"
    if all([s == "inhibition" for s in states]):
        return "inhibition"
    # A combination of activation and inhibition is ambiguous, regardless
    # of whether there is also inactive
    if "activation" in states and "inhibition" in states:
        return "ambiguous"
    # If all inactive, return inactive
    if all([s == "inactive" for s in states]):
        return "inactive"
    # Otherwise the states are a mix of inactive and activation or inhibition
    # so activation/inhibition dominates
    if "activation" in states:
        return "activation"
    if "inhibition" in states:
        return "inhibition"
    raise RuntimeError("Invalid case reached")
3d9ff71beddd0f38f0d5a98a4ab0a4e2aac047aa
26,265
def uniq(seq):
    """Return unique elements in the input collection, preserving the order.

    :param seq: sequence to filter
    :return: sequence with duplicate items removed
    """
    seen = set()
    return [x for x in seq if not (x in seen or seen.add(x))]
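# How the one-liner works: set.add() returns None (falsy), so the expression
# `x in seen or seen.add(x)` both tests membership and records x in one pass.
assert uniq([3, 1, 3, 2, 1]) == [3, 1, 2]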
2180b6aac4760d31503a9d4c7f2b3cf528d2a4f2
26,267
def calcular_recaudo_mixto_local(coste_producto_1, coste_producto_2, horas_1, horas_2):
    """
    num -> num
    num -> num
    num -> num
    num -> num

    Adds 50% to each product cost, converts each hour count to money
    (hours * 100000, plus a 25% surcharge), and sums the four amounts.

    :param coste_producto_1: cost of product one
    :param coste_producto_2: cost of product two
    :param horas_1: number of hours worked (one)
    :param horas_2: number of hours worked (two)
    :return: the sum of the 4 amounts

    >>> calcular_recaudo_mixto_local(1000,1500,1,1)
    253750.0
    >>> calcular_recaudo_mixto_local(2000,1000,8,10)
    2254500.0
    """
    if coste_producto_1 <= 0 or coste_producto_2 <= 0:
        return 'The product cost must be greater than zero.'
    if horas_1 <= 0 or horas_2 <= 0:
        return 'The number of hours must not be less than or equal to zero.'
    recaudo_mix = ((coste_producto_1 + coste_producto_1*0.5)
                   + (coste_producto_2 + coste_producto_2*0.5)
                   + ((horas_1*100000)*0.25 + (horas_1*100000))
                   + ((horas_2*100000)*0.25 + (horas_2*100000)))
    return float(recaudo_mix)
5aab7f81e3598ea5654b59aefb16580e28634889
26,269
def set_verbosity(level=1):
    """Set logging verbosity level, 0 is lowest."""
    global verbosity
    verbosity = level
    return verbosity
4588991bbfb9f52041e46fb0c302ded7f1a83667
26,273
def get_connection_config(configuration, connection):
    """
    Extracts information for a specified connection from configuration.

    Parameters:
        configuration (dict): Configuration dictionary containing a 'connections' key
        connection (string): Name of a connection to extract configuration for

    Returns:
        dict: Configuration associated with the specified connection
    """
    if connection not in configuration['connections']:
        raise RuntimeError('connection "%s" not found in config.connections' % connection)
    return configuration['connections'][connection]
772ffdcdea363d9adf5493fff01a060835471753
26,275
def subset_1d(da, dim, domain):
    """Subsets data along a single dimension.

    Parameters
    ----------
    da : xarray.DataArray
        Data to subset.
    dim : str
        Name of dimension to subset along.
    domain : bcdp.Domain
        1D Domain object with .min and .max accessors.

    Returns
    -------
    xarray.DataArray
        Subsetted data.
    """
    coord = da[dim]
    dmin, dmax = domain.min, domain.max
    if dim == 'time':
        coord = coord.astype(str)
        dmin, dmax = str(dmin), str(dmax)
    selection = (coord >= dmin) & (coord <= dmax)
    return da.isel(**{dim: selection})
29e13c4262f0f693c2d0ca16d4a2e235b37e400b
26,276
import os


def read_versioninfo(project):
    """Read the versioninfo file. If it doesn't exist, we're in a github
    zipball, and there's really no way to know what version we really are,
    but that should be ok, because the utility of that should be just about
    nil if this code path is in use in the first place."""
    versioninfo_path = os.path.join(project, 'versioninfo')
    if os.path.exists(versioninfo_path):
        with open(versioninfo_path, 'r') as vinfo:
            version = vinfo.read().strip()
    else:
        version = "0.0.0"
    return version
35db218765478f74146b6a245f3f424d5492d254
26,279
from typing import Mapping


def _drop_nulls(o):
    """
    Drop `None` valued keys from an object.
    """
    if isinstance(o, (dict, Mapping)):
        return {k: _drop_nulls(v) for k, v in o.items() if v is not None}
    elif isinstance(o, list):
        return [_drop_nulls(v) for v in o if v is not None]
    elif isinstance(o, set):
        return {_drop_nulls(v) for v in o if v is not None}
    else:
        return o
dff1ab8dcbb5578a406a24c67b7885d5aa66b762
26,280
def command_name_to_class(name: str) -> str:
    """
    Transform a command name, from the INDEX file, into a MIST-compliant
    class name.
    """
    return f"{name[0].upper()}{name[1:]}Command"
923a831f3f3ca10e445db760bc02eb19212f0e61
26,282
def simulate_until_attractor_or_max_t_storing_all_states(
        max_attractor_l, _simulate_until_attractor_or_target_substate_or_max_t,
        initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables):
    """
    Simulates until attractor is found (or one of optionally given constraints
    is exceeded). A state is not considered as part of the attractor until all
    the perturbations are carried out. The initial state can be considered as
    part of the attractor only if no perturbations are present.

    :param max_attractor_l: maximum length of attractor to search for
    :param _simulate_until_attractor_or_target_substate_or_max_t: [function] to
        perform simulation accounting for the time cap (if any)
    :param initial_state: initial state of the network
    :param perturbed_nodes_by_t: dict (by time step) of dicts (by node) of node states
    :param predecessor_node_lists: list of predecessor node lists
    :param truth_tables: list of dicts (key: tuple of predecessor node states,
        value: resulting node state)
    :return: attractor or None if not found
    """
    attractor = None
    states, state_codes_since_last_perturbation, _, attractor_is_found, *_ = \
        _simulate_until_attractor_or_target_substate_or_max_t(
            initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables)
    if attractor_is_found:
        # Find attractor states.
        attractor_first_state_code = state_codes_since_last_perturbation[-1]
        # Trajectory length is counted from when all perturbations are carried out.
        attractor_trajectory_l = state_codes_since_last_perturbation.index(attractor_first_state_code)
        attractor_l = len(state_codes_since_last_perturbation) - (attractor_trajectory_l + 1)
        # Check if attractor is compliant with length cap (if any).
        if attractor_l <= max_attractor_l:
            attractor_state_codes = state_codes_since_last_perturbation[-attractor_l:]
            attractor_min_state_code = min(attractor_state_codes)
            attractor_states = states[-attractor_l:]
            attractor_trajectory_l = len(states) - (attractor_l + 1)
            attractor = (attractor_min_state_code, attractor_state_codes,
                         attractor_states, attractor_trajectory_l)
    return attractor
35087f03e47afc5c8bbf9d34e7bb89717b95d826
26,284
import random


def data_p4(local_jobs_list, local_data_no):
    """
    This function will generate data for p4

    :param local_jobs_list: list of all the job titles
    :param local_data_no: total number of data entries in the dict
    :return: a dataset in dictionary format
    """
    # create an empty dict to begin with
    local_data_dict = {}
    # generate a data_no of entries in the dict
    for i in range(local_data_no):
        local_person_id = random.randint(10000, 99999)
        local_job = random.choice(local_jobs_list)
        local_age = random.randint(0, 120)
        local_area = random.choice([True, False])
        # add the generated data to a list
        local_data_dict[local_person_id] = [local_job, local_age, local_area]
    return local_data_dict
3d93719346620bf5429a6b2f4d5d215c90bf2ca5
26,285
import os


def get_inputs_filename(directory):
    """Return the name of the inputs file in a directory."""
    # At present we have no reason to look for anything other than inputs.
    if os.path.isfile(directory + '/inputs'):
        return 'inputs'
    elif os.path.isfile(directory + '/inputs_2d'):
        return 'inputs_2d'
    elif os.path.isfile(directory + '/inputs_3d'):
        return 'inputs_3d'
    else:
        print("Error: no inputs file found in " + directory + ".")
        exit()  # the original bare `exit` was a no-op; it must be called
2aa22acfa675be9c67a3e61ff31e70fa2f848c67
26,286
def pyeapi_result(output):
    """Return the 'result' value from the pyeapi output."""
    return output[0]['result']
d4af079c3776ec7bfb6fcdcfd396836b2edc58fb
26,288
def _parse_rgx(regex_string):
    """
    :return: a tuple of (modified regex_string, whether significant)
    """
    if regex_string[0:1] == r"~":
        return regex_string[1:], False
    elif regex_string[0:2] == r"\~":
        return regex_string[1:], True
    return regex_string, True
96f526af276e9edb98ee56e48ceedf56008aa09b
26,289
from typing import Optional
from typing import Any
import re


def _normalize_text(text: str, stemmer: Optional[Any] = None) -> str:
    """Rouge score should be calculated only over lowercased words and digits.

    Optionally, Porter stemmer can be used to strip word suffixes to improve
    matching. The text normalization follows the implementation from
    https://github.com/google-research/google-research/blob/master/rouge/tokenize.py.

    Args:
        text: An input sentence.
        stemmer: Porter stemmer instance to strip word suffixes to improve matching.
    """
    text = re.sub(r"[^a-z0-9]+", " ", text.lower())
    if stemmer:
        text = " ".join(stemmer.stem(x) if len(x) > 3 else x for x in text.split())
    return text.strip()
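# Quick illustration without a stemmer (a Porter stemmer instance, e.g. from
# nltk, would additionally strip suffixes of words longer than 3 characters):
assert _normalize_text("ROUGE-1 scores!") == "rouge 1 scores"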
d92ab44f8d742f6e4ea086e12faa537c4ae7f650
26,291
def get_job(objectid):
    """Return json job configuration for NS/CI."""
    return {
        "job_type": "job:ariamh_sciflo_create_interferogram",
        "payload": {
            "objectid": objectid,
        }
    }
6a0e38f44490784d57099cd8ca3657de38fa6a2b
26,292
import itertools


def generate_jobs(entry):
    """
    Generate a list of job configurations by varying the parameters in the
    given entry. In practice, computes the cartesian product of all the lists
    at the toplevel of the input dictionary, and generates one job for each
    resulting instance.

    Args:
        entry (dict): Input job configurations

    Returns:
        list: List of jobs with list parameters replaced by actual values
    """
    keys = [k for k, v in entry.items() if isinstance(v, list)]
    all_vals = itertools.product(
        *[v for __, v in entry.items() if isinstance(v, list)])
    all_jobs = []
    for vals in all_vals:
        job = entry.copy()
        for k, v in zip(keys, vals):
            job[k] = v
        all_jobs.append(job)
    return all_jobs
ee376c0a2e3817784b8aac20ffa0b1144138e852
26,293
import torch
import warnings


def safe_cholesky(covariance_matrix, jitter=1e-6):
    """Perform a safe cholesky decomposition of the covariance matrix.

    If cholesky decomposition raises Runtime error, it adds jitter to the
    covariance matrix.

    Parameters
    ----------
    covariance_matrix: torch.Tensor.
        Tensor with dimensions batch x dim x dim.
    jitter: float, optional.
        Jitter to add to the covariance matrix.
    """
    try:
        return torch.cholesky(covariance_matrix)
    except RuntimeError:
        dim = covariance_matrix.shape[-1]
        if jitter > 1:
            # When jitter is too big, then there is some numerical issue and
            # this avoids stack overflow.
            warnings.warn("Jitter too big. Maybe some numerical issue somewhere.")
            return torch.eye(dim)
        return safe_cholesky(
            covariance_matrix + jitter * torch.eye(dim), jitter=10 * jitter
        )
94c91a1b34908e7e7b62be89e6ea0a3bc7432bac
26,295
def passport_is_valid_1(passport):
    """
    Determines whether a passport is valid or not, only checking the presence
    of the fields.

    :param passport: passport
    :return: boolean
    """
    return all(field in passport.keys()
               for field in ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'])
fe5220f55eb4a95f063ef51d0a306b17bcb4af49
26,296
import re


def recognize_delivery_service(tracking_code: str):
    """Infer the parcel carrier for a tracking code.

    Can be used as a quick validation.
    """
    service = None
    # Strip whitespace
    tracking_code = re.sub(r"\s+", "", tracking_code)
    usps_pattern = [
        "^(94|93|92|95)[0-9]{20}$",  # the original listed 94 twice
        "^(94|93|92|95)[0-9]{22}$",
        "^(70|14|23|03)[0-9]{14}$",
        "^(M0|82)[0-9]{8}$",
        "^([A-Z]{2})[0-9]{9}([A-Z]{2})$",
    ]
    ups_pattern = [
        "^(1Z)[0-9A-Z]{16}$",
        "^(T)+[0-9A-Z]{10}$",
        "^[0-9]{9}$",
        "^[0-9]{26}$",
    ]
    fedex_pattern = ["^[0-9]{20}$", "^[0-9]{15}$", "^[0-9]{12}$", "^[0-9]{22}$"]
    usps = "(" + ")|(".join(usps_pattern) + ")"
    fedex = "(" + ")|(".join(fedex_pattern) + ")"
    ups = "(" + ")|(".join(ups_pattern) + ")"
    if re.match(usps, tracking_code) is not None:
        service = "USPS"
    elif re.match(ups, tracking_code) is not None:
        service = "UPS"
    elif re.match(fedex, tracking_code) is not None:
        service = "FedEx"
    else:
        raise ValueError("Unable to determine service for %s" % tracking_code)
    return service
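# Hypothetical tracking numbers, illustrating the match order (USPS patterns
# are tried first, then UPS, then FedEx):
assert recognize_delivery_service("9400 1118 9922 3100 0012 34") == "USPS"
assert recognize_delivery_service("1Z999AA10123456784") == "UPS"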
ff30a95a0f0bdbcaec25a8f8b6db1ac6346800ca
26,297
import re


def replace_urls(string_input: str, replace_by: str = "URL"):
    """
    Replace url's in a string by replace_by

    :param string_input: string input
    :param replace_by: string, what we want to replace the url with
    :return: string, with urls replaced by replaced_by
    """
    return re.sub(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
        replace_by, string_input)
56e66ef06580e608dc09c2ba690179d820819c9a
26,298
import os


def get_full_path(file: str, directory: str) -> str:
    """
    Get the full path to file from the full path to a directory and a relative
    path to that file in that directory.

    :param file: the relative path to file in a directory.
    :param directory: the full path of a directory.
    :return: the full path to file.
    """
    return os.path.abspath(os.path.join(directory, file))
074a19c7425c2b67229294c8d02c723c95e7f413
26,299
def target_title(target):
    """The title of a Target instance in text representations"""
    return 'Target #%d @ %fs' % (target.targetNumber, target.frameGrabTime)
a755aeb428c94ce6b79ac066877be1130112cba5
26,301
def palindrome_5(word_1: str, word_2: str) -> bool:
    """
    O(n^2)

    :param word_1:
    :param word_2:
    :return: True if the two words contain the same letters (i.e. are anagrams
        of each other), which is what this check actually tests despite the name
    """
    word_1_list = list(word_1)
    word_2_list = list(word_2)
    word_1_list.sort()
    word_2_list.sort()
    pos = 0
    matches = True
    while pos < len(word_1) and matches:
        if word_1_list[pos] == word_2_list[pos]:
            pos += 1
        else:
            matches = False
    return matches
09caa875d3c71b9888c905aa7e7568e164c86d07
26,302
from datetime import datetime


def parse_session(line):
    """Parse H4 line into session."""
    if not line.startswith("H4"):
        raise ValueError("Not H4 line for session parser!")
    return {
        "data": None,
        "start": datetime(
            year=int(line[6:10]),
            month=int(line[11:13]),
            day=int(line[14:16]),
            hour=int(line[17:19]),
            minute=int(line[20:22]),
            second=int(line[23:25])
        ),
        "end": datetime(
            year=int(line[26:30]),
            month=int(line[31:33]),
            day=int(line[34:36]),
            hour=int(line[37:39]),
            minute=int(line[40:42]),
            second=int(line[43:45])
        ),
        "troposphere_corrected": int(line[49]),
        "CoM_corrected": int(line[51]),
        "receive_amplitude_corrected": int(line[53]),
        "station_delay_corrected": int(line[55]),
        "spacecraft_delay_corrected": int(line[57]),
        "range_type": int(line[59]),
        "data_quality": int(line[61])
    }
3fa735495b296e5e3b6c5e4b60f23a48dfceada6
26,303
def add_storage_mappings_arguments_to_parser(parser):
    """
    Given an `argparse.ArgumentParser` instance, add the arguments required
    for the 'storage_mappings' field for both Migrations and Replicas:

    * '--default-storage-backend' will be under 'default_storage_backend'
    * '--disk-storage-mapping's will be under 'disk_storage_mappings'
    * '--storage-backend-mapping's will be under 'storage_backend_mappings'
    """
    parser.add_argument(
        "--default-storage-backend",
        dest='default_storage_backend',
        help="Name of a storage backend on the destination platform to "
             "default to using.")

    # NOTE: argparse will just call whatever 'type=' was supplied on a value
    # so we can pass in a single-arg function to have it modify the value:
    def _split_disk_arg(arg):
        disk_id, dest = arg.split('=')
        return {
            "disk_id": disk_id.strip('\'"'),
            "destination": dest.strip('\'"')}
    parser.add_argument(
        "--disk-storage-mapping", action='append', type=_split_disk_arg,
        dest='disk_storage_mappings',
        help="Mappings between IDs of the source VM's disks and the names of "
             "storage backends on the destination platform as seen by running "
             "`coriolis endpoint storage list $DEST_ENDPOINT_ID`. "
             "Values should be formatted with '=' (ex: \"id#1=lvm\"). "
             "Can be specified multiple times for multiple disks.")

    def _split_backend_arg(arg):
        src, dest = arg.split('=')
        return {
            "source": src.strip('\'"'),
            "destination": dest.strip('\'"')}
    parser.add_argument(
        "--storage-backend-mapping", action='append', type=_split_backend_arg,
        dest='storage_backend_mappings',
        help="Mappings between names of source and destination storage "
             "backends as seen by running `coriolis endpoint storage "
             "list $DEST_ENDPOINT_ID`. Values should be formatted with '=' "
             "(ex: \"id#1=lvm\"). Can be specified multiple times for "
             "multiple backends.")
c79cdd273b561e6605bc7322092f289251816d76
26,304
def intent(id):
    """
    Registers this method as an intent with given ID
    """
    def decorator(fx):
        fx._intent = id
        return fx
    return decorator
3a8dcf86915a682596629c658dc8544e88b1fce7
26,305
def zero_one_normalize(list_in):
    """
    Zero-one normalization: map the input values to the range 0-1.
    """
    if len(list_in) == 0:
        return []
    value_list = [i[1] for i in list_in]
    max_num = max(value_list)
    min_num = min(value_list)  # bug fix: the original used max() here, collapsing every output to 1
    return_list = []
    if max_num == min_num:
        for i in list_in:
            return_list.append((i[0], 1))
        return return_list
    for i in list_in:
        return_list.append((i[0], (i[1] - min_num) / (max_num - min_num)))
    return return_list
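# Sanity check of the normalization after the min() fix (hypothetical values):
assert zero_one_normalize([("a", 0), ("b", 5), ("c", 10)]) == [
    ("a", 0.0), ("b", 0.5), ("c", 1.0)]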
357fd675dd694bc57df19f487e09f2c6f2918a49
26,306
def remove_outliers(df):
    """
    For INS and OUTS, remove implausible values

    :param df: dataframe
    :return: dataframe
    """
    df = df[df['INS'] < 200000]
    df = df[df['INS'] >= 0]
    df = df[df['OUTS'] < 200000]
    df = df[df['OUTS'] >= 0]
    return df
c9f0002992b971dc31839e9803049a4db5b1702e
26,307
def construct_path(vertex, reverse_paths):
    """Returns the shortest path to a vertex using the reverse_path mapping."""
    path = []
    while vertex is not None:
        path.append(vertex)
        vertex = reverse_paths[vertex]
    return list(reversed(path))
9ed1aa226a2055c16c22743f7930c4981f2ac16c
26,308
import resource


def getTotalCpuTimeAndMemoryUsage():
    """
    Gives the total cpu time of itself and all its children, and the maximum
    RSS memory usage of itself and its single largest child.
    """
    me = resource.getrusage(resource.RUSAGE_SELF)
    childs = resource.getrusage(resource.RUSAGE_CHILDREN)
    totalCPUTime = me.ru_utime + me.ru_stime + childs.ru_utime + childs.ru_stime
    totalMemoryUsage = me.ru_maxrss + childs.ru_maxrss
    return totalCPUTime, totalMemoryUsage
ae320ce252e44c71306cae24a8041ea331032e39
26,309
def _learning_rate_schedule(global_step_value, max_iters, initial_lr):
    """Calculates learning_rate with linear decay.

    Args:
        global_step_value: int, global step.
        max_iters: int, maximum iterations.
        initial_lr: float, initial learning rate.

    Returns:
        lr: float, learning rate.
    """
    lr = initial_lr * (1.0 - global_step_value / max_iters)
    return lr
c0adcf9d64d83e9917b278fe7b49dd152cff9a47
26,310
import os


def getFileSize(fileObj):
    """Returns the total number of bytes in the given file object;
    the file cursor remains at the same location as when passed in.

    :param fileObj: file object of which to get the size
    :type fileObj: file object
    :return: total bytes in file object
    :rtype: int
    """
    # store existing file cursor location
    startingCursor = fileObj.tell()
    # seek to end of file
    fileObj.seek(0, os.SEEK_END)
    size = fileObj.tell()
    # reset cursor
    fileObj.seek(startingCursor)
    return size
667b5fe40b4542f8e96afbccf59f0197af204537
26,311
def read_pid(pid_file):
    """
    Read PID from given PID file.

    :param str pid_file: Name of the PID file to read from.
    :return: PID from given PID file.
    :rtype: int
    """
    with open(pid_file, 'r') as pidfd:
        return int(pidfd.readline().strip())
61df745a73483bd9a9dd16b722ca53d559f07539
26,312
def bytechr(i):
    """Return bytestring of one character with ordinal i; 0 <= i < 256."""
    if not 0 <= i < 256:
        if not isinstance(i, int):
            raise TypeError('an integer is required')
        else:
            raise ValueError('bytechr() arg not in range(256)')
    return chr(i).encode('latin1')
0949f417ec521edb4e3edef103e099ef43057869
26,313
def macro(name):
    """Replaces :func:`~flask_admin.model.template.macro`, adding support for
    using macros imported from another file. For example:

    .. code:: html+jinja

        {# templates/admin/column_formatters.html #}

        {% macro email(model, column) %}
            {% set address = model[column] %}
            <a href="mailto:{{ address }}">{{ address }}</a>
        {% endmacro %}

    .. code:: python

        class FooAdmin(ModelAdmin):
            column_formatters = {
                'col_name': macro('column_formatters.email')
            }

    Also required for this to work, is to add the following to the top of your
    master admin template:

    .. code:: html+jinja

        {# templates/admin/master.html #}

        {% import 'admin/column_formatters.html' as column_formatters with context %}
    """
    def wrapper(view, context, model, column):  # skipcq: PYL-W0613 (unused arg)
        if '.' in name:
            macro_import_name, macro_name = name.split('.')
            m = getattr(context.get(macro_import_name), macro_name, None)
        else:
            m = context.resolve(name)
        if not m:
            return m
        return m(model=model, column=column)
    return wrapper
622aec9bd44e5cdb412e4fd92b07fdf4f2cb8aa7
26,314
from typing import Tuple


def _get_level_percentiles(level: float) -> Tuple[float, float]:
    """Convert a credibility level to percentiles.

    Similar to the highest-density region of a symmetric, unimodal
    distribution (e.g. Gaussian distribution).

    For example, a credibility level of `95` will be converted to
    `(2.5, 97.5)`.

    Parameters
    ----------
    level:
        The credibility level used to calculate the percentiles. For example,
        `[95]` for a 95% credibility interval. These levels are split
        symmetrically, e.g. `95` corresponds to plotting values between the
        2.5% and 97.5% percentiles, and are equivalent to highest-density
        regions for a normal distribution. For skewed distributions,
        asymmetric percentiles may be preferable, but are not yet implemented.

    Returns
    -------
    The percentiles, with the lower percentile first.
    """
    lower_percentile = (100 - level) / 2
    return lower_percentile, 100 - lower_percentile
7b7ff713e4b5c95c6e38c5807ee3706bcb01bc0f
26,315
def hysteresis(im, weak, strong=255):
    """Transforms weak pixels into strong ones if at least 1 pixel around the
    one being processed is a strong one.

    Parameters
    ----------
    im
        input image
    weak
        weak pixel intensity value: not enough to be considered strong, but
        not non-relevant
    strong
        pixel with very high intensity

    Returns
    -------
    im
        output result image
    """
    row, col = im.shape
    for i in range(1, row-1):
        for j in range(1, col-1):
            if im[i, j] == weak:
                if ((im[i+1, j-1] == strong) or (im[i+1, j] == strong)
                        or (im[i+1, j+1] == strong) or (im[i, j-1] == strong)
                        or (im[i, j+1] == strong) or (im[i-1, j-1] == strong)
                        or (im[i-1, j] == strong) or (im[i-1, j+1] == strong)):
                    im[i, j] = strong
                else:
                    im[i, j] = 0
    return im
4fe1f6c7728f69cef393432a6298744e5a4c383d
26,316
def print_tree(ifaces):
    """
    Prints the tree for the given ifaces.
    """
    return " ".join(i.get_tree() for i in ifaces)
27d117b3454420580a836a6419300b918b08b135
26,317
from os import popen
from time import perf_counter
from typing import Union


def get_exe_path_from_port(port: Union[str, int]) -> Union[str, None]:
    """Get the executable path of the first process listening on a port.

    :param port: the port number
    :return: absolute path of the executable
    """
    process = popen(f'netstat -ano |findstr {port}').read().split('\n')[0]
    t = perf_counter()
    while not process and perf_counter() - t < 10:
        process = popen(f'netstat -ano |findstr {port}').read().split('\n')[0]
    processid = process.split(' ')[-1]
    if not processid:
        return
    else:
        file_lst = popen(
            f'wmic process where processid={processid} get executablepath').read().split('\n')
        return file_lst[2].strip() if len(file_lst) > 2 else None
734d329d73a88a1294b71154b5b351428ebb5926
26,318
def max_builtin():
    """map: Max element of an iterable."""
    # max("Madmax") is 'x', since lowercase letters sort after uppercase in ASCII.
    return "Ma{}".format(max("Madmax"))
a8cd56a68caa7bef6f77474973e86138466c27b5
26,320
import importlib
import warnings


def import_or_raise(library, error_msg=None, warning=False):
    """Attempts to import the requested library by name. If the import fails,
    raises an ImportError or warning.

    Args:
        library (str): The name of the library.
        error_msg (str): Error message to return if the import fails.
        warning (bool): If True, import_or_raise gives a warning instead of
            ImportError. Defaults to False.

    Returns:
        Returns the library if importing succeeded.

    Raises:
        ImportError: If attempting to import the library fails because the
            library is not installed.
        Exception: If importing the library fails.
    """
    try:
        return importlib.import_module(library)
    except ImportError:
        if error_msg is None:
            error_msg = ""
        msg = (f"Missing optional dependency '{library}'. "
               f"Please use pip to install {library}. {error_msg}")
        if warning:
            warnings.warn(msg)
        else:
            raise ImportError(msg)
    except Exception as ex:
        msg = f"An exception occurred while trying to import `{library}`: {str(ex)}"
        if warning:
            warnings.warn(msg)
        else:
            raise Exception(msg)
51d890fde3cbc9740299fd262b544d09acfe7bdc
26,321
def gettext(nodelist):
    """Return string for text in nodelist (encode() for Unicode conversion)"""
    text = ""
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            text = text + node.data
    return text.strip().encode()
bfff79cf9b71d00905c66a29d8059065e0889ec2
26,323
from bs4 import BeautifulSoup


def parse_response(response):
    """
    Convert a valid response from the https://www.myshiptracking.com/vessels/
    website into a dictionary containing the information parsed from the
    'vessels_table2'

    :param response:
    :return: dict
    """
    soup = BeautifulSoup(response.text, "html.parser")
    tables = soup.find_all("table", {"class": "vessels_table2"})
    data = []
    for table in tables:
        rows = table.findAll(lambda tag: tag.name == "tr")
        for row in rows:
            cols = row.find_all("td")
            cols = [ele.text.strip() for ele in cols]
            data.append([ele for ele in cols if ele])
    ans = {x[0]: x[1] for x in data if len(x) == 2}
    return ans
cd08616001d10de4c45b7240b4df5b71393ac68f
26,324
def twos_complement(n, bits):
    """Compute the twos complement of a positive int"""
    if n < 0 or n >= 2**bits:
        raise ValueError
    return 2**bits - n
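# Worked examples: in 8 bits, -1 is stored as 255 and -128 as 128.
assert twos_complement(1, 8) == 255
assert twos_complement(128, 8) == 128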
a088c5c7ce198316499c471b22e4e2feda9bca20
26,325
def bool_to_str(boolean: bool) -> str:
    """Converts a bool such as True to 'true'."""
    return 'true' if boolean else 'false'
515701bf0b8d60a875ac7d49446df7ea62a0abdb
26,326
def get_cancer_type_match(trial_match):
    """Trial curations with _SOLID_ and _LIQUID_ should report those as
    reasons for match. All others should report 'specific'.
    """
    cancer_type_match = 'specific'
    for criteria in trial_match.match_criterion.criteria_list:
        for node in criteria.criteria:
            if 'clinical' in node and 'oncotree_primary_diagnosis' in node['clinical']:
                diagnosis = node['clinical']['oncotree_primary_diagnosis']
                if diagnosis == '_LIQUID_':
                    cancer_type_match = 'all_liquid'
                    break
                elif diagnosis == '_SOLID_':
                    cancer_type_match = 'all_solid'
                    break
    return cancer_type_match
494db51953366a2e966b5f3b0957e0b44518cd64
26,330
def read_input(filename):
    """read input file and return list of raw intcodes."""
    with open(filename, "r") as infile:
        raw_intcodes = infile.readlines()[0].strip().split(",")
    return raw_intcodes
6ba22122472d69b7adf067d652b8da67092f87d4
26,332
def static_initalise_z(time_series):
    """Static version of initalise_z(). Used for parallel programming."""
    time_series.initalise_z()
    return time_series
dc968b8a12f041921419837d78ebf91c5903368b
26,333
from typing import Dict
from typing import List


def core_services_selection(simcore_docker_compose: Dict) -> List[str]:
    """Selection of services from the simcore stack"""
    ## OVERRIDES packages/pytest-simcore/src/pytest_simcore/docker_compose.py::core_services_selection fixture
    all_core_services = list(simcore_docker_compose["services"].keys())
    return all_core_services
56b6566bea4fae3cd4fa4d37eb54f1964835ed54
26,334
from typing import List


def dsv_line_to_list(line: str, *, delimiter=',', quote='"') -> List[str]:
    """
    Splits line into fields on delimiter ignoring delimiters in fields that
    start and end with quote

    NB: Empty fields produce an empty string

    :param line: The line to be split
    :param delimiter: The delimiter to use to split the fields
    :param quote: The quote char to surround fields that contain the delimiter
    :return: a list of the fields found
    """
    result = []
    within_quoted_field = False
    at_start_of_field = True
    last_was_quote = False  # Used to see if quote is not at end of field
    field = ''

    def new_field():
        nonlocal field, within_quoted_field, at_start_of_field, last_was_quote
        result.append(field)
        within_quoted_field = False
        at_start_of_field = True
        last_was_quote = False
        field = ''

    for char in line:
        if at_start_of_field:
            at_start_of_field = False
            # Check for quote
            if char == quote:
                within_quoted_field = True
                continue  # Skip quote, do not include in field
        if within_quoted_field:
            if char == quote:
                last_was_quote = True
                continue  # May not want to add this char if end of field
            if last_was_quote:
                if char == delimiter:
                    new_field()
                    continue
                else:
                    field += quote
                    last_was_quote = False
            field += char
        else:
            if char == delimiter:
                new_field()
            else:
                field += char
    # Add last field that was being filled (or empty if the line ended on a delimiter)
    result.append(field)
    return result
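# Illustrative call: the quoted field keeps its embedded delimiter.
assert dsv_line_to_list('a,"b,c",d') == ['a', 'b,c', 'd']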
d0e1248152ecbe95d68e57de15bf7c0e22be7c7f
26,335
def flatten(x):
    """Build a flat list out of any iter-able at infinite depth"""
    result = []
    for el in x:
        # Iteratively call itself until a non-iterable is found
        if hasattr(el, "__len__") and not isinstance(el, str):
            flt = flatten(el)
            result.extend(flt)
        else:
            result.append(el)
    return result
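# Illustrative call: nested lists and tuples flatten; strings stay whole.
assert flatten([1, [2, [3, "ab"]], (4, 5)]) == [1, 2, 3, "ab", 4, 5]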
c77950ae9e3839a450b797be1aaee9bff8a1f164
26,336
def combine_points(points_list):
    """Combine list of points (`ImagePoints`, `Points`, etc).

    List must be nonempty.

    Returns:
        combined_points
    """
    cls = type(points_list[0])
    return cls.combine(points_list)
bddc4262057359ff65d1b8356eacf445a78449e8
26,337
def complete_version(s: str):
    """
    Complete the version to meet the x.y.z-*+* format requirements of
    semver.VersionInfo
    """
    s = s.strip()
    count_dot = s.count('.')
    if count_dot == 0:
        return (s + '.0.0', 2)
    elif count_dot == 1:
        return (s + '.0', 1)
    else:
        return (s, 0)
19835bbf6d6f58eca477a94c7c52c049d6ea5446
26,338
from typing import List
import collections


def unique(list_: List) -> List:
    """Remove duplicate entries from list, keeping it in its original order

    >>> unique([1, 2, 2, 3, 4, 6, 2, 5])
    [1, 2, 3, 4, 6, 5]

    >>> unique(['bb', 'aa', 'aa', 'aa', 'aa', 'aa', 'bb'])
    ['bb', 'aa']
    """
    return list(collections.OrderedDict.fromkeys(list_))
8707e2d2dbf6b77f8818ad39282b113da9d22707
26,339
def read_reqn_file(path_to_file):
    """
    Reads the contents of a file and returns it as a list of lines.

    Parameters
    ----------
    path_to_file : str
        Path to file that is to read in

    Returns
    -------
    list of str
        The file contents as separate strings in a list
    """
    with open(path_to_file) as f:
        lines = f.readlines()
    return lines
3df65ff2b6475ffbdceb9c2727684ab69c146da4
26,341
def weekDay(obj):
    """
    Return the weekday from an obj with a datetime 'date' field;
    weekday 0 is Monday.
    """
    return obj['date'].weekday()
f67d086076e99727e2b39f6a52608144ac24165d
26,342
def get_bl_data(adni_comp, clin_data, scan_data):
    """This function extracts the data from the baseline visit only for each
    patient.

    Supply the three dataframes adni_comp, clin_data, and scan_data as input.
    """
    # extract the baseline data only
    adni_bl = adni_comp[adni_comp.EXAMDATE == adni_comp.EXAMDATE_bl]
    clin_bl = clin_data[clin_data.EXAMDATE == clin_data.EXAMDATE_bl]
    scan_bl = scan_data[scan_data.EXAMDATE == scan_data.EXAMDATE_bl]
    # return the three dataframes
    return adni_bl, clin_bl, scan_bl
fad3b7b422a23e597b2f37b8e2f2a9a702b1af0a
26,344
import math


def formatElapsedSeconds(seconds):
    """
    Returns a string of the form "mm:ss" or "hh:mm:ss" or "n days",
    representing the indicated elapsed time in seconds.
    """
    sign = ''
    if seconds < 0:
        seconds = -seconds
        sign = '-'
    # We use math.floor() instead of casting to an int, so we avoid
    # problems with numbers that are too large to represent as
    # type int.
    seconds = math.floor(seconds)
    hours = math.floor(seconds / (60 * 60))
    if hours > 36:
        days = math.floor((hours + 12) / 24)
        return "%s%d days" % (sign, days)
    seconds -= hours * (60 * 60)
    minutes = int(seconds / 60)
    seconds -= minutes * 60
    if hours != 0:
        return "%s%d:%02d:%02d" % (sign, hours, minutes, seconds)
    else:
        return "%s%d:%02d" % (sign, minutes, seconds)
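# Illustrative outputs for the three branches (hypothetical values):
assert formatElapsedSeconds(3725) == "1:02:05"
assert formatElapsedSeconds(-90) == "-1:30"
assert formatElapsedSeconds(40 * 24 * 60 * 60) == "40 days"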
de8ec8614dd534871a3628c15f3d89f3f7a87d6f
26,345
def from_keyed_iterable(iterable, key, filter_func=None):
    """Construct a dictionary out of an iterable, using an attribute name as
    the key. Optionally provide a filter function, to determine what should be
    kept in the dictionary."""
    generated = {}
    for element in iterable:
        try:
            k = getattr(element, key)
        except AttributeError:
            raise RuntimeError("{} does not have the keyed attribute: {}".format(
                element, key))
        if filter_func is None or filter_func(element):
            if k in generated:
                generated[k] += [element]
            else:
                generated[k] = [element]
    return generated
e2d349876f446b378d2b6b5b535f9119e648cfbe
26,346
def get_mms_run_command(model_names, processor="cpu"):
    """
    Helper function to format the run command for MMS

    :param model_names:
    :param processor:
    :return: <str> Command to start MMS server with given model
    """
    if processor != "eia":
        mxnet_model_location = {
            "squeezenet": "https://s3.amazonaws.com/model-server/models/squeezenet_v1.1/squeezenet_v1.1.model",
            "pytorch-densenet": "https://dlc-samples.s3.amazonaws.com/pytorch/multi-model-server/densenet/densenet.mar",
            "bert_sst": "https://aws-dlc-sample-models.s3.amazonaws.com/bert_sst/bert_sst.mar"
        }
    else:
        mxnet_model_location = {
            "resnet-152-eia": "https://s3.amazonaws.com/model-server/model_archive_1.0/resnet-152-eia.mar"
        }
    if not isinstance(model_names, list):
        model_names = [model_names]
    for model_name in model_names:
        if model_name not in mxnet_model_location:
            raise Exception(
                "No entry found for model {} in dictionary".format(model_name)
            )
    parameters = [
        "{}={}".format(name, mxnet_model_location[name]) for name in model_names
    ]
    mms_command = (
        "mxnet-model-server --start --mms-config /home/model-server/config.properties --models "
        + " ".join(parameters)
    )
    return mms_command
e32cb9a9f1dbe8992edbf02b6961b7ceaa7f2a3a
26,348
def sanity_check():
    """
    Perform an initial sanity check before doing anything else in a given
    workflow. This function can be used to verify importing of modules that
    are otherwise used much later, but it is better to abort the pilot if a
    problem is discovered early.

    :return: exit code (0 if all is ok, otherwise non-zero exit code).
    """
    return 0
97db2259f32a0eb67515c5cd427ff03aea5a300e
26,349
def _find_shortest_indentation(lines):
    """Return the shortest indentation."""
    assert not isinstance(lines, str)
    indentation = None
    for line in lines:
        if line.strip():
            non_whitespace_index = len(line) - len(line.lstrip())
            _indent = line[:non_whitespace_index]
            if indentation is None or len(_indent) < len(indentation):
                indentation = _indent
    return indentation or ''
e85c24d56c96b50dd82cb20d35365bd889eaf8bd
26,350
import os
import shutil


def copy(target_file: str, output_dir: str) -> str:
    """Copy a file."""
    # Create the directory if it does not exist
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    # Copy
    shutil.copy(target_file, output_dir)
    return target_file
0e3723fbc744fc65b48962c7125f5d23ae69c01e
26,351
import hashlib


def check_sum(fl_name, tid=None):
    """
    compute checksum for the generated file `fl_name`
    """
    hasher = hashlib.md5()
    with open(fl_name, 'rb') as fin:
        for chunk in iter(lambda: fin.read(4096), b""):
            hasher.update(chunk)
    ret = hasher.hexdigest()
    if tid is not None:
        ret = ret + str(tid)
    return ret
842f6de54827b524f1d5cf18d4c5ad18b8ad8b59
26,352
def lazy_isinstance(instance, module, name):
    """Use string representation to identify a type."""
    # Notice, we use .__class__ as opposed to type() in order
    # to support object proxies such as weakref.proxy
    cls = instance.__class__
    module_matches = cls.__module__ == module
    name_matches = cls.__name__ == name
    return module_matches and name_matches
5a36b20e873ca4391e120dd06e833ff5c6d15db8
26,353
def _get_class_with_reference(visible_name: str, ref: str) -> str:
    """
    Return the name of the class with a valid reference to be used by sphinx.
    """
    return f"\\ :class:`{visible_name} <{ref}>`\\"
4a31fdcffcd2b295deecba062a683bb1cab91bee
26,354
import os
import glob
import yaml


def _get_input_data_files(cfg):
    """Get a dictionary containing all data input files."""
    metadata_files = []
    for filename in cfg['input_files']:
        if os.path.isdir(filename):
            metadata_files.extend(
                glob.glob(os.path.join(filename, '*metadata.yml')))
        elif os.path.basename(filename) == 'metadata.yml':
            metadata_files.append(filename)
    input_files = {}
    for filename in metadata_files:
        with open(filename) as file:
            metadata = yaml.safe_load(file)
            input_files.update(metadata)
    return input_files
258248df10e27b5ca0a68400708998de9d2c09bc
26,357
def _parse_total_magnetization(line, lines):
    """Parse the total magnetization, which is somewhat hidden"""
    toks = line.split()
    res = {"number of electrons": float(toks[3])}
    if len(toks) > 5:
        res["total magnetization"] = float(toks[5])
    return res
886ecedd18e7d082a3055961cd2c561637eb57f7
26,358
import os
import time


def wait_for_file(path, timeout=60):
    """Wait for a file to exist in the filesystem (blocking).

    :param path: Path to wait on
    :param timeout: Max time to wait
    :return: True if file exists after timeout, else False
    """
    t = 0
    while t < timeout:
        if os.path.exists(path) and os.path.isfile(path):
            return True
        time.sleep(1)
        t += 1
    return False
cfabeade0141be5972430a5eb04d2c1e1c92eea6
26,359
import optparse


def Options():
    """Returns an option parser instance."""
    p = optparse.OptionParser('split_doc.py [options] input_file out_prefix')
    # Like awk -v
    p.add_option(
        '-v', dest='default_vals', action='append', default=[],
        help="If the doc's own metadata doesn't define 'name', set it to this value")
    p.add_option(
        '-s', '--strict', dest='strict', action='store_true', default=False,
        help="Require metadata")
    return p
565ccb65e6938397bec3c23381a376303dad8366
26,360
def filter_transitional_edges(df):
    """
    Filters out gradually rising/falling edges that are generated when an
    appliance state is changed
    """
    # NOTE: currently a placeholder that returns the frame unchanged.
    return df
8648589cb9198063f66b33189d57a98ea1962c47
26,361
def replace_location_in_cwl_tool(spec):
    """Recursively replace absolute paths with relative."""
    # tools
    inputs_parameters = []
    for param in spec["inputs"]:
        if param["type"] == "File":
            if param.get("default", ""):
                location = "location" if param["default"].get("location") else "path"
                param["default"][location] = param["default"][location].split("/")[-1]
        inputs_parameters.append(param)
    spec["inputs"] = inputs_parameters
    # workflows
    if spec.get("steps"):
        steps = []
        for tool in spec["steps"]:
            tool_inputs = []
            for param in tool["in"]:
                if param.get("default") and type(param["default"]) is dict:
                    if (
                        param["default"].get("class", param["default"].get("type"))
                        == "File"
                    ):
                        location = (
                            "location" if param["default"].get("location") else "path"
                        )
                        param["default"][location] = param["default"][location].split(
                            "/"
                        )[-1]
                tool_inputs.append(param)
            tool["in"] = tool_inputs
            steps.append(tool)
        spec["steps"] = steps
    return spec
dbd1c152b7c196231f9a25cd1be8aff9230f1637
26,362
import os


def disk_stat(path):
    """
    This function returns disk usage percentage
    """
    disk = os.statvfs(path)
    percent = ((disk.f_blocks - disk.f_bfree) * 100
               / (disk.f_blocks - disk.f_bfree + disk.f_bavail) + 1)
    return percent
5bee094c3afbea18b784d69104c14583b9d90a78
26,364
def int_median_cutter(lower_bound: int, upper_bound: int, value: int):
    """
    Simple function for cutting values to fit between bounds

    Args:
        lower_bound (int): lower cutting bound
        upper_bound (int): upper cutting bound
        value (int): value to fit between bounds

    Returns:
        value cut to fit into bounds (as integer)
    """
    return int(max(min(value, upper_bound), lower_bound))
dc6ebea3876f35470b2289312596f83ab8ba5fed
26,365