content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def is_numba_installed(raise_error=False):
    """Test if numba is installed.

    Parameters
    ----------
    raise_error : bool
        If True, raise an error when numba is not installed.

    Returns
    -------
    bool
        True if numba can be imported, False otherwise.

    Raises
    ------
    IOError
        If ``raise_error`` is True and numba is missing.
    """
    try:
        # BUG FIX: the original try-block contained no import, so ImportError
        # could never be raised and the function always reported True.
        import numba  # noqa: F401
        is_installed = True
    except ImportError:
        is_installed = False
    # Raise error (if needed) :
    if raise_error and not is_installed:
        raise IOError("numba not installed. See http://numba.pydata.org/ for "
                      "installation instructions.")
    return is_installed
1f7e2ee6e880574522641dd635c2b52d13ea9e3a
40,814
import re


def validate_account_to_dashed(account):
    """
    Validate that the provided string is a valid AdWords account ID and
    return it in dashed ``XXX-XXX-XXXX`` format.

    :param str account: AdWords account, either already dashed or 10 digits
    :rtype: str
    :return: Dashed format
    :raises ValueError: if the account matches neither format
    """
    account = str(account).strip()
    # BUG FIX: the original used re.match without an end anchor, so trailing
    # garbage (e.g. "123-456-7890x") was silently accepted; fullmatch anchors
    # both ends.
    if re.fullmatch(r"[0-9]{3}-[0-9]{3}-[0-9]{4}", account):
        return account
    if re.fullmatch(r"[0-9]{10}", account):
        return '-'.join([account[0:3], account[3:6], account[6:]])
    raise ValueError("Invalid account format provided: {}".format(account))
30eae40d2b205aeebc99cfc38864893d2fe6e7b8
40,815
def colourfulness_components(C_RG, C_YB, B_rw):
    """
    Return the *colourfulness* components :math:`M_{RG}` and :math:`M_{YB}`.

    Each chroma component is scaled by the ideal-white brightness correlate
    divided by 100.

    Parameters
    ----------
    C_RG : numeric
        *Chroma* component :math:`C_{RG}`.
    C_YB : numeric
        *Chroma* component :math:`C_{YB}`.
    B_rw : numeric
        Ideal white *brightness* correlate :math:`B_{rw}`.

    Returns
    -------
    tuple of numeric
        *Colourfulness* components :math:`(M_{RG}, M_{YB})`.

    Examples
    --------
    >>> C_RG = -0.0028852716381965863
    >>> C_YB = -0.013039632941332499
    >>> B_rw = 125.24353925846037
    >>> colourfulness_components(C_RG, C_YB, B_rw)  # doctest: +ELLIPSIS
    (-0.0036136..., -0.0163312...)
    """
    # Same operation order per element as the original (c * B_rw / 100),
    # so floating-point results are bit-identical.
    return tuple(c * B_rw / 100 for c in (C_RG, C_YB))
52b92618442ab87eba516ca1f2d41349a5f1120e
40,817
def procedural(it, f):
    """Procedural topology.

    Builds an adjacency mapping by applying the edge function to each node.

    - it: iterator of node labels
    - f: label -> [label] -- defines the edges
    """
    return {label: f(label) for label in it}
9bc7638405e03005aa17c0af8a9ed792307524e9
40,818
def build_filename(
    name: str, suffix: str = "", prefix: str = "", max_length: int = 128
) -> str:
    """Join prefix, (possibly truncated) name and suffix with dashes.

    Empty parts are skipped; the name is cut so the result stays within
    ``max_length`` (the -5 budget leaves room for the joining dashes and
    slack, as in the original).

    >>> build_filename("name")
    'name'
    >>> build_filename("name", "suffix", "prefix")
    'prefix-name-suffix'
    >>> build_filename("loooooooong_nameeeeee", "suffix", max_length=20)
    'loooooooo-suffix'
    """
    budget = max_length - len(suffix) - len(prefix) - 5
    parts = (prefix, name[:budget], suffix)
    return "-".join(part for part in parts if part)
2081b9b8f6723d0f0e1c80c919454a6b0b98f64a
40,820
def _get_installers_from_configuration(configs): """Get installers from configurations. Example: { <installer_isntance>: { 'alias': <instance_name>, 'id': <instance_name>, 'name': <name>, 'settings': <dict pass to installer plugin> } } """ installers = {} for config in configs: name = config['NAME'] instance_name = config.get('INSTANCE_NAME', name) installers[instance_name] = { 'alias': instance_name, 'id': instance_name, 'name': name, 'settings': config.get('SETTINGS', {}) } return installers
ca6ddc52159648af38a8a2c37bb8e993dcd3d50b
40,821
import inspect def _get_func_name(func): """Get function full name. Parameters ---------- func : callable The function object. Returns ------- name : str The function name. """ parts = [] module = inspect.getmodule(func) if module: parts.append(module.__name__) qualname = func.__qualname__ if qualname != func.__name__: parts.append(qualname[: qualname.find(".")]) parts.append(func.__name__) return ".".join(parts)
7deeef30be87bb4ee84c481491966d3017ab7383
40,822
def create_cfg_ti(run_dir, receptor_f, ligand_f, ambig_f, target_f):
    """
    Create HADDOCK3 configuration file for the first scenario.

    Parameters
    ----------
    run_dir : path or str
        Path to the run directory; where run results will be saved.
    receptor_f : Path or str
        Absolute path pointing to the receptor PDB file.
    ligand_f : Path or str
        Absolute path pointing to the ligand PDB file.
    ambig_f : Path or str
        Absolute path pointing to the `ambig.tbl` file.
    target_f : Path or str
        Absolute path pointing to the reference PDB used by the
        `caprieval` modules.  (Was missing from the original docstring.)

    Returns
    -------
    str
        The HADDOCK3 configuration file for benchmarking.
    """
    cfg_str = \
f"""
run_dir = {str(run_dir)!r}
ncores = 48
molecules = [
    {str(receptor_f)!r},
    {str(ligand_f)!r}
    ]

[topoaa]

[rigidbody]
ambig_fname = {str(ambig_f)!r}
sampling = 1000
noecv = false

[caprieval]
reference = {str(target_f)!r}

[seletop]
select = 200

[flexref]
ambig_fname = {str(ambig_f)!r}
noecv = true

[caprieval]
reference = {str(target_f)!r}

[mdref]
ambig_fname = {str(ambig_f)!r}
noecv = true

[caprieval]
reference = {str(target_f)!r}
"""
    return cfg_str
9cf3dcc43e5e1c29de51c069d05a9eec7bd513e3
40,824
def get_type(attributes):
    """Compute mention type.

    Args:
        attributes (dict(str, object)): Attributes of the mention, must
            contain values for "pos", "ner" and "head_index".

    Returns:
        str: The mention type, one of NAM (proper name), NOM (common noun),
            PRO (pronoun), DEM (demonstrative pronoun) and VRB (verb).
    """
    head = attributes["head_index"]
    pos = attributes["pos"][head]
    if pos.startswith("NNP"):
        return "NAM"
    if attributes["ner"][head] != "NONE":
        return "NAM"
    if pos.startswith("PRP"):
        return "PRO"
    if pos.startswith("DT"):
        return "DEM"
    if pos.startswith("VB"):
        return "VRB"
    # Original fell through to NOM for both NN* and anything else.
    return "NOM"
a8f8fd82f6b68b9bcb2332b0087fae47ba3ff50e
40,825
from typing import MutableMapping


def flatten_dict(d, parent_key='', sep='_') -> dict:
    """
    Flatten a nested dictionary into a single level dictionary.

    Extension of https://stackoverflow.com/a/6027615/3838313 capable of
    handling lists.

    Args:
        d: dictionary to flatten
        parent_key: parent key used for recursion
        sep: separator for the nested keys

    Returns:
        dict: single-level dictionary with compound keys
    """
    flat = {}
    for key, value in d.items():
        compound = parent_key + sep + key if parent_key else key
        if isinstance(value, MutableMapping):
            flat.update(flatten_dict(value, compound, sep=sep))
        elif isinstance(value, list):
            # NOTE: list indices are always joined with '_' regardless of
            # `sep`, matching the original behavior.
            for idx, element in enumerate(value):
                if isinstance(element, MutableMapping):
                    flat.update(flatten_dict(element, f"{compound}_{idx}", sep=sep))
                else:
                    flat[f"{compound}_{idx}"] = element
        else:
            flat[compound] = value
    return flat
72d88a9556c78beb4aa1adf6d2519bd892d3927d
40,826
import os


def get_ipaddress():
    """ Get the IP Address """
    # Collects per-interface address details by shelling out to the Linux
    # `ip` CLI.  Returns {'interface': [names], 'itfip': [per-iface rows]}
    # on success, or the error message string on any failure.
    # NOTE(review): requires `ip`, `grep` and `awk` on PATH -- Linux only.
    data = []
    try:
        # Interface names whose link state is LOWER_UP (i.e. carrier present).
        eth = os.popen("ip addr | grep LOWER_UP | awk '{print $2}'")
        iface = eth.read().strip().replace(':', '').split('\n')
        eth.close()
        # Drop the first entry -- presumably the loopback interface; confirm.
        del iface[0]
        for i in iface:
            # Second column of `ip addr show` output, skipping the
            # "forever" lifetime tokens.
            pipe = os.popen(
                "ip addr show " + i + "| awk '{if ($2 == \"forever\"){!$2} else {print $2}}'")
            data1 = pipe.read().strip().split('\n')
            pipe.close()
            # Pad short results so every row has at least 4 entries.
            if len(data1) == 2:
                data1.append('unavailable')
            if len(data1) == 3:
                data1.append('unavailable')
            # First slot carries the interface name.
            data1[0] = i
            data.append(data1)
        ips = {'interface': iface, 'itfip': data}
        data = ips
    except Exception as err:
        # Best-effort: callers receive the error text instead of a dict.
        data = str(err)
    return data
2a0d028d6ed6fb1e1972e77822fe7dc9e8dc386d
40,827
import math


def litres(time):
    """
    Finds the amount of water needed to be drunk for x amount of cycling hours.

    Half a litre per hour, truncated toward zero.

    :param time: integer value.
    :return: the number of litres Nathan will drink, rounded to the smallest value.
    """
    half_litres_per_hour = time * 0.5
    return math.trunc(half_litres_per_hour)
1f94d589692901c167dec16bc8660b799b798c83
40,829
def mock_get_outlet_port_code(outlet):
    """Mock cdb_helper.get_outlet_port_code: always returns a fixed (port, code) pair."""
    fixed_pair = ('eth_IN01', 'S-E110059-NET-NO-01')
    return fixed_pair
7280d947f3915b1caa9e7e3e8dddf9b34327c278
40,830
from typing import Mapping
from typing import Any


def with_extra_spec_options(
    original: Mapping[str, Any],
    extra_options: Mapping[str, Any],
    context: str,
) -> Mapping[str, Any]:
    """
    Merge a set of extra options into an original spec, verifying that the
    overrides do not intersect the existing attributes.

    Parameters
    ----------
    original
        Original map of key-values.
    extra_options
        Options to set.
    context
        Context in which these options are being set; used to produce a
        useful error message.

    Raises
    ------
    ValueError
        If we attempt to override keys that are already present in the
        original mapping.
    """
    if not extra_options:
        return original

    key_intersection = set(extra_options) & set(original)
    if key_intersection:
        raise ValueError(
            f"In {context}, you are trying to override the value of {sorted(list(key_intersection))}. The Argo runtime uses these attributes to guarantee the behavior of the supplied DAG is correct. Therefore, we cannot let you override them."
        )

    merged = dict(original)
    merged.update(extra_options)
    return merged
722130e5d92e6b62bdf726d3ecdfe0ea0b452b83
40,832
def xy_to_bit(x: int, y: int) -> int:
    """Transform x/y coordinates into a bitboard bit number (rank-major, 8 files per rank)."""
    return x + 8 * y
84ea71147e6ae3a64a3402c6fe90525736c1682e
40,833
def uptime_seconds(uptime_list):
    """
    Convert a list of the following form:
        [years, weeks, days, hours, minutes]
    to an uptime in seconds.

    No leap-day calculation: a year counts as exactly 365 days.
    """
    years, weeks, days, hours, minutes = uptime_list
    total_days = years * 365 + weeks * 7 + days
    total_minutes = total_days * 24 * 60 + hours * 60 + minutes
    return total_minutes * 60
abf29658193ab373c8e7e8c722fe308d3642c176
40,835
import math


def primeFactors(n):
    """
    This function returns a list of prime factors for n (with multiplicity).

    --param n : integer
    --return list : prime factors of n in non-decreasing order
                    (empty list for n < 2)
    """
    if n < 2:
        return []
    factors = []
    # Strip factors of 2 first so the trial loop can step by 2.
    while n % 2 == 0:
        factors.append(2)
        n //= 2
    for i in range(3, math.floor(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            factors.append(i)
            n //= i
    # BUG FIX: the original only replaced an *empty* factor list with [n],
    # so a leftover prime cofactor was dropped when other factors existed
    # (e.g. primeFactors(6) returned [2] instead of [2, 3]).  Any n > 1
    # remaining here is prime and must be appended.
    if n > 1:
        factors.append(n)
    return factors
3b11d65ba0da6e231c67bfa87011ade59902b933
40,836
def _load_coverage_exclusion_list(path): """Load modules excluded from per-file coverage checks. Args: path: str. Path to file with exclusion list. File should have one dotted module name per line. Blank lines and lines starting with `#` are ignored. Returns: list(str). Dotted names of excluded modules. """ exclusion_list = [] with open(path, 'r', encoding='utf-8') as exclusion_file: for line in exclusion_file: line = line.strip() if line.startswith('#') or not line: continue exclusion_list.append(line) return exclusion_list
da20f154a3ad754c344a378889ce913760743d5a
40,837
def avg(rows: list, index: int):
    """
    Calculate the skill/slayer average of the whole guild.

    Non-numeric entries contribute 0 to the total but still count toward
    the denominator (len(rows)), as in the original.

    :param (list) rows: A list of rows used to calculate the average
    :param (int) index: The index in each row of the value to average
    :returns (str): The guild average rounded to one decimal place
    """
    # type() (not isinstance) on purpose: bools must be excluded exactly
    # as the original did.
    total = sum(row[index] for row in rows if type(row[index]) in (int, float))
    return str(round(float(total / len(rows)), 1))
e4a0dbb81e911c114b99e411c7d81209a8d0728f
40,843
from typing import Iterable


def const_col(dims: Iterable[int]) -> str:
    """
    Name of a constant column.

    Parameters
    ----------
    dims
        Dimensions that describe the column content; sorted so the name
        is order-independent.

    Returns
    -------
    name: str
        Column name.

    Example
    -------
    >>> const_col([1, 2])
    'const_1_2'
    >>> const_col([2, 1])
    'const_1_2'
    """
    ordered = sorted(dims)
    return "const_" + "_".join(str(d) for d in ordered)
52aecf5872f6444bf0155f0556ce39b2250ce758
40,847
import re


def file_id(s):
    """Return a conventional file name from a pseudo-file name.
    (ast/FooBar.hh -> ast/foo-bar.hh)."""
    dashed = re.sub(r"([a-z])([A-Z])", r"\1-\2", s)
    return dashed.lower()
316248c3cb701466ba9189b1a8bf24ee21384042
40,848
def longest_consec(strarr, k):
    """
    Return the first longest string consisting of k consecutive strings
    taken in the array.

    Returns "" when the array is empty, k <= 0, or k > len(strarr).
    """
    if not strarr or k <= 0 or k > len(strarr):
        return ""
    # FIX: only build full k-wide windows.  The original iterated
    # range(len(strarr)), producing redundant truncated trailing windows;
    # those are always suffixes of the last full window and so never
    # longer -- the result is unchanged, but the wasted work is gone.
    windows = [''.join(strarr[i:i + k]) for i in range(len(strarr) - k + 1)]
    return max(windows, key=len)
7192db56b17b5ed5ce11674f629c540860298114
40,849
def GetDefaultPanelBorder(self):
    """Return the default panel border width.

    Always 0: the child controls set their own borders, so the panel
    itself needs none.
    """
    border_width = 0
    return border_width
abbe52bd59c73ed92a278336c6dd38c1ce0c927e
40,851
import random


def random_colour():
    """Generate a random CSS colour.

    Returns six uppercase hex digits (no leading '#'), each channel drawn
    uniformly from 0-255.

    BUG FIX: the original drew channels with randint(0, 254), so 'FF'
    channels (e.g. pure white 'FFFFFF') could never occur; the upper
    bound is now the full 255.
    """
    def _rand_hex():
        # %02X zero-pads single-digit values, replacing the manual '0' + c.
        return '{:02X}'.format(random.randint(0, 255))

    return ''.join(_rand_hex() for _ in range(3))
7c8e779b2a67e7597ca746d000e3069bad114f66
40,852
import re


def vw_normalize_params(params):
    """Drop vowpal-wabbit options that are no-ops.

    ``--ngram 1`` and ``--skips 0`` are defaults, and ``--skips`` has no
    effect without ``--ngram``.

    >>> vw_normalize_params(['--ngram', '1'])
    []
    >>> vw_normalize_params(['--ngram', '1', '--skips', '1'])
    []
    >>> vw_normalize_params(['--skips', '1'])
    []
    >>> vw_normalize_params(['--ngram', '2', '--skips', '1'])
    ['--ngram', '2', '--skips', '1']
    """
    params = ' '.join(params)
    params = params.replace('--ngram 1', '')
    params = params.replace('--skips 0', '')
    if '--ngram' not in params:
        # FIX: raw strings for the regexes; the original '\d+' / '\s+'
        # literals are invalid escape sequences (DeprecationWarning,
        # future SyntaxError).
        params = re.sub(r'--skips \d+', '', params)
    params = re.sub(r'\s+', ' ', params)
    return params.split()
598911689db03583add45082cfa435c57ad603a5
40,853
async def get_set_point(obj):
    """Get target temperatures in Celsius degrees."""
    madoka = obj["madoka"]
    return await madoka.set_point.query()
b10e9498effc9d109137a396d65fe94c460fb4ba
40,854
def notfindhost(cpuMax):
    """Return True when *cpuMax* is the 'notfindhost' sentinel (non-target server)."""
    return cpuMax == "notfindhost"
21059e8f681b6d5b3c19ad2f6ddfb62443f05229
40,855
def get_service_state_name(state):
    """
    Translate a Windows service run state number to a friendly
    service run state name.  Unknown numbers map to 'Unknown'.
    """
    state_names = {
        1: 'Stopped',
        2: 'Start Pending',
        3: 'Stop Pending',
        4: 'Running',
        5: 'Continue Pending',
        6: 'Pause Pending',
        7: 'Paused',
    }
    return state_names.get(state, 'Unknown')
153bfb340ca20335aa476021d31a9918fb00f1f4
40,856
def mkdown_p(text):
    """
    Generates the markdown syntax for a paragraph: each line stripped of
    surrounding whitespace, joined with newlines, plus a trailing newline.
    """
    stripped_lines = (line.strip() for line in text.splitlines())
    return '\n'.join(stripped_lines) + '\n'
ac511acb8bb81ad37aac7897763584d062662d99
40,859
import socket


def free_port():
    """Find free port using bind().

    There is some interval between finding this port and using it, and
    another process might grab the port in that window -- so it is not
    guaranteed that the port is still free when used.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        _host, port = sock.getsockname()
        return port
7605289c5c7c069a8382fa7b6bcb003ebef72e2e
40,861
def smallestValue(nd1, nd2):
    """Return the smallest value found across the two dicts' values.

    NOTE(review): the original docstring promised the smallest *positive
    non-zero* value, but the code returns the plain minimum (zero and
    negative values included).  Behavior preserved here -- confirm intent.
    """
    return min(min(nd1.values()), min(nd2.values()))
64f8509a0d18f1122a84db4b7e9963a25fe522a6
40,862
def make_items(times, leading='+'):
    """Make list-item lines with a leading marker, repeated `times` times.

    Used by snippet li(st) and for olist.

    :param times: how many lines to emit (coerced with int()).
    :param leading: leading marker string; the special value ``1`` (the
        int, not the string '1') switches to numbered items
        ("1. Item", "2. Item", ...).
    """
    count = int(times)
    if leading == 1:
        return ''.join('%s. Item\n' % idx for idx in range(1, count + 1))
    return ('%s Item\n' % leading) * count
34752f0e681447e6177535af2594e6e0b0249ddf
40,863
def _issues_choice_callback(enum, _, param, value): """Translate the given value to enum representation.""" if value is None: return return getattr(enum, value)
7b52193ba4f3013cdef6f7e4417d8369c7f930f5
40,864
def get_node_info(node_info, host):
    """
    Simple callback that takes the node info from `GET /` and parsed
    connection information, and returns the connection information
    unchanged.  Returning `None` would skip the node -- useful for
    filtering (by proximity, for example) or for adding information
    needed by the :class:`~xapiand.Connection` class.

    :arg node_info: node information from `GET /`
    :arg host: connection information (host, port) extracted from the node info
    """
    # Default behavior: accept every node, no extra options.
    return host
6741188eaeabae2925082a2acd9715786eab24e0
40,865
import torch


def evaluate_MAP(predict_sort_idx, is_truth, reduce=True):
    """
    Mean Average Precision over a batch of ranked document lists.

    :param predict_sort_idx: (batch, num_docs) LongTensor of document
        indices in predicted rank order (best first).
    :param is_truth: (batch, num_docs) tensor of 0/1 relevance labels in
        original document order.
    :param reduce: if True, return the batch mean as a Python float;
        otherwise return the per-example (batch,) tensor.
    :return: scalar float (reduce=True) or (batch,) tensor.
    """
    # 1. relevance of each ranked position: reorder labels by predicted rank
    predict_sort_is_truth = is_truth.gather(1, predict_sort_idx)
    # 2. rank positions 1..num_docs, broadcastable over the batch
    batch, num_docs = is_truth.size()
    rank_num = torch.arange(1, num_docs+1, device=is_truth.device).unsqueeze(0)
    # 3. cumulative number of relevant docs up to each rank k
    #    (running sum snapshot per column, stacked into (batch, num_docs))
    top_k_truth_num = []
    sum_col = is_truth.new_zeros(batch,)
    for k in range(num_docs):
        col_k = predict_sort_is_truth[:, k]
        sum_col += col_k
        # clone+detach: snapshot the running total before it mutates again
        top_k_truth_num.append(sum_col.clone().detach())
    predict_top_k_num = torch.stack(top_k_truth_num, dim=-1)
    # precision@k contributes only at relevant positions; AP = sum / #relevant
    # NOTE(review): divides by predict_sort_is_truth.sum(dim=1) -- examples
    # with zero relevant docs produce NaN/inf; confirm callers exclude them.
    batch_map = predict_sort_is_truth.float() * predict_top_k_num.float() / rank_num.float()
    batch_map = batch_map.sum(dim=1) / predict_sort_is_truth.sum(dim=1)
    if reduce:
        return batch_map.mean().item()
    return batch_map
79ce463cfd4094ef26bff5c3442f81b6c408fc10
40,866
import os


def read_file_line(lineno, given_file, k_src, o_src, a_src):
    """
    Read the source line from the provided llvm line info.

    :param lineno: line number to read from (1-based).
    :param given_file: file path in the original json
    :param k_src: Absolute path to the kernel sources.
    :param o_src: Path to the original kernel sources, that should be
        replaced with the alternate source dir.
    :param a_src: Path to the new kernel sources, that should be used
        instead of the original src dir.
    :return: the stripped source line, or "" when the line or file is
        unavailable.
    """
    if lineno <= 0:
        return ""
    # Remap the original source tree to the alternate tree, if configured.
    if o_src is not None and a_src is not None:
        if os.path.isabs(given_file):
            given_file = given_file.replace(o_src, a_src)
    # Resolve relative paths against the kernel source root.
    if not os.path.isabs(given_file) and k_src is not None:
        given_file = os.path.join(k_src, given_file)
    if os.path.exists(given_file) and os.path.isfile(given_file):
        # BUG FIX: the original opened the file without ever closing it,
        # leaking a file descriptor per call.
        with open(given_file, "r") as fp:
            all_lines = fp.readlines()
        if len(all_lines) >= lineno:
            return all_lines[lineno - 1].strip()
    return ""
8996bf6e7e945b2b1e1a5c3e79372b64707e5293
40,867
import math


def is_prime(candidate):
    """Trial-division primality test.

    Assumes candidate is an odd number: only odd divisors from 3 up to
    sqrt(candidate) are tried, so even composites are not detected.
    """
    limit = int(math.sqrt(candidate))
    return all(candidate % divisor for divisor in range(3, limit + 1, 2))
ff72ee21444f3325f864208f8e2719c971375269
40,869
def get_emotion2id(DATASET: str) -> dict:
    """Get a dict that converts string emotion labels to class indices.

    Supported datasets: MELD (7 classes) and IEMOCAP (6 of its original
    11 classes).  Raises KeyError for any other dataset name.
    """
    label_sets = {
        # MELD has 7 classes
        'MELD': ['neutral', 'joy', 'surprise', 'anger',
                 'sadness', 'disgust', 'fear'],
        # IEMOCAP originally has 11 classes but we'll only use 6 of them.
        'IEMOCAP': ['neutral', 'frustration', 'sadness', 'anger',
                    'excited', 'happiness'],
    }
    return {emotion: idx for idx, emotion in enumerate(label_sets[DATASET])}
91bf833a1e8a2d7d7d0fe7349c59bef9b80f78c8
40,870
def get_first_selected_text(view):
    """Return (text, region) for the given view's first buffer selection."""
    region = view.sel()[0]
    return view.substr(region), region
74b4508f900c87789ebf41b66f31f5a3d41157b1
40,871
def create_transcript_objects(refseq_db, nms_in_refseq):
    """Retrieve Transcript objects from the RefSeq db, one per accession ID."""
    return [refseq_db.by_id(accession) for accession in nms_in_refseq]
f1422ebd14eccc1b1f26cefefad1c734e06cb81b
40,872
def check_params(params):
    """
    Checks if the parameters are defined in the domain [0, 1].

    None entries are skipped.

    :param params: parameters (u, v, w)
    :type params: list, tuple
    :returns: True when all non-None parameters are in range
    :raises ValueError: input parameters are outside of the domain [0, 1]
    """
    tol = 10e-8  # NOTE(review): reads like it was meant to be 1e-8; preserved as-is
    for value in params:
        if value is None:
            continue
        if not (0.0 - tol) <= value <= (1.0 + tol):
            raise ValueError("Parameters should be between 0 and 1")
    return True
a0b8225addd1d499719d26461b2ada79b4d27a8c
40,874
import torch


def swish_jit_bwd(input_, grad_output):
    """jit-scripted swish backward.

    Gradient: grad_output * (sig(x) * (1 + x * (1 - sig(x)))).
    """
    sig = torch.sigmoid(input_)
    return grad_output * (sig * (1 + input_ * (1 - sig)))
99aee5de3c275b2d592c931225b39d736c093cd4
40,875
import random


def begin_war(player1Hand, player2Hand, winningDeck, numBattles, warHistogram):
    """Run a war sequence in the card game War.

    Both players put down four cards each from the top of their decks into
    ``winningDeck``; the fourth cards decide the winner, who takes the
    (shuffled) winningDeck.  Equal fourth cards start another war, up to a
    maximum of six.  If a player runs out of cards mid-war, the other
    player wins the sequence.

    :param player1Hand: list of card values (2-14), player 1's deck.
    :param player2Hand: list of card values (2-14), player 2's deck.
    :param winningDeck: cards at stake; winner takes all of it.
    :param numBattles: battles so far; each war round counts as one.
    :param warHistogram: six counters; slot i counts sequences that ended
        after i+1 consecutive wars.
    :returns: (player1Hand, player2Hand, numBattles, warHistogram).
    """
    warSequence = 0
    continueWar = True
    while continueWar:
        numBattles += 1
        # Players lay down 4 cards (alternating p1, p2 -- so winningDeck[-2]
        # is player 1's 4th card and winningDeck[-1] is player 2's).
        for i in range(4):
            try:
                winningDeck.append(player1Hand.pop(0))
                winningDeck.append(player2Hand.pop(0))
            # If a player runs out of cards the other player wins the war
            except IndexError:
                random.shuffle(winningDeck)
                if len(player1Hand) == 0:
                    player2Hand.extend(winningDeck)
                else:
                    player1Hand.extend(winningDeck)
                # If six war sequences have occurred, the war ends
                # (histogram only updated for sequences within the cap)
                if warSequence > 5:
                    return player1Hand, player2Hand, numBattles, warHistogram
                else:
                    warHistogram[warSequence] += 1
                return player1Hand, player2Hand, numBattles, warHistogram
        # Check if Player 1's 4th card is of greater value than Player 2's
        # 4th card (comparison happens before the shuffle)
        if winningDeck[-2] > winningDeck[-1]:
            random.shuffle(winningDeck)
            player1Hand.extend(winningDeck)
            warHistogram[warSequence] += 1
            continueWar = False
            break
        # Check if Player 2's 4th card is of greater value than Player 1's
        # 4th card
        elif winningDeck[-2] < winningDeck[-1]:
            random.shuffle(winningDeck)
            player2Hand.extend(winningDeck)
            warHistogram[warSequence] += 1
            continueWar = False
            break
        # Continues war sequence if both of the 4th cards are equal to
        # each other
        else:
            warSequence += 1
    return player1Hand, player2Hand, numBattles, warHistogram
70c0bd32876e83a8bc380e227eaa3e172bf79993
40,876
from re import A


def remove_docker_cmd(cmd, args, remote_folder):
    """Removes cmd related to docker.

    Strips everything before the build command, optionally prefixes a
    CUDA_VISIBLE_DEVICES assignment, and wraps the command in a
    cd-to-project prefix.

    NOTE(review): `A` is `re.A` (the ASCII regex flag) via the odd
    `from re import A` import.  Iterating a regex flag to build a GPU
    device list looks like a bug -- this was almost certainly meant to be
    a list of GPU ids (and `args.GUI` possibly `args.GPU`).  Behavior
    preserved pending confirmation of intent.
    """
    # Find the build command
    cmd = "%s%s" % (args.build_cmd, cmd.split(args.build_cmd)[-1])
    # Add GPU if needed
    if args.GUI:
        devices = ",".join(["%d" % i for i in A])
        cuda = 'CUDA_VISIBLE_DEVICES=%s' % devices
        cmd = "%s %s" % (cuda, cmd)
    # Make sure to be in project path
    if not remote_folder:
        cmd = "cd %s && %s" % (args.proj, cmd)
    else:
        # quoted form for passing through a remote shell
        cmd = '"cd %s && %s"' % (remote_folder, cmd)
    return cmd
66a15c3d6f6f90e5906f9a8a8b16b8a01ae25389
40,877
def apply_tf(data, tf):
    """Apply function `tf` (transformation function) if specified and return
    the result; return the data unmodified otherwise."""
    return tf(data) if tf else data
86d381f5df2362cd614ec252fd2650f2e0086d0d
40,878
import numpy


def aic(k, n, rss, errors=False):
    """
    Akaike information criterion -- "a measure of the relative goodness of
    fit of a statistical model".  Unlike the F-test, it does not assume one
    model is a particular case of the more complex one.

    If errors == False, assumes identical errors for every data point and
    interprets `rss` as the residual sum of squares; otherwise `rss` is
    taken to be the chi-squared statistic.

    Usage:
    >>> aicval = aic(k, n, rss)

    :param rss: residual sum of squares (errors=False) or chi-square value
    :param n: sample size, i.e. number of data points
    :param k: number of free parameters in the model
    :returns: AICc statistic (AIC with finite-sample-size correction)

    References:
    1. http://www.originlab.com/index.aspx?go=Products/Origin/DataAnalysis/CurveFitting/NonlinearFitting&pid=1195
    2. http://en.wikipedia.org/wiki/Akaike_information_criterion

    v1 Dec 2011
    """
    # `== False` preserved on purpose: a falsy-but-not-False value (e.g. 0)
    # must take the chi-squared branch, exactly as in the original.
    if errors == False:  # noqa: E712
        # see http://en.wikipedia.org/wiki/Akaike_information_criterion#Relevance_to_chi-squared_fitting
        base = n * numpy.log(rss / n) + 2. * k
    else:
        base = rss + 2. * k
    # AICc = AIC with a correction for finite sample size
    return base + 2. * k * (k + 1.) / (n - k - 1.)
f4fe6b9f83a2ffafbf4e7431aa140523e12a4ecd
40,880
import collections


def get_gt_distribution(experience, get_block):
    """
    Get the ground-truth distribution of an experience buffer.

    :param experience: List of transitions.
    :param get_block: Function that returns the ground-truth state-action
        block for each transition.
    :return: dict mapping each block to its number of samples.
    """
    counts = collections.Counter(
        get_block(t.state, t.reward, t.next_state) for t in experience
    )
    return dict(counts)
6dcba0e714d9d98a530c16fcfa12679980b46871
40,881
import math


def cost_to_time(cost, avg_seek_time=8, avg_latency=4):
    """
    Converts a disk I/O metric to milliseconds (rounded up).

    :param cost: The disk I/O metric to convert.
    :param avg_seek_time: The average seek time in milliseconds.
    :param avg_latency: The average latency in milliseconds.
    :return: A disk I/O time in milliseconds as an int.
    """
    per_io = (avg_seek_time / 1000) + (avg_latency / 1000)
    return int(math.ceil(cost * per_io))
9ec86fef8b1b76260bbb2fe58b539d1dc8164b52
40,882
def VLerp(startv, endv, t=0.5):
    """Linear interpolation between 2 vectors.

    Rejects t outside (0, 1]; note t == 1.0 is accepted despite the
    error message's wording, matching the original check.
    """
    if t <= 0.0 or t > 1.0:
        raise ValueError("E: t must satisfy 0<t<1, but is %f" % t)
    offset = t * (endv - startv)
    return startv + offset
c64dec9b0b966c0b25f6da00392239389e714a30
40,883
def get_next_deriv(xi, step, n, deriv, func):
    """Euler step for the derivative:

    (d_theta/d_xi)_{i+1} = (d_theta/d_xi)_i
                           - d_xi * ( (2/xi)_i * (d_theta/d_xi)_i + theta^n )
    """
    rhs = (2. / xi) * deriv + func ** n
    return deriv - step * rhs
9393131ff5872c8da2fc519611d6fc075ef4ffa5
40,885
def read_emojis(emojis):
    """Collect (name, url) pairs for emojis whose value is an http(s) URL.

    NOTE(review): despite the original docstring promising a dictionary,
    this returns a list of tuples (aliases, whose values are not URLs,
    are skipped).  Behavior preserved.
    """
    pairs = []
    for name, url in emojis['emoji'].items():
        if url.startswith('http'):
            pairs.append((name, url))
    return pairs
b8e80fc07cc287f3eff547b111aaaa16989309ce
40,886
def field_to_str(bf): """ :param bf: 2D list representation of a battlefield: list(list) :return: string representation of battlefield """ bf_txt = [['■' if bf[i][j] else ' ' for i in range(10)] for j in range(10)] first_line = ' A B C D E F G H I J\n' return first_line + '\n'.join([' ' * (2 - len(str(x + 1))) + str(x + 1) + ' ' + ' '.join(bf_txt[x]) for x in range(10)])
8496e7650ca58c8450c2630a01e1ef397e3b37d1
40,887
import subprocess


def get_git_hash():
    """gitハッシュを取得する (Get the short git hash of HEAD.)

    Returns:
        string: git hash of HEAD, or a fallback message when not inside
            a git repository / git is unavailable.
    """
    command = "git rev-parse --short HEAD"
    try:
        output = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
        return output.strip().decode("utf-8")
    except Exception:
        # not a git checkout, or git missing from PATH
        return "Not found git repository"
bd0bf2206931d753d1e8a85e78a135f502b1e3af
40,888
def make_label_index(link_display_label, entries):
    """Map entry IDs to human-readable labels.

    Falls back to the entry ID itself when the label field is absent.

    :arg link_display_label: Marker, which contains the label.
    :arg entries: Collection of entries.
    """
    index = {}
    for entry in entries:
        index[entry.id] = entry.get(link_display_label, entry.id)
    return index
28ccda31fa57fb0032732527748a732e43d8325a
40,890
def find_port_module(self, *args, **kwargs):
    """find_port_module stub: no lookup is performed, always returns None."""
    return None
54dde96deec68846b5ebd83da080980c1902afbf
40,892
import re

# Language API versions are date-based (YYYY-MM-DD).
_DATE_PATTERN = re.compile(r'\d{4}-\d{2}-\d{2}')


def is_language_api(api_version):
    """Return the regex match when the version contains a date, else None."""
    return _DATE_PATTERN.search(api_version)
adf78acecd96df7b6833c7dcf345ca0770ac3720
40,894
import os


def get_test_files(target_direcotry=None):
    """
    Get all testing *.json files from the 'data' directory.

    Creates a mapping: file name (without extension) -> absolute file path.

    :param target_direcotry: absolute path to directory with testing files
        (parameter-name typo kept: it is part of the public interface)
    :type target_direcotry: str
    :returns dict
    """
    if target_direcotry is None:
        target_direcotry = os.path.join(os.path.dirname(__file__), 'data')
    mapping = {}
    for entry in os.listdir(target_direcotry):
        base, ext = os.path.splitext(entry)
        if ext == '.json':
            mapping[base] = os.path.join(target_direcotry, entry)
    return mapping
581dd2f16b3adf17a658005c6db58a8644c0a206
40,895
def get_initial_configuration_with_dir():
    """
    Return densely populated {key: cell} where
    key: 2-letter (e.g. "11", "91")
    cell: {"state": state, "type": type}
    state: "empty", "up", or "down"
    type: "FU" ... "OU"

    Gote's pieces are placed explicitly; sente's are mirrored through
    the board centre (x, y) -> (10 - x, 10 - y).
    """
    back_rank = ["KY", "KE", "GI", "KI", "OU", "KI", "GI", "KE", "KY"]
    gote_pieces = {(x, 1): back_rank[x - 1] for x in range(1, 10)}
    gote_pieces[(2, 2)] = "KA"
    gote_pieces[(8, 2)] = "HI"
    for x in range(1, 10):
        gote_pieces[(x, 3)] = "FU"

    board = {
        "%d%d" % (x, y): {"state": "empty", "type": "empty"}
        for x in range(1, 10)
        for y in range(1, 10)
    }
    for (x, y), piece in gote_pieces.items():
        board["%d%d" % (x, y)] = {"state": "down", "type": piece}
        board["%d%d" % (10 - x, 10 - y)] = {"state": "up", "type": piece}
    return board
7856e5b56f775f747d021fef539054466f9325a3
40,896
def _prepare_cov_source(cov_source): """ Prepare cov_source so that: --cov --cov=foobar is equivalent to --cov (cov_source=None) --cov=foo --cov=bar is equivalent to cov_source=['foo', 'bar'] """ return None if True in cov_source else [path for path in cov_source if path is not True]
8287d28c5d82568ee226d89250524b75e81b69d8
40,900
from pathlib import Path
import textwrap


def cookiecutter_readme(cookiecutter_subdirectory: Path) -> Path:
    """The README file in the cookiecutter: write it and return its path."""
    readme = cookiecutter_subdirectory / "README.md"
    contents = textwrap.dedent(
        """\
        # {{cookiecutter.project_slug}}

        Welcome to `{{cookiecutter.project_slug}}`!
        """
    )
    readme.write_text(contents)
    return readme
0a25eaa081e50c0fb4e3b2195b823d4fd5f18221
40,901
import os


def get_module_targetname_for_cmakelists(cmakelists_filename):
    """Determine what the name for the all-in-one module target should be
    based on the CMakeLists.txt filename with path.

    Args:
        cmakelists_filename (str): CMakeLists.txt filename with path from
            quickstep root.

    Returns:
        str: The target name in CMake that corresponds to the special
            all-in-one library for the module described by the
            CMakeLists.txt file.
    """
    # Collect directory components innermost-first (underscores removed,
    # "." entries skipped), then emit them outermost-first after the
    # "quickstep" prefix.
    collected = []
    head, tail = os.path.split(cmakelists_filename)
    while head != "":
        head, tail = os.path.split(head)
        if tail != ".":
            collected.append(tail.replace("_", ""))
    parts = ["quickstep"]
    parts.extend(reversed(collected))
    return "_".join(parts)
1d8a2a550c33c9b1f2e2649bbd3416b3a2053d91
40,903
import time


def last_hour(unixtime, hours=1):
    """Check if a given epoch time is within the last hour(s).

    Args:
        unixtime: epoch time
        hours (int): number of hours

    Returns:
        True/False (False when unixtime is falsy -- some bash histories
        do not contain the `time` column)
    """
    if not unixtime:
        return False
    window = hours * 3600
    return int(time.time()) - int(unixtime) <= window
1ce3ddb73f0316cf36979c1ee7f1269ae55d0d4c
40,904
import copy


def fltr(node, whitelist):
    """Recursively copy *node*, keeping only whitelisted dict keys.

    Returns a brand-new structure rather than modifying the old one, and
    filters on non-leaf nodes too: a non-whitelisted key whose value is a
    dict/list is kept if anything inside it survives.  Empty results
    collapse to None.

    Example:
        `fltr(x, ['dropdown_value', 'nm_field', 'url_app', 'dt_reg'])`
    """
    if isinstance(node, dict):
        kept = {}
        for key, value in node.items():
            if key in whitelist:
                kept[key] = copy.deepcopy(value)
            elif isinstance(value, (list, dict)):
                survivor = fltr(value, whitelist)
                if survivor:
                    kept[key] = survivor
        return kept or None
    if isinstance(node, list):
        kept = []
        for entry in node:
            survivor = fltr(entry, whitelist)
            if survivor:
                kept.append(survivor)
        return kept or None
    # Scalars reached directly (e.g. list elements) are dropped.
1455ee0602c817e1d3251ac141e5ff7ec6e4f663
40,905
def replace_matrix(base, x, y, data):
    """Overwrite a rectangular region of *base* with *data*, in place.

    base = [[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]
    x = 1, y = 0, data = [[a], [b], [c]]
    gives [[1, a, 3],
           [4, b, 6],
           [7, c, 9]]

    The region's width is taken from data's first row; *base* is also
    returned for convenience.
    """
    height = len(data)
    width = len(data[0])
    for dr in range(height):
        for dc in range(width):
            base[y + dr][x + dc] = data[dr][dc]
    return base
a47c040ff2e86bc7bf47970c3e606557b3ae2db7
40,906
def format_stats(stats):
    """Pivot flat 'id:field' stat keys into nested per-stat dicts.

    Input keys look like 'size:label', 'size:value', 'size:include', ...;
    the result groups them as {'size': {'id': 'size', 'label': ..., ...}}.
    Stats whose 'include' field is falsy (or absent) are dropped, and a
    synthetic 'has_stats' entry is always appended, itself never included.
    """
    nested = {}
    for flat_key, value in stats.items():
        stat_id, field = flat_key.split(":")
        entry = nested.setdefault(stat_id, {"id": stat_id})
        entry[field] = value

    # Strip out every stat not explicitly marked for inclusion.
    nested = {
        stat_id: fields
        for stat_id, fields in nested.items()
        if fields.get('include', False)
    }

    # 'value' is computed before 'has_stats' itself is inserted.
    nested['has_stats'] = {
        'id': 'has_stats',
        'label': 'Has Stats?',
        'value': len(nested) > 0,
        'description': 'Indicates whether there are statistics for this table',
        'include': False,
    }
    return nested
38b0f19f216d85f57c48a2e4b35a00dfa784d962
40,907
from datetime import datetime def _todays_date(): """ Get today's date. Returns the current date and time. In a standalone function to easily be mocked in unit tests. Returns ------- datetime.datetime The current date and time as a datetime object. """ return datetime.now()
a45662385021f9dcd4267b66f9d6aa17e8a3421a
40,908
import torch


def collapse_copy_scores(scores, batch, tgt_vocab, batch_index=None):
    """
    Given scores from an expanded dictionary corresponding to a batch,
    sums together copies, with a dictionary word when it is ambiguous.
    """
    # Extended-vocab (copy) columns start right after the shared target vocab.
    offset = len(tgt_vocab)
    for b in range(scores.size(0)):
        blank = []  # copy-region columns to flatten after merging
        fill = []   # matching entries to add the copy scores into
        if batch_index is not None:
            # batch_index re-maps batch rows onto their source vocabularies.
            src_vocab = batch.src_vocabs[batch_index[b]]
        else:
            src_vocab = batch.src_vocabs[b]
        # Start at 1: index 0 of the source vocab is skipped (presumably the
        # padding/unknown slot — confirm against the vocab construction).
        for i in range(1, len(src_vocab)):
            ti = src_vocab.itos[i]
            # NOTE(review): `ti` comes from itos, i.e. it is a token, so the
            # comparison with 0 looks like it was meant for a tgt_vocab.stoi
            # index (e.g. `ti = tgt_vocab.stoi[src_vocab.itos[i]]`); as
            # written it holds for every non-zero token — confirm intent.
            if ti != 0:
                blank.append(offset + i)
                fill.append(ti)
        if blank:
            blank = torch.tensor(blank, device=scores.device)
            fill = torch.tensor(fill, device=scores.device)
            # Fold each copy column into its target-vocab column, then set
            # the copy column to a tiny value instead of zero (presumably to
            # keep a later log() finite — confirm).
            scores[b, :].index_add_(1, fill, scores[b, :].index_select(1, blank))
            scores[b, :].index_fill_(1, blank, 1e-10)
    return scores
675867187928860c8ff305f94b2573d169355de5
40,910
def select_flux(lc, flux_cols):
    """Return a Lightcurve object with the named column as the flux column.

    flux_cols: a single column name or a prioritized list of names; the
    first one the lightcurve contains wins.  The special name "flux"
    returns the lightcurve unchanged.  Raises ValueError when no candidate
    matches.
    """
    if isinstance(flux_cols, str):
        flux_cols = [flux_cols]
    for candidate in flux_cols:
        name = candidate.lower()
        if name == "flux":
            return lc
        if name in lc.colnames:
            return lc.select_flux(name)
    raise ValueError(f"'column {flux_cols}' not found")
baa59107fe35407873e2959e00533de12fbfdb96
40,911
def string_list(argument):
    """Split a comma-separated string into a list of its parts.

    :param argument: comma-separated string
    :return: list of substrings (an empty string yields [''])
    """
    parts = argument.split(",")
    return parts
a0fe21f6ee49dd90952683d78531d84b3f7fe00d
40,913
def fix_closed_issues(data_frame, index):
    """Correct the closed-issue count for one row.

    GitHub counts closed and merged pull requests as closed issues, so the
    sum of merged and closed pull requests must be subtracted from the
    row's "closed_issues" value.

    :param data_frame: pandas DataFrame with "closed_issues",
        "closed_pull_requests" and "merged_pull_requests" columns
    :param index: row label to fix
    :return: the same DataFrame, mutated in place
    """
    closed_issues = data_frame.loc[index, "closed_issues"]
    closed_prs = data_frame.loc[index, "closed_pull_requests"]
    merged_prs = data_frame.loc[index, "merged_pull_requests"]
    # DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
    # .loc assignment is the supported equivalent.
    data_frame.loc[index, "closed_issues"] = closed_issues - (closed_prs + merged_prs)
    return data_frame
822c5a44dd8ab8805e6d3d0c5cc76992a2144e25
40,915
def get_customer_events(customerId, transcript):
    """Return the subset of *transcript* rows belonging to one customer.

    Parameters
    ----------
    customerId:
        Customer Id to filter on.
    transcript:
        DataFrame of events for all customers, with a "person" column.

    Returns
    -------
    DataFrame containing only the rows whose person equals customerId.
    """
    is_customer = transcript.person == customerId
    return transcript[is_customer]
bf925440e1f43fe715f498e824ebf98180a23858
40,916
def logger(message):
    """Return a closure that prints a greeting bound to *message*."""
    def log_messenger():
        greeting = "Hello %s, i am log_messenger a inside f(x)\n" % (message)
        print(greeting)
    return log_messenger
02e2a4754f37743c577d7987e330704f4205da75
40,917
import os


def split_filename(fn):
    """Split a path into [directory, basename-without-extension, extension].

    The extension keeps its leading dot.  Dotfiles such as ``/a/.bashrc``
    are treated as all-extension: the basename comes back empty and
    ``.bashrc`` is returned as the extension.
    """
    root, fileext = os.path.splitext(fn)
    head, base = os.path.split(root)
    if fileext == "" and base.startswith("."):
        # Leading-dot name with no extension: the whole name is the ext.
        fileext = base
        base = ""
    return [head, base, fileext]
c95ca7aad73a49a490cd6242697d21c1da39012e
40,919
def get_valid_step(current_step: int, max_step: int) -> int:
    """
    Checks if the current step is within boundaries and returns a corrected
    step.

    :param current_step: The current step to check.
    :param max_step: The maximum allowed step.
    :return: A corrected step between 1 and the maximum step.
    """
    # Steps below 1 are raised to 1 first; only then is the upper bound
    # applied (so the lower-bound correction takes precedence).
    return 1 if current_step < 1 else min(current_step, max_step)
aa668ef490bce37ac890c767b604e14f4b5d5ed9
40,920
def U_ZZ(n):
    """Makes a sequence of operations for ZZ basis.

    Args:
        n (int): Length of the sequence.

    Returns:
        str: Sequence of n identity ('I') operations.
    """
    return "".join("I" for _ in range(n))
a3cdd8cb4b8d25d64b8338f9e94a96771e3ef657
40,922
def phaseplot_values(species):
    """Map phase-plot keys to the matching history arrays of *species*.

    A convenience function used to generalize the PhasePlot class; the
    keys available for phase plots are `x`, `v_x`, `v_y` and `v_z`.

    Parameters
    ----------
    species : Species
        A species to draw data from.

    Returns
    -------
    dict of phase plot values.
    """
    velocities = species.velocity_history
    return {
        "x": species.position_history,
        "v_x": velocities[:, :, 0],
        "v_y": velocities[:, :, 1],
        "v_z": velocities[:, :, 2],
    }
9b7458eb4634bbd7e6b337ecadc0a3db20911d45
40,925
def get_pagination(current_page_number, total_number_pages):
    """Build the pagination dict for the given page.

    :param current_page_number: current page to paginate
    :param total_number_pages: total number of pages in the current model
    :return: dictionary of pagination; neighbours outside the valid range
        are reported as None
    """
    prev_candidate = current_page_number - 1
    next_candidate = current_page_number + 1
    return {
        "first-page": 0,
        "previous-page": prev_candidate if prev_candidate > 0 else None,
        "current-page": current_page_number,
        "next-page": next_candidate if next_candidate < total_number_pages else None,
        "last-page": total_number_pages,
    }
cbe5052fead77f1b034452841ae2468373cd4860
40,926
def _find_root_modules(modules): """Returns a list of modules that no other module imports from. """ module_imports = {name: 0 for name in modules} for neighbors in modules.values(): for neighbor in neighbors: module_imports[neighbor] += 1 return sorted([name for name, count in module_imports.items() if count == 0])
bca6d078f3c97d836c31a142c98cac47da25d073
40,927
def assort_values(d):
    """
    Collect every value stored in the dictionary's iterables and return
    them as one set.

    :param d: dict mapping keys to iterables of values
    :return: set containing the union of all values
    """
    return set().union(*d.values())
b9cf4a705d92aa414bd38e05204764b71bf2e683
40,928
import struct


def read_vary(stream, data_size_type="B"):
    """! @brief Read a length-prefixed blob of variable length from stream.
    @param stream Data stream.
    @param data_size_type struct-format character of the length prefix.
    @return Payload as a bytearray.
    """
    # Read the length prefix first, then exactly that many payload bytes.
    header = stream.read(struct.calcsize(data_size_type))
    (length,) = struct.unpack(data_size_type, header)
    return bytearray(stream.read(length))
9aa8a29470dad880b3f9b718a34c80eb28e2110e
40,931
from typing import Optional
from enum import Enum


def enum_parse(enum_cls: type, value: str) -> Optional[Enum]:
    """Try to parse a string value to an Enum member.

    Matching is exact first, then case-insensitive; returns None when no
    member matches.  Raises TypeError for non-Enum classes.
    """
    if not issubclass(enum_cls, Enum):
        raise TypeError("Can only be used with classes derived from enum.Enum.")
    members = enum_cls.__members__
    if value in members:
        return members[value]
    folded_to_name = {name.casefold(): name for name in members}
    match = folded_to_name.get(value.casefold())
    if match is not None:
        return members[match]
    return None
f932e9d941daed3a970273c91a2d45abf0261e17
40,932
def strip_trailing_zero(value):
    """Like builtin "floatformat" but strips trailing zeros from the right
    (12.5 does not become 12.50)."""
    text = str(value)
    if "." not in text:
        return text
    # Drop trailing zeros, then a dangling decimal point if one remains.
    return text.rstrip("0").rstrip(".")
201c3ee4014db53a613d23131f8d4caad4cab638
40,933
import logging
import calendar


def delete_contact(db, cursor, timestamp, station, callsign):
    """Remove from qso_log the QSO that N1MM reported as deleted.

    *timestamp* is a struct_time (converted to epoch seconds for matching);
    *station* is currently unused.  Failures are logged, never raised.
    Always returns ''.
    """
    # station_id = stations.lookup_station_id(station)
    epoch = calendar.timegm(timestamp)
    logging.info('DELETEQSO: %s, timestamp = %s' % (callsign, epoch))
    try:
        cursor.execute(
            "delete from qso_log where callsign = ? and timestamp = ?",
            (callsign, epoch,))
        db.commit()
    except Exception:
        logging.exception('Exception deleting contact from db.')
    return ''
724879d05a8a4f16c2490bf1d0209a15d59a5473
40,934
def pe_97():
    """Find the last digits of a very large non-Mersenne prime.

    The prime is 28433 * 2**7830457 + 1 (Project Euler problem 97); only
    its last ten digits are needed, so the power is computed modulo 10**10.

    :return: message string containing the last ten digits
    """
    modulus = 10_000_000_000
    # Three-argument pow() is O(log exponent) modular exponentiation,
    # replacing the original 7,830,456-iteration doubling loop.
    base = pow(2, 7_830_457, modulus)
    result = base * 28_433 + 1
    result = str(result)[-10:]
    return f'The last ten digits of the very large prime are {result}.'
d0ae1afab2434e9abdf13541c9dc1a4decec7fe9
40,935
def get_company(company_id, company_dict):
    """Look up the entry stored under *company_id* in the company table.

    Raises KeyError when the id is absent.
    """
    entry = company_dict[company_id]
    return entry
b2f682cf702dbd6877e46cf98b106b9e058fe4fb
40,936
def parse_lustre_pool_size(content):
    """Parse Lustre `lfs df`-style output for (total, used, avail).

    Returns the three integer sizes from the first "filesystem_summary"
    line, or None when *content* is None or no such line exists.
    """
    if content is None:
        return None
    for line in content.splitlines():
        if not line.startswith("filesystem_summary"):
            continue
        fields = line.split()
        return int(fields[1]), int(fields[2]), int(fields[3])
    return None
9ca70212d42a8f7a43829e8491c0f7664d0c9dee
40,937
def walk(G, s, S=frozenset()):
    """
    Returns a traversal path from s on G.

    G: adjacency dict mapping node -> {neighbor: edge-data}
    s: start node
    S: nodes to exclude from the traversal (read-only)

    Returns SG, the visited subgraph (node -> G[node]).

    source: Python Algorithms
            Mastering Basic Algorithms in the Python Language, 2010, p.104
    """
    # frozenset() instead of the original set() default: a mutable default
    # argument is shared across calls; S is only read, so the immutable
    # default is fully backward-compatible.
    P = {s: None}   # predecessor map; doubles as the visited set
    SG = {s: G[s]}  # visited subgraph
    Q = {s}         # frontier
    while Q:
        u = Q.pop()
        # difference(P, S) filters both already-visited and excluded nodes.
        for v in set(G[u]).difference(P, S):
            Q.add(v)
            P[v] = u
            SG[v] = G[v]
    return SG
cdd73ef7dbf505eb7bd1c40d0bbdc4637a43b0d7
40,938
def exit_req():
    """Construct message used when asking CtrlServer to exit.

    :returns: Constructed exit_req dict, ready to be sent over the wire.
    """
    message = dict(type="exit_req")
    return message
5881c5f252a88d97f9259a4389633d0b3d6c334a
40,939
def find_list_index(a_list, item):
    """
    Finds the index of an item in a list.

    :param a_list: A list to find the index in.
    :type a_list: list
    :param item: The item to find the index for.
    :type item: str
    :return: The index of the first occurrence, or None if not in the list.
    :rtype: int | None
    """
    # list.index does the search in one pass; the original scanned the list
    # twice (`in` check, then a manual enumerate loop).
    try:
        return a_list.index(item)
    except ValueError:
        return None
c9eb862b4af3eb113cca9ee55edda05b9fbfa8fe
40,940
import torch


def convert2DTo3D(phi_2d, En, device):
    """Lift 2-D coordinates to 3-D via the per-row basis matrix En.

    phi_2d: N*2 coordinates
    En: N*6 basis coefficients, laid out as pairs per output axis
    device: target device for the result
    """
    phi_3d = torch.zeros(len(En), 3).to(device)
    # Tile (x, y) to (x, y, x, y, x, y) so each En pair weights one axis.
    weighted = En * phi_2d.repeat(1, 3)
    for axis in range(3):
        phi_3d[:, axis] = weighted[:, 2 * axis] + weighted[:, 2 * axis + 1]
    return phi_3d
d637cadc5f2cb28ffb0f49a37ac45f005ecc762e
40,941
def horizontal_flip(img):
    """Flip the image along the horizontal axis (reverses the second,
    width-like dimension; the result is a view, not a copy)."""
    mirrored = img[:, ::-1, :]
    return mirrored
7aa0defa82efc835848f5f8d2b73191d63476a74
40,944
def clip_zero_formatter(tick_val, tick_pos):
    """Tick formatter that returns an empty string for zero values and the
    value itself otherwise."""
    return '' if tick_val == 0 else tick_val
263cd9104f8f8e2332c55454e3fee274afe2ec4a
40,945
def get_collocations_from_finder(finder):
    """Build a nested collocation lookup from a bigram finder.

    Result shape:
        {w1: {len(w2): [(score, w2), ...], ...},
         ...}
    where each inner list is ordered by descending score.

    :param finder: object exposing score_bigram(measure)
    :return: nested dict as described above
    """
    measure = 'frequency'
    scored = sorted(finder.score_bigram(measure),
                    key=lambda pair: pair[1], reverse=True)
    lookup = dict()
    for (w1, w2), score in scored:
        by_length = lookup.setdefault(w1, {})
        by_length.setdefault(len(w2), []).append((score, w2))
    return lookup
20ee68f96cddac936425adc2b86945feff0943a9
40,946
def _get_variable_definition(ncvar):
    """
    Collect information on input variable.

    Parameters
    ----------
    ncvar : netcdf4 variable
        Variable of input file

    Returns
    -------
    dict
        Containing information on input variable with key/value pairs.
        The following keys are returned:
        'name', 'dtype', 'dimensions', 'fill_value', 'chunksizes'
        (plus whatever compression settings filters() reports).

    Examples
    --------
    .. code-block:: python

        _get_variable_definition(fi.variables['GPP'])
    """
    # filters() describes compression settings; it can return None,
    # in which case start from an empty dict.
    out = ncvar.filters() if ncvar.filters() else {}

    # chunksizes
    chunks = None
    if "chunking" in dir(ncvar):
        if ncvar.chunking() is not None:
            # chunking() returns the string 'contiguous' or a list of
            # per-dimension chunk sizes — only keep the latter.
            if not isinstance(ncvar.chunking(), str):
                chunks = list(ncvar.chunking())

    # missing value: prefer 'missing_value' over '_FillValue' when both exist
    if "missing_value" in dir(ncvar):
        ifill = ncvar.missing_value
    elif "_FillValue" in dir(ncvar):
        ifill = ncvar._FillValue
    else:
        ifill = None

    # output variable
    out.update({
        "name": ncvar.name,
        "dtype": ncvar.dtype,
        "dimensions": list(ncvar.dimensions),
        "fill_value": ifill,
        "chunksizes": chunks,
    })

    return out
85201cc6b87b67e786334e4bcb51819b3145fb2c
40,948
def uri_scheme_behind_proxy(request, url):
    """
    Fix uris with forwarded protocol.

    Behind a TLS-terminating proxy django is reached over http, so the urls
    it generates use http too; rewrite the first scheme to https when the
    proxy says the original request was https.
    """
    forwarded_proto = request.META.get("HTTP_X_FORWARDED_PROTO", "http")
    if forwarded_proto == "https":
        # Only the leading occurrence is the scheme; leave the rest alone.
        url = url.replace("http:", "https:", 1)
    return url
873ac1c70627fc8e8dd0cb2b86dec99246bb9344
40,949
def get_new_column(df, name):
    """
    Get a column name which does not yet exist in df.

    @param name suggestion; underscores are appended until it is unused
    """
    candidate = name
    while candidate in df.columns:
        candidate += "_"
    return candidate
0f8083514c6008f7c65dcb008aeea03b1d6b6e7c
40,950
def select_one(choices: list, optional: bool = False):
    """Interactively choose one element from *choices* by index.

    A single choice is returned immediately without prompting.  When
    *optional*, an empty input selects nothing (returns None).  Invalid
    input re-prompts.
    """
    if len(choices) == 1:
        print('Choosing (0) automatically as the only option...')
        return choices[0]
    while True:
        answer = input(f'Choose one{" (optional)" * optional}: ')
        if optional and not answer:
            print(' Selected none.')
            return
        try:
            return choices[int(answer)]
        except (ValueError, IndexError):
            print('INVALID INPUT! ', end='')
12ee1365633ee4fe9cd477d8435ce42e9ba958f0
40,951
def remove_last_cycles_from_summary(s, last=None):
    """Drop summary rows whose (cycle) index exceeds *last*.

    When *last* is None the frame is returned untouched.
    """
    if last is None:
        return s
    return s.loc[s.index <= last, :]
811c73cce5d107a6a3c8fcae71c320c548ee6755
40,952
import functools


def as_scalar(method):
    """
    Decorator: unwrap single-element results when the instance is scalar.

    If the wrapped method's owner has a truthy ``isscalar`` and the result
    has exactly one element, that element is returned instead of the
    container; results without len() pass through unchanged.
    """
    @functools.wraps(method)
    def _decorator(*args, **kwargs):
        result = method(*args, **kwargs)
        try:
            if args[0].isscalar and len(result) == 1:
                return result[0]
            return result
        except TypeError:
            # result has no len(); hand it back untouched
            return result
    return _decorator
2492f9650e2245170a9e52b7153a99c9265ef1a8
40,953
def get_count_to_submit(counts, n_total, dataset, method, subsampling=None, data_format=None):
    """
    Return how many runs are still missing to reach *n_total*.

    counts is a nested dict {dataset_key: {method_key: completed_runs}}.
    When given, *subsampling* is appended to the dataset key as
    'dataset/subsampling' (flat keys were chosen over deeper nesting to
    keep hierarchy levels uniform); *data_format* is prefixed to the
    method key the same way.
    """
    dataset_key = dataset if subsampling is None else dataset + '/' + subsampling
    method_key = method if data_format is None else data_format + method
    completed = counts.get(dataset_key, {}).get(method_key)
    if completed is None:
        # no recorded count means no completed jobs yet
        completed = 0
    # never report a negative remainder
    return max(n_total - completed, 0)
9fc49986d5dc58d300394ea796de715549ceaa9b
40,955