Columns: content — string (lengths 35 to 416k) · sha1 — string (length 40) · id — int64 (0 to 710k)
import json
import yaml


def list_to_yaml(table_list, filename):
    """ Convert a list of strings to YAML file format """
    X = [x[0].split('||') for x in table_list]
    timeseries = [x[1] for x in table_list]
    d = {}
    for index in range(len(X)):
        path = X[index]
        current_level = d
        for i, part in enumerate(path):
            if i < (len(path) - 1):
                if i == (len(path) - 2):
                    if timeseries[index] is True:
                        parameter = path[i]
                        technology = path[i - 2]
                        if (i - 4 > 0):
                            location = path[i - 4]
                            current_level[part] = \
                                "file={}--{}--{}.csv:value".format(
                                    location, technology, parameter)
                        else:
                            current_level[part] = \
                                "file={}--{}.csv:value".format(
                                    technology, parameter)
                    elif path[i + 1] == "":
                        current_level[part] = None
                    elif path[i + 1] == 'True':
                        current_level[part] = True
                    elif path[i + 1] == 'False':
                        current_level[part] = False
                    else:
                        try:
                            string = path[i + 1].replace(", ", ",")
                            for char in ['\'', '“', '”', '‘', '’']:
                                string = string.replace(char, '\"')
                            current_level[part] = json.loads(string)
                        except Exception:
                            try:
                                current_level[part] = float(path[i + 1])
                            except ValueError:
                                current_level[part] = path[i + 1]
                if part not in current_level:
                    current_level[part] = {}
                current_level = current_level[part]
    with open(filename, 'w') as outfile:
        yaml.dump(d, outfile, default_flow_style=False)
    return True
e31ff812bc1b1f1759c465a226a75c2a6c386a38
29,859
def column2list(matrix: list, column_idx: int) -> list:
    """
    Convert column from features 2D matrix to 1D list of that feature.

    :param matrix: 2D square array of number
    :param column_idx: column index in the matrix
    :return:
    """
    if len(matrix) <= 1:
        return matrix if matrix else []
    if column_idx >= len(matrix[0]):
        return [-1]
    new_column = []
    for line in matrix:
        new_column.append(line[column_idx])
    return new_column
841baf8f6cf6b78c9f41dc90a9ff1759a8285cc8
29,860
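A minimal usage sketch for column2list above (hypothetical values, not part of the dataset row):

matrix = [[1, 2, 3],
          [4, 5, 6],
          [7, 8, 9]]
print(column2list(matrix, 1))  # [2, 5, 8]
print(column2list(matrix, 5))  # [-1]  (out-of-range sentinel)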
def __join_with_or(times):
    """Returns 'a', 'a or b', or 'a, b, or c'."""
    if not times:
        return ""
    if len(times) == 1:
        return times[0]
    if len(times) == 2:
        return " or ".join(times)
    return ", or ".join([", ".join(times[:-1]), times[-1]])
eec8adc3bf0b013cda315ed81c0ddf1a0a7511c6
29,861
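A quick sketch of __join_with_or on the four list-length cases (hypothetical inputs):

print(__join_with_or([]))                      # ''
print(__join_with_or(["9am"]))                 # '9am'
print(__join_with_or(["9am", "noon"]))         # '9am or noon'
print(__join_with_or(["9am", "noon", "5pm"]))  # '9am, noon, or 5pm'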
def parseBool(txt):
    """
    Parser for boolean options

    :param str txt: String from config file to parse.

    :returns: ``True`` if the string is 'True', ``False`` otherwise.
    :rtype: boolean
    """
    return txt == 'True'
267c6c09c665735ac3ae766a9fd5259768ff3d40
29,862
def seq_to_subset(seq_sol, m, n):
    """Convert sequence solution (e.g.: ['r2','c3']) into subset solution (e.g.: [[0,1],[0,0,1]])"""
    assert isinstance(seq_sol, list)
    subset_sol = [[0]*m, [0]*n]
    for e in seq_sol:
        if e[0] == 'r':
            subset_sol[0][int(e[1:])-1] = (1-subset_sol[0][int(e[1:])-1])
        elif e[0] == 'c':
            subset_sol[1][int(e[1:])-1] = (1-subset_sol[1][int(e[1:])-1])
        else:
            raise RuntimeError(f'This seq_sol is badly written: {seq_sol}')
    return subset_sol
345e3fab339edbdb059611b2b542917bc43bd6ef
29,863
def fit_FC(model, data):
    """
    Convenience function to fit a flow curve

    Args:
        model: rheology model (e.g. HB_model)
        data: pandas DataFrame with columns 'Shear rate' and 'Stress'

    Returns:
        lmfit.fitresult
    """
    return model.fit(data["Stress"], x=data["Shear rate"], weights=1 / data["Stress"])
6fca8cf3d2ea965e55b667be27aeaf4bb4fbc402
29,864
import os
import glob


def make_log_list(log_root_dir, log_type):
    """
    Builds a list of Bro log files by recursively searching all
    subdirectories under the root directory.

    :param log_root_dir: Root directory for the recursive search
    :param log_type: Name of the Bro log file to load (e.g., dns)
    :return: List of paths to log files that were found
    """
    path = os.path.join(f'{log_root_dir}', f'**/*{log_type}*.log')
    results = glob.glob(path, recursive=True)
    return results
951107e24f6e9a9411ed426a147446816fbdae1d
29,866
import os
import inspect


def module_relative_path(path):
    """return an absolute path from a given path which is relative to the calling module"""
    if os.path.isabs(path):
        return path
    calling_file = inspect.stack()[1][1]
    calling_dir = os.path.abspath(os.path.dirname(calling_file))
    return os.path.join(calling_dir, path)
032942241b67ce5685d237475e1a52c39bb9ebf2
29,867
def set_cover(universe, subsets):
    """Find a family of subsets that covers the universal set"""
    elements = set(e for s in subsets for e in s)
    # Check the subsets cover the universe
    if elements != universe:
        return None
    covered = set()
    cover = []
    # Greedily add the subsets with the most uncovered points
    while covered != universe:
        subset = max(subsets, key=lambda s: len(s - covered))
        cover.append(subset)
        covered |= subset
    return cover
63a56ae709d766ec8cc57d3053f06032e84b7187
29,868
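A usage sketch for the greedy set_cover above (hypothetical inputs). Note that the greedy strategy is an approximation and is not guaranteed to return the minimum-size cover:

universe = {1, 2, 3, 4, 5}
subsets = [{1, 2, 3}, {2, 4}, {3, 4}, {4, 5}]
print(set_cover(universe, subsets))  # [{1, 2, 3}, {4, 5}]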
def get_constraints(cursor, table_name):
    """
    Retrieves any constraints or keys (unique, pk, fk, check, index)
    across one or more columns.
    """
    constraints = {}
    # Loop over the key table, collecting things as constraints
    # This will get PKs, FKs, and uniques, but not CHECK
    cursor.execute("""
        SELECT
            kc.constraint_name,
            kc.column_name,
            c.constraint_type,
            array(SELECT table_name::text || '.' || column_name::text
                  FROM information_schema.constraint_column_usage
                  WHERE constraint_name = kc.constraint_name)
        FROM information_schema.key_column_usage AS kc
        JOIN information_schema.table_constraints AS c ON
            kc.table_schema = c.table_schema AND
            kc.table_name = c.table_name AND
            kc.constraint_name = c.constraint_name
        WHERE
            kc.table_schema = current_schema() AND
            kc.table_name = %s
        ORDER BY kc.ordinal_position ASC
    """, [table_name])
    for constraint, column, kind, used_cols in cursor.fetchall():
        # If we're the first column, make the record
        if constraint not in constraints:
            constraints[constraint] = {
                "columns": [],
                "primary_key": kind.lower() == "primary key",
                "unique": kind.lower() in ["primary key", "unique"],
                "foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
                "check": False,
                "index": False,
            }
        # Record the details
        constraints[constraint]['columns'].append(column)
    # Now get CHECK constraint columns
    cursor.execute("""
        SELECT kc.constraint_name, kc.column_name
        FROM information_schema.constraint_column_usage AS kc
        JOIN information_schema.table_constraints AS c ON
            kc.table_schema = c.table_schema AND
            kc.table_name = c.table_name AND
            kc.constraint_name = c.constraint_name
        WHERE
            c.constraint_type = 'CHECK' AND
            kc.table_schema = current_schema() AND
            kc.table_name = %s
    """, [table_name])
    for constraint, column in cursor.fetchall():
        # If we're the first column, make the record
        if constraint not in constraints:
            constraints[constraint] = {
                "columns": [],
                "primary_key": False,
                "unique": False,
                "foreign_key": None,
                "check": True,
                "index": False,
            }
        # Record the details
        constraints[constraint]['columns'].append(column)
    # Now get indexes
    cursor.execute("""
        SELECT
            c2.relname,
            ARRAY(
                SELECT (SELECT attname FROM pg_catalog.pg_attribute
                        WHERE attnum = i AND attrelid = c.oid)
                FROM unnest(idx.indkey) i
            ),
            idx.indisunique,
            idx.indisprimary
        FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
            pg_catalog.pg_index idx, pg_catalog.pg_namespace n
        WHERE c.oid = idx.indrelid
            AND idx.indexrelid = c2.oid
            AND n.oid = c.relnamespace
            AND n.nspname = current_schema()
            AND c.relname = %s
    """, [table_name])
    for index, columns, unique, primary in cursor.fetchall():
        if index not in constraints:
            constraints[index] = {
                "columns": list(columns),
                "primary_key": primary,
                "unique": unique,
                "foreign_key": None,
                "check": False,
                "index": True,
            }
    return constraints
8d6895fa03e57aeabed774dbc26fb44f12565fe6
29,870
import os


def random_salt(length=32):
    """Randomly generate a salt (default 32 bytes) using a
    Cryptographically Secure Pseudo-Random Number Generator (CSPRNG).
    """
    return os.urandom(length)
511ff570ac1dc31465e581c40217d3942534c7af
29,871
def GenerateWIUpdateMsgString(membership, issuer_url, resource_name,
                              cluster_name):
    """Generates user message with information about enabling/disabling Workload Identity.

    We do not allow updating issuer url from one non-empty value to another.

    Args:
        membership: membership resource.
        issuer_url: The discovery URL for the cluster's service account token issuer.
        resource_name: The full membership resource name.
        cluster_name: User supplied cluster_name.

    Returns:
        A string, the message string for user to display information about
        enabling/disabling WI on a membership, if the issuer url is changed
        from empty to non-empty value or vice versa. An empty string is
        returned for other cases.
    """
    if membership.authority and not issuer_url:
        # Since the issuer is being set to an empty value from a non-empty value
        # the user is trying to disable WI on the associated membership resource.
        return ('A membership [{}] for the cluster [{}] already exists. The cluster'
                ' was previously registered with Workload Identity'
                ' enabled. Continuing will disable Workload Identity on your'
                ' membership, and will reinstall the Connect agent deployment.'
                .format(resource_name, cluster_name))
    if not membership.authority and issuer_url:
        # Since the issuer is being set to a non-empty value from an empty value
        # the user is trying to enable WI on the associated membership resource.
        return ('A membership [{}] for the cluster [{}] already exists. The cluster'
                ' was previously registered without Workload Identity.'
                ' Continuing will enable Workload Identity on your'
                ' membership, and will reinstall the Connect agent deployment.'
                .format(resource_name, cluster_name))
    return ''
75a54be804d5012e94587c4d5094936dc1f770da
29,874
import random


def choice_optional(lst):
    """Returns random.choice if there are elements, None otherwise"""
    if len(lst) > 0:
        return random.choice(lst)
    return None
778561f1a42952d9f2af9692adeadd57917a4ea3
29,875
def find_all_paths(graph, start, end, path):
    """
    :param graph: the graph that's traversed
    :param start: starting node
    :param end: end node
    :param path: currently found paths
    :return: all possible paths
    """
    path = path + [start]
    if start == end:
        return [path]
    if str(start) not in graph or str(end) not in graph:
        return []
    paths = []
    for node in graph[str(start)]:
        if node not in path:
            newpaths = find_all_paths(graph, node, end, path)
            for newpath in newpaths:
                paths.append(newpath)
    return paths
51382317493191193e822fa819998ee7aa749122
29,876
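A usage sketch for find_all_paths (hypothetical graph; note the function looks nodes up via str(node), so string-keyed adjacency lists work directly):

graph = {'A': ['B', 'C'], 'B': ['D'], 'C': ['D'], 'D': []}
print(find_all_paths(graph, 'A', 'D', []))
# [['A', 'B', 'D'], ['A', 'C', 'D']]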
def intron_gff_iterator(df, intron, intron_found, real_introns, read, args):
    """
    Iterates over the intron.gff3 and checks whether the given deletion is an intron.
    """
    df = df.loc[df["start"] > intron[0] - args.wobble - 1]
    for index, row in df.iterrows():
        intron_found = False
        if intron[0] == (row["start"]) - 1 and intron[1] == (row["end"] + 1):
            intron_found = True
            real_introns += intron
            break
    if (not intron_found) and ((intron[1] - intron[0]) > args.gap):
        read.set_tag("ga", str(intron), "Z")
    return real_introns, intron_found, read
07b256576cb3cecf44252ac84305b575747c6da2
29,881
def update_claims(session, about, provider_info, old_claims=None):
    """
    :param session:
    :param about: userinfo or id_token
    :param old_claims:
    :return: claims or None
    """
    if old_claims is None:
        old_claims = {}
    req = None
    try:
        req = session["authn_req"]
    except KeyError:
        pass
    if req:
        try:
            _claims = req["claims"][about]
        except KeyError:
            pass
        else:
            if _claims:
                # Deal only with supported claims
                _unsup = [c for c in _claims.keys()
                          if c not in provider_info["claims_supported"]]
                for _c in _unsup:
                    del _claims[_c]
                # update with old claims, do not overwrite
                for key, val in old_claims.items():
                    if key not in _claims:
                        _claims[key] = val
                return _claims
    return old_claims
4983b5dd225b690685f4d73d3cc746632e1c8d52
29,882
def getFolder():
    """ return the folder where this module is located"""
    folderWithFileName = __file__
    if folderWithFileName[-1] == 'c':
        fileName = __name__ + ".pyc"
    else:
        fileName = __name__ + ".py"
    folderLength = len(folderWithFileName) - len(fileName)
    folderWithoutFileName = folderWithFileName[:folderLength]
    return folderWithoutFileName
204bb6c2aa4e693e6461b108bd354844cf229363
29,883
import json


def read_json_file(path=r'web_crawler\scrapped_data\all_scraped_data.json'):
    """reads Json file

    Args:
        path: the path to the json file
    """
    with open(path, 'r') as data_file:
        return json.load(data_file)
1f82ae07906d554a227aa653a2fe1bffc6a46a9b
29,884
def grep(*matches):
    """Returns a generator function that operates on an iterable:
    filters items in the iterable that match any of the patterns.

    match: a callable returning a True value if it matches the item

    >>> import re
    >>> input = ["alpha\n", "beta\n", "gamma\n", "delta\n"]
    >>> list(grep(re.compile('b').match)(input))
    ['beta\n']
    """
    def _do_grep_wrapper(*matches):
        def _do_grep(lines):
            for line in lines:
                for match in matches:
                    if match(line):
                        yield line
                        break
        return _do_grep
    return _do_grep_wrapper(*matches)
9a28efd6416c0f8e02532072d9c366747dc52291
29,885
import os


def clear_console_screen():
    """Clears the console screen. Returns None."""
    os.system('cls' if os.name == 'nt' else 'clear')
    return None
6b9b4310a68f13ea13e1d28224c1d75e08aac0a8
29,887
from unicodedata import normalize


def unicodify(s, encoding='utf-8', norm=None):
    """Ensure string is Unicode.

    .. versionadded:: 1.31

    Decode encoded strings using ``encoding`` and normalise Unicode
    to form ``norm`` if specified.

    Args:
        s (str): String to decode. May also be Unicode.
        encoding (str, optional): Encoding to use on bytestrings.
        norm (None, optional): Normalisation form to apply to Unicode string.

    Returns:
        unicode: Decoded, optionally normalised, Unicode string.
    """
    if not isinstance(s, str):
        s = str(s, encoding)
    if norm:
        s = normalize(norm, s)
    return s
ee4882dd7450ba0b146e3fb9ddabf9deaf0e7903
29,888
import collections


def eval_step(test_batch, snlds_model, num_samples, temperature):
    """Runs evaluation of model on the test set and returns evaluation metrics.

    Args:
        test_batch: a batch of the test data.
        snlds_model: tf.keras.Model, SNLDS model to be evaluated.
        num_samples: int, number of samples per trajectories to use at eval time.
        temperature: float, annealing temperature to use on the model.

    Returns:
        Dictionary of metrics, str -> list[tf.Tensor], aggregates the result
        dictionaries returned by the model.
    """
    test_values = collections.defaultdict(list)
    for _ in range(10):
        result_dict = snlds_model(
            test_batch, temperature, num_samples=num_samples)
        for k, v in result_dict.items():
            test_values[k].append(v)
    return test_values
2a7ec12f43925aecf266048c6eaa0b331641a4fe
29,890
def exception_to_dict(error):
    """Takes in an exception and outputs its details, excluding the stacktrace, to a dict

    Args:
        error (Exception): The exception to serialize

    Returns:
        dict: The serialized exception
    """
    return {"type": str(type(error).__name__), "message": str(error)}
a677e151079a7b0005a7da1065f08b49cbf559be
29,891
import time


def crete_tag_time():
    """
    Create local tag time
    :return: str: local time structure
    """
    s = time.localtime(time.time())
    year = str(s[0])
    month = s[1]
    if month < 10:
        month = "0" + str(month)
    day = s[2]
    if day < 10:
        day = "0" + str(day)
    hours = s[3]
    if hours < 10:
        hours = "0" + str(hours)
    m = s[4]
    if m < 10:
        m = "0" + str(m)
    sec = s[5]
    if sec < 10:
        sec = "0" + str(sec)
    ltime = str(year) + "-" + str(month) + "-" + str(day) + "_" + str(hours)
    ltime = ltime + ":" + str(m) + ":" + str(sec)
    return ltime
ac8a5a94f39c682bd17d7beb1b1eff98a1271a56
29,894
def normalized_bases(classes):
    """Remove redundant base classes from `classes`"""
    candidates = []
    for m in classes:
        for n in classes:
            if issubclass(n, m) and m is not n:
                break
        else:
            # m has no subclasses in 'classes'
            if m in candidates:
                candidates.remove(m)  # ensure that we're later in the list
            candidates.append(m)
    return candidates
b355d438bfbd946dc5d73423e33b77b107121e0b
29,895
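A small sketch of normalized_bases with hypothetical classes:

class A: pass
class B(A): pass
class C: pass

print(normalized_bases([A, B, C]))
# A is dropped because B already subclasses it:
# [<class '__main__.B'>, <class '__main__.C'>]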
def generate_window(keys, key_index, steps):
    """Generates a list (window) steps distance to the left and right of a given peak

    Args:
        keys (List): The keys for the compressed depths dictionary
        key_index (int): The index in the keys list of the peak_key key
        steps (int): How many steps left and right will be searched to verify a local maximum

    Returns:
        [list]: A list of keys that includes all keys steps away from a given location
    """
    window_keys = []
    for x in range(1, steps + 1):
        if key_index - x >= 0:
            window_keys.append(keys[key_index - x])
    for x in range(1, steps + 1):
        if key_index + x < len(keys):
            window_keys.append(keys[key_index + x])
    return window_keys
7c712c18a8ce031f0ba9cfa86f9e3b34611d48a9
29,896
def transform_play_to_column(play):
    """Return position of the column where the play was made.

    Parameters:
        play (int): bit board representation of a piece

    Returns:
        int: column position
    """
    return [2 ** i for i in range(49)].index(play) // 7
aa0e0298ead19d9f1375092f6b06f95ff2829f0f
29,897
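A quick check of transform_play_to_column, assuming the 49-bit board encodes seven 7-bit columns (an assumption inferred from the // 7):

print(transform_play_to_column(1 << 10))  # 1 — bit 10 falls in the second column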
import binascii


def get_internal_name(cache_key):
    """Converts a cache key into an internal space module name."""
    package, version = cache_key
    return 'multiversion.space.%s___%s' % (package, binascii.hexlify(version))
a34c3c746135f3913f9577238f8a8d6b13398d5d
29,899
def to_char_arrays(text, w):
    """sets the words from input string into one or more character arrays of length w

    :param text: input string
    :param w: maximum length of array
    :type text: str
    :type w: int
    :return: list of character arrays
    :rtype: list of str
    """
    words = text.split()
    array_list = []
    if words:
        char_arr = words[0]  # assign first word
    else:
        char_arr = ''
    for word in words[1:]:  # for remaining words
        temp = ' ' + word
        if len(char_arr + temp) <= w:  # if second word fits
            char_arr += temp
        else:  # add to new array
            array_list.append(char_arr)
            char_arr = word
    array_list.append(char_arr)
    return array_list
6c2a3dc9957c1409dfebef23874bf39df530b776
29,900
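A usage sketch of the word-wrap behaviour of to_char_arrays (hypothetical inputs):

print(to_char_arrays("the quick brown fox", 10))  # ['the quick', 'brown fox']
print(to_char_arrays("", 5))                      # [''] — empty input yields one empty array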
def counting_sort(a, k, reverse=False):
    """Counting sort.

    Basic idea: for each record, count the number of records smaller than it.
    With this information, the position of the record can be determined
    directly. When several records are equal, the positions need a small
    adjustment.

    Record values lie in the range 0..k. Build C[0...k]; for each record
    value, find its position in C and increment that counter. For a position
    i in C, the sum of C[0...i-1] is the number of records smaller than i, so
    compute C[1...k] in turn as C[i] = C[i] + C[i-1]. Finally, re-determine
    the positions of the records in the sequence from the data in C.

    :param a:
    :param k:
    :param reverse:
    :return:
    """
    b = [0] * len(a)    # output buffer for the sorted sequence
    c = [0] * (k + 1)   # counting buffer
    # Count occurrences
    for v in a:
        c[v] += 1
    if reverse:
        # Count, for each value i, the records greater than i
        for i in range(len(c) - 2, -1, -1):
            c[i] += c[i + 1]
    else:
        # Count, for each value i, the records smaller than i
        for i in range(1, len(c)):
            c[i] += c[i - 1]
    for v in a:
        # Place each record into the output sequence according to C
        b[c[v] - 1] = v
        # If several records share a value, adjust the position
        c[v] -= 1
    return b
20d4a26d9541a397645890ce22a384efe6fe8427
29,901
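A quick check of counting_sort in both directions (hypothetical inputs):

print(counting_sort([3, 1, 4, 1, 5], 5))                # [1, 1, 3, 4, 5]
print(counting_sort([3, 1, 4, 1, 5], 5, reverse=True))  # [5, 4, 3, 1, 1]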
def checkSignedFeatureList(vocabularyList, inputWords):
    """
    Check whether the words of the input text vector appear in the document
    vocabulary, and mark them. Returns the list of marks for the vocabulary,
    which can be read as a feature-value list.

    :type vocabularyList: list
    :param vocabularyList:
    :param inputWords:
    :return:
    """
    # Create a fixed-length list
    signedFeatureList = [0] * len(vocabularyList)
    for word in inputWords:
        if word in vocabularyList:
            # If the word is in the input text vector, mark the corresponding feature
            signedFeatureList[vocabularyList.index(word)] += 1
    return signedFeatureList
ba32774d6295169352ddf08cf954debd29dfa589
29,902
def parse_block(block, metric=[], labels={}):
    """Parse_block.

    Parse a 'block' of results (a level of a for example) and transform
    into a suitable list with labels and all
    """
    result = []
    # Give a dict of lists of labels which will be concatenated into a single
    # label per item. These could be static or based on the response data
    labels = {"app": ["example", "random_numbers"], "env": ["TEST"]}
    # .items() converts a dict to a list of key-value tuples
    for key, value in block.items():
        # At this point we have e.g. key-value pair
        # "smalls: {'first': 3, 'second': 18}" based on the data
        for n_key, n_value in value.items():
            # And here "first: 3"
            # Append the list with the metric "prefixes", keys,
            # labels and value
            #
            # e.g. "example1", "random", "smalls", "first", labels, 5
            result.append((metric + [key] + [n_key], labels, n_value))
    return result
6080db9e8a9045b254258312aa866757bd4fdc1c
29,903
def variants(sublist, int_list=False):
    """Find all supported variants of items in the sublist"""
    result = []
    result.append(sublist)
    if int_list:
        if len(sublist) == 3:
            result.append([sublist[0], ",", sublist[1], ",", sublist[2]])
            result.append([sublist[0], "x", sublist[1], "x", sublist[2]])
            result.append([sublist[0], "by", sublist[1], "by", sublist[2]])
            result.append(["(", sublist[0], sublist[1], sublist[2], ")"])
            result.append(["(", sublist[0], ",", sublist[1], ",", sublist[2], ")"])
        elif len(sublist) == 2:
            result.append([sublist[0], ",", sublist[1]])
            result.append([sublist[0], "x", sublist[1]])
            result.append([sublist[0], "by", sublist[1]])
            result.append(["(", sublist[0], sublist[1], ")"])
            result.append(["(", sublist[0], ",", sublist[1], ")"])
    return result
078626408d0f0d74fea8062c80b1d1843993a0c6
29,905
import os


def open_read_numpy_files(folder_path):
    """
    opens each activations/gradients numpy file and calculates the
    product of the activations/gradients per layer
    """
    # change directories
    os.chdir(folder_path)
    grad_files_list = []
    activ_files_list = []
    file_name = None
    # Read activation files in an ordered way, if they exist
    for epoch in range(0, 360):     # read till total number of epochs
        for iter in range(0, 391):  # 50000/batch_size(128) = 390
            file_name = folder_path + '/epoch_' + \
                str(epoch) + '_iteration_' + str(iter) + '_activations_array.npy'
            activ_files_list.append(file_name)
    # Read gradient files in an ordered way, if they exist
    for epoch in range(0, 360):     # read till total number of epochs
        for iter in range(0, 391):  # 50000/batch_size(128) = 390
            file_name = folder_path + '/epoch_' + \
                str(epoch) + '_iteration_' + str(iter) + '_gradients_array.npy'
            grad_files_list.append(file_name)
    return grad_files_list, activ_files_list
048393ce4854076346039ce0f74da1f9b946a9a3
29,909
def build_index(data: list) -> tuple:
    """
    Create an index with items and shops for faster lookup. Format -
    { 'product1': 'shops': set([1, 3, 5]), ... }
    { shop1: [ (price1, [item1, item2, ...]), ... ], ... }

    :param data: List of tuples containing data about products and shops
    :return: An index based on products and shops
    """
    item_index = {}  # Create an empty item_index
    shop_index = {}
    for item in data:
        for product in item[2]:
            if product not in item_index:  # New, initialize
                item_index[product] = set()
            item_index[product].add(item[0])  # Add to shop set
        if item[0] not in shop_index:
            shop_index[item[0]] = []
        shop_index[item[0]].append((item[1], item[2:]))
    return item_index, shop_index
64b8f75553bf1968e922dd06a2cc3c75e44e9bf6
29,911
def bst_contains(node, value):
    """
    Return whether tree rooted at node contains value.

    Assume node is the root of a Binary Search Tree

    @param BinaryTree|None node: node of a Binary Search Tree
    @param object value: value to search for
    @rtype: bool

    >>> bst_contains(None, 5)
    False
    >>> bst_contains(BinaryTree(7, BinaryTree(5), BinaryTree(9)), 5)
    True
    """
    if node is None:
        return False
    elif node.data == value:
        return True
    elif value < node.data:
        return bst_contains(node.left, value)
    elif value > node.data:
        return bst_contains(node.right, value)
    else:
        assert False, "WTF!"
bf1c875d9a3b4c5b8af42fb63465aa0f10039b49
29,912
def invert_dict(d):
    """Return an 'inverted' dictionary, swapping keys against values.

    Parameters
    ----------
    d : dict-like
        The dictionary to invert

    Returns
    -------
    inv_d : dict()
        The inverted dictionary.

    Notes
    -----
    If the key-mapping is not one-to-one, then the dictionary is not
    invertible and a ValueError is thrown.
    """
    inv_d = dict((d[key], key) for key in d)
    if len(d) != len(inv_d):
        raise ValueError('Key-value mapping is not one-to-one.')
    return inv_d
4a5d56cbf7ac4dc8c787635f7fbc8b4608b6532e
29,913
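A minimal sketch of invert_dict on both the invertible and the non-invertible case:

print(invert_dict({'a': 1, 'b': 2}))  # {1: 'a', 2: 'b'}
invert_dict({'a': 1, 'b': 1})         # raises ValueError: mapping not one-to-one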
import os


def get_path_basename(urlpath):
    """
    Helper function to derive file basename

    :param urlpath: URL path

    :returns: string of basename of URL path
    """
    return os.path.basename(urlpath)
9477b1d67d8a061505c961821fce7fe99092f196
29,915
import numpy as np


def interbin_fft(freq, fft):
    """Interbinning, a la van der Klis 1989.

    Allows to recover some sensitivity in a power density spectrum when
    the pulsation frequency is close to a bin edge.

    Here we oversample the Fourier transform that will be used to calculate
    the PDS, adding intermediate bins with the following values:

    A_{k+1/2} = \\pi / 4 (A_k - A_{k + 1})

    Please note: The new bins are not statistically independent from the
    rest. Please use simulations to estimate the correct detection levels.

    Parameters
    ----------
    freq : array of floats
        The frequency array
    fft : array of complex numbers
        The Fourier Transform

    Returns
    -------
    new_freqs : array of floats, twice the length of the original array
        The new frequency array
    new_fft : array of complex numbers
        The interbinned Fourier Transform.

    Examples
    --------
    >>> import numpy as np
    >>> freq = [0, 0.5, 1, -1, -0.5]
    >>> fft = np.array([1, 0, 1, 1, 0], dtype=float)
    >>> f, F = interbin_fft(freq, fft)
    >>> np.allclose(f, [0, 0.25, 0.5, 0.75, 1, -1, -0.75, -0.5, -0.25])
    True
    >>> pi_4 = np.pi / 4
    >>> np.allclose(F, [1, -pi_4, 0, pi_4, 1, 1, -pi_4, 0, pi_4])
    True
    """
    freq = np.asarray(freq)
    fft = np.asarray(fft)

    neglast = freq[-1] < 0
    if neglast:
        order = np.argsort(freq)
        freq = freq[order]
        fft = fft[order]

    N = freq.size
    new_N = 2 * N - 1

    new_freqs = np.linspace(freq[0], freq[-1], new_N)
    new_fft = np.zeros(new_N, dtype=type(fft[0]))
    new_fft[::2] = fft
    new_fft[1::2] = (fft[1:] - fft[:-1]) * np.pi / 4

    if neglast:
        fneg = new_freqs < 0
        fpos = ~fneg
        new_freqs = np.concatenate((new_freqs[fpos], new_freqs[fneg]))
        new_fft = np.concatenate((new_fft[fpos], new_fft[fneg]))

    return new_freqs, new_fft
cb3f38171c29477f1c1614cd3d9cd744d20f8faf
29,916
def reduce_boxes(boxes, n_o_b):
    """
    Reduces the number of boxes by combining them. Combines the closest boxes.

    :param boxes: List[List[int]] = List of bboxes of the chars
    :param n_o_b: int = wanted number of boxes
    :return: List[List[int]] = modified boxes
    """
    while len(boxes) > n_o_b:
        sizes = list(map(lambda box: box[2] - box[0], boxes))
        mini = min(sizes)
        index = sizes.index(mini)
        # select the smallest box
        if index == 0:
            # only one neighbour (second)
            neighbour = 1
        elif index == len(boxes) - 1:
            # only one neighbour (next to last)
            neighbour = index - 1
        else:
            # choose neighbour with smallest distance between borders
            neighbour = 2 * int(
                boxes[index][0] - boxes[index - 1][2]
                > boxes[index + 1][0] - boxes[index][2]) - 1 + index
        if neighbour > index:
            boxes[neighbour] = (boxes[index][0],
                                min(boxes[index][1], boxes[neighbour][1]),
                                boxes[neighbour][2],
                                max(boxes[index][3], boxes[neighbour][3]))
        else:
            boxes[neighbour] = (boxes[neighbour][0],
                                min(boxes[index][1], boxes[neighbour][1]),
                                boxes[index][2],
                                max(boxes[index][3], boxes[neighbour][3]))
        del boxes[index]
    return boxes
480078aa179161105d206cdfd4454d72d1ffd8e6
29,917
def c_uchar(i):
    """
    Convert arbitrary integer to c unsigned char type range as if casted in c.

    >>> c_uchar(0x12345678)
    120
    >>> (c_uchar(-123), c_uchar(-1), c_uchar(255), c_uchar(256))
    (133, 255, 255, 0)
    """
    return i & 0xFF
937b683505282eb8577affb14236515ecdda20be
29,919
def _groups_intersect(groups_A, groups_B):
    """
    Return true if any of the groups in A are also in B
    (or size of intersection > 0). If both are empty for now we will allow it.
    """
    ga = set(groups_A)
    gb = set(groups_B)
    return len(ga.intersection(gb)) > 0
7162b8aae5c4eaacad1596b93f8ece1624fd6945
29,920
def is_same_shape(T1, T2):
    """
    Two partial latin squares T1, T2 have the same shape if T1[r, c] = 0
    if and only if T2[r, c] = 0.

    EXAMPLES::

        sage: from sage.combinat.matrices.latin import *
        sage: is_same_shape(elementary_abelian_2group(2), back_circulant(4))
        True
        sage: is_same_shape(LatinSquare(5), LatinSquare(5))
        True
        sage: is_same_shape(forward_circulant(5), LatinSquare(5))
        False
    """
    for i in range(T1.nrows()):
        for j in range(T1.ncols()):
            if T1[i, j] < 0 and T2[i, j] < 0:
                continue
            if T1[i, j] >= 0 and T2[i, j] >= 0:
                continue
            return False
    return True
4602f7cb2a093393445f7e23f2d9f539021d2f7a
29,921
def condition_str(condition, phase, component):
    """
    Returns a string representation of a condition for a specific phase
    and component.
    """
    if phase is not None:
        return condition + '(' + phase + ',' + component + ')'
    else:
        return condition + '(' + component + ')'
ef7432bf1a7ca25ffb6bf7a9c1dc033dd53918af
29,922
def rename_columns(df):
    """
    function to rename columns to allow for easier plotting of mri audio
    recordings compared to ansl output
    """
    df.rename(columns={'dBFS': 'Level (dBFS)'}, inplace=True)
    df['Frequency (Hz)'] = df['Frequency (Hz)'].astype(float)
    return df
27dfb10ce9717da3ea10b414a7a64ca5c5d059d0
29,923
from typing import Iterator


def range_(values):
    """Calculates the range of the values.

    :param values: The values.
    :return: The range of the values.
    """
    return range_(tuple(values)) if isinstance(values, Iterator) else max(values) - min(values)
28ce7a97db16e8a1e718f391b299457e42d556e2
29,924
from pathlib import Path


def output_is_golden(out: str, golden_file: Path, update_golden: bool) -> bool:
    """Check that out string matches the contents of the golden file."""
    __tracebackhide__ = True  # pylint: disable=unused-variable
    if update_golden:
        with open(golden_file, "w") as file:
            file.write(out)
    # Output of stdout should match expected output
    with open(golden_file) as file:
        assert file.read() == out
    return True
e0a110ae466f28aa18a0163d8ca0bab1b6c548cb
29,925
def list_to_string(l: list, s: str = "\n") -> str:
    """Transforms a list into a string. The entries of the list are
    separated by a separator.

    Args:
        l (list): the list
        s (str, optional): the separator. Defaults to "\\n".

    Returns:
        str: the list representation with separator
    """
    r = ""
    for e in l:
        r += e + s
    return r
ca04ba59c2edf740aa29b7e2ef79b8562c7d071c
29,926
def crop_boxes(boxes, crop_shape):
    """
    Crop boxes according to the given crop shape
    """
    crop_x1 = crop_shape[0]
    crop_y1 = crop_shape[1]
    crop_x2 = crop_shape[2]
    crop_y2 = crop_shape[3]
    l0 = boxes[:, 0] >= crop_x1
    l1 = boxes[:, 1] >= crop_y1
    l2 = boxes[:, 2] <= crop_x2
    l3 = boxes[:, 3] <= crop_y2
    L = l0 * l1 * l2 * l3
    cropped_boxes = boxes[L, :]
    cropped_boxes[:, 0] = cropped_boxes[:, 0] - crop_x1
    cropped_boxes[:, 1] = cropped_boxes[:, 1] - crop_y1
    cropped_boxes[:, 2] = cropped_boxes[:, 2] - crop_x1
    cropped_boxes[:, 3] = cropped_boxes[:, 3] - crop_y1
    return cropped_boxes
a84e68f7d05bf56ac615e829c862dfe5fe3712a5
29,927
def _parse_idd_type(epbunch, name):
    """Parse the fieldvalue type into a python type.

    Possible types are:
        - integer -> int
        - real -> float
        - alpha -> str (arbitrary string),
        - choice -> str (alpha with specific list of choices, see \key)
        - object-list -> str (link to a list of objects defined elsewhere,
          see \object-list and \reference)
        - external-list -> str (uses a special list from an external source,
          see \external-list)
        - node -> str (name used in connecting HVAC components)
    """
    _type = next(iter(epbunch.getfieldidd_item(name, "type")), "").lower()
    if _type == "real":
        return float
    elif _type == "alpha":
        return str
    elif _type == "integer":
        return int
    else:
        return str
bbf86c41ad685c2fb44f24671d22cf6601794cc1
29,929
def check_stream_status(stream, results_dict, ref_dict):
    """
    :param stream: 'ecg' | 'ppg'
    :param results_dict:
    :param ref_dict:
    :return:
    """
    status = True
    if stream == 'ecg':
        ref_hr = ref_dict['ref_hr']
        ref_qrs_amp = ref_dict['ref_qrs_amp']
        qrs_amp = float(results_dict['qrs_amp_mean'])
        qrs_time = float(results_dict['qrs_time_mean'])
        hr = float(results_dict['hr_mean'])
        if (abs(qrs_amp - ref_qrs_amp) > ref_qrs_amp * 0.1) or \
                (abs(hr - ref_hr) > ref_hr * 0.2) or \
                (0 <= qrs_time <= 0.2):
            status = False
    return status
1ff50f60b8b575c9b92c68cb65d9a7b25ee80c27
29,930
import numpy
import itertools


def pm(n, a):
    """Return all combinations of [+a, -a] with length n (with repetition).
    len(out) == 2**n.
    """
    return numpy.array(list(itertools.product([+a, -a], repeat=n)))
baa781200466c44c2f17ee667a005582e132ccf8
29,933
import os
import json


def generate_dashboard(name, dashboards, datasource):
    """
    Given the dashboard name, the dashboards part of the configuration, and
    the default datasource, generate a dashboard.

    We use the skeleton.json file, and fill it with given panels and template
    variables. Rows have titles, panels, and optional repeat and collapse
    options. Panel span can be configured: 12 = one line. A dashboard can
    inherit from another one, and be enabled or disabled; it has rows and
    optional templates.
    """
    data = {}
    resource_dir = os.path.join(os.path.dirname(__file__), 'resources')
    skeleton_file = os.path.join(resource_dir, 'skeleton.json')
    with open(skeleton_file) as skel:
        data = json.load(skel)
    dashboard = dashboards[name]
    templates = dashboard.get('templating', [])
    rows = dashboard.get('rows', [])
    # If we inherit from a dashboard, initialize rows and templates with this
    # dashboard.
    if 'inherits' in dashboard:
        master = dashboards[dashboard['inherits']]
        rows = master.get('rows', []) + rows
        templates = master.get('templating', []) + templates
    data['title'] = dashboard['title']
    for template in templates:
        template_data = {}
        template_dir = os.path.join(resource_dir, 'templates')
        template_file_path = os.path.join(template_dir,
                                          template['file'] + '.json')
        with open(template_file_path) as template_file:
            template_data = json.load(template_file)
        template_data['datasource'] = datasource
        # Set template default values
        if 'values' in template:
            for value in template['values']:
                template_data['current']['value'].append(value)
        data['templating']['list'].append(template_data)
    panelid = 1
    for row in rows:
        row_data = {
            'collapse': row.get('collapse', False),
            'height': 250,
            'panels': [],
            'repeat': row.get('repeat', None),
            'showTitle': True if 'title' in row else False,
            'title': row.get('title', None),
            'titleSize': 'h5'
        }
        # Process panels
        for panel in row['panels']:
            # panel can contain the file name or a dict
            if isinstance(panel, dict):
                panel_name = list(panel.keys())[0]
            else:
                panel_name = panel
            panel_data = {}
            panel_dir = os.path.join(resource_dir, 'panels')
            panel_file_path = os.path.join(panel_dir, panel_name + '.json')
            with open(panel_file_path) as panel_file:
                panel_data = json.load(panel_file)
            # Renumber each panel to avoid having the same one
            panel_data['id'] = panelid
            panelid += 1
            # Set datasource with default
            panel_data['datasource'] = datasource
            # Set span if found in panel
            if isinstance(panel, dict):
                if 'span' in panel[panel_name]:
                    panel_data['span'] = panel[panel_name]['span']
            row_data['panels'].append(panel_data)
        data['rows'].append(row_data)
    return data
71e57469b9064d3f2dca26ef699b762fb6e77875
29,934
def dot_in_stripe(dot, stripe):
    """True if dot.y is in horizontal stripe."""
    return stripe[0] < dot.y <= stripe[1]
a742ea3dd7d4dc9ed32d6df3bf2cb358fda2fda7
29,935
def parse_agents(args):
    """
    Each element is a class name like "Peer", with an optional count appended
    after a comma. So either "Peer", or "Peer,3".

    Returns an array with a list of class names, each repeated the specified
    number of times.
    """
    ans = []
    for c in args:
        s = c.split(',')
        if len(s) == 1:
            ans.extend(s)
        elif len(s) == 2:
            name, count = s
            ans.extend([name] * int(count))
        else:
            raise ValueError("Bad argument: %s\n" % c)
    return ans
e62aef22c20a449ae670bd517e3cb6f64375d792
29,939
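A quick sketch of parse_agents (hypothetical arguments):

print(parse_agents(["Peer,3", "Tracker"]))
# ['Peer', 'Peer', 'Peer', 'Tracker']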
import os def get_object_name(path): """ Given a path with a basename of the form xx-object.c, return Object """ (fdir, basename) = os.path.split(path) (base, ext) = os.path.splitext(basename) return base[3:].capitalize()
db920fdffd81b6af697306ee38351a3a35e99d38
29,940
import math
from sys import stderr
import time


def retry(tries, delay=3, backoff=2):
    """Retries a function or method until it succeeds.

    Note: this assumes the function succeeded if no exception was thrown.

    Args:
        tries(int): Number of attempts of the function. Must be >= 0
        delay(int): Initial delay in seconds, should be > 0
        backoff(int): Factor by which delay should increase between attempts
    """
    if backoff <= 1:
        raise ValueError('backoff must be greater than 1')
    tries = math.floor(tries)
    if tries < 0:
        raise ValueError('tries must be 0 or greater')
    if delay <= 0:
        raise ValueError('delay must be greater than 0')

    def deco_retry(f):
        """Decorator for retries"""

        def function_attempt(f, *args, **kwargs):
            """Single attempt of the function"""
            template = 'Attempt failed with Exception: \n{0}: {1}\n'
            try:
                r_value = f(*args, **kwargs)  # first attempt
                r_status = True
            except Exception as exp:
                stderr.write(template.format(type(exp).__name__, exp))
                r_value = exp
                r_status = False
            return r_value, r_status

        def f_retry(*args, **kwargs):
            """True decorator"""
            m_tries, m_delay = tries, delay  # make mutable
            r_value, r_status = function_attempt(f, *args, **kwargs)
            while m_tries > 0:
                # Done on success
                if r_status is True:
                    return r_value
                m_tries -= 1         # consume an attempt
                time.sleep(m_delay)  # wait...
                m_delay *= backoff   # make future wait longer
                # Try again
                r_value, r_status = function_attempt(f, *args, **kwargs)
            if r_status is True:
                return r_value
            else:
                raise r_value

        # true decorator -> decorated function
        return f_retry

    # @retry(arg[, ...]) -> true decorator
    return deco_retry
059d7a02cbabdbecc772fcd76e98fc602af0b6e7
29,942
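A hypothetical usage sketch of the retry decorator above (flaky_fetch is an invented placeholder):

@retry(3, delay=1, backoff=2)
def flaky_fetch():
    # stand-in for an operation that can fail transiently
    raise ConnectionError("temporarily unavailable")

# flaky_fetch() makes one initial attempt plus 3 retries, sleeping
# 1 s, 2 s, and 4 s between them, then re-raises the ConnectionError.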
import os


def is_file_in_folder(filename, folder_path):
    """Returns True if file is in the folder (not checking subfolders). Else False"""
    for f in os.listdir(folder_path):
        if f == filename:
            return True
    return False
6b7cdb5d2538ea3eda4d528ce47f6357e6c76319
29,943
import math


def A000010(n: int) -> int:
    """Euler totient function phi(n): count numbers <= n and prime to n."""
    numbers = []
    for i in range(n):
        if math.gcd(i, n) == 1:
            numbers.append(i)
    return len(numbers)
46b28b077f78965666eb34e0f4e7892e95b4162b
29,946
import numpy as np


def arc2px(x_arc, y_arc, header):
    """Converts x and y arcsec coords into px."""
    try:
        xpx = [0] * len(x_arc)
        ypx = [0] * len(y_arc)
    except TypeError:
        xpx = [0]
        ypx = [0]
        x_arc = [x_arc]
        y_arc = [y_arc]
    for i in range(0, len(xpx)):
        xpx[i] = int(
            np.round(((x_arc[i] - header["crval1"]) / header["cdelt1"])
                     + header["crpix1"]))
    for j in range(0, len(ypx)):
        ypx[j] = int(
            np.round(((y_arc[j] - header["crval2"]) / header["cdelt2"])
                     + header["crpix2"]))
    return xpx, ypx
fd1d893f41a889bd880c7f6c5971c3724615c5cf
29,947
def session_start(interp, args_w):
    """Start new or resume existing session"""
    if interp.session.is_active():
        interp.notice("A session had already been "
                      "started - ignoring session_start()")
        return interp.space.w_True
    res = interp.session.start(interp)
    return interp.space.newbool(res)
0b33802668a74c17bd991d4c3923889c3da8c3aa
29,948
import math


def CalcDistance(p1, p2):
    """
    Function to calculate distance in space between two points (p)

    p1, p2: (f) lists of coordinates for point1 and point2
    """
    dist = math.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2 + (p2[2]-p1[2])**2)
    return dist
099512d41acafa7832a425cb8fbe60a6c4be8ecf
29,951
import hashlib


def password_hash(password: str):
    """
    Hash a password for storing.

    Args:
        password (str): the password to hash.

    Returns:
        str: the hashed password.
    """
    sha512_1 = hashlib.sha512(password.encode("utf-8")).hexdigest()
    sha512_2 = hashlib.sha512(sha512_1.encode("utf-8")).hexdigest()
    sha512_3 = hashlib.sha512(sha512_2.encode("utf-8")).hexdigest()
    md5_1 = hashlib.md5(sha512_3.encode("utf-8")).hexdigest()
    return hashlib.sha512(md5_1.encode("utf-8")).hexdigest()
9d0a4ebc9d309aa5902ff39e9d4bd630ec215be5
29,954
def _get_lights(guess: dict, truth: dict) -> int:
    """Return the number of lights that we would observe."""
    correct = 0
    for p in guess:
        if guess[p] == truth[p]:
            correct += 1
    # Adjust for double counting
    return int(correct / 2)
920b8305118293bceffbffad423cb32001c8862a
29,955
import os
import stat
import shutil


def copyTree(src, dst, excludeExt=[], renameDict={}, renameExtDict={}, includeExt=None):
    """
    Copy a directory to another one, overwriting files if necessary.
    copy_tree from distutils and copytree from shutil fail on Windows
    (in particular on git files).

    INPUTS:
      - src: source directory
      - dst: destination directory where files will be written/overwritten
      - includeExt: if provided, list of file extensions used for the copy
      - excludeExt: if provided, list of file extensions to be excluded from the copy
      - renameDict: dictionary used to rename files (the key is replaced by the value)
      - renameExtDict: dictionary used to rename extensions (the key is replaced by the value)
    """
    def forceMergeFlatDir(srcDir, dstDir):
        if not os.path.exists(dstDir):
            os.makedirs(dstDir)
        for item in os.listdir(srcDir):
            srcFile = os.path.join(srcDir, item)
            dstFile = os.path.join(dstDir, item)
            forceCopyFile(srcFile, dstFile)

    def forceCopyFile(sfile, dfile):
        # ---- Handling error due to wrong mod
        if os.path.isfile(dfile):
            if not os.access(dfile, os.W_OK):
                os.chmod(dfile, stat.S_IWUSR)
        shutil.copy2(sfile, dfile)

    def isAFlatDir(sDir):
        for item in os.listdir(sDir):
            sItem = os.path.join(sDir, item)
            if os.path.isdir(sItem):
                return False
        return True

    if includeExt is not None and len(excludeExt) > 0:
        raise Exception('Provide includeExt or excludeExt, not both')

    for item in os.listdir(src):
        filebase, ext = os.path.splitext(item)
        if ext in excludeExt:
            continue
        if includeExt is not None:
            if ext not in includeExt:
                continue
        s = os.path.join(src, item)
        if item in renameDict.keys():
            item = renameDict[item]  # renaming filename based on rename dict
        if ext in renameExtDict.keys():
            item = filebase + renameExtDict[ext]  # changing extension based on rename ext dict
        d = os.path.join(dst, item)
        if os.path.isfile(s):
            if not os.path.exists(dst):
                os.makedirs(dst)
            forceCopyFile(s, d)
        if os.path.isdir(s):
            isRecursive = not isAFlatDir(s)
            if isRecursive:
                copyTree(s, d)
            else:
                forceMergeFlatDir(s, d)
55a73050864e2bd0b368ea5f3b3266076971c37b
29,956
def getlambda(pixel, lo, hi):
#-----------------------------------------------------------------------------
    """
    Small utility to calculate lambda on a line for given position in pixels
    """
#-----------------------------------------------------------------------------
    if pixel is None:
        return 0.5
    delta = hi - lo
    if delta == 0.0:
        return 0.5
    return (pixel - lo) / delta
6245ff3ec09db39d913175e58b9cec08888aa66e
29,957
def array_pyxll_function_1(x):
    """returns the sum of a range of floats"""
    total = 0.0
    # x is a list of lists - iterate through the rows:
    for row in x:
        # each row is a list of floats
        for element in row:
            total += element
    return total
8ebd50b6025f59cd2c161fea79e7eb47d0e76337
29,958
def sort_by_name(dicts):
    """
    Sorting of a list of dicts. The sorting is based on the name field.

    Args:
        dicts: The list of dicts to sort.

    Returns:
        Sorted list.
    """
    return sorted(dicts, key=lambda k: k.get("name", "").lower())
3364995d2cbc55e87a6f60d054861eadec9e0dda
29,959
import re


def alphanum_string(input_string):
    """
    Removes all non-alphanumeric characters from the given string.
    """
    pattern = re.compile(r'[\W_]+')
    return pattern.sub('', input_string)
3e27c52b02d85c3374a82d5cc909b8a305a8c67a
29,961
def getConstants():
    """Returns a list of constants for NXT brick."""
    out = []
    api = __import__('api')
    for constant in dir(api):
        if constant[0].isupper():
            id = getattr(api, constant)
            if type(id).__name__ not in ["function", "type"]:
                out.append(constant)
    return out
c99053cba5d1e88b028e6b62e9b2c2d68bb00d42
29,964
def preprocess_doi(line):
    """
    Removes doi.org prefix if full URL was pasted, then strips unnecessary slashes
    """
    (_, _, doi) = line.rpartition('doi.org')
    return doi.strip('/')
55badcf6fb55d19dcdcaba0d6ece706978001d7f
29,965
def point_in_rectangle(point, rect_top_left, rect_sides):
    """
    Checks if point is in rectangle

    Parameters
    ----------
    point : (float, float)
        (x,y) coordinates of point
    rect_top_left : (float, float)
        (x,y) coordinates of rectangle top left corner
    rect_sides : (float, float)
        (x,y) lengths of rectangle sides

    Returns
    -------
    bool
        True if point is in rectangle, otherwise False.
    """
    return rect_top_left[0] < point[0] < rect_top_left[0] + rect_sides[0] and \
        rect_top_left[1] < point[1] < rect_top_left[1] + rect_sides[1]
49fe7980f32d4716e38b2591bbfe8d1d8910c11e
29,968
import requests


def zone_headers(zone, date):
    """Get query headers for querying intra-zone flows"""
    area_map = {
        'JP-HKD': 1, 'JP-TH': 2, 'JP-TK': 3, 'JP-CB': 4, 'JP-HR': 5,
        'JP-KN': 6, 'JP-CG': 7, 'JP-SK': 8, 'JP-KY': 9, 'JP-ON': 10
    }
    payload = {
        'ajaxToken': '',
        'areaCdAreaSumNon': '{:02d}'.format(area_map[zone]),
        'daPtn': '00',
        'daPtn1': '187',
        'daPtn2': '275',
        'daPtn3': '220',
        'daPtn4': '187',
        'daPtn5': '132',
        'downloadKey': '',
        'fwExtention.actionSubType': 'headerInput',
        'fwExtention.actionType': 'reference',
        'fwExtention.formId': 'CB01S020P',
        'fwExtention.jsonString': '',
        'fwExtention.pagingTargetTable': '',
        'fwExtention.pathInfo': 'CB01S020C',
        'fwExtention.prgbrh': '0',
        'msgArea': '',
        'requestToken': '',
        'requestTokenBk': '',
        'searchReqHdn': '',
        'table1.currentPage': '',
        'table1.dispRowNum': '200',
        'table1.endIndex': '',
        'table1.nextPageStartIndex': '',
        'table1.pagingMode': 'editable',
        'table1.rows[0].rowParams.originRowIndex': '0',
        'table1.rows[0].rowParams.rowAddUpdate': '',
        'table1.rows[0].rowParams.rowDelete': '',
        'table1.rows[0].rowParams.rowNum': '',
        'table1.rows[0].rowParams.selected': '',
        'table1.startIndex': '',
        'table1.tableIdToken': '',
        'table1.totalPage': '',
        'tgtAreaHdn': '',
        'tgtDaHdn': '',
        'tgtNngp': date,
        'tgtNngpHdn': '',
        'transitionContextKey': 'DEFAULT',
        'updDaytime': ''
    }
    with requests.Session() as s:
        r = s.get('http://occtonet.occto.or.jp/public/dfw/RP11/OCCTO/SD/LOGIN_login')
        r2 = s.post('http://occtonet.occto.or.jp/public/dfw/RP11/OCCTO/SD/CB01S020C'
                    '?fwExtention.pathInfo=CB01S020C&fwExtention.prgbrh=0',
                    cookies=s.cookies, data=payload)
        r2.encoding = 'utf-8'
        headers = r2.text
        headers = eval(headers.replace('false', 'False').replace('null', 'None'))
    return headers
01fd230dbb06cb465df1ef2cf88b79a0b4634686
29,969
def do_default(default_value=u'', boolean=False):
    """
    If the value is undefined it will return the passed default value,
    otherwise the value of the variable:

    .. sourcecode:: jinja

        {{ my_variable|default('my_variable is not defined') }}

    This will output the value of ``my_variable`` if the variable was
    defined, otherwise ``'my_variable is not defined'``. If you want
    to use default with variables that evaluate to false you have to
    set the second parameter to `true`:

    .. sourcecode:: jinja

        {{ ''|default('the string was empty', true) }}
    """
    def wrapped(env, context, value):
        if (boolean and not value) or value in (env.undefined_singleton, None):
            return default_value
        return value
    return wrapped
433615685cb9edcaff2ae435bf2e71d4ffd68158
29,970
def merge_config(config, environment):
    """
    :param config: Dictionary
    :param environment: Dictionary
    :return: Dictionary
    """
    keys = ['HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY']
    for key in keys:
        if config.get(key.lower(), '') == '' and \
                config.get(key, '') == '':
            value = environment.get(key) if environment.get(key, '') != '' \
                else environment.get(key.lower(), '')
            if value != '':
                config[key] = value
                config[key.lower()] = value
    # Normalize
    for key in keys:
        value = config.get(key) if config.get(key, '') != '' \
            else config.get(key.lower(), '')
        config[key] = value
        config[key.lower()] = value
    return config
4fabb193fc50241cb22ea1e30bcce0de7361c611
29,971
def prefix_dash_dash(params):
    """ Add -- for keys in gdmix tfjob params. """
    if isinstance(params, dict):
        newParams = {}
        for k, v in params.items():
            newParams["--{}".format(k)] = v
        return newParams
    else:
        raise ValueError("job params can only be dict")
85170ccf0adeb83718394c572949a420108d0e03
29,972
import importlib


def get_from_module(module, attr_name):
    """
    Return a reference from a module. The attribute name must exist in the
    module, it could be a variable, a callable or a class.

    If the reference name doesn't exist in the module it will return None.

    Example:
    >>> get_from_module("my.module", "my_object")
    >>> get_from_module("my.module", "my_function")
    >>> get_from_module("my.module", "MyClass")

    :param basestring module: The module name.
    :param basestring attr_name: What should be returned from the module.
    :return: The value resolved by the module and attr name provided or None.
    """
    module = importlib.import_module(module)
    try:
        return getattr(module, attr_name)
    except AttributeError:
        return None
b4e95ee167d71035241bc0df2ab87c25bb45b4ac
29,973
def get_equipments():  # noqa: E501
    """gets information on all recorded test equipment

    Returns information on all test equipment.  # noqa: E501

    :rtype: List[Equipment]
    """
    return 'do some magic!'
c3a7e0232ecb2b624971f29774ae1576494daaed
29,974
def get_S_ref_comp_out(S_ref_comp_in):
    """Compressor discharge specific entropy (Eq. 4).

    The compressor discharge specific entropy is taken to be equal to the
    compressor suction specific entropy.

    Args:
        S_ref_comp_in(float): compressor suction specific entropy (kJ/kg·K)

    Returns:
        float: compressor discharge specific entropy (kJ/kg·K)
    """
    return S_ref_comp_in
5aacd8d35d1039f28fa30b3102d7f3823206b9c2
29,975
import math


def split_line_geometry(linegeometry, split_value, split_method="LENGTH", best_fit_bool=True):
    """This function will take an ArcPolyline, a split value, a split method of
    either 'LENGTH' or 'SEGMENT COUNT', and a boolean that determines if the lines
    split are the best fit based on the length. The function returns a list of line
    geometries whose length and number are determined by the split value, split
    method, and best fit settings.

    Line geometry - arc polyline / split value - the length or desired number of
    segments / split method - determines if split value is treated as a length
    target or segment count target / best fit bool - determines if the length is
    rounded to be segments of equal length."""
    segment_list = []
    line_length = float(linegeometry.length)
    if str(split_method).upper() == "LENGTH" and not best_fit_bool:
        segment_total = int(math.ceil(line_length / float(split_value)))
        for line_seg_index in range(0, segment_total):
            start_position = (line_seg_index * (int(split_value)))
            end_position = (line_seg_index + 1) * int(split_value)
            seg = linegeometry.segmentAlongLine(start_position, end_position)
            segment_list.append(seg)
    else:
        segmentation_value = int(round(max([1, split_value])))
        if str(split_method).upper() == "LENGTH" and best_fit_bool:
            segmentation_value = int(max([1, round(line_length / float(split_value))]))
        for line_seg_index in range(0, segmentation_value):
            seg = linegeometry.segmentAlongLine(
                (line_seg_index / float(segmentation_value)),
                ((line_seg_index + 1) / float(segmentation_value)), True)
            segment_list.append(seg)
    return segment_list
9c720ca4211e80fae6e3a1b107b888b02fe6f7b9
29,976
import os


def list_files(folder):
    """
    :type folder: str
    :rtype: list of str
    """
    file_list = []
    for file_name in os.listdir(folder):
        full_file_name = os.path.join(folder, file_name)
        if os.path.isdir(full_file_name):
            file_list.extend(list_files(full_file_name))
        else:
            file_list.append(full_file_name)
    file_list.sort()
    return file_list
3a0819e4f31bf1b2f7aefd64662cf6c993069800
29,977
def nullWrapper(message):
    """
    This method returns the message that was sent to it. It is used in
    situations where you just want to post the raw text to the log.
    """
    return message
7db3e8791ed8399856fefdcf9fece2466ddbcecd
29,978
import torch


def get_nmse(x_hat, x):
    """
    Calculate ||x_hat - x|| / ||x||
    """
    sse = torch.sum((x_hat - x)**2, dim=[1, 2, 3])  # shape [N] - sse per image
    denom = torch.sum(x**2, dim=[1, 2, 3])  # shape [N] - squared l2 norm per ground truth image
    nmse_val = sse / denom
    return nmse_val.cpu().numpy().flatten()
edda096ebc8d8932cde975a07e579c2cfca8051f
29,979
def update(playbook_configuration_id, global_vars, inventory, name, model,
           client, **kwargs):
    """Updates playbook configuration.

    Since playbook configuration is complex, there are the rules on update:

    \b
    1. If 'model' or '--edit-model' field is set, it will be used for update.
    2. If not, options will be used.
    3. If '--global-vars' is set, it will be used. Otherwise, patch will be
       applied for model dictionary.
    4. If '--inventory' is set, it will be used. Otherwise, patch will be
       applied for model dictionary.
    """
    if not model:
        model = client.get_playbook_configuration(playbook_configuration_id)
    if name is not None:
        model["data"]["name"] = name
    if global_vars is not None:
        model["data"]["configuration"]["global_vars"] = global_vars
    if inventory is not None:
        model["data"]["configuration"]["inventory"] = inventory
    return client.update_playbook_configuration(model)
60f5216eab26fa9ac1072386725a810d13cc64b6
29,980
def convert_to_table(header, rows):
    """
    Create an HTML table out of the sample data.

    Args:
        header (str): The table header
        rows (List[str]): A list of rows as strings

    Returns:
        A dataset sample in the form of an HTML <table>
    """
    header_html = '<tr><th>{}</th></tr>'.format('</th><th>'.join(header))
    rows_html = ['<tr><td>{}</td></tr>'.format('</td><td>'.join(row))
                 for row in rows]
    if rows:
        table = '<table>{header}\n{rows}</table>'.format(
            header=header_html, rows='\n'.join(rows_html))
    else:
        table = None
    return table
cf7c861015135b940d2a011da106f40d0aa31ba5
29,982
def looksLikeVestLutFile(path):
    """Returns ``True`` if the given ``path`` looks like a VEST LUT file,
    ``False`` otherwise.
    """
    with open(path, 'rt') as f:
        lines = []
        for i in range(10):
            line = f.readline()
            if line is None:
                break
            else:
                lines.append(line.strip())
    validHeaders = ('%!VEST-LUT', '%BeginInstance', '%%BeginInstance')
    return len(lines) > 0 and lines[0] in validHeaders
c1e8006b6b81d949d353f773d2a41164c26eec98
29,984
from typing import Dict
import csv


def load_subjects(csvfile: str) -> Dict:
    """Load a list of subjects from a csv file along with metadata

    Subject,Age,Gender,Acquisition,Release
    195041,31-35,F,Q07,S500
    ...

    Return a dictionary with Subjects as keys and Age as the value
    """
    result: Dict = {}
    with open(csvfile, 'r', encoding='utf-8-sig') as fd:
        reader: csv.DictReader = csv.DictReader(fd)
        for row in reader:
            if 'Age' in row:
                result[row['Subject']] = {'age': row['Age'],
                                          'gender': row['Gender']}
            else:
                result[row['Subject']] = {}
    return result
e9249ce7a04869e6b565238aa06c65063becb361
29,985
import torch


def fedavg(baseline_weights, local_deltas_updates, num_samples_list, server_lr=1):
    """
    Server aggregation with learning rate of alpha.
    """
    avg_update = [
        torch.zeros(x.size())  # pylint: disable=no-member
        for _, x in local_deltas_updates[0]
    ]
    total_samples = sum(num_samples_list)
    for i, update in enumerate(local_deltas_updates):
        for j, (_, delta) in enumerate(update):
            # Use weighted average by number of samples
            avg_update[j] += delta * (num_samples_list[i] / total_samples)
    # Load updated weights into model
    updated_weights = []
    for i, (name, weight) in enumerate(baseline_weights):
        updated_weights.append((name, weight + server_lr * avg_update[i]))
    return updated_weights
e983689ac4a7b47ab9380ec6a2f01662c5bbad47
29,986
def _mai(a: int, n: int) -> int:
    """Modular Additive Inverse (MAI) of a mod n."""
    return (n - a) % n
634af47425e9bd3afec12742782755a3f0f4ac1f
29,987
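A worked check of _mai (hypothetical values):

print(_mai(3, 10))  # 7, since (3 + 7) % 10 == 0
print(_mai(0, 10))  # 0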
def filter_obs(filt):
    """Observes the specified coordinates"""
    def inner(x):
        if len(x.shape) == 4:
            return x[:, :, :, filt]
        elif len(x.shape) == 3:
            return x[:, :, filt]
        else:
            return x[:, filt]
    return inner
6ce49337a845fd0c4f360310be5f707076522fcc
29,990
import subprocess
import pathlib


def get_changed_files():
    """
    Return pathlib.Paths for files changed in the last git commit.
    """
    args = ['git', 'diff', '--name-only', 'HEAD~1']
    paths = subprocess.check_output(args, text=True)
    paths = paths.splitlines()
    paths = {pathlib.Path(path) for path in paths}
    return paths
c87cce0693ba6bf28991c9496a944caa210efdd7
29,991
import re


def get_model_presets(config, model):
    """Collect all the parameters of a model from the UI and return them as a dict.

    Args:
        config (dict): Recipe config dictionary obtained with
            dataiku.customrecipe.get_recipe_config().
        model (str): Model name found in the UI.

    Returns:
        Dictionary of model parameters to be used as kwargs in gluonts Predictor.
    """
    model_presets = {}
    matching_key = f"{model}_model_(.*)"
    for key in config:
        key_search = re.match(matching_key, key, re.IGNORECASE)
        if key_search:
            key_type = key_search.group(1)
            model_presets.update({key_type: config[key]})
    return model_presets
492079ef55f7d1c3a621f2228f1e22b14765575d
29,992
def stay_on_track(params):
    """
    Example of rewarding the agent to stay inside the two borders of the track
    """
    # Read input parameters
    all_wheels_on_track = params['all_wheels_on_track']
    distance_from_center = params['distance_from_center']
    track_width = params['track_width']
    # Give a very low reward by default
    reward = 1e-3
    # Give a high reward if no wheels go off the track and
    # the agent is somewhere in between the track borders
    if all_wheels_on_track and (0.5 * track_width - distance_from_center) >= 0.05:
        reward = 1.0
    # Always return a float value
    return float(reward)
fd8d94e4895a8129d559c10120a477987f9f0ccc
29,994
def _get_name(index, hdf5_data):
    """Retrieves the image file name from hdf5 data for a specific index.

    Args:
        index (int): Index of image.
        hdf5_data (obj): h5py file containing bounding box information.

    Returns:
        (str): Image file name.
    """
    ref = hdf5_data['/digitStruct/name'][index, 0]
    file_name = ''.join([chr(item) for item in hdf5_data[ref][:]])
    return file_name
7b65d6f6aede25265865734dae6a94def8b3524f
29,995
import re
from datetime import datetime
import time


def extract_date(str_date):
    """Find the first %Y-%m-%d string and return the datetime
    and the remainder of the string
    """
    rgx = re.compile(r'\d{4}-\d{2}-\d{2}')
    o_match = rgx.search(str_date)
    if o_match is not None:
        i_start = o_match.start()
        i_end = i_start + 10
        return (datetime(
            *(time.strptime(str_date[i_start:i_end], "%Y-%m-%d")[0:6])),
            str_date[0:i_start] + str_date[i_end:])
    else:
        return (None, str_date)
3f3407490eec4e3d65e289b5e2ebef3246c9c63f
29,996
import operator


def freeze_dict(dict_):
    """Freezes ``dict`` into ``tuple``.

    A typical usage is packing ``dict`` into hashable.

    e.g.::

        >>> freeze_dict({'a': 1, 'b': 2})
        (('a', 1), ('b', 2))
    """
    pairs = dict_.items()
    key_getter = operator.itemgetter(0)
    return tuple(sorted(pairs, key=key_getter))
0694f264419bb426597bc1fe6a4d0a7d3ae89fc5
29,997
from functools import reduce


def multiply(*xs):
    """
    >>> multiply(1, 2, 3)
    6
    """
    return reduce(lambda x, y: x * y, xs)
c2de9a74f174cbc8d36923247bb12a2dd2d028d4
29,998
def expand_evar(s, env, expr):
    """search through a string to find any environment variable
    expressions and expand into s"""
    vars = expr.findall(s)
    for v in vars:
        vname = v[2:-1]
        if vname in env:
            s = s.replace(v, env[vname])
        else:
            pass
    return s
eeeff89c73441f07d1ce0bf8a999146e5030eb96
29,999
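A usage sketch for expand_evar. The regex and environment here are hypothetical; the function only assumes that each match looks like ${NAME}, since it strips the first two and last characters to get the variable name:

import re

expr = re.compile(r'\$\{\w+\}')  # matches ${NAME}-style expressions
env = {"HOME": "/home/me"}
print(expand_evar("path=${HOME}/bin, keep=${UNSET}", env, expr))
# path=/home/me/bin, keep=${UNSET}  (unknown variables are left untouched)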
import pandas


def table_sort(df: pandas.DataFrame, sort_fun) -> pandas.DataFrame:
    """
    Sort a table.

    :param df: table to be sorted
    :param sort_fun: sorting algorithm
    :return: the sorted table
    """
    return sort_fun(df)
07bce9a08b7c04a78d4eaa8796c7c50cef22b698
30,000
from collections import Counter


def password_philosophy(password_record):
    """Validates passwords according to the rules"""
    valid_1 = 0
    valid_2 = 0
    for line in password_record:
        prefix, suffix = line.split("-")
        min_count = int(prefix)
        max_count = int(suffix[:suffix.find(" ")])
        letter = suffix[suffix.find(" ") + 1:suffix.find(":")]
        input_password = suffix.split(":")[1].strip()
        letter_count = Counter(input_password)
        # validate by the first part's rules
        if min_count <= letter_count[letter] <= max_count:
            valid_1 += 1
        # validate by the second part's rules
        first_index = min_count - 1
        second_index = max_count - 1
        first_pos_contains_index = input_password[first_index] == letter
        second_pos_contains_index = input_password[second_index] == letter
        if first_pos_contains_index != second_pos_contains_index:
            valid_2 += 1
    part_1 = valid_1
    part_2 = valid_2
    return part_1, part_2
17966486349651aca9d9a13c942a2b2d387b2728
30,002