content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import psutil


def GetAvailableMemoryMb():
    """Return the available physical memory in megabytes.

    Bug fix: the original returned ``psutil.virtual_memory().total`` (the
    machine's total RAM) even though the function name and docstring promise
    the *available* memory; use the ``available`` field instead.
    """
    return int(psutil.virtual_memory().available / 1e6)
7c8ccd753c92548eb7acc79c49585a93e2bc6470
41,923
def delete_com_line(line: str) -> str:
    """Return *line* with any trailing ``//`` comment removed."""
    code, _sep, _comment = line.partition("//")
    return code
e75d74f68222d35e1231a3725d4a34c1d3e768ad
41,924
def flatten_image(img):
    """
    Flatten an (m, n) numpy array into a 1-D array of length m * n.
    """
    flat_len = img.shape[0] * img.shape[1]
    return img.reshape(1, flat_len)[0]
d97d070d9f827f57c79a6714e10d7b4cb8e06a46
41,929
def non_step(func):
    """A decorator which prevents a method from automatically being wrapped
    as a infer_composite_step by RecipeApiMeta.

    This is needed for utility methods which don't run any steps, but which
    are invoked within the context of a defer_results().

    @see infer_composite_step, defer_results, RecipeApiMeta
    """
    assert not hasattr(func, "_skip_inference"), \
        "Double-wrapped method %r?" % func
    # Marker attribute later read by RecipeApiMeta to skip wrapping.
    func._skip_inference = True  # pylint: disable=protected-access
    return func
f257a882fd5cf9b92e786153ba2a524ead75745e
41,933
def _convert_to_absolute_uris(request, urls_obj):
    """Rewrite every value of *urls_obj* in place to an absolute URI.

    Returns the same (mutated) mapping for convenience.
    """
    for url_name in urls_obj:
        urls_obj[url_name] = request.build_absolute_uri(urls_obj[url_name])
    return urls_obj
970966e1a16dc5d990cd1a25a37aa426bf4899ad
41,934
def normalize_func(get_iter):
    """Convert the values produced by *get_iter* into percentages of their sum.

    :param get_iter: zero-argument callable returning a fresh iterator; it is
        invoked twice (once for the total, once for the values).
    :return: list of percentages
    """
    total = sum(get_iter())
    return [100 * value / total for value in get_iter()]
2dc3189a4cae102cf8db950869c058415828723b
41,936
def count_calls(results):
    """count number of "call" nodeid's in results"""
    return sum(1 for nodeid in results.keys() if nodeid[1] == 'call')
e3ad7fa2fb8577fff615e55514afbcbe1af0ac9d
41,937
import typing


def exclude_header(
    headers: typing.Sequence[str], exclude: typing.Set[str]
) -> typing.Sequence[typing.Optional[str]]:
    """
    Exclude header.

    Exclude columns from the header by replacing each excluded entry
    with None.

    :param headers: headers
    :param exclude: columns to be excluded
    :returns: header with excluded columns replaced by None
    """
    return [column if column not in exclude else None for column in headers]
29d432abcae17848c42b6d238b744157a51ba133
41,940
import os


def list_vassals_configs(directory):
    """Find all *.ini files directly inside the given directory.

    :param directory: file system path containing *.ini files
    :returns: list of paths (directory joined with each matching filename)
    """
    return [os.path.join(directory, name)
            for name in os.listdir(directory)
            if name.endswith('.ini')]
45b7e90c80f89f46419b671b001230305314dee0
41,941
import itertools


def group_by(it, n):
    """Group consecutive items of *it* into n-tuples.

    >>> list(group_by([1, 2, 3, 4], 2))
    [(1, 2), (3, 4)]

    Bug fix: ``itertools.tee(it)`` defaults to making only *two* independent
    iterators, so the original produced wrong results for any n != 2; tee
    must be told to make n copies.  Trailing items that do not fill a whole
    tuple are dropped (zip stops at the shortest iterator), matching the
    original n == 2 behaviour.
    """
    return zip(*[itertools.islice(copy, start, None, n)
                 for start, copy in enumerate(itertools.tee(it, n))])
1bef08ea01449e2efcfa50d1ad3f7a36e2f5ee0c
41,943
def get_locus_blocks(glstring):
    """Split a GL String on '^' and return the list of locus blocks."""
    delimiter = '^'
    return glstring.split(delimiter)
8981ca07549544f26139c11c21394979658abe2a
41,944
import re


def is_instance_type_format(candidate):
    """Return whether *candidate* looks like an instance type.

    An instance type starts with lowercase letters/digits/hyphens followed
    by a dot, e.g. ``t2.micro``.
    """
    return re.match(r"([a-z0-9\-]+)\.", candidate) is not None
53568168301800776b5c7f1b39bbeddc8dc0ca0f
41,946
import os


def enforce_file_extension(file, extension):
    """ Returns the given string (file name or full path) with the given
    extension.

    If the name has no extension, the extension is appended; if it has a
    different one, the final extension is replaced; if it already ends in
    the given extension the unchanged string is returned.

    Bug fix: the original split on the *first* '.' in the whole string, so a
    path with a dot in a directory component (e.g. './dir/file') or a
    multi-dot name ('archive.tar.gz') was mangled.  os.path.splitext
    operates on the final extension of the basename only.

    Parameters
    ----------
    file: File name or full path. Can include a file extension .*
    extension: File extension the given file name will be returned with
        (with or without the leading dot).

    Returns
    -------
    The given file with the given extension.
    """
    if not extension.startswith('.'):
        extension = '.' + extension
    root, current = os.path.splitext(file)
    if current == extension:
        return file
    return root + extension
021d69bcc1d4cf3f3fc500e9c8418c60b8c99d9f
41,947
import re


def validate_password(password):
    """ Checks if the password has the right format.

    Returns a list of the requirements that are not met (empty when the
    password is acceptable).
    """
    not_fulfilled = []
    try:
        password.encode(encoding='utf-8').decode('ascii')
    except UnicodeDecodeError:
        not_fulfilled.append("Password contains illegal characters. ")
    rules = (
        (len(password) >= 6,
         "Password must be 6 characters or longer. "),
        (re.search("[a-z]", password) is not None,
         "Password must contain at least one lowercase letter. "),
        (re.search("[A-Z]", password) is not None,
         "Password must contain at least one uppercase letter. "),
        (re.search("[0-9]", password) is not None,
         "Password must contain at least one number. "),
    )
    for satisfied, message in rules:
        if not satisfied:
            not_fulfilled.append(message)
    # If the password should not contain spaces
    # if re.search("\s", password):
    return not_fulfilled
62ad39abc33b4ad5f459b83d23b5c13e5be0c1a9
41,951
def sms_is_limited(msg):
    """
    :param msg: <str>
    :return: <bool> True if msg contains non-ASCII (ord > 127) characters,
        else False
    """
    return any(ord(character) > 127 for character in msg)
7df5eaa39f25bb71dc8688515d8f628332c9c157
41,952
def merge(list1, list2):
    """
    Merge two sorted lists.

    Returns a new sorted list containing all of the elements that are in
    either list1 or list2.  Stable: on ties, elements of list1 come first.
    """
    merged = []
    i = j = 0
    len1, len2 = len(list1), len(list2)
    while i < len1 and j < len2:
        if list1[i] <= list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # At most one of these remainder slices is non-empty.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
57d2bf358d6d0d3353e4b3953e2ddce3c2d28050
41,953
def remove_unknown_COB(df):
    """ Drop rows with unknown country of birth, then relabel COB as
    Australian-born vs Overseas-born.

    When estimating the share of migrants, ABS removes those without a
    country of birth; this is equivalent to assuming the missing COB
    information is uniformly distributed across Aus. born and OSB.

    Parameters
    ----------
    df : pandas DataFrame with a COB column

    Returns
    -------
    A new DataFrame (copy) with COB relabelled.
    """
    # Remove unknown COB observations
    unknown = df.COB.isin(
        ["Inadequately Described / Born at Sea", "Not stated"])
    df = df.loc[~unknown].copy()
    # Rename - Australia and OS born
    aus_born = df.COB == "Australia (includes External Territories)"
    df.loc[aus_born, "COB"] = "Australian-born"
    df.loc[~aus_born, "COB"] = "Overseas-born"
    return df
a8a410dac5addf788ac864a3bd276ed2b68e5a6a
41,955
def read_key_words(file):
    """Read the list of keywords in *file*, one keyword per line.

    Bug fix: the original called ``open()`` without ever closing the handle,
    leaking the file descriptor until garbage collection; a ``with`` block
    closes it deterministically.
    """
    with open(file) as handle:
        return [line.rstrip('\n') for line in handle]
337bac4b6c2eac8a36ec372ea90c8bcab8218ed9
41,956
def HumanLatToFracDeg(latDM):
    """Convert a human friendly latitude string to signed fractional degrees.

    *latDM* has the form latDegXlatMin where X is N (positive) or S
    (negative), latDeg is integer degrees [0, 90] and latMin is fractional
    minutes [0.0, 60.0).  Returns latitude in [-90, 90], positive = North.
    Does not handle wrapping over the poles.

    Raises ValueError when neither hemisphere letter is present.
    """
    latDM = latDM.upper()
    for hemisphere, sign in (('N', 1), ('S', -1)):
        if hemisphere in latDM:
            degrees, minutes = latDM.split(hemisphere)
            return sign * (int(degrees) + (float(minutes) / 60.0))
    raise ValueError("Bad format for latitude '{0}'".format(latDM))
a8cfde6a040415292c37d46d3371b2169175f5bd
41,957
import torch


def quaternion_to_angle_axis(quaternion, eps=1e-6):
    """Convert quaternion vector to angle axis of rotation

    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h

    Args:
        quaternion (Tensor): batch with quaternions; index 0 is used as the
            scalar (w) part below, so (w, x, y, z) ordering is expected.
        eps (float): threshold under which sin^2(theta/2) is treated as zero.

    Return:
        Tensor: batch with angle axis of rotation

    Shape:
        - Input: :math:`(N, 4)`
        - Output: :math:`(N, 3)`

    Example:
        >>> input = torch.rand(2, 4)  # Nx4
        >>> output = tgm.quaternion_to_angle_axis(input)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    input_shape = quaternion.shape
    # Accept a single quaternion of shape (4,) by promoting it to (1, 4);
    # the singleton batch dim is squeezed away again before returning.
    if len(input_shape) == 1:
        quaternion = torch.unsqueeze(quaternion, dim=0)
    assert quaternion.size(1) == 4, 'Input must be a vector of length 4'
    # Normalize so the vector part (q1, q2, q3) has magnitude sin(theta/2).
    normalizer = 1 / torch.norm(quaternion, dim=1)
    q1 = quaternion[:, 1] * normalizer
    q2 = quaternion[:, 2] * normalizer
    q3 = quaternion[:, 3] * normalizer
    sin_squared = q1 * q1 + q2 * q2 + q3 * q3
    # mask selects rotations with non-negligible angle; the *_pos/*_neg
    # float masks blend the two branches below without boolean indexing.
    mask = (sin_squared > eps).to(sin_squared.device)
    mask_pos = (mask).type_as(sin_squared)
    mask_neg = (mask == False).type_as(sin_squared)  # noqa
    batch_size = quaternion.size(0)
    angle_axis = torch.zeros(
        batch_size, 3, dtype=quaternion.dtype).to(
        quaternion.device)
    sin_theta = torch.sqrt(sin_squared)
    cos_theta = quaternion[:, 0] * normalizer
    # When cos(theta/2) < eps, atan2 of the negated pair keeps theta in a
    # numerically stable range.
    mask_theta = (cos_theta < eps).view(1, -1)
    mask_theta_neg = (mask_theta).type_as(cos_theta)
    mask_theta_pos = (mask_theta == False).type_as(cos_theta)  # noqa
    theta = torch.atan2(-sin_theta, -cos_theta) * mask_theta_neg \
        + torch.atan2(sin_theta, cos_theta) * mask_theta_pos
    two_theta = 2 * theta
    # NOTE(review): when sin_theta == 0, k_pos is 0/0 = NaN, and NaN * 0 in
    # the blend below still propagates NaN — an identity quaternion may yield
    # NaN output. Confirm callers guard against this.
    k_pos = two_theta / sin_theta
    k_neg = 2.0
    k = k_neg * mask_neg + k_pos * mask_pos
    angle_axis[:, 0] = q1 * k
    angle_axis[:, 1] = q2 * k
    angle_axis[:, 2] = q3 * k
    if len(input_shape) == 1:
        angle_axis = angle_axis.squeeze(0)
    return angle_axis
4073f3338e50f9be68c0ddb948473646d03a029c
41,958
def bk_nearest_neighbor_search(search_string, tree):
    """ Search tree for nearest matches to supplied string.

    Parameters
    ----------
    search_string : str
        search string
    tree : BKTree
        tree to search

    Return values
    -------------
    matches : list
        list of strings containing nearest matches from tree, where first
        item is distance from search_string to nearest matches
    """
    # Distance from the query to the root string seeds the search threshold.
    threshold = tree.root.distance_metric[tree.metric](search_string, tree.root.string)
    matches_dictionary = {}
    matches_dictionary[threshold] = []
    # recursive_nn_search mutates matches_dictionary in place — presumably
    # mapping each distance found to the tree strings at that distance
    # (defined elsewhere; confirm against BKTree implementation).
    tree.root.recursive_nn_search(search_string, threshold, matches_dictionary, tree.metric)
    # Result: the smallest recorded distance, followed by every string
    # recorded at that distance.
    matches = [sorted(matches_dictionary.keys())[0]]
    for value in matches_dictionary[matches[0]]:
        matches.append(value)
    return matches
b86f0663f711aea29abbb79e203ee74452d34207
41,959
def month_id(dte):
    """ Return an integer in the format YYYYMM.

    Argument:
    dte - the date to use in making the month id.
    """
    return dte.year * 100 + dte.month
ab5fcab351e7884b05803d0077d0eec473d08fa5
41,961
def DoSetIamPolicy(instance_ref, namespace, new_iam_policy, messages, client):
    """Sets IAM policy for a given instance or a namespace."""
    set_request = messages.SetIamPolicyRequest(policy=new_iam_policy)
    if not namespace:
        policy_request = messages.DatafusionProjectsLocationsInstancesSetIamPolicyRequest(
            resource=instance_ref.RelativeName(),
            setIamPolicyRequest=set_request)
        return client.projects_locations_instances.SetIamPolicy(policy_request)
    policy_request = messages.DatafusionProjectsLocationsInstancesNamespacesSetIamPolicyRequest(
        resource='%s/namespaces/%s' % (instance_ref.RelativeName(), namespace),
        setIamPolicyRequest=set_request)
    return client.projects_locations_instances_namespaces.SetIamPolicy(
        policy_request)
f1f63be4ade6a8d3578b6175074b43ff3f3116bf
41,962
def get_vpc_gateway_ip(cidr_block_formatting):
    """Get the VPC gateway IP.

    Args:
        cidr_block_formatting (string): cidr block format string, e.g.
            "10.0.{}.{}" (possibly with escaping backslashes)

    Returns:
        string: VPC gateway IP (third octet 0, fourth octet 1)
    """
    cleaned = cidr_block_formatting.replace("\\", "")
    return cleaned.format(0, 1)
34532eaee64a9b876f71384f5d548d62554205a2
41,963
def delete_snapshot(connection, snapshot_id=None):
    """Deletes a snapshot.

    * connection: An ec2 connection instance.
    * snapshot_id: ID of the snapshot to delete.
      (Doc fix: the original docstring documented a nonexistent
      ``volume_id`` parameter.)

    Returns True if deletion is successful.
    """
    return connection.delete_snapshot(snapshot_id)
702bfffaa7d69453613e42aefcb019963191b152
41,964
def sinal(u):
    """ Return the class for activation value u: 1 when u >= 0, else -1. """
    if u >= 0:
        return 1
    return -1
60fa8887e1975646b5e69d786a15a77df3e595b7
41,965
import threading


def _get_internal_name(name):
    """Returns the internal thread-local name of an attribute based on its id.

    For each specified attribute, we create an attribute whose name depends
    on the current thread's id to store a thread-local value for that
    attribute.  This method provides the name of that internal attribute.

    Args:
        name: str, name of the exposed attribute.

    Returns:
        str, name of the internal attribute storing the thread local value.
    """
    ident = threading.current_thread().ident
    return '__{}_{}'.format(name, ident)
522a686ab7aaca18dfa8e2ac667881b15a75fd32
41,967
import requests
import csv


def download_testing_data():
    """ Terrestrial Climate Change Resilience - ACE [ds2738]

    California Department of Natural Resources — For more information, see
    the Terrestrial Climate Change Resilience Factsheet at
    http://nrm.dfg.ca.gov/FileHandler.ashx?DocumentID=150836.

    Downloads the dataset CSV and returns it as a list of row dicts.
    """
    url='http://data-cdfw.opendata.arcgis.com/datasets/7c55dd27cb6b4f739091edfb1c681e70_0.csv'
    with requests.Session() as session:
        response = session.get(url)
    decoded = response.content.decode('utf-8')
    return list(csv.DictReader(decoded.splitlines(), delimiter=','))
4f0299286029b21ec060b3acf712db76e7b0e7d4
41,968
def iou(box1, box2):
    """ Calculates Intersection over Union for two bounding boxes
    (xmin, ymin, xmax, ymax); returns the IoU value.

    Bug fix: the original body was a stub that returned the global name
    ``iou`` (the function object itself) instead of computing anything.

    Returns 0.0 when the boxes do not overlap or the union is empty.
    """
    inter_xmin = max(box1[0], box2[0])
    inter_ymin = max(box1[1], box2[1])
    inter_xmax = min(box1[2], box2[2])
    inter_ymax = min(box1[3], box2[3])
    inter_w = max(0.0, inter_xmax - inter_xmin)
    inter_h = max(0.0, inter_ymax - inter_ymin)
    intersection = inter_w * inter_h
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - intersection
    return intersection / union if union > 0 else 0.0
04e192b6bd0de825d98c72a089496eb47bbe94b7
41,969
def gcd_step(a, b):
    """
    Performs a single step of the gcd algorithm.

    Example: gcd_step(1071, 462) == (2, 147) because 1071 == 2 * 462 + 147.

    Performance fix: the repeated-subtraction loop was O(a/b); divmod
    computes the same (quotient, remainder) pair in one step.
    """
    if a < b:
        # NOTE(review): returns (0, b) although the remainder of a / b for
        # a < b is a; preserved as-is since callers may rely on it — verify.
        return (0, b)
    return divmod(a, b)
02eedde8652a285c1b7af999d439c9fe77349d3f
41,970
def balance_phn_lengths(edit_ops, s_phns, d_phns):
    """Pad the source/dest phoneme lists in place with 'blank' entries so
    that every index referenced by edit_ops is valid.

    Returns the (mutated) pair (s_phns, d_phns).
    """
    for _, source_pos, dest_pos in edit_ops:
        source_shortfall = source_pos - (len(s_phns) - 1)
        if source_shortfall > 0:
            s_phns.extend(['blank'] * source_shortfall)
        dest_shortfall = dest_pos - (len(d_phns) - 1)
        if dest_shortfall > 0:
            d_phns.extend(['blank'] * dest_shortfall)
    return s_phns, d_phns
ec1c0aeb6ede9f54c8a8b7fc3a84611081268189
41,972
from typing import List


def findNumbers(self, nums: List[int]) -> int:
    """Count how many numbers in nums have an even number of digits.

    Generalization: the original hard-coded the digit ranges 10-99,
    1000-9999 and the single value 100000 (valid only for inputs up to
    10^5); counting digits of the decimal representation handles any
    magnitude.  The sign of a negative number is not counted as a digit.

    :type nums: List[int]
    :rtype: int
    """
    return sum(1 for num in nums if len(str(abs(num))) % 2 == 0)
d85f223dae6c02179a617074528544158b0b6b99
41,973
import os
import pickle as pickle


def saveMirrorDictionary(mirD, savName):
    """Creates pickle object of Model dictionary named savName.dict.

    After pickling, rewrites the file converting DOS line feeds (CRLF) to
    Unix (LF) for python 3.4+ importing.

    Bug fix: the original called ``f.close`` without parentheses, so the
    pickle file was never actually closed before being reopened; all file
    handles now use ``with`` blocks.

    Returns the final file name (savName + '.dict').
    """
    savName = savName + '.dict'
    print("Pickling Dictionary object...")
    with open(savName, "wb") as f:
        pickle.dump(mirD, f)
        # ensure file written to disk
        f.flush()
        os.fsync(f.fileno())
    # convert dos linefeeds (crlf) to unix (lf)
    with open(savName, 'rb') as infile:
        content = infile.read()
    outsize = 0
    with open(savName, 'wb') as output:
        for line in content.splitlines():
            outsize += len(line) + 1
            output.write(line + str.encode('\n'))
        # ensure file written to disk
        output.flush()
        os.fsync(output.fileno())
    print("Dictionary object pickled as '%s'" % savName)
    return savName
2d6d2da736db823f426c9cb99233d47dae8f4575
41,974
from typing import List


def list_shifted(l: List, num: int = 1) -> List:  # noqa
    """Shift all elements by a given amount (default: shift one to left)."""
    if num == 0:
        return l
    return l[num:] if num > 0 else l[:num]
d1ff0a66ac35026389980032863074cdbc566dc9
41,975
def generate_Q_data(Q):
    """Generate the text form of a problem.  Line endings are CR+LF.

    Parameters
    ==========
    Q : tuple
        return value of read_Q()

    Returns
    =======
    txt : str
        problem text data
    """
    crlf = '\r\n'  # DOS line ending
    size, block_num, block_size, block_data, block_type, num_lines = Q
    parts = ['SIZE %dX%d%s' % (size[0], size[1], crlf),
             'BLOCK_NUM %d%s' % (block_num, crlf)]
    for i in range(1, block_num + 1):
        parts.append(crlf)
        parts.append('BLOCK#%d %dX%d%s'
                     % (i, block_size[i][0], block_size[i][1], crlf))
        for row in block_data[i]:
            parts.append(','.join('+' if n == -1 else str(n) for n in row)
                         + crlf)
    return ''.join(parts)
70c3c1c0b2550a6805b1b2abedf0e5b115377e05
41,977
def get_feature_names(factors_path):
    """ Get feature names by reading the header of a factors csv.

    Args:
        factors_path: path to factors csv with names of features as header

    Returns:
        list(str): list of column names read from file
    """
    with open(factors_path) as handle:
        col_names = handle.readline().split(',')
    col_names[-1] = col_names[-1].strip('\n')
    # Skip first field if empty (result of to_csv(save_index=True))
    return col_names[1:] if not col_names[0] else col_names
0e224a610d9b4f072d76562b6efb0fb675eab5e0
41,978
import os


def get_download_type(download_path):
    """Get type and format of downloadable from pathname."""
    filename = os.path.split(download_path)[-1]
    if "/meningsmangder/" in download_path:
        return "corpus", "XML"
    if "/frekvens/" in download_path:
        return "token frequencies", "CSV"
    if "/pub/lmf/" in download_path:
        return "lexicon", "LMF"
    # Display filename when it carries an extension
    if "." in filename:
        return filename, filename.split(".")[-1]
    return "other", None
02c02756f5a87ee6282dabfeecbe4f436c278869
41,980
def references(resp_json, return_obj, options):
    """Extract bibliographic references from a Neotoma subquery response.

    Args:
        resp_json: decoded JSON response; publications are read from
            resp_json['data']['result'].
        return_obj: list to which one dict per publication is appended.
        options: unused here; kept for interface compatibility.

    Returns:
        return_obj with the parsed reference dicts appended.

    Cleanup: removed a large block of commented-out legacy parsing code.
    """
    pubs = resp_json.get('data')
    for rec in pubs.get('result', []):
        # Fields available directly in the record
        data = {'title': rec.get('title'),
                'year': rec.get('year'),
                'journal': rec.get('journal'),
                'doi': rec.get('doi'),
                'cite': rec.get('citation'),
                'page_range': rec.get('pages')}
        # Reference number (0 when the record lacks a publication id)
        data.update(ref_id='neot:ref:{0:d}'
                    .format(rec.get('publicationid', 0)))
        # Publication volume(number)
        if rec.get('issue') and rec.get('volume'):
            data.update(vol_no='{0:s} ({1:s})'.format(rec.get('volume'),
                                                      rec.get('issue')))
        else:
            data.update(vol_no=rec.get('volume'))
        # Not available directly in Neotoma
        data.update(kind=None, editor=None, authors=[])
        return_obj.append(data)
    return return_obj
16c9ed6f339c4621bf0d0afe2c0c20d828cc9986
41,981
def get_stereo_cymbal2_note(instrument=24, start=0.0, duration=0.5,
                            amplitude=30000, pitch=8.00, pan=0.7, fc=5333,
                            q=40.0, otamp=0.5, otfqc=1.5, otq=0.2, mix=0.2,
                            hit=None):
    """Build a Csound score line for the stereo cymbal instrument.

    ; Cymbal
    ;    Sta  Dur  Amp    Pitch  Pan  Fc    Q     OTAmp  OTFqc  OTQ  Mix
    i24  0.0  0.25 30000  8.00   0.7  5333  40.0  0.5    1.5    0.2  .2
    """
    fields = (instrument, start, duration, amplitude, pitch, pan,
              fc, q, otamp, otfqc, otq, mix)
    return "i" + " ".join(str(field) for field in fields)
e6a4747d87144cee9114600406fb10a34e70aec4
41,982
def _transitive_parenthood(key, term_dict):
    """Finds all parents, transitively, of `key` in `term_dict`.

    Does not include `key` itself in the set of parents, regardless of the
    type of relation.  This is left to the caller to decide.

    Args:
        key: Go Term, e.g. GO:0000001
        term_dict: Go term to set of parent go terms.

    Returns:
        Set of transitive parent go terms of `key`.
    """
    visited = set()
    frontier = set(term_dict[key])  # copy so we can pop destructively
    while frontier:
        current = frontier.pop()
        visited.add(current)
        frontier.update(
            parent for parent in term_dict[current] if parent not in visited)
    return visited
9c3aa32ad90d02965aad67ffa436b0d02af65ba4
41,984
def organize(iterable, key):
    """Group the elements of `iterable` by the value `key` returns for them.

    iterable - any iterable object (e.g. a list, or tuple)
    key - a function that takes items in iterable as inputs

    Example:
        organize([1, 2, 3], lambda x: x == 2)
        -> {False: [1, 3], True: [2]}
    """
    out = {}
    for item in iterable:
        out.setdefault(key(item), []).append(item)
    return out
19edcc50738a0247e57c0b7c4d1812e63000b7ef
41,985
def __getWindow(window_config: str):  # Called in genImgPatches()
    """Parse window_config to get width and height as integers.

    Args:
        window_config (str): window width and height, e.g. '5000,5000'

    Outputs:
        window (dict): dictionary containing ax:dim pairs (a key is
        omitted when the corresponding dimension is missing)
    """
    dims = window_config.split(',', 1)
    return {axis: int(dim) for axis, dim in zip(('width', 'height'), dims)}
8f9a9858f9f96c5f2698f922b63f2e7d328138a9
41,986
import functools


def sortOperates(operates):
    """Sort operations chronologically, breaking ties by operation type.

    Each operate is ``[datetime, buy/sell, cost/income]``.  Earlier
    datetimes sort first; for equal datetimes the operation whose buy/sell
    enum has the smaller ``.value`` (buy) sorts first.  The sort is stable
    for fully equal keys.

    :param operates: all operations
    :return: sorted list of operations
    """
    return sorted(operates,
                  key=lambda operate: (operate[0], operate[1].value))
355b73d18820d8030dc0a30dcc6ef70f555e6e90
41,987
import re


def find_default_value(line, token):
    """Populate token.value from a ``[default: ...]`` marker in *line*.

    The default is coerced to int, then float, falling back to the raw
    string.  The token is returned in all cases (unchanged when the line
    carries no default marker).

    Cleanup: the original parsed the value twice per successful cast (once
    to test, once to assign); each cast is now attempted exactly once.
    NOTE(review): a marker like ``[default:]`` with no value raises
    IndexError here, as it did originally — confirm inputs are sanitized.

    Args:
        line: a string that holds the current line.
        token: the token object holding the option.

    Returns:
        token: the updated token according to the existence of a default.
    """
    matching = re.search(r'\[.*?]', line)
    if matching is not None:
        default_value = matching.group(0)[1:-1].strip()
        # Test if this line of docstring contains a default value
        if re.search('default:', default_value, re.IGNORECASE):
            raw = default_value.split()[1]
            for cast in (int, float):
                try:
                    token.value = cast(raw)
                    break
                except ValueError:
                    continue
            else:
                token.value = raw
    return token
f0dd14274bddbfa672f02dc0b2130ffbf39e5462
41,988
def get_interesting_mask_columns(mask, column_indices):
    """Select the "interesting" parts of *mask* at the given indices.

    NOTE(review): despite the name, this indexes the *first* axis of the
    mask — confirm the naming against callers.
    """
    return mask[column_indices, :]
3b5d47d58faba53f5c866ec45362c73ffc848bed
41,989
def get_slot(datetime_sec, band):
    """
    Return IBP schedule time slot (0 ... 17) from given
    datetime_sec (second from UNIX time epoch) and
    band (14, 18, ..., 28) MHz value.

    Bug fix: the slot arithmetic used true division ('/'), which yields a
    float under Python 3, so a float (e.g. 2.0) was returned instead of the
    documented integer slot; floor division ('//') restores the int result.
    """
    time_xmit = 10      # sec (transmitting time length)
    n_slots = 18        # number of slots
    period_sched = n_slots * time_xmit
    slot_offset = {
        14: 0, 18: 1, 21: 2, 24: 3, 28: 4
    }
    timeslot_in_sched = int(datetime_sec % period_sched) // time_xmit
    return (timeslot_in_sched - slot_offset[band]) % n_slots
0566887707e280b5c31f1ae87a3b443757421ce7
41,990
from subprocess import call


def fsl_cluster(finput, findex, thresh=0.9, fosize=None, fothresh=None, **kwargs):
    """ Run FSL Cluster command (https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Cluster)

    Bug fix: ``call`` was imported from ``unittest.mock``, so the assembled
    command was merely recorded by a mock object and never executed; it is
    now ``subprocess.call``, which actually runs the command.

    :param str finput: Input filename, image to be thresholded
    :param float thresh: Chosen threshold; default = 0.9
    :param str findex: Output file with each cluster assigned an integer from 1 to N
    :param str fosize: Output file with each cluster voxel assigned an integer equivalent to its cluster size
    :param str fothresh: Output file with clusters assigned the original values (only > threshold remain)
    :return: exit status returned by subprocess.call
    """
    osize = "--osize={}".format(fosize) if fosize is not None else ""
    othresh = "--othresh={}".format(fothresh) if fothresh is not None else ""
    fsl = "cluster --in={} --thresh={} --oindex={} {} {}".format(finput, thresh, findex, osize, othresh)
    print("Running command: {}".format(fsl))
    # NOTE(review): the command is a single string; callers must pass
    # shell=True via kwargs for subprocess.call to execute it — confirm.
    code_1 = call(fsl, **kwargs)
    return code_1
cf314748daaeb9880ebc9c914f880eaa091fa0f9
41,993
def form_for_field(config, field):
    """
    Gets the form name which contains the field that is passed in
    (None when no form lists the field).
    """
    return next((form['form_name']
                 for form in config['forms']
                 if field in form['csv_fields'].values()),
                None)
d960154bbf473a2e8a730fd74fd70c3896620df0
41,994
def evaluate_chi2(chi2: float, gt_chi2: float, tol: float = 1e-3):
    """Evaluate whether chi square values match (False when chi2 is None)."""
    return chi2 is not None and abs(chi2 - gt_chi2) < tol
2277365b1eb87d6591634ee92abe7380eec4e77f
41,995
import ast


def copy_location(new_node, old_node):
    """An ast.copy_location extension.

    Behaves identically to the standard ast.copy_location except that it
    also copies parent and sibling references.
    """
    new_node = ast.copy_location(new_node, old_node)
    for attr in ('parent', 'previous', 'next'):
        setattr(new_node, attr, getattr(old_node, attr))
    return new_node
ac827c9edf2a8de6c7abb1f6fd23eca8480c8cc4
41,996
import struct


def get_endiannes(file: str):
    """ Return the endiannes of the SEG-Y file.

    Reads the two sample-format-code bytes at offset 3224 and interprets
    them big-endian; a valid code lies between 1 and 16.

    Args:
        file (str) : Path to the file.

    Returns:
        '>' or '<' for big and little endian respectively.
    """
    with open(file, 'br') as sgy:
        sgy.seek(3224)
        raw = sgy.read(2)
    code = struct.unpack('>h', raw)[0]
    return '>' if 1 <= code <= 16 else '<'
11eb0b43d568c97082864bb555f89f5fdd14eb79
41,997
def get_org_user_defaults():
    """Returns a dictionary of reasonable defaults for users returned from
    external LDAPs."""
    return {
        'username': 'testuser',
        'first_name': 'Test',
        'last_name': 'User',
        'email': 'testuser@colorado.edu',
    }
63fc67514d8b40497e4ab7e2de2709ec8702c259
41,999
def get_latitude(dataset):
    """Get latitude dimension from XArray dataset object."""
    for candidate in ('lat', 'latitude'):
        if candidate in dataset.coords:
            return dataset[candidate]
    raise RuntimeError('Could not find latitude dimension in dataset.')
6a0eb8cbe27c0a802133ec80f7feb7f11afe8b15
42,000
async def get_indexed_tags(redis):
    """Get all tags monitored for indexing.

    Args:
        redis (aioredis.Redis): a Redis interface.

    Returns:
        A list of monitored tags as `str` objects.
    """
    tags = await redis.lrange('indexed_tags', 0, -1, encoding='utf-8')
    return tags
5f9c7cd1c2eb89e04013ddfd6fdb81d24b4d5685
42,003
def linear_search(l, item):
    """
    Linear search implementation: O(n) time, O(1) space, searches in place.

    :param l: A list
    :param item: item to look for (or N/a, yielding -1)
    :return: index of the wanted item, or -1 when absent
    """
    for index, element in enumerate(l):
        if item == element:
            return index
    return -1
18369cdf07bd4784e21bb01b8a3545c5d742e5cf
42,004
def get_objecturls_from_bucket(client, bucket):
    """Return public-style object URLs for every key in an S3 bucket,
    following list_objects_v2 pagination.

    params:
    - bucket: s3 bucket with target contents
    - client: initialized s3 client object

    Bug fix: an empty bucket returns no 'Contents' key, so the original
    iterated over None and raised TypeError; missing/empty Contents is now
    treated as "no objects on this page".
    """
    urls = []
    kwargs = {'Bucket': bucket}
    while True:
        results = client.list_objects_v2(**kwargs)
        for obj in results.get('Contents') or []:
            urls.append(
                "https://%s.s3.amazonaws.com/%s" % (bucket, obj.get('Key')))
        next_token = results.get('NextContinuationToken')
        if next_token is None:
            break
        kwargs['ContinuationToken'] = next_token
    return urls
1c0b36810ddb63f57052ddd95b5dd55354aadf28
42,005
def parse_nat_msg(msg):
    """ Parse a syslog message from the nat program into a python dictionary.

    :param msg: nat msg from syslog
    :return: a dictionary of nat related key value pairs

    Cleanup: the original 16-branch if/elif chain plus 16 temporaries is
    replaced by a single prefix -> (output key, default) table; parsing
    behaviour (split on '=', take the first value segment) is unchanged.
    """
    # syslog field prefix -> (output dict key, default when absent)
    fields = {
        'DNAT_IN': ('dnat_in', ''),
        'OUT': ('out', ''),
        'MAC': ('mac_address', ''),
        'SRC': ('src_ip', -1),
        'DST': ('dest_ip', -1),
        'LEN': ('len', -1),
        'TOS': ('tos', -1),
        'PREC': ('proc', -1),
        'TTL': ('ttl', -1),
        'ID': ('id', -1),
        'PROTO': ('proto', ''),
        'SPT': ('spt', -1),
        'DPT': ('dpt', -1),
        'WINDOW': ('window', -1),
        'RES': ('res', ''),
        'URGP': ('urgp', -1),
    }
    d = {key: default for key, default in fields.values()}
    for word in msg.split(' '):
        parts = word.split('=')
        if len(parts) >= 2 and parts[0] in fields:
            d[fields[parts[0]][0]] = parts[1]
    return d
dfebd52d86a716324d3b62ddc697ff28e67a036d
42,006
import functools


def dec_check_lines(func):
    """ Decorator to check the currently displayed lines on the LCD to
    prevent rewriting the same content on the screen.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        fingerprint = str(args) + str(kwargs)
        if self.lines == fingerprint:
            return  # identical content already displayed; skip the redraw
        self.lines = fingerprint
        func(self, *args, **kwargs)
    return wrapper
104be359535ed3ac11b40dc2a578eea73915ad15
42,008
def dbfarg(n):
    """Return the positional argument name 'arg<n>'."""
    return "arg" + str(n)
237bb2fdb5534b692b562a2ccb25c8997b853472
42,009
def circumcircle(u, v, w):
    """find the center/radius of circumcircle of triangle uvw

    Uses the barycentric-weight formula; expects vector objects supporting
    -, +, * (scalar), .crs (cross product), .dot and .mag.

    Cleanup: removed the unused locals vu/wv/uw and hoisted the cross
    product, which the original computed twice for the denominator.
    """
    cross = (u - v).crs(v - w)
    d = 2 * cross.dot(cross)
    a = (v - w).dot(v - w) * (u - v).dot(u - w) / d
    b = (u - w).dot(u - w) * (v - u).dot(v - w) / d
    c = (u - v).dot(u - v) * (w - u).dot(w - v) / d
    o = u * a + v * b + w * c
    r = (u - o).mag()
    return o, r
ee824a1ccc96663d1e347057938b1a6fc4da80ac
42,010
def convert_ms(ms):
    """
    Converts milliseconds into a zero-padded h:m:s string.

    :param ms: int
    :return: str
    """
    seconds = int((ms / 1000) % 60)
    minutes = int((ms / (1000 * 60)) % 60)
    hours = int((ms / (1000 * 60 * 60)) % 24)
    return "%02d:%02d:%02d" % (hours, minutes, seconds)
1b39e0ac151fbdf6a3e7a6a15e83c36ff40e0810
42,011
import json


def _process_json(response_body):
    """ Returns True if the list already exists (i.e. the JSON carries an
    "Available" field equal to the string "False"). """
    data = json.loads(response_body)
    return data.get("Available") == "False"
84fd35d212120b246287f5f140184fefe9d6bf26
42,013
def handle_simple(ctx, name=None, custom_hello="Hi"):
    """Simple routing handler: sets a custom response header from *name*
    and returns a greeting wrapping it in bold markup."""
    test_header = ctx.request.headers.get('test', None)
    print("Hello {0:s}".format(test_header))
    ctx.response.headers['XX-Custom-Header'] = str(name)
    return "{0:s} <b>{1:s}</b>".format(custom_hello, name)
a76168db66c64ad9396e3ba78b265191c2d41f20
42,014
import argparse


def arg_parser():
    """Build the command-line argument parser for this tool."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Required positional input file.
    parser.add_argument('pn_file', metavar='P', help="Input")
    # PDB lookup configuration.
    parser.add_argument('--pdb', '-p', default='data/pdb',
                        help="Path to root of local PDB directory, expects mmCIF to be available")
    parser.add_argument('--gzip', '-g', action='store_true',
                        help="Local PDB files are gzipped")
    # Output / calculation toggles.
    parser.add_argument('--filter', '-f', action='store_true',
                        help="Dont print records where no structure is found")
    parser.add_argument('--rama', '-r', action='store_true',
                        help="Calculate ramachandran angles")
    parser.add_argument('--chi', '-c', action='store_true',
                        help="Calculate chi1 angles")
    return parser
983a52a95689530c8c89923d8b37e7653ecfe4d4
42,015
def GetASTSummary(ast):
    """Summarize an AST field as a string of per-bit flag characters.

    Flags:
        P - AST_PREEMPT
        Q - AST_QUANTUM
        U - AST_URGENT
        H - AST_HANDOFF
        Y - AST_YIELD
        A - AST_APC
        L - AST_LEDGER
        B - AST_BSD
        K - AST_KPERF
        M - AST_MACF
        C - AST_CHUD
        C - AST_CHUD_URGENT
        G - AST_GUARD
        T - AST_TELEMETRY_USER
        T - AST_TELEMETRY_KERNEL
        T - AST_TELEMETRY_WINDOWED
        S - AST_SFI
    """
    # One character per bit, ordered bit 0 (0x1) through bit 16 (0x10000).
    # Replaces the original's mask-keyed dict, manual shift loop, and a
    # dead `out_string` local that was never used.
    flag_chars = 'PQUHYALBKMCCGTTTS'
    state = int(ast)
    return ''.join(
        ch for bit, ch in enumerate(flag_chars) if state & (1 << bit)
    )
af6bd56509666162501dee0e3077a6470b9a6c0a
42,016
from typing import List
from typing import Tuple

import numpy


def array_mathematics(a: List[List[str]], b: List[List[str]]) -> Tuple[numpy.ndarray, ...]:
    """Apply the six element-wise numpy operations to two string matrices.

    >>> array_mathematics([['1', '2', '3', '4']],
    ...                   [['5', '6', '7', '8']]) #doctest: +NORMALIZE_WHITESPACE
    (array([[ 6, 8, 10, 12]]),
    array([[-4, -4, -4, -4]]),
    array([[ 5, 12, 21, 32]]),
    array([[0, 0, 0, 0]]),
    array([[1, 2, 3, 4]]),
    array([[ 1, 64, 2187, 65536]]))
    """
    lhs = numpy.array(a, int)
    rhs = numpy.array(b, int)
    # Return in the fixed order: add, subtract, multiply, floor-divide, mod, power.
    return (
        numpy.add(lhs, rhs),
        numpy.subtract(lhs, rhs),
        numpy.multiply(lhs, rhs),
        numpy.floor_divide(lhs, rhs, dtype=int),
        numpy.mod(lhs, rhs, dtype=int),
        numpy.power(lhs, rhs, dtype=int),
    )
e657a97f9dc27b8d77beba3f22e187b50f9734c6
42,017
def get_iso3_from_country_name(input_database, country_name):
    """Look up the ISO3 code matching *country_name* using the bcg table.

    Falls back to a hard-coded mapping for names the table does not
    resolve; returns None when the country is unknown.
    """
    # Laos appears under several spellings; short-circuit it.
    if "Lao " in country_name:
        return "LAO"
    matches = input_database.db_query(
        "bcg", column="ISO_code", conditions=["Cname='" + country_name + "'"]
    )["ISO_code"].tolist()
    if matches:
        return matches[0]
    # Backup mapping for countries whose bcg name differs or is absent.
    backup_iso3_codes = {
        "Andorra": "AND",
        "Antigua and Barbuda": "ATG",
        "Australia": "AUS",
        "Bahamas": "BHS",
        "Bahrain": "BHR",
        "Belgium": "BEL",
        "Bolivia (Plurinational State of": "BOL",
        "Canada": "CAN",
        "Congo": "COG",
        "Cyprus": "CYP",
        "Dominican Republic": "DOM",
        "Germany": "DEU",
        "Hong Kong SAR, China": "HKG",
        "Iceland": "ISL",
        "Lebanon": "LBN",
        "Luxembourg": "LUX",
        "Netherlands": "NLD",
        "New Zealand": "NZL",
        "Niger": "NER",
        "Philippines": "PHL",
        "Republic of Korea": "KOR",
        "Russian Federation": "RUS",
        "Sao Tome and Principe ": "STP",
        "Spain": "ESP",
        "Suriname": "SUR",
        "Switzerland": "CHE",
        "Syrian Arab Republic": "SYR",
        "Taiwan": "TWN",
        "TFYR of Macedonia": "MKD",
        "United Arab Emirates": "ARE",
        "United Kingdom of Great Britain": "GBR",
        "United States of America": "USA",
        "Venezuela (Bolivarian Republic ": "VEN",
    }
    return backup_iso3_codes.get(country_name)
322b10c4037d2cf1e65540b7240ea904b9e1e0d3
42,018
def is_number_tryexcept(s):
    """Return True if string *s* parses as a float, False otherwise.

    Taken from:
    https://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
569f73b4be50e1ddb4157ec98e59c2310d874412
42,019
def trimBack(khashs):
    """Trim one k-mer symbol from the back.

    Integer division by 4 drops the two lowest bits — presumably one
    2-bit-encoded base per k-mer hash (confirm encoding with callers).

    Args:
        khashs (np.array): hashed k-mers
    """
    trimmed = khashs // 4
    return trimmed
b502189e138ac5aeb3cf3138c0507d26f29eca40
42,022
def percent_nocturnal(records, user):
    """The percentage of interactions the user had at night.

    By default, nights are 7pm-7am.  Night hours come from
    ``user.night_start`` and ``user.night_end``.
    """
    if not records:
        return 0

    def at_night(moment):
        t = moment.time()
        if user.night_start < user.night_end:
            # Night interval does not cross midnight.
            return user.night_start < t < user.night_end
        # Interval wraps midnight: night is everything outside the day window.
        return not (user.night_end < t < user.night_start)

    nocturnal = sum(1 for r in records if at_night(r.datetime))
    return nocturnal / len(records)
9d5e66c5809d185da81f63d61ccc9efe3f51cba5
42,023
def load_l8_clouds_fmask(l8bqa):
    """Return the CFMask cloud flag (BQA bit 4) as a boolean mask.

    https://www.usgs.gov/land-resources/nli/landsat/landsat-collection-1-level-1-quality-assessment-band
    https://www.usgs.gov/land-resources/nli/landsat/cfmask-algorithm

    :param l8bqa: Landsat 8 BQA quality band (int or integer array)
    :return: True wherever bit 4 is set
    """
    cloud_bit = 1 << 4
    return (l8bqa & cloud_bit) != 0
3962345cf488b77b25feddeb491506c3bda3cc2b
42,024
def k4curvaturedp(B, μ, k, n):
    """Generate the first 5 curvatures (including the outer circle) of an
    integer Apollonian gasket.

    The parameters must satisfy:
        B^2 + μ^2 = k*n
        0 <= μ <= B/sqrt(3)
        2μ <= k <= n

    Yields (-B, B+k, B+n, B+k+n-2μ, B+k+n+2μ).  Intended for integers,
    so it is Diophantine.
    """
    base = B + k + n
    twist = 2 * μ
    return (-B, B + k, B + n, base - twist, base + twist)
aabc15bf35709cd092e7261d8ee1513d25a1e5c6
42,026
def split_ver(v):
    """Split *v* into a list of version components.

    Args:
        v (str or list of int or int): version string split on periods,
            or an already-split sequence passed through.

    Returns:
        list of int or str: digit components converted to int, other
        components unchanged; empty strings and ``None`` are dropped.
    """
    # `six.string_types` is just `str` on Python 3; the six dependency
    # added nothing, so it is dropped.
    parts = v.split(".") if isinstance(v, str) else v
    return [
        int(x) if isinstance(x, str) and x.isdigit() else x
        for x in parts
        if x not in ["", None]
    ]
c3bebb064c229c65c306f0cf40697337e2884a3b
42,031
def is_enabled():  # real signature unknown; restored from __doc__
    """is_enabled() -> bool: check if the handler is enabled.

    Stub restored from documentation; always reports disabled.
    """
    return False
70242041dc0ddf39d6bbb74efb4991f6a8501dbc
42,034
def overlap(x):
    """Count the drugs shared by a side effect and a target.

    Parameters
    ----------
    x : dict-like (e.g. a pandas row)
        Must carry ``se_drug`` (a set) and ``tg_drug`` (an iterable of
        drug identifiers).

    Returns
    -------
    int
        Size of the intersection of the two drug collections.
    """
    shared = x['se_drug'].intersection(x['tg_drug'])
    return len(shared)
fb3fe3a74583b5c719d8a32d0c93d56d5d70dc9b
42,035
import logging


def logger():
    """Provide the fuzzer's logger.

    :return: the ``fuzzing.fuzzer`` logger instance.
    :rtype: logging.Logger
    """
    return logging.getLogger('fuzzing.fuzzer')
654a0f23b47062ba934a46f3657e3434b4142211
42,037
def distance2(x1, y1, z1, x2, y2, z2):
    """Return the squared Euclidean distance between point 1 and point 2."""
    dx = x1 - x2
    dy = y1 - y2
    dz = z1 - z2
    return dx * dx + dy * dy + dz * dz
ff2740fe1ce3e52e336a18dca9c4f7bb504cacf4
42,038
from textwrap import dedent


def dedent_nodetext_formatter(nodetext, has_options, caller=None):
    """Format node text by stripping common leading whitespace.

    ``has_options`` and ``caller`` are accepted for formatter-interface
    compatibility but are not used here.
    """
    return dedent(nodetext)
7a4a06a68470f75eba26aa311b99f85a2859367a
42,039
def get_grid(size):
    """Create a ``size`` x ``size`` grid where every cell is its own empty list."""
    grid = []
    for _ in range(size):
        grid.append([[] for _ in range(size)])
    return grid
53db2ac06d8ada944575d022685bbf2f21929b09
42,040
def format_datetime(value, format="%B %d, %Y"):
    """Format a date/datetime as (default) ``Month Day, LongYear``.

    Leading zeros are stripped from the rendered numbers
    (e.g. "May 05" -> "May 5"); ``None`` yields the empty string.
    """
    if value is None:
        return ""
    rendered = value.strftime(format)
    # Drop a zero at the very start and any zero that follows a space.
    return rendered.lstrip("0").replace(" 0", " ")
747a31ff83a2497d7d3257d7311e7e18071b5848
42,041
def duplicate_columns(frame):
    """Find columns whose values duplicate an earlier column (keep='first').

    https://stackoverflow.com/questions/14984119/python-pandas-remove-duplicate-columns/32961145#32961145

    For large frames:
        dups = duplicate_columns(df); df.drop(dups, 1)
    For small frames, ``df.T.drop_duplicates().T`` works too.
    """
    frame = frame.fillna(-123456)  # sentinel so NaN compares equal to NaN
    # Only columns sharing a dtype can be equal, so compare within dtype groups.
    groups = frame.columns.to_series().groupby(frame.dtypes).groups
    dups = []
    for _, cols in groups.items():
        col_lists = frame[cols].to_dict(orient="list")
        values = list(col_lists.values())
        names = list(col_lists.keys())
        count = len(values)
        for i in range(count):
            for j in range(i + 1, count):
                if values[i] == values[j]:
                    dups.append(names[j])  # keep='first'
                    break
    return dups
48f8ce9e3dcfaa17325080125bcc1e4c20e49f3c
42,042
def noop_walk(_data, _arg):
    """No-op walker: ignores both inputs and reports zero."""
    return 0
4f60f855d7d3671ca61bd64b19f47524dc560ebe
42,043
def work_dir(tmpdir):
    """Return a fresh ``work_dir`` directory for output/input files."""
    created = tmpdir.mkdir('work_dir')
    return created
952bf3fbe1a81a43a37d35e255a6ddc97a1a18ee
42,044
def sideCheck(ctx):
    """Build a predicate that accepts valid Valorant side messages.

    :param ctx: discord context; only messages from its channel count
    :return: callable(message) -> bool, True when the message mentions
        'att' or 'def' (case-insensitive) and came from ``ctx.channel``
    """
    def predicate(message):
        text = message.content.lower()
        mentions_side = 'att' in text or 'def' in text
        return mentions_side and message.channel == ctx.channel
    return predicate
59ede2747bc4fce8a1949804203c71122cccc905
42,045
import six def _IsUrl(resource_handle): """Returns true if the passed resource_handle is a url.""" parsed = six.moves.urllib.parse.urlparse(resource_handle) return parsed.scheme and parsed.netloc
8ebd20a55c168cbefd243cbc01c87ea051a35c52
42,046
def build_project_list(client):
    """Dynamically create a choices list from the Document Cloud projects
    of the associated account.

    :param client: Document Cloud client exposing ``projects.all()``
    :return: list of ``(title, title)`` tuples suitable for a choices field
    """
    return [(project.title, project.title) for project in client.projects.all()]
84e61e3330a45b12f3098c0d4d99179ff000882a
42,047
import numpy


def join_struct_arrays(arrays):
    """Join structured numpy arrays column-wise into one structured array.

    Thanks: https://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays

    Args:
        arrays: sequence of structured arrays of equal length.

    Returns:
        A structured array whose dtype concatenates all input dtypes.
    """
    item_sizes = numpy.array([arr.itemsize for arr in arrays])
    # Byte offset at which each input's fields start within a joined record.
    offsets = numpy.r_[0, item_sizes.cumsum()]
    length = len(arrays[0])
    raw = numpy.empty((length, offsets[-1]), dtype=numpy.uint8)
    for arr, size, offset in zip(arrays, item_sizes, offsets):
        # Copy each record's raw bytes into its slot of the joined buffer.
        raw[:, offset:offset + size] = arr.view(numpy.uint8).reshape(length, size)
    joined_dtype = sum((arr.dtype.descr for arr in arrays), [])
    return raw.ravel().view(joined_dtype)
083236bcd81975b437ebe5b79a084fa39d1a70a6
42,049
def preprocess_summary(context, answer, que_model):
    """Preprocess a summary into the question-generator model's input format.

    Args:
        context (str): corpus used as the model context.
        answer (str): answer the generated question should target.
        que_model (AnsGenModel): model wrapper providing ``tokenize_corpus``.

    Returns:
        tuple: (input token ids, attention mask) from the tokenizer output.
    """
    encoded = que_model.tokenize_corpus(context, answer)
    return encoded["input_ids"], encoded["attention_mask"]
7337a25caeb2929e88d5fe302149a66bf09195ad
42,052
def gen_extra_info(*entries):
    """Generate the "Information" HTML section from (label, value) pairs.

    Pairs whose value is the empty string are skipped; with no entries at
    all, an empty string is returned.
    """
    if not entries:
        return ''
    rows = ''.join(
        '<tr><td><b>{}</b><td>{}'.format(label, value)
        for label, value in entries
        if value != ''
    )
    return '<h2>Information</h2>' + '<table border=1>{}</table>'.format(rows)
319267d217e28aef42df5fa2e02682a86f91f603
42,053
def run(state, initial):
    """Run the stateful action *state* from *initial* state.

    Equivalent to ``state.run(initial)``.

    :return: ``(a, s)`` — the action's value and the resulting state.
    """
    result = state.run(initial)
    return result
6204133fe147ff3ab6fd68cb69e697391ffa65f6
42,054
def author() -> str:
    """Return the author/copyright notice string."""
    notice = '(c) 2020 Giovanni Lombardo mailto://g.lombardo@protonmail.com'
    return notice
8275d724299012f8bac866db512e695b97a87b2a
42,056
def _matches_to_sets(matches): """ Helper function to facilitate comparing collections of dictionaries in which order does not matter. """ return set(map(lambda m: frozenset(m.items()), matches))
4e8b38131130275c1ced0c960c0f6ef6d59c4790
42,057
def eval_precisions(scores, targets, topk=(1,)):
    """Compute precision@k for each k in *topk*.

    Args:
        scores: FloatTensor, (num_examples, num_classes)
        targets: LongTensor, (num_examples, )
        topk: iterable of k values to evaluate.

    Returns:
        list of float: precision percentages, one per requested k.
    """
    maxk = max(topk)
    num_examples = targets.size(0)
    _, preds = scores.topk(maxk, 1, largest=True, sorted=True)
    preds = preds.t()  # (maxk, num_examples)
    correct = preds.eq(targets.unsqueeze(0).expand_as(preds))
    res = []
    for k in topk:
        # reshape(-1), not view(-1): `correct` comes from .t() and is
        # non-contiguous, and Tensor.view raises on such inputs.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / num_examples).data.item())
    return res
42e76d0b036b39a7531cdcbde6c7ebc3fa6ff13f
42,058
def get_rec_cycle(n):
    """Length of the recurring cycle in the decimal expansion of 1/n.

    E.g. 1/7 = 0.(142857) -> 6; terminating expansions (1/2, 1/8, ...) -> 0.

    :param n: positive integer denominator
    :return: cycle length, or 0 when the expansion terminates
    """
    # Long division: record the step at which each remainder first appears;
    # when a remainder repeats, the distance between occurrences is the
    # cycle length.  Replaces the original's preallocated O(n) list, the
    # misnamed `dividend` (it held the remainder), and a dead
    # `if dividend == 0: break` that the loop condition already covered.
    first_seen = {}
    remainder = 1 % n
    step = 0
    while remainder != 0:
        if remainder in first_seen:
            return step - first_seen[remainder]
        first_seen[remainder] = step
        remainder = remainder * 10 % n
        step += 1
    return 0
45a43aad1133558ade5d8d832279a2070fb9c9d5
42,059
def escape(query):
    """Escape backslash, pipe and star so *query* is safe in a query string."""
    escaped = query
    # Backslash must be escaped first, or the added escapes would be doubled.
    for special in ('\\', '|', '*'):
        escaped = escaped.replace(special, '\\' + special)
    return escaped
8eb162fc3eae6bc9c54257f33273ac8fee19637c
42,060
import torch


def squared_deltaE(lab1, lab2):
    """Squared Delta E (CIE 1976), summed over the channel axis.

    lab1: Bx3xHxW
    lab2: Bx3xHxW
    return: Bx1xHxW
    """
    diff = lab1 - lab2
    return (diff * diff).sum(dim=1, keepdim=True)
8540ac9c8274d9a292167141146dbd0ac8bb6386
42,061
def principal_from_interaction(interaction):
    """principal_from_interaction(interaction: IInteraction) -> IPrincipal

    Find the primary :class:`IPrincipal` for the *interaction*: the
    principal of its first participation.
    """
    first_participation = next(iter(interaction.participations))
    return first_participation.principal
8aba1a9c63ef08e1e2aff105476750e7a2c9c06c
42,062
from collections import Counter


def most_common(iterable):
    """Return the single most common element of *iterable*.

    >>> most_common([1, 2, 2, 3, 4, 4, 4, 4])
    4
    >>> most_common(list('Anthony'))
    'n'
    >>> most_common('Stephen')
    'e'
    """
    (winner, _count), = Counter(iterable).most_common(1)
    return winner
81fcc5c467cd8d46f853d1f32d099b2aff32b6e0
42,065
import re


def is_divider(item: str) -> bool:
    """Return True if *item* begins with a divider character (',' or ':')."""
    # The check is anchored at the start of the string; later occurrences
    # of ',' or ':' do not count.
    return item.startswith((',', ':'))
3160a586b83dd3415026918936bee27ac0c7548f
42,066
import torch


def gaussian_cdf(x, mean, chol_std):
    """Per-dimension Gaussian CDF.

    Standardizes *x* using *mean* and the diagonal of the Cholesky factor
    *chol_std* (a small epsilon guards against a zero scale), then applies
    the standard-normal CDF via ``erf``.  Note the result is element-wise
    per dimension; off-diagonal correlations in *chol_std* are ignored.
    """
    scale = torch.diagonal(chol_std, dim1=-1, dim2=-2)
    standardized = (x - mean) / (scale + 1e-6)
    root_two = torch.sqrt(torch.tensor(2.0))
    return 0.5 * (1 + torch.erf(standardized / root_two))
2fa08465c69afac91e8116f1ffbd22a771670b0d
42,067