content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def filter_probes_by_nan_and_sd(data_df, probe_frac_cutoff, probe_sd_cutoff):
    """Drop probes (rows) that are too sparse or too variable.

    A probe is kept only when the fraction of non-NaN samples exceeds
    probe_frac_cutoff AND its standard deviation is below probe_sd_cutoff.

    Args:
        data_df (pandas.DataFrame): probes x samples matrix
        probe_frac_cutoff (float): fraction between 0 and 1
        probe_sd_cutoff (float): upper bound on per-probe SD

    Returns:
        pandas.DataFrame: potentially smaller than the input
    """
    n_samples = data_df.shape[1]
    # Fraction of measured (non-NaN) samples per probe
    frac_measured = 1 - data_df.isnull().sum(axis=1) / n_samples
    probe_sds = data_df.std(axis=1)
    keep_mask = (frac_measured > probe_frac_cutoff) & (probe_sds < probe_sd_cutoff)
    out_df = data_df.loc[keep_mask, :]
    assert not out_df.empty, (
        "All probes were filtered out. Try reducing the NaN threshold and/or SD cutoff.")
    return out_df
1268244d4975be7bcf114b94d33e567fb7cff1b5
32,741
import random
def get_mac():
    """Return a random MAC address string like 'aa:bb:cc:dd:ee:ff'."""
    octets = [random.randint(0x00, 0xff) for _ in range(6)]
    return ":".join("%02x" % octet for octet in octets)
33287d8dbe5815997b6ae1bcd9ec207ad3cf8e4f
32,742
import sys
def BasisForm(function_space):
    """Instantiate the element class matching *function_space*'s form type.

    The element class name is looked up in the space's ``str_to_elem``
    mapping and resolved as an attribute of this module.
    """
    class_name = function_space.str_to_elem[function_space.form_type]
    elem_cls = getattr(sys.modules[__name__], class_name)
    return elem_cls(function_space)
9304f53b59b6b1e79dc227766ebc80f446231b40
32,743
def forward_pass(tproblem, add_regularization_if_available=False):
    """Run one DeepOBS forward pass and return only the batch loss.

    The accuracy returned by the test problem is discarded.
    """
    loss, _accuracy = tproblem.get_batch_loss_and_accuracy(
        add_regularization_if_available=add_regularization_if_available)
    return loss
8596de44f4e3a71d978e33d202e30f36a4c3e15a
32,748
import difflib
def textPreservedRatio(o_text, d_text):
    """Compute the fraction of source text preserved in a destination revision.

    Each item of *o_text* is split into words; a word counts as preserved
    when its longest common block with *d_text* covers more than 80% of it.

    Args:
        o_text (list): items whose string content is compared word by word.
        d_text (str): text content of the destination revision.

    Returns:
        float: preserved-text ratio, rounded to two decimals.
    """
    total = 0
    total_matched = 0
    for text in o_text:
        for word in text.split(' '):
            blocks = difflib.SequenceMatcher(
                None, word, d_text, autojunk=False).get_matching_blocks()
            # Longest matching block (first maximal element, as the original
            # stable descending sort selected).
            best = max(blocks, key=lambda blk: blk[2])
            if int(best[2]) / len(word) > 0.8:
                total_matched += int(best[2])
            total += len(word)
    return round(total_matched / total, 2)
68a86c8785eeaebd17eb215e3c77846948e4925b
32,749
import importlib
def get_model(params):
    """Resolve and return the model class named by ``params.model.name``.

    The name is a dotted path of the form ``package.module.ClassName``.
    """
    module_path, class_name = params.model.name.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
55e360f57488eeeb35d8c1479293d10ffa1fac78
32,750
def create_model_identifier(name, version):
    """Combine *name* and *version* into one identifier string.

    Dots in the version become dashes, e.g. ('m', '1.2') -> 'm-1-2'.
    """
    safe_version = str(version).replace('.', '-')
    return "%s-%s" % (name, safe_version)
3aec457ff6836b93293b1adcb9e26a5429cfff09
32,751
def emoji_boolean(condition: bool) -> str:
    """Map a boolean to a traffic-light emoji.

    Args:
        condition (bool): subject condition

    Returns:
        str: green circle for True, red circle for False
    """
    if condition:
        return "🟢"
    return "🔴"
c2ca508ded5919da4811267893afd32b142cced3
32,752
def v1_protected(protected: bool | str) -> bool: """Cleanup old protected handling.""" if isinstance(protected, bool): return protected return True
ceb68509854be943ccf2846c5bd77e882373024d
32,753
def _quantile(sorted_values, q):
    """Return the value at quantile ``q`` of an ascending 1-d array.

    ``q`` must lie in [0, 1]; ``q == 1`` maps to the last element.
    """
    assert ((q >= 0) & (q <= 1))
    if q == 1:
        return sorted_values[-1]
    return sorted_values[int(q * len(sorted_values))]
e3356c5911b031c153a4914dcd81052743005eae
32,754
import random
import math
def rGeom(p):
    """Draw a geometrically distributed random integer in {0, 1, 2, ...}.

    *p* is the per-trial success probability.  Inverts the geometric CDF
    ``1 - (1-p)**(k+1)`` at a uniform random point:
        k = log(1 - CDF) / log(1 - p) - 1
    """
    # Drawn before the p == 1 shortcut so RNG state advances identically in
    # every call.
    u = random.random()
    if p == 1:
        return 0
    return math.ceil(math.log(1 - u, 1 - p) - 1)
7eb1d3bbac0c79e1341abb2fa9528ea8c6c88e84
32,755
import re
def allXinY(word1, word2):
    """
    Check if word1 properly belongs in word2
    :param word1: the word to be queried
    :param word2: the sentence / longer word
    :return: whether word1 as a sequence of words belong in word2
    """
    # Fast reject: word1 must occur as a substring at all.
    if word1 not in word2:
        return False
    # word2 = word2.strip(",").strip(".").strip("-").strip(":").strip(";")
    ind = word2.index(word1)
    # Only allow WORD_ format if the index is 0
    if ind == 0:
        res = re.findall(word1 + '[^a-z]', word2)
        for m in res:
            if word2.index(m) == 0:
                return True
    # Otherwise word1 must be delimited by non-letter characters on both
    # sides, starting exactly at its first occurrence.
    # NOTE(review): word2.index(m) locates the FIRST occurrence of the match
    # text, which may not be where re.findall found it when word2 contains
    # repeated substrings -- confirm this is intended.
    # NOTE(review): word1 is interpolated into the regex unescaped; regex
    # metacharacters in word1 would change the match -- verify inputs are
    # plain words.
    res = re.findall('[^a-z]' + word1 + '[^a-z]', word2)
    for m in res:
        if word2.index(m) == ind - 1:
            return True
    return False
86d999ca226e9c575a9dfe40d7a7b2b08ae5a927
32,757
import torch
from typing import Tuple
def svd_flip(u: torch.Tensor, v: torch.Tensor,
             u_based_decision: bool = True) -> Tuple[torch.Tensor, torch.Tensor]:
    """Flip singular-vector signs in place so SVD output is deterministic.

    Follows the scikit-learn convention: the largest-magnitude entry of each
    column of ``u`` (or row of ``v``) is made positive, and the matching
    vectors in the other matrix are flipped accordingly.
    """
    if u_based_decision:
        # The largest-magnitude entry of each column of u decides the sign.
        pivot_rows = torch.argmax(torch.abs(u), dim=0)
        signs = torch.sign(u[pivot_rows, range(u.shape[1])])
    else:
        # The largest-magnitude entry of each row of v decides the sign.
        pivot_cols = torch.argmax(torch.abs(v), dim=1)
        signs = torch.sign(v[range(v.shape[0]), pivot_cols])
    u *= signs
    v *= signs.unsqueeze(dim=1)
    return u, v
2c32a016db5b6442d80dc2aba1ddeb0aaec2ca7c
32,758
def process_all_children(post):
    """Flatten a Piazza comment tree into a single list of comments."""
    flattened = list(post['children'])
    for child in post['children']:
        flattened.extend(process_all_children(child))
    return flattened
a23b69cc6dcc31938240e8783b40da02c1f745b7
32,759
def recursive_dict_to_list(dict_data):
    """Collect every non-dict value from *dict_data* and all nested dicts.

    Args:
        dict_data (dict): possibly nested dictionary.

    Returns:
        list: leaf values, in dictionary iteration order.
    """
    list_data = []
    for value in dict_data.values():
        # isinstance (rather than an exact type check) also descends into
        # dict subclasses such as OrderedDict and defaultdict.
        if isinstance(value, dict):
            list_data.extend(recursive_dict_to_list(value))
        else:
            list_data.append(value)
    return list_data
40f1bd7649d0462ff12a51958f028f098aeead56
32,760
import numbers
def _operator_fallbacks(fallback_operator, doc=""):
    """Returns tuple of polymorphic binary operators.

    Builds a (forward, reverse) pair of binary dunder methods (e.g.
    ``__add__`` / ``__radd__``) around *fallback_operator*, based on the
    pattern shown in the Python Standard Library numbers module.

    Args:
        fallback_operator: two-argument function used when operands cannot
            be combined natively; ``None`` yields ``(None, None)``.
        doc: docstring assigned to both generated methods.

    Returns:
        tuple: (forward, reverse) functions suitable for use as dunder
        methods on a modular-arithmetic class with ``_modulus`` and
        ``_value`` attributes.
    """
    if fallback_operator is None:
        return (None, None)
    def forward(a, b):
        # Same class: combine natively when moduli agree; otherwise fall
        # back to plain integer arithmetic on the coerced values.
        if type(a) == type(b):
            if a._modulus == b._modulus:
                # is_trusted presumably skips revalidation of the reduced
                # value -- confirm against the class constructor.
                return type(a)(
                    a._modulus, fallback_operator(a._value, b._value), is_trusted=True
                )
            else:
                return fallback_operator(int(a), int(b))
                # raise ValueError("inconsistent modulus values")
        elif isinstance(b, int):
            return fallback_operator(int(a), b)
        elif isinstance(b, float):
            return fallback_operator(float(a), b)
        elif isinstance(b, complex):
            return fallback_operator(complex(a), b)
        else:
            return NotImplemented
    forward.__name__ = "__" + fallback_operator.__name__ + "__"
    forward.__doc__ = doc
    def reverse(b, a):
        # Mirror of forward() with operand roles swapped; classifies the
        # left operand via the abstract numbers hierarchy.
        if type(a) == type(b):
            if a._modulus == b._modulus:
                return type(a)(
                    a._modulus, fallback_operator(a._value, b._value), is_trusted=True
                )
            else:
                return fallback_operator(int(a), int(b))
                # raise ValueError("inconsistent modulus values")
        elif isinstance(a, numbers.Integral):
            return fallback_operator(int(a), int(b))
        elif isinstance(a, numbers.Real):
            return fallback_operator(float(a), float(b))
        elif isinstance(a, numbers.Complex):
            return fallback_operator(complex(a), complex(b))
        else:
            return NotImplemented
    reverse.__name__ = "__r" + fallback_operator.__name__ + "__"
    reverse.__doc__ = doc
    return forward, reverse
8c35f6a8c42633fea5785e1547575812e31df63b
32,761
def format_float_dot_delimiter(value):
    """Render *value* with '.' as the decimal delimiter regardless of locale.

    Python's ``str`` is locale-independent, which is exactly what callers
    relying on a '.' separator need (e.g. under a 'de' locale where the
    delimiter would otherwise be ',').

    Parameters
    ----------
    value : float
        Arbitrary float value that should be formatted.

    Returns
    -------
    str
        String representation of the float with '.' as decimal delimiter.
    """
    return str(value)
6e6873c7ee7e1fc789f41eab2d4e292cdf90d125
32,763
def cal_avg_distance(points, a, b, d, threshold_inlier):
    """Average squared point-to-line residual and inlier rate for a line model.

    The residual for a point (x, y) is ``(a*x + b*y - d) ** 2``; a point is
    an inlier when its residual is below *threshold_inlier*.

    Parameters
    ----------
    points : array like [[x1, y1], [x2, y2], ...]
    a, b, d : float
        Parameters of the line model ``a*x + b*y = d``.
    threshold_inlier : float
        Threshold discriminating whether a point is an inlier.

    Returns
    -------
    avg_dis : float
        Mean squared residual.
    inlier_rate : float
        Fraction of inliers.
    """
    point_num = len(points)
    residuals = [(a * p[0] + b * p[1] - d) ** 2 for p in points]
    inlier_num = sum(1 for r in residuals if r < threshold_inlier)
    return sum(residuals) / point_num, inlier_num / point_num
172d5e266d28810e002af35e868262f5213141a9
32,764
from typing import Union
from typing import Dict
from typing import Optional
from typing import Any
import json
def format_default(
    message: Union[str, Dict], subject: Optional[str] = None
) -> Dict[str, Any]:
    """Build the default Slack attachment payload for an SNS message.

    :params message: SNS message body containing message/event
    :returns: formatted Slack message payload
    """
    attachments = {
        "fallback": "A new message",
        "text": "AWS notification",
        "title": subject if subject else "Message",
        "mrkdwn_in": ["value"],
    }
    fields = []
    if type(message) is dict:
        for key, val in message.items():
            rendered = json.dumps(val) if isinstance(val, (dict, list)) else str(val)
            fields.append(
                {"title": key, "value": f"`{rendered}`", "short": len(rendered) < 25}
            )
    else:
        fields.append({"value": message, "short": False})
    if fields:
        attachments["fields"] = fields  # type: ignore
    return attachments
9f2cef323c8a8793c072a8cf3541d96052b66fd9
32,765
def version_as_int(semver):
    """Convert a semver-ish string (e.g. 'v1.2.3' or '1.2.3-rc1') to an int.

    Any pre-release suffix after '-' is discarded, 'v'/'V' prefixes and the
    dots are removed, and the remaining digits are returned as an integer
    (e.g. 'v1.2.3' -> 123).
    """
    digits = semver.split('-')[0].replace('v', '').replace('V', '').replace('.', '')
    # The function's name promises an int; the original returned the raw
    # digit string, which broke numeric comparisons between versions.
    return int(digits)
b7ef6cad69cb049f4adb25b5fb9fd97459db6d97
32,766
import csv
def sma_passenger():
    """Return the SMA of international passengers of Thai air transportation.

    Reads 'airtraffict.csv' from the working directory, sums the passenger
    counts of BKK international rows, and averages over 3 (presumably three
    reporting periods -- confirm against the data file).

    Returns:
        list: [label, value-string] pairs for arrival, departure and
        transit passenger averages.
    """
    # Context manager closes the file deterministically; the original left
    # the handle to the garbage collector.
    with open('airtraffict.csv', newline='') as fh:
        rows = list(csv.reader(fh))
    arr_psng = dep_psng = tst_psng = 0
    for row in rows:
        if row[0] == 'BKK' and row[1] == 'Passenger' and row[2] == 'International':
            arr_psng += int(row[6])
            dep_psng += int(row[7])
            tst_psng += int(row[8])
    return [['arrive_inter_passenger', '%.0f' % (arr_psng / 3)],
            ['departure_inter_passenger', '%.0f' % (dep_psng / 3)],
            ['transit_passenger', '%.0f' % (tst_psng / 3)]]
b816a66334aef35ef7aaf542e93411cbf32938fd
32,768
def full_name(app):
    """Build an App's full name, prefixed by the parent App's name if any.

    :param app: App as returned by queries.get_apps()
    :type app: dict
    :return: 'parent/name' when a parent app exists, otherwise 'name'
    :rtype: string
    """
    parent = app.get('parentApp')
    if parent:
        return f"{parent['name']}/{app['name']}"
    return app['name']
c0b46e63ab662f9c3e33050e9a5f034400a45c58
32,771
def default_introspection_processing_hook(introspection_response, client, id_token):
    """Identity hook for customising token-introspection responses.

    Override to post-process the data returned from the token introspection
    endpoint; this default returns it unchanged.

    :param introspection_response: data from the introspection endpoint
    :param client: requesting client
    :param id_token: associated ID token
    :return: the (possibly modified) introspection response
    """
    return introspection_response
a3c051dd8ec1c11075d169365be6839806b9a9da
32,772
import random
def get_random_crop_coords(height, width, crop_height, crop_width):
    """Pick a random axis-aligned crop window inside an image.

    :param height: image height, int
    :param width: image width, int
    :param crop_height: crop height, int
    :param crop_width: crop width, int
    :return: (x1, y1, x2, y2) crop corner coordinates
    """
    # y is drawn before x to keep RNG consumption order stable.
    top = random.randint(0, height - crop_height)
    left = random.randint(0, width - crop_width)
    return left, top, left + crop_width, top + crop_height
3da397b33bba76df2312611731536d4b5ce5e674
32,774
import numpy
def default_sample_grid(vrange, res=8):
    """Compute a sample-grid resolution fine enough to capture point details.

    The smallest extent of *vrange* receives at least *res* elements and the
    other extents are scaled proportionally.  A 3-element list/tuple *res*
    is taken verbatim as the full resolution; a shorter sequence contributes
    its first element as the minimum resolution.
    """
    if isinstance(res, (list, tuple)):
        if len(list(res)) == 3:
            return list(res)
        res = res[0]
    minr = numpy.min(vrange)  # smallest bounding-box extent
    factor = float(res) / float(minr)
    RES = [int(factor * extent) for extent in (vrange[0], vrange[1], vrange[2])]
    print("Sample grid RES:", RES)
    return RES
67898ad7618a3e65bbc9f02c1d92f73fc4c00485
32,775
def _gbd(n):
    """Compute the second greatest power-of-two divisor of *n*.

    Returns 0 for n <= 0 and for odd n (whose greatest base-2 divisor is 1).
    """
    if n <= 0:
        return 0
    divisor = 1
    while n % divisor == 0:
        divisor <<= 1
    # divisor overshot one doubling past the greatest power-of-two divisor,
    # so shift back twice to land on the second greatest.
    return divisor >> 2
48e86f46306149d6785a1c232138ae8effea9143
32,776
from typing import List
def remove_multiples_from_list(arr: List[int]) -> List[int]:
    """Remove lesser divisors from a sequential ascending array.

    For every element, any smaller value that divides it is removed from the
    list (in place).

    :param arr: Sequentially ascending list of integers
    :return: The same list, with common divisors removed
    """
    # Iterate a snapshot: the original iterated reversed(arr) while mutating
    # arr, which skips elements unpredictably.
    for i in reversed(list(arr)):
        if i == 1:
            # Everything is a multiple of 1, so nothing to remove for it.
            continue
        for j in range(2, i):
            # Guard with membership: the original called arr.remove(j)
            # unconditionally, raising ValueError once j was already gone.
            if i % j == 0 and j in arr:
                arr.remove(j)
    return arr
b1ede0d19c247df9db348e34b11a1905d77fce84
32,777
import re
def collapse(string, character=" "):
    """Strip *character* from both ends of *string* and squeeze internal runs.

    Based on Ruby's stringex package
    (http://github.com/rsl/stringex/tree/master).
    """
    run_pattern = re.compile('(%s){2,}' % character)
    return run_pattern.sub(character, string.strip(character))
eccff644aea51af813396b6bf764f91cd4b9d36b
32,779
import requests
def isup(bot, trigger):
    """isup.me-style website status checker IRC command.

    Normalises the site named in the trigger to an http URL, fetches it and
    reports whether it responded.
    """
    site = trigger.group(2)
    if not site:
        return bot.reply("What site do you want to check?")
    if not site.startswith('http://') and not site.startswith('https://'):
        if '://' in site:
            protocol = site.split('://')[0] + '://'
            # The original wrote "%s".format(protocol), which never
            # substituted the protocol into the message.
            return bot.reply("Try it again without the {}".format(protocol))
        site = 'http://' + site
    if '.' not in site:
        site += ".com"
    try:
        response = requests.get(site)
    except Exception:
        # Treat any request failure as the site being down instead of
        # silently returning without a reply (the original's early return).
        response = None
    if response:
        bot.say(site + ' looks fine to me.')
    else:
        bot.say(site + ' looks down from here.')
8981ba145178ba6a214bcefd9c1750ebd2338d98
32,780
def get_src(node):
    """Return the node's source module, or None when unset or falsy."""
    if hasattr(node, "srcmodule"):
        return getattr(node, "srcmodule") or None
    return None
28d37a39df61353eec31248da47572df5a5f2c75
32,781
def min_med_max(x):
    """Return [min, median, max] of tensor *x* as Python scalars."""
    stats = (x.min(), x.median(), x.max())
    return [s.item() for s in stats]
b894c11e4bf6828d3627a6d79beec8c070c6657f
32,783
def Fraction2Decimal(numerator, denominator, span):
    """Return numerator/denominator as a decimal string with up to *span* fraction digits.

    Long division is carried out digit by digit; trailing digits are omitted
    once the remainder reaches zero (e.g. 1/2 with span=5 gives '0.5').
    """
    digits = [str(numerator // denominator), "."]
    remainder = numerator % denominator
    for _ in range(span):
        if remainder == 0:
            break
        remainder *= 10
        digits.append(str(remainder // denominator))
        remainder %= denominator
    return "".join(digits)
17b6ba4471740e245c55cf6b77ffafca5abb0e4a
32,784
def get_matching_firstlevel_children_from_node(node, child_tag):
    """Return the direct children of *node* whose tag matches *child_tag*.

    *node* is an XML element supporting ``findall`` (e.g. from
    xml.etree.ElementTree); for a plain tag, findall only searches
    first-level children.
    """
    return node.findall(child_tag)
7793a27a954c5159f037efbc5ae6902062750ff5
32,785
from typing import Dict
def finds_node_type(edge_info: Dict) -> Dict:
    """Classify the two nodes of an edge as ontology classes or entities.

    Args:
        edge_info: A dict of information needed to add an edge to a graph,
            for example: {'n1': 'subclass', 'n2': 'class',
            'relation': 'RO_0003302',
            'uri': ['https://www.ncbi.nlm.nih.gov/gene/',
                    'http://purl.obolibrary.org/obo/'],
            'edges': ['2', 'DOID_0110035']}

    Returns:
        A dictionary with keys 'cls1', 'cls2', 'ent1', 'ent2'; each value is
        either None or the concatenation of the node's uri prefix and its
        identifier, e.g. for a class-class edge:
            {'cls1': 'http://purl.obolibrary.org/obo/CHEBI_81395',
             'cls2': 'http://purl.obolibrary.org/obo/DOID_12858',
             'ent1': None, 'ent2': None}
    """
    first = edge_info['uri'][0] + edge_info['edges'][0]
    second = edge_info['uri'][1] + edge_info['edges'][1]
    n1_is_class = edge_info['n1'] == 'class'
    n2_is_class = edge_info['n2'] == 'class'
    nodes = {'cls1': None, 'cls2': None, 'ent1': None, 'ent2': None}
    if n1_is_class and n2_is_class:
        nodes['cls1'], nodes['cls2'] = first, second
    elif n1_is_class:
        nodes['cls1'], nodes['ent1'] = first, second
    elif n2_is_class:
        nodes['ent1'], nodes['cls1'] = first, second
    else:
        nodes['ent1'], nodes['ent2'] = first, second
    return nodes
a799aba56310a33a18d2cc6d0d978515eedebc97
32,787
import re
def extract_re_group(pattern):
    """Build a validator that replaces a value with *pattern*'s first captured group.

    The returned callable follows the (key, data, errors, context) validator
    signature; the value is left untouched when the pattern does not match
    at the start or has no capturing group.
    """
    def _validator(key, data, errors, context):
        current = data.get(key) or ''
        match = re.match(pattern, current)
        if not match:
            return
        try:
            data[key] = match.group(1)
        except IndexError:
            return
    return _validator
832c2ce3eb6e182bd22c8ee28e287dd1c207162f
32,789
def get_object_api_names(api_name, list_objs):
    """Extract the *api_name* field from every dict in *list_objs*.

    Missing keys yield None, mirroring ``dict.get``.
    """
    names = []
    for obj in list_objs:
        names.append(obj.get(api_name))
    return names
4ab4a1c5375e052f42511c2dbc438e1f157e5073
32,791
def fahrenheit_to_kelvin(a):
    """Convert a temperature from degrees Fahrenheit to Kelvin."""
    celsius = (a - 32.0) * 5 / 9
    return celsius + 273.15
b24ec8c7bb2620158a93ed34a272cdf76fddb927
32,792
def get_missed_cleavages(sequences: list, n_missed_cleavages: int) -> list:
    """Combine adjacent cleaved sequences into peptides with missed cleavages.

    Args:
        sequences (list of str): fully cleaved sequences, no missed cleavages.
        n_missed_cleavages (int): the number of missed cleavage sites.

    Returns:
        list (of str): each entry is the concatenation of
        n_missed_cleavages + 1 consecutive sequences.
    """
    missed = []
    for k in range(len(sequences) - n_missed_cleavages):
        # The original sliced [k-1:k+n], which wrapped around at k == 0 and
        # produced an empty first peptide; slice forward from k instead.
        missed.append(''.join(sequences[k:k + n_missed_cleavages + 1]))
    return missed
7b8f3f2c11eb22d0311cefcfd59b9b5d5f1a4c78
32,793
def mkvec(ftr, seq, window, pos):
    """Build a feature vector for position *pos* of *seq* using a centred window.

    The window of size *window* around ``seq[pos]`` is padded with '+' on the
    left and '-' on the right where it runs past the sequence ends, and the
    padded string is passed to the feature function *ftr* (which maps a
    string to a dict like ``{dimension_index: value}``).
    """
    half = window / 2.0
    left = seq[max(0, int(pos - half)):pos]
    right = seq[pos + 1:min(int(pos + half) + 1, len(seq))]
    padded = ('+' * int(window / 2 - len(left))
              + left + seq[pos] + right
              + '-' * int(half - len(right)))
    return ftr(padded)
4f3f9d2a717b4905a42a15ad21432f4b87e83240
32,794
def _set_square(mul, square):
    """Return a squaring function, deriving it from *mul* when *square* is None.

    Used when initialising powering methods: if no dedicated squaring routine
    is supplied, fall back to ``x * x`` via *mul*.
    """
    # `is None` instead of `== None`: identity is the correct (and PEP 8)
    # test for the None sentinel.
    if square is None:
        return lambda x: mul(x, x)
    return square
67689dc3fd5c282fd63e6cda9eb6b97f7d6b92ce
32,795
def urljoin(*pieces):
    """Join URL components into a relative URL.

    Strips slashes at component boundaries to prevent doubled slashes when
    joining subpaths; empty components are dropped.
    """
    trimmed = (piece.strip('/') for piece in pieces)
    return '/'.join(piece for piece in trimmed if piece)
6f41e5ae515ae6cee3e19e36a94f45444c71b0ba
32,796
def _hop(a):  # NB: redefined in MPyC setup if mix of 32-bit/64-bit platforms enabled
    """Simple and efficient pseudorandom program counter hop for Python 3.6+.

    Compatible among all 64-bit platforms and among all 32-bit platforms,
    but not between a mix of the two (hash widths differ).
    """
    return hash(frozenset(a))
c28db047fe5bd0b7c2770847222c4dcb8a1178a7
32,797
def get_objects_name(objects):
    """Collect object names, stripping '.NNN'-style numeric suffixes.

    A name whose characters at [-5:-3] equal '}.' (i.e. it ends in '}.NNN')
    has its last four characters removed.

    Parameters:
        objects (list): objects exposing a ``name`` attribute.

    Returns:
        list: cleaned object names.
    """
    names = []
    for obj in objects:
        name = obj.name
        names.append(name[:-4] if name[-5:-3] == '}.' else name)
    return names
f0ff4abb68c54c536338aa48eca0a3f6d57c0ae5
32,798
import math
def degrees(angle):
    """Convert *angle* from radians to degrees."""
    return angle * 180 / math.pi
3cdf03bb5fd34cce80a53f90ed39a2aacb2b6bda
32,801
def savedata(fullpathfilename, data):
    """Save a pandas dataframe as CSV.

    :param fullpathfilename: Full path to the output file
    :param data: pandas dataframe to save
    :return: True if successful, False if not (the error is printed)
    """
    try:
        data.to_csv(fullpathfilename, header=True)
    except Exception as e:
        print('Exception occurred: {} '.format(e))
        return False
    return True
1a2ab34c04144c95764cebed7c25e54b1f326ec9
32,803
def translate_keypoints(keypoints, translation):
    """Shift keypoints by an (x, y) translation.

    # Arguments
        keypoints: Numpy array of shape ``(num_keypoints, 2)``.
        translation: length-2 sequence with the x, y offsets.

    # Returns
        Numpy array of translated keypoints.
    """
    return keypoints + translation
02acd5ca99e86712e103be81f3b7c2362996446b
32,804
def conv2d_size_out(size, kernel_size, stride):
    """Output spatial size of a conv layer (no padding, no dilation).

    :param size: input size along one dimension
    :param kernel_size: convolution kernel size
    :param stride: convolution stride
    :return: resulting output size
    """
    span = size - kernel_size
    return span // stride + 1
bf91de32122a32520f15cf4bd87079ad0cb98a26
32,806
def relu(x):
    """Element-wise ReLU activation: max(x, 0).

    Args:
        x: array-like, any shape.

    Returns:
        Same type and shape as *x* with negative entries zeroed.
    """
    return (x > 0) * x
dd01c5cea3e2c77bd41c9f92dab37f5cf7890b10
32,807
def can_open_file(gcode_filepath):
    """Report whether *gcode_filepath* names a file that can be opened for reading.

    Non-string arguments and unopenable paths both yield False.
    """
    if not isinstance(gcode_filepath, str):
        return False
    try:
        with open(gcode_filepath, 'r'):
            pass
    except IOError:
        return False
    return True
6ad51e2d67b89886edb7ca65fa25ca1f7bdfe536
32,808
def cl_arguments_classificiation(parser):
    """Add classifier-specific continual-learning CLI options to *parser*.

    Helper of parse_cmd_arguments.  The function name's spelling is kept
    unchanged for backward compatibility with existing callers.

    Args:
        parser: Object of class :class:`argparse.ArgumentParser`.

    Returns:
        The argument group added to *parser*.  Note that several options are
        registered directly on the parser rather than the group, mirroring
        the original behaviour.
    """
    agroup = parser.add_argument_group('Classifier continual learning options.')
    agroup.add_argument('--class_beta', type=float, default=0.01,
                        help='Trade-off for the CL regularizer for the hnet ' +
                             'in the replay model.')
    agroup.add_argument('--train_class_embeddings', action='store_true',
                        help='Train embeddings of classifier hnet.')
    agroup.add_argument('--infer_output_head', action='store_true',
                        help='Infer the output head when this option is ' +
                             'is activated and cl_scenario == 3. Otherwise ' +
                             'the output head grows in size when tasks are ' +
                             'added. This option does not have an effect if ' +
                             'cl_scenario != 3.')
    agroup.add_argument('--class_incremental', action='store_true',
                        help='Weather or not we want class incremental ' +
                             ' i.e. one class at a time learning. ' +
                             'otherwise we learn one task (with multiple ' +
                             'classes) at a time.')
    agroup.add_argument('--upper_bound', action='store_true',
                        help='Train the classifier with "replay" data i.e ' +
                             'real data. This can be regarded an upper bound.')
    # The help text below contained a corrupted control character in the
    # original source; it has been restored to plain text.
    agroup.add_argument('--infer_with_entropy', action='store_true',
                        help='Infer the task id by choosing the model with ' +
                             'lowest entropy. We iterate over all tasks ' +
                             'and compare the entropies of the different' +
                             'models.')
    parser.add_argument('--soft_temp', type=float, default=1.,
                        help='Scale the softmax temperature when inferring ' +
                             'task id through the entropy.')
    agroup.add_argument('--soft_targets', action='store_true',
                        help='Use soft targets for classification in general.')
    parser.add_argument('--hard_targets', action='store_true',
                        help='Use soft or hard targets for replayed data.')
    agroup.add_argument('--dont_train_main_model', action='store_true',
                        help='Dont train the main model - this could be ' +
                             'interesting if you want to e.g. only train ' +
                             'hypernetwork embeddings.')
    agroup.add_argument('--test_batch_size', type=int, default=128,
                        help='Test batch size.')
    parser.add_argument('--fake_data_full_range', action='store_true',
                        help='Compute data over all preivous tasks.')
    parser.add_argument('--online_target_computation', action='store_true',
                        help='When using "cl_reg=0", then this option will ' +
                             'ensure that the targets are computed on the ' +
                             'fly, using the hypernet weights acquired after ' +
                             'learning the previous task. Note, this ' +
                             'option ensures that there is alsmost no memory ' +
                             'grow with an increasing number of tasks ' +
                             '(except from an increasing number of task ' +
                             'embeddings). If this option is deactivated, ' +
                             'the more computationally efficient way is ' +
                             'chosen of computing all main network weight ' +
                             'targets (from all previous tasks) once before ' +
                             'learning a new task.')
    parser.add_argument('--l_rew', type=float, default=0.5,
                        help='Weight the loss between real and fake data. ' +
                             'l_rew < 0.5, real data loss is amplified, ' +
                             'l_rew > 0.5, fake data loss is amplified. ')
    return agroup
ff2a9c6ae3494c3550eba3bcbd5dd77982ba3e09
32,809
def format_box_wider2tfrecord(x, y, w, h, real_h, real_w):
    """Convert a WIDER-style box (x, y, w, h) to normalised corner rates.

    Records the rate of the min point and of the max point relative to the
    image size; raw and converted values are printed for debugging.

    :param x: box left edge in pixels
    :param y: box top edge in pixels
    :param w: box width in pixels
    :param h: box height in pixels
    :param real_h: image height in pixels
    :param real_w: image width in pixels
    :return: (x_min_rate, y_min_rate, x_max_rate, y_max_rate)
    """
    print('orig: ', x, y, w, h, real_h, real_w)
    xmin_rate = x / real_w
    ymin_rate = y / real_h
    xmax_rate = (x + w) / real_w
    ymax_rate = (y + h) / real_h
    print('rate: ', xmin_rate, ymin_rate, xmax_rate, ymax_rate)
    return xmin_rate, ymin_rate, xmax_rate, ymax_rate
d6613576fa3b35df01ec9111f4c3730eb93f9fee
32,810
def _filter_attributes(attr_names, attr_values, sel):
    """Return the filenames whose attributes match every pair in *sel*.

    Args:
        attr_names: dict mapping attribute name -> column index.
        attr_values: iterable of (filename, attrs) pairs.
        sel: dict of attribute name -> required value.
    """
    matching = []
    for filename, attrs in attr_values:
        if all(attrs[attr_names[name]] == value for name, value in sel.items()):
            matching.append(filename)
    return matching
11e29b4727c0d403bc1a01379046f824355869d9
32,812
import torch
def calculate_moment_list(moment_num, en_list, normalize=True):
    """Calculate the n'th moment (up to moment_num) of a given energies list.

    Same function as in the dataset.

    Args:
        moment_num (int): highest moment to compute (must be >= 1).
        en_list: energies; converted to a torch.Tensor if needed.
        normalize (bool): when True, moments >= 3 are divided by the second
            moment raised to the same power.

    Returns:
        list of torch scalar tensors, one entry per moment.

    NOTE(review): `first` (the mean) is computed but the centred variants
    are commented out below, so these are raw (non-central) moments --
    confirm that matches the dataset's definition.
    """
    res = []
    if not torch.is_tensor(en_list):
        en_list = torch.Tensor(en_list)
    first = torch.mean(en_list)
    res.append(torch.mean(en_list))
    if moment_num == 1:
        return res
    # Second moment: mean of squared values.
    l = []
    for val in en_list:
        # l.append((val - first) ** 2)
        l.append((val) ** 2)
    second = torch.mean(torch.Tensor(l))
    res.append(second)
    if moment_num == 2:
        return res
    # Higher moments, optionally normalised by second ** i.
    for i in range(3, moment_num + 1):
        l = []
        for val in en_list:
            if normalize:
                # t = (val - first) ** i
                t = (val) ** i
                s = second ** i
                r = t / s
                l.append(r)
            else:
                # t = (val - first) ** i
                t = (val) ** i
                l.append(t)
        tmp = torch.mean(torch.Tensor(l))
        res.append(tmp)
    return res
fb6a0c12258e4f835e4ac11d5518e3f55d28f586
32,813
import os
def get_last_modified(abs_file_path):
    """Return the last-modification timestamp (epoch seconds) of a file."""
    return os.path.getmtime(abs_file_path)
e91a797030a93b2ccb7949fd439e9d93685a80e4
32,815
import os
import configparser
def configGlobal(*args):
    """Load an INI configuration file for the app.

    The path components in *args* are joined with ``os.path.join`` and the
    resulting file is read as UTF-8.

    Returns:
        configparser.ConfigParser: the loaded configuration.
    """
    config_path = os.path.join(*args)
    config = configparser.ConfigParser()
    config.read(config_path, encoding="utf-8")
    return config
97a342cbe1fc1d5cc437771bd458f403d121f8aa
32,817
def net_to_linkdict(net):
    """Index a Net's links by their (from, to) node pair.

    Parameters:
        net - Net object as returned by parse_net_file()

    Return value:
        dict mapping (init_node, term_node) -> Link
    """
    return {(link.init_node, link.term_node): link for link in net.links}
96f8ad92486078e48ed6d914b9d21cc3ebb96141
32,818
def find_unique(arr):
    """Return the outlier element of *arr*.

    The minimum is returned when it is strictly below both the largest and
    second-largest values; otherwise the maximum is returned.
    """
    ordered = sorted(arr)
    smallest = ordered[0]
    largest = ordered[len(ordered) - 1]
    # len-2 deliberately wraps to -1 for single-element input, matching the
    # original indexing.
    second_largest = ordered[len(ordered) - 2]
    if smallest < largest and smallest < second_largest:
        return smallest
    return largest
6aabd875e0a9969e4db4d20b004cb3f161eee969
32,826
def check_transaction_exceptions(trade_data: dict) -> list:
    """Check trade data for Binance Smart Chain decentralized exchanges.

    Validates native gas balance, gas limit and token allowance, returning a
    list of human-readable warning strings (empty when everything looks ok).
    """
    exception_list = []
    gas_limit = trade_data["gas_limit"]
    gas_cost = trade_data["gas_cost"]
    amount = trade_data["amount"]
    side = trade_data["side"]
    base = trade_data["base"]
    quote = trade_data["quote"]
    balances = trade_data["balances"]
    allowances = trade_data["allowances"]
    swaps_message = f"Total swaps: {trade_data['swaps']}" if "swaps" in trade_data else ''
    # Sufficient native BSC balance to cover gas?
    bsc_balance = balances["BSC"]
    if bsc_balance < gas_cost:
        exception_list.append(f"Insufficient BSC balance to cover gas:"
                              f" Balance: {bsc_balance}. Est. gas cost: {gas_cost}. {swaps_message}")
    # A sell spends the base token; otherwise the quote token is spent.
    # (The original compared side against the literal "side", making the
    # base branch unreachable.)
    trade_token = base if side == "sell" else quote
    trade_allowance = allowances[trade_token]
    # Gas limit sanity check.
    gas_limit_threshold = 21000
    if gas_limit < gas_limit_threshold:
        exception_list.append(f"Gas limit {gas_limit} below recommended {gas_limit_threshold} threshold.")
    # Sufficient token allowance for the traded amount?
    if trade_allowance < amount:
        exception_list.append(f"Insufficient {trade_token} allowance {trade_allowance}. Amount to trade: {amount}")
    return exception_list
c52e4e9ed5c9928b7d967b9e0548990769238f46
32,827
def get_all_descendants(db, parent):
    """Return all (non-retired) descendants of the parent.

    Parameters
    ----------
    db : MongoDatabase
        Database from which request document data can be retrieved.
    parent : str
        The parent for which all descendants are desired.

    Returns
    -------
    list(str)
        De-duplicated descendant ids (order not guaranteed).

    Throws
    ------
    ValueError
        If there is no request document for the specified parent.
    """
    request_doc = db.requests.find_one({"_id": parent})
    if not request_doc:
        raise ValueError(parent + " has no request document")
    descendants = []
    for child in request_doc.get("children") or []:
        if db.requests.find_one({"_id": child}).get("retired"):
            continue
        descendants.append(child)
        descendants += get_all_descendants(db, child)
    # De-duplicate: a node may be reachable through several children.
    return list(set(descendants))
a94f6eed1f316cc5aa25ef8e7b1db148aaee05d3
32,830
def parseWR(str):
    """Parse a wavelength-range token like 'wr=[1.0, 2.0]' into [1.0, 2.0]."""
    inner = str.strip('wr=[ ]"')
    return [float(part.strip(' ')) for part in inner.split(',')]
72ce70fb8a1f012b440e93fbaba7a30088b1b306
32,832
def choose_one(foo):
    """Pick one representative from each sub-list of *foo*, keep scalars
    as-is, and ensure the result starts with 0.

    For each sequence element: 0 is chosen when its first entry is 0,
    otherwise its middle entry.  Scalars pass through unchanged.

    input: [[3, 4], 7, 9, [11, 12, 13, 14], [16, 17, 18], [20, 21]]
    output: [0, 4, 7, 9, 13, 17, 21]
    """
    chosen = foo[:]
    for i in range(len(foo)):
        try:
            if len(foo[i]):
                if foo[i][0] == 0:
                    # The original wrote ``loo[i] == 0`` -- a no-op
                    # comparison -- where an assignment was clearly intended.
                    chosen[i] = 0
                else:
                    chosen[i] = foo[i][len(foo[i]) // 2]
        except TypeError:
            # Scalar entries have no len(); leave them untouched.
            pass
    if chosen and chosen[0] != 0:
        chosen.insert(0, 0)
    return chosen
089ceaf80fea8c349ec828e24f575acee1153a6a
32,833
def three_one(three):
    """
    Converts three-letter amino acid codes to one-letter.

    Arguments:
      three (str): Three letter amino acid code; AMBER and CHARMM
        nomenclature for alternative protonation states is supported, but
        lost by conversion.
    Returns:
      str: Corresponding one-letter amino acid code

    Raises:
      KeyError: when *three* is not a recognised code.
    """
    # One-letter code -> accepted three-letter spellings (incl. AMBER /
    # CHARMM protonation-state variants).
    groups = {
        "A": ("ALA",),
        "C": ("CYS", "CYM", "CYX"),
        "D": ("ASP", "ASH"),
        "E": ("GLU", "GLH"),
        "F": ("PHE",),
        "G": ("GLY",),
        "H": ("HIS", "HID", "HIE", "HIP", "HSD", "HSE", "HSP"),
        "I": ("ILE",),
        "K": ("LYS", "LYN"),
        "L": ("LEU",),
        "M": ("MET",),
        "N": ("ASN",),
        "P": ("PRO",),
        "Q": ("GLN",),
        "R": ("ARG",),
        "S": ("SER",),
        "T": ("THR",),
        "V": ("VAL",),
        "W": ("TRP",),
        "Y": ("TYR",),
    }
    lookup = {code: one for one, codes in groups.items() for code in codes}
    return lookup[three.upper()]
9de2a582fa57ce1f3dd2e26b91f1a86a2c1f11cb
32,834
import re def _extract_readnum(read_dict): """Extract read numbers from old-style fastqs. Handles read 1 and 2 specifications where naming is readname/1 readname/2 """ pat = re.compile(r"(?P<readnum>/\d+)$") parts = pat.split(read_dict["name"]) if len(parts) == 3: name, readnum, endofline = parts read_dict["name"] = name read_dict["readnum"] = readnum else: read_dict["readnum"] = "" return read_dict
f20c69df01b15a8411a81476f2df99f9f6358436
32,835
def warmup():
    """Handles AppEngine warmup requests.

    Returns an empty response body.
    """
    return ''
47746597912ede5f1a2c8234aef690056529477a
32,836
def x_ian(x, word):
    """
    Given a string x, returns True if all the letters in x are contained in
    word in the same order as they appear in x.

    >>> x_ian('srini', 'histrionic')
    True
    >>> x_ian('john', 'mahjong')
    False
    >>> x_ian('dina', 'dinosaur')
    True
    >>> x_ian('pangus', 'angus')
    False

    x: a string
    word: a string
    returns: True if word is x_ian, False otherwise
    """
    # Iterative subsequence scan: advance a cursor into x whenever the
    # current letter of word matches; x is contained (in order) iff the
    # cursor reaches the end of x.
    pos = 0
    for letter in word:
        if pos == len(x):
            break
        if letter == x[pos]:
            pos += 1
    return pos == len(x)
def96c5dc36df5ae8a17bde26878eefcc0874f74
32,838
def unquote(s):
    """Adds quotes to a string.

    NOTE(review): despite its name, this function *wraps* s in double
    quotes rather than removing them.
    """
    return '"{}"'.format(s)
be5e94d16c96da61f7302f52bfdee2dc5376102e
32,839
def next_RK2(h, b, func, vars, i):
    """
    Gets the 'next rk2' value as part of the main rk2 function.

    h    : step size
    b    : weight of the second slope evaluation (a = 1 - b)
    func : derivative function taking the three entries of *vars*
    vars : current state values (length-3 sequence expected by *func*)
    i    : index of the variable being advanced
    """
    a = 1 - b
    # Standard two-stage RK parameterisation; the b == 0 branch avoids a
    # division by zero (alpha/beta are then unused by the weighted sum).
    alpha = beta = 1 / (2 * b) if b != 0 else 1

    slope_now = func(vars[0], vars[1], vars[2])

    # Intermediate state: variable i takes a (scaled) Euler step, all the
    # other variables are advanced by alpha * h.
    mid = [value + alpha * h for value in vars]
    mid[i] = vars[i] + beta * h * slope_now

    slope_mid = func(mid[0], mid[1], mid[2])
    return vars[i] + h * (a * slope_now + b * slope_mid)
05ae16f1cdb4dc67c665b23e7b4e3aa8f6c9350a
32,840
import time
import hashlib
import json


def get_location_request(lon, lat, taxi_id, operator, apikey):
    """Payload to send to geotaxi to update taxi location."""
    payload = {
        'timestamp': int(time.time()),
        'operator': operator,
        'taxi': taxi_id,
        'lat': lat,
        'lon': lon,
        'device': 'phone',
        'status': 'free',
        'version': '2',
    }
    # The signature is the SHA-1 of the field values concatenated in this
    # fixed order, followed by the API key.
    signed_fields = ('timestamp', 'operator', 'taxi', 'lat', 'lon',
                     'device', 'status', 'version')
    to_sign = ''.join(str(payload[field]) for field in signed_fields) + apikey
    payload['hash'] = hashlib.sha1(to_sign.encode('utf-8')).hexdigest()
    return json.dumps(payload).encode('ascii')
5e77d892606e4fdbe2db692006876788f0575c3e
32,841
def Thompson(model, _, __, n=100, rng=None):
    """
    Thompson sampling policy.

    Samples f from the model (via ``model.sample_f(n, rng)``) and exposes
    the sample's ``get`` attribute as the acquisition.
    """
    sampled = model.sample_f(n, rng)
    return sampled.get
2c98af5f8c415a24172ca0e1ad08e8c6889d15a1
32,842
import struct

# Pre-compiled format: unsigned 32-bit, little endian.
_U32_LE = struct.Struct('<L')


def hexptr2bin(hexptr):
    """
    Pack an integer pointer value into its 4-byte representation.

    Input must be a int
    output : bytes in little endian
    """
    return _U32_LE.pack(hexptr)
c9907c1249b196537f6b242c79c58c6fc69c8940
32,843
def projects_get():
    """
    List all projects

    :rtype: List[Project]
    """
    # Stub handler -- the real implementation is not wired up yet.
    return 'do some magic!'
24089b1b9cb56900f4a76cfce96d50de76cbeff7
32,844
import argparse def _get_parser(): """ Builds an ``argparse`` parser for the ``msdss-dotenv`` command line tool. Returns ------- :class:`argparse.ArgumentParser` An ``argparse`` parser for ``msdss-dotenv``. Author ------ Richard Wen <rrwen.dev@gmail.com> Example ------- .. jupyter-execute:: :hide-output: from msdss_base_dotenv.cli import _get_parser parser = _get_parser() parser.print_help() """ # (_get_parser_parsers) Create main parser and sub parsers parser = argparse.ArgumentParser(description='Manages encrypted .env files') subparsers = parser.add_subparsers(title='commands', dest='command') # (_get_parser_init) Add init command init_parser = subparsers.add_parser('init', help='create env file and key') # (_get_parser_set) Add set command set_parser = subparsers.add_parser('set', help='set an env var') set_parser.add_argument('name', type=str, help='env var name to set') set_parser.add_argument('value', type=str, help='env var value to set') # (_get_parser_del) Add del command del_parser = subparsers.add_parser('del', help='delete an env var') del_parser.add_argument('name', type=str, help='env var name to delete') # (_get_parser_clear) Add clear command clear_parser = subparsers.add_parser('clear', help='clear env file and key') # (_get_parser_file_key) Add file and key arguments to all commands for p in [parser, init_parser, set_parser, del_parser, clear_parser]: p.add_argument('--env_file', type=str, default='./.env', help='path of .env file') p.add_argument('--key_path', type=str, default=None, help='path of key file') # (_get_parser_out) Return the parser out = parser return out
2dfffb8175c2c47ac81457d18f394da3a7af4c50
32,845
import torch


def ClassificationAccuracy(output, target):
    """
    ClassificationAccuracy on a given batch

    Args:
        output(:obj:`torch.Tensor`) - predicted scores of shape
            BATCHES x SCORES FOR DIFFERENT CLASSES
        target(:obj:`torch.Tensor`) - expected class indices of shape BATCHES

    Returns:
        Classification Accuracy averaged over the batch of images
    """
    # Predicted class = index of the maximum score along the class axis.
    predicted_classes = output.data.argmax(dim=1)
    num_correct = (predicted_classes == target).sum().item()
    batch_size = output.size(0)
    return num_correct / batch_size
024efd8715492e7c5a2984b1846840c297edfe27
32,848
def transition_model(corpus, page, damping_factor):
    """
    Return a probability distribution over which page to visit next,
    given a current page.

    With probability `damping_factor`, choose a link at random
    linked to by `page`. With probability `1 - damping_factor`, choose
    a link at random chosen from all pages in the corpus.
    """
    # IDIOM FIX: the original reused the name `page` as a loop variable,
    # shadowing the parameter; distinct names are used throughout here.
    num_pages = len(corpus)

    # A page with no outgoing links is treated as linking to every page.
    links = corpus[page] or corpus.keys()

    # Baseline probability of jumping to any page uniformly at random.
    random_jump = (1 - damping_factor) / num_pages

    distribution = {p: random_jump for p in corpus}
    for linked in links:
        distribution[linked] += damping_factor / len(links)
    return distribution
7b7c92bf2738b5ad1ad9ab78466ab7051470f1cc
32,849
def be(entry: object) -> str:
    """
    Return a stringified version of object replacing Nones with empty strings
    """
    # Any falsy value (None, 0, '', empty containers) collapses to ''.
    if not entry:
        return ''
    return str(entry).strip()
1c54dff6c3137bdeb511e149f177fe189234a70c
32,850
import requests


def _get_url(url, type_=None, cookies=None):
    """Get content on given HTTP(S) url using Wget user agent.

    This method uses :mod:`requests` to make the request.  When `type_` is
    `'rss'` the `Accept` header requests the common feed media types.

    :type url: str
    :param url: URL to fetch from
    :type type_: str
    :param type_: A string indicating the type of resource.
    :type cookies: dict
    :param cookies: Cookies to send with the request
    :returns: Response object
    :rtype: requests.Response
    """
    headers = {
        'User-Agent': 'Wget/1.13.4 (linux-gnu)',
        'Connection': 'Close',
        'Proxy-Connection': 'Keep-Alive',
    }
    # RSS requests advertise the feed media types they accept.
    if type_ == 'rss':
        headers['Accept'] = ('application/rss+xml,application/rdf+xml,'
                             'application/atom+xml,text/xml')
    return requests.get(url, headers=headers, cookies=cookies)
8eccfdb20bd8783091d8f79cbb200e0f579fa348
32,851
import os


def ishdf5(path):
    """Is the file an HDF5 file?

    Decided purely by filename extension ('.h5', case-insensitive).
    """
    # tables.is_hdf5_file(path)
    # tables.is_pytables_file(path)
    ext = os.path.splitext(path)[1]
    return ext.lower() == '.h5'
6c818b35c14811df75abbfb14da4a07ee1b0d4ba
32,854
def _check_lfp_analysis(d): """if key LFP analysis is in the dict, return true""" if "_lfp_analysis" in d.keys(): return True else: return False
e3a1d830ad1018b3ee3eee2b0d714432538214c9
32,855
import os


def get_sdf_files_not_in_db(db_connection, sdf_fn_in_folder):
    """
    Returns the sdf_file names (full path) which are not already in the DB.

    :param db_connection: sqlite3.Connection, database connection
    :param sdf_fn_in_folder: list of string, filenames of the sdf-files.
    """
    rows = db_connection.execute("SELECT filename FROM sdf_file").fetchall()
    known = {str(row[0]) for row in rows}
    # Compare on basenames only, but return the original (full) paths.
    missing = [fn for fn in sdf_fn_in_folder if os.path.basename(fn) not in known]
    return sorted(missing)
463ba1f1599ef8f5306572a50dabdf942f32640c
32,856
def fixture_perform_upgrades_at_unlock():
    """Fixture flag: perform user DB upgrades as normal during user unlock."""
    return True
acb1de4738d86ccbdbfcd6d9bea35caf50aed95d
32,857
def get_username_random(self):
    """
    Gets random username

    Picks one row from ``usernames`` with unfollow_count 0, chosen at
    random by SQLite; returns the row, or False when none remains.
    """
    row = self.follows_db_c.execute(
        "SELECT * FROM usernames"
        " WHERE unfollow_count=0"
        " ORDER BY RANDOM() LIMIT 1"
    ).fetchone()
    return row if row else False
30d2c0b030b959b61880fd89a2d715a5c67edbec
32,858
import os
import shlex


def render_audio(midi_file_path, sound_font):
    """Render midi to audio

    Synthesizes *midi_file_path* with fluidsynth using *sound_font*,
    writing a .wav next to the midi file.

    Returns:
        str: path of the rendered .wav file.
    """
    # split file name and extention
    name, extention = midi_file_path.rsplit(".", 1)

    # set file names
    audio_file = name + ".wav"

    # SECURITY FIX: quote the paths so spaces and shell metacharacters in
    # file names cannot break (or inject into) the shell command.
    cmd = "fluidsynth -F %s -O s16 -T wav %s %s 1> /dev/null" % (
        shlex.quote(audio_file), shlex.quote(sound_font), shlex.quote(midi_file_path))
    os.system(cmd)

    return audio_file
72364588ff4da47a7474bdd76fc04ad5b12fa6e6
32,860
def count_args(x):
    """Counts number of unique arguments (org and response combined)."""
    combined = x[['org', 'response']].stack()
    return combined.nunique()
756db73b2065681ae5f53dfb1e9f6eedf7b8bdeb
32,861
def bitLeftShift(binIn, n):
    """
    Input:
    - binIn: a binary number stored as a string. The most significant
        bit is stored as the first character in the string and so forth.
    - n: the number of bits to be shifted and n >= 0.
    Output: bin(binIn << n)
    """
    # Drop leading zeros; shifting appends n zeros on the right.
    significant = binIn.lstrip("0")
    if not significant:
        # Input was all zeros (or empty).
        return "0"
    return significant + "0" * n
3d21e667d9e983c479c2ad1c0c7937978a9396c8
32,863
def compare_ltiv_data(expected, actual):
    """
    Helper to test the LENGTH|TYPE|ID|VALUE data.

    It is packed in a dictionary like {ID: (VALUE, TYPE)}.

    Returns True when *actual* contains exactly the IDs of *expected*,
    each with matching VALUE and TYPE; False otherwise (including when an
    expected ID is absent).  Neither input dict is modified.
    """
    # BUG FIX: the original popped entries out of the caller's *actual*
    # dict (mutating it) and raised KeyError on a missing ID; work on a
    # copy and treat a missing ID as a mismatch instead.
    remaining = dict(actual)
    for key, val in expected.items():
        entry = remaining.pop(key, None)
        if entry is None or entry[0] != val[0] or entry[1] != val[1]:
            return False
    # Any leftover entries in *actual* also count as a mismatch.
    return not remaining
8c03f9d756a51f52b3298965c5a7f0c4d061d5c2
32,864
def avarage(num1, num2):
    """
    (number, number) -> number

    Return the avarage of num1 and num2.

    >>> avarage(10,20)
    15.0
    >>> avarage(2.5, 3.0)
    2.75
    """
    # NOTE(review): name keeps the original (misspelled) public spelling.
    total = num1 + num2
    return total / 2
275f7808a650f2c139a0f121d23e8044c59cf69b
32,865
import struct def _decode_int(fp): """Decode an int tag :type fp: A binary `file object` :rtype: int """ return struct.unpack('>i', fp.read(4))[0]
9badc80814a1ce4e7bb6894b1625ca44d75ba433
32,867
import re


def sort_alphanumeric(it):
    """
    Sorts the given iterable in the way that is expected.

    E.g. test.txt, test_1.txt, test_2.txt, test_11.txt

    :param iterable it: Iterable to be sorted
    :return iterable: Sorted iterable
    """
    def natural_key(value):
        # Alternate digit/non-digit chunks; numeric chunks compare as ints.
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split('([0-9]+)', value)]

    return sorted(it, key=natural_key)
f7685d4e54c92002864a1c9e4384a97a4187bad3
32,869
def MultipleSameInput(Input, alphabet_guess_already):
    """Return True when Input is already in alphabet_guess_already."""
    return Input in alphabet_guess_already
a17ef3fc95582936212d23d03337e742b7b89abe
32,870
def get_average_uniqueness(indicator_matrix):
    """
    Advances in Financial Machine Learning, Snippet 4.4. page 65.

    Compute Average Uniqueness from an indicator matrix.

    :param indicator_matrix: (pd.DataFrame) Indicator binary matrix
    :return: (pd.Series) Average uniqueness per column (0 where a column
        is never active)
    """
    # Concurrency: number of active labels per timestamp (row sum).
    concurrency = indicator_matrix.sum(axis=1)
    # Uniqueness: each indicator divided by its row's concurrency.
    uniqueness = indicator_matrix.div(concurrency, axis=0)
    # Average only over active entries; columns never active become 0.
    return uniqueness[uniqueness > 0].mean().fillna(0)
48b9e09f274d05456742b3aff4a36e4e37ac9085
32,871
def get_xml_tag_dict(xml_tree, tag, attribute):
    """Searches an XML tree for a tag.

    Under this tag it gets all elements and returns them as a dict with
    "attribute" as the key, and the text for the element as the value.

    Args:
        xml_tree (xml.etree.ElementTree): XML-tree to search through
        tag (str): An XML tag
        attribute (str): An XML attribute

    Returns:
        dict: Key,Value = tag attribute content, element text.
        Eg: {"da": "danish"...}
    """
    container = xml_tree.find(tag)
    return {child.attrib.get(attribute): child.text for child in container}
4d4bc983a282ac962fe55f917446f757cbd89c55
32,874
def mknj2i(item):
    """
    Transforms "mknj" notation into tensor index order for the ERI.

    Args:
        item (str): an arbitrary transpose of "mknj" letters;

    Returns:
        4 indexes.
    """
    # Position of each letter within the canonical "mknj" order.
    positions = {letter: idx for idx, letter in enumerate("mknj")}
    return tuple(positions[letter] for letter in item)
190e15bec44503e012cf04c5ec784d2b3d744aac
32,875
import math
import string


def alphabet_enumeration(length):
    """
    Return list of letters : A, B, ... Z, AA, AB, ...

    See mapentity/leaflet.enumeration.js

    NOTE(review): every label uses the same fixed width
    (ceil(log(length, 26))), so for length > 26 *all* labels have two or
    more letters (the first is "AA") -- confirm against the JS counterpart.
    """
    if length == 0:
        return []
    if length == 1:
        return ["A"]

    width = int(math.ceil(math.log(length, 26)))
    letters = string.ascii_uppercase
    labels = []
    for index in range(length):
        label = ""
        remainder = index
        # Build the label right-to-left, base 26.
        for _ in range(width):
            label = letters[remainder % 26] + label
            remainder //= 26
        labels.append(label)
    return labels
57a0e980c15b480a6f62018d5898c93f278dca93
32,876
def msec_to_units(time_ms, resolution):
    """Convert milliseconds to BLE specific time units.

    resolution is the unit size in microseconds; the result is truncated
    to an integer number of units.
    """
    microseconds = time_ms * 1000
    return int(microseconds / resolution)
7654d0ddda09514fedb4ff0f8d67194b0f4c52ae
32,877
import pickle


async def load_users():
    """
    Loads users from 'users.pickle' file

    :return: dictionary {id:User}
    """
    # NOTE: pickle.load is only safe on trusted files.
    with open("users.pickle", "rb") as pickled:
        users = pickle.load(pickled)
    print("Users loaded")
    return users
d0f4e0c745f4dac362373a40cb8ea7684d068f64
32,879
def strip(val):
    """
    If val is string-like (has .strip), return it with leading and
    trailing spaces removed; otherwise return it unchanged.

    Note: only space characters are stripped, not tabs or newlines.
    """
    return val.strip(' ') if hasattr(val, 'strip') else val
506dc3b0f38263949c1c7034cd0c5695b3f987a7
32,880
def clean_word(word):
    """
    word (str): word to clean

    Returns word with specific special characters removed
    """
    punctuation = {',', '!', '?', '.', '(', ')', '"'}
    return ''.join(ch for ch in word if ch not in punctuation)
f2881ffbb05ee77c10c7061715fb3273b192b741
32,882
def big_l_array(p, lp):
    """
    Generate L array using pattern and L' array, Theorem 0.2.2, see proof

    :param p: the pattern
    :param lp: the L' array
    :return: the L array
    """
    l = [0] * len(p)
    l[1] = lp[1]
    # L is the running maximum of L' (see proof of Theorem 0.2.2).
    running_max = lp[1]
    for i in range(2, len(p)):
        running_max = max(running_max, lp[i])
        l[i] = running_max
    return l
b4159189965e9dd8db451c3f9aa637e8306cc0ba
32,883
import ast


def is_valid_python(tkn: str) -> bool:
    """Determine whether tkn is a valid python identifier

    :param tkn:
    :return:
    """
    try:
        module = ast.parse(tkn)
    except SyntaxError:
        return False
    if len(module.body) != 1:
        return False
    statement = module.body[0]
    # A lone identifier parses as one expression whose value is a Name.
    return isinstance(statement, ast.Expr) and isinstance(statement.value, ast.Name)
b1c04002f1fab770e11477eb380d2dfdc5a986a3
32,884
import torch


def make_pad_mask(lengths: torch.Tensor, le : bool = True) -> torch.Tensor:
    """Make mask tensor containing indices of padded part.

    Args:
        lengths (torch.Tensor): Batch of lengths (B,).
        le: when True, positions *inside* each sequence are 1 and padded
            positions are 0; when False the mask is inverted (1 on padding).

    Returns:
        torch.Tensor: int32 mask of shape (B, max_len).

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
    """
    batch = int(lengths.size(0))
    longest = int(lengths.max().item())
    # Column index grid, one row per batch element.
    positions = torch.arange(0, longest, dtype=torch.int64, device=lengths.device)
    positions = positions.unsqueeze(0).expand(batch, longest)
    limits = lengths.unsqueeze(-1)
    comparison = positions < limits if le else positions >= limits
    return comparison.type(torch.int32)
43b32a4dc7b1053ad80a8d6c47ea39d1835d5a71
32,886