content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def check_your_guess(ans, your_guess, turns, history):
    """Validate a guessed letter and return the remaining chances.

    :param ans: str, the answer word
    :param your_guess: str, the letter the user guessed
    :param turns: int, chances left before this guess
    :param history: collection of every letter guessed so far
    :return: int, chances left after this guess
    """
    is_single_letter = your_guess.isalpha() and len(your_guess) <= 1
    if not is_single_letter:
        print('illegal format.')
        return turns
    if your_guess in ans:
        print('You are correct!')
        # NOTE(review): a *new* correct guess also costs a chance here,
        # matching the original behaviour — confirm that is intended.
        if your_guess not in history:
            turns -= 1
    else:
        print('There is no ' + your_guess + "'s in the word.")
        turns -= 1
    return turns
5b43858b8ce2af7f9683118fc318bdfbb702ffe8
586,887
def get_objects(client, keys):
    """Fetch multiple objects from configured memcached.

    :param client: memcache client object
    :param keys: list of key strings
    :return: dict of found objects, or None when nothing was found
        (including on error or timeout)
    """
    found = client.get_multi(keys)
    if not found:
        return None
    return found
b4abd213578595ce63fc5e7ad141bf9754c25770
488,651
import warnings


def get_weight(target_easy_negative, easy_negative, hard_negative):
    """Compute scaling factors for hard and easy negatives.

    Args:
        target_easy_negative: desired total weight of easy negatives.
        easy_negative: current total weight of easy negatives.
        hard_negative: current total weight of hard negatives.

    Returns:
        (w_h, w_e): scale factors for hard and easy negatives that achieve
        ``target_easy_negative``.
    """
    w_e = target_easy_negative / easy_negative
    # Weight removed from the easy negatives is moved onto the hard ones.
    surplus = easy_negative - target_easy_negative
    if surplus < 0:
        warnings.warn(
            "Transfering weight from hard negative to easy negative")
    w_h = 1 + surplus / hard_negative
    return w_h, w_e
56d8bcbcfc21145a164ca1882d245225e13cf5c7
44,647
import copy


def regularize(l):
    """Return a copy of ``l`` with zero entries replaced by a small prior.

    If necessary mix in a small prior for regularization.

    :param l: sequence of numbers
    :return: new list where every 0 is replaced by 1e-3
    """
    epsilon = 1e-3
    # Build a new list directly instead of copying and mutating by index.
    return [epsilon if value == 0 else value for value in l]
cde50c63fef8b22e800daeecda29998e62813850
631,913
def get_N_teachers(school_type, N_classes):
    """Return the number of teachers for a school, given type and class count."""
    staffing = {
        'primary': N_classes + int(N_classes / 2),
        'primary_dc': N_classes * 2,
        'lower_secondary': int(N_classes * 2.5),
        'lower_secondary_dc': N_classes * 3,
        'upper_secondary': int(N_classes * 2.85),
        'secondary': int(N_classes * 2.5),
        'secondary_dc': int(N_classes * 2.5),
    }
    return staffing[school_type]
7e931a57554e5fa1b81d00864ed6458c17dfce6b
350,741
from typing import List


def get_options(command: dict) -> List[str]:
    """Extract ``--``-style options from Click's information dictionary
    retrieved via ``to_info_dict``."""
    result: List[str] = []
    for param in command["params"]:
        for opt in param["opts"]:
            if "--" in opt:
                result.append(opt)
    return result
6526e5b64a641e0e05fce4c02c356cb39a9621c1
356,704
def key_kicking_dgs_to_end(name):
    """Sort key for faculty names that pushes single-token entries
    (e.g. "DGS"/"TBD") to the end of the list."""
    if " " in name:
        return name
    return "ZZZZZ" + name
c4d5b4e4aa74ef1212d9d6aa41b9d7bbfada786d
389,806
def user_in_group(user_entry, group_name):
    """Check whether a user belongs to a group.

    Note this returns False for the special group "public" (and for users
    that are not signed in), even though all users are technically in
    "public".
    """
    try:
        group_names = [group.name for group in user_entry.groups]
    except AttributeError:
        # ``user_entry`` has no ``groups`` — user is not signed in.
        return False
    return group_name in group_names
47ccbb2f991da8a3659a84882c0828610bd1c83c
552,767
def DependencyNameFromFullString(full_string: str) -> str:
    """Return only the dependency name, dropping the version part."""
    parts = full_string.split()
    return parts[0]
33ff38473ee0b23d4cb305d1208e6ca459c0b503
136,195
def _is_connected(parent, child, dag): """ Whether two node are connected. Args: parent (BaseNode): Parent node. child (BaseNode): Child node. dag (DagGraph): Graph instance. Returns: bool, True or False. """ return parent.name in dag.precursor_table.get(child.name)
2ebc17022e4f926b775bfc621f712b2808056dde
343,692
def is_logs_synced(mst, slv):
    """Function: is_logs_synced

    Description: Checks whether the Master binary log file name and log
        position match the Slave's relay log file name and log position.

    Arguments:
        (input) mst -> Master instance.
        (input) slv -> Slave instance.
        (output) True or False -> True is returned if logs are in sync.
    """
    if mst.gtid_mode and slv.gtid_mode:
        return mst.exe_gtid == slv.exe_gtid
    # Non-GTID server: compare log file name and execution position.
    return mst.file == slv.relay_mst_log and mst.pos == slv.exec_mst_pos
9ee81bd9cfa66fb450bdd289c9437ffc9bf349bb
125,969
def GetProjectUriPath(project):
    """Return the URI path of a GCP project."""
    return 'projects/' + project
774c373be9ec63e593c20a03f2673f5e05f08b9c
220,717
def exclude_bad(data, columns, values):
    """Exclude rows of data with poor quality control flags.

    eg. data_new = exclude_bad(data, ['Practical Salinity Corrected QC Flag '], [0,4,9])

    :arg data: the data set
    :type data: pandas DataFrame

    :arg columns: column names with quality control information
    :type columns: list of strings

    :arg values: the quality control values to be excluded
    :type values: list of integers

    :returns: new data frame with bad data excluded.
    """
    filtered = data
    for column in columns:
        for bad_value in values:
            filtered = filtered[filtered[column] != bad_value]
    return filtered
22e1c73ea66e0da6d93b1899456d13f1046ca2c7
108,377
def has_c19_scope(scopes):
    """ Check if the COVID-19 GLIDE number or HRP code is present """
    for scope in scopes:
        code = scope.code.upper()
        if scope.type == "1" and scope.vocabulary == "1-2":
            if code == "EP-2020-000012-001":
                return True
        if scope.type == "2" and scope.vocabulary == "2-1":
            if code == "HCOVD20":
                return True
    return False
1d9c96d093450bd4ab0200eb190302b36eb593f7
685,311
import json


def loop_casts(casts: list) -> dict:
    """Load every cast file and de-duplicate by first timeseries timestamp.

    Loops over all casts and removes duplicates by keying each cast on the
    first timestamp in its timeseries list.

    :param casts: list of file paths
    :return: dict mapping the first timestamp of each cast to the cast dict
    """
    deduplicated = {}
    for path in casts:
        with open(path) as handle:
            cast = json.load(handle)
        key = cast["properties"]["timeseries"][0]["time"]
        deduplicated[key] = cast
    return deduplicated
057e64e1f4f76e337f34d644d0896bfc4967c778
214,213
def get_labels(data):
    """ Returns the list of labels for the given issue or PR data. """
    labels = []
    for edge in data["node"]["labels"]["edges"]:
        labels.append(edge["node"]["name"])
    return labels
c6a1f26f812cab3c8512d01b09948eef78445ae6
292,894
def dlr_list(client_session):
    """
    This function returns all DLR found in NSX
    :param client_session: An instance of an NsxClient Session
    :return: returns a tuple, the first item is a list of tuples with item 0
             containing the DLR Name as string and item 1 containing the dlr
             id as string. The second item contains a list of dictionaries
             containing all DLR details
    """
    edges = client_session.read_all_pages('nsxEdges', 'read')
    summaries = []
    details = []
    for edge in edges:
        if edge['edgeType'] != "distributedRouter":
            continue
        summaries.append((edge['name'], edge['objectId']))
        details.append(edge)
    return summaries, details
a7cee3130bb5d178d3c254b998cb4ade1c7803fc
627,208
def schedule_url(year, stype, week):
    """
    Returns the NFL.com XML schedule URL. `year` should be an integer,
    `stype` should be one of the strings `PRE`, `REG` or `POST`, and
    `gsis_week` should be a value in the range `[0, 17]`.
    """
    base = 'http://www.nfl.com/ajax/scorestrip?'
    if stype == 'POST':
        week += 17
        # NFL.com you so silly
        if week == 21:
            week += 1
    return '%sseason=%s&seasonType=%s&week=%s' % (base, year, stype, week)
0d6c6a2721fcffa558a2b8db8c21d2ee2d2dc0d2
422,046
def _all_na_or_values(series, values): """ Test whether every element in the series is either missing or in values. This is fiddly because isin() changes behavior if the series is totally NaN (because of type issues) Example: x = pd.DataFrame({'a': ['x', np.NaN], 'b': [np.NaN, np.NaN]}) x.isin({'x', np.NaN}) Args: series (pd.Series): A data column values (set): A set of values Returns: bool: True or False, whether the elements are missing or in values """ series_excl_na = series[series.notna()] if not len(series_excl_na): out = True elif series_excl_na.isin(values).all(): out = True else: out = False return out
f6c3f05a7dc2ad03047b1529cdcd00f6dd091899
42,168
def fmt_mate(mate_score):
    """Format a mate value as a proper string."""
    if mate_score >= 0:
        # mate in X for white
        return "+M{:d}".format(abs(mate_score))
    # mate in X for black
    return "-M{:d}".format(abs(mate_score))
21b17fe934dd7f05b75743bc86b61a58dee45325
376,448
def tag_search(html_document):
    """
    Collect every tag in an HTML document.

    HTML comments (``<!`` ...) are skipped. Tags are returned verbatim,
    including both angle brackets.

    :param html_document: the HTML text to scan
    :return: all the tags as a list of strings
    """
    tags = []
    index = 0
    length = len(html_document)
    while index < length:
        # Guard ``index + 1`` so a trailing '<' cannot raise IndexError.
        if (html_document[index] == '<' and index + 1 < length
                and html_document[index + 1] != '!'):
            tag = ''
            # Copy characters up to the closing '>' (bounded by document end
            # so an unterminated tag cannot run off the string).
            while index < length and html_document[index] != '>':
                tag += html_document[index]
                index += 1
            if index < length:
                tag += html_document[index]  # append the closing bracket
            tags.append(tag)
        else:
            # Not a tag opening; skip this character.
            index += 1
    return tags
7fe9f33ef4b5cadb0f6a2555e4132948a2eab271
185,139
def surround_text(text, left='>>', right='<<', pad=2, preserve_whitespace=True):
    """
    Surround ``text`` with the ``left`` and ``right`` markers — if color is
    not enabled this is a good way to draw attention to a block of text.

    :param text: the text to surround
    :type text: str
    :param left: text that goes to the left of ``text``
    :type left: str
    :param right: text that goes to the right of ``text``
    :type right: str
    :param pad: whitespace padding placed between ``left``, ``right`` and ``text``
    :type pad: int
    :param preserve_whitespace: preserve existing whitespace before and after
        text, such that indenting remains unchanged.
    :type preserve_whitespace: bool
    :return: formatted string
    :rtype: str
    """
    gap = ' ' * pad
    ltext = left + gap
    rtext = gap + right
    if preserve_whitespace:
        # Re-pad so the decorated text keeps its original indentation.
        leading = len(text) - len(text.lstrip())
        trailing = len(text) - len(text.rstrip())
        ltext = ' ' * max(leading - len(ltext), 0) + ltext
        rtext = rtext + ' ' * max(trailing - len(rtext), 0)
    return '{}{}{}'.format(ltext, text.strip(), rtext)
3dfbfda2b8ebb03d93d8e9bb3e5972ea38bc536a
159,123
import typing


def get_paths(spec: dict) -> typing.List[str]:
    """This function extracts a list of paths from an OpenAPI specification.

    :param spec: a dict with an OpenAPI specification
    :return: a list of OpenAPI path strings (e.g.: /pets, /pets/{pet_id})
    """
    # Iterating a dict yields its keys directly.
    return [str(path) for path in spec['paths']]
cee9381b068f812f4912053a1d7808b775967661
509,962
def remove_words(words_to_remove, list_of_strings):
    """
    words_to_remove: list of words to remove
    list_of_strings: list of strings from which to remove the words

    >>> remove_words(['bye'], ['hello, bye', 'bye, hi'])
    ['hello, ', ', hi']
    """
    result = list_of_strings
    for word in words_to_remove:
        result = [entry.replace(word, '') for entry in result]
    return result
a4c8d9efd3e80908ec231be66b111ca5a27e372e
327,765
import itertools def _flatten_sentences(sentences): """ Flatten a list of sentences into a concatenated list of tokens. Adapted from https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists. >>> s1 = 'the gunman kill police'.split() >>> s2 = 'police killed the gunman'.split() >>> _flatten_sentences([s1, s2]) ['the', 'gunman', 'kill', 'police', 'police', 'killed', 'the', 'gunman'] :param sentences: a list of sentences. :return: a list of tokens. """ return list(itertools.chain.from_iterable(sentences))
f6acab1f578a8dfa23b4de9225f4a0efee4705be
483,357
def get_datatype_metranet(datatype):
    """
    Maps the config file radar data type name into the corresponding
    metranet data type name and Py-ART field name.

    Parameters
    ----------
    datatype : str
        config file radar data type name

    Returns
    -------
    metranet type : dict
        dictionary containing the metranet data type name and its
        corresponding Py-ART field name

    Raises
    ------
    ValueError
        if the datatype has no metranet equivalent
    """
    # (metranet name, Py-ART field name) per config datatype — a lookup
    # table replaces the 12-branch if/elif chain.
    mapping = {
        'dBZ': ('ZH', 'reflectivity'),
        'dBZv': ('ZV', 'reflectivity_vv'),
        'ZDR': ('ZDR', 'differential_reflectivity'),
        'uRhoHV': ('RHO', 'uncorrected_cross_correlation_ratio'),
        'uPhiDP': ('PHI', 'uncorrected_differential_phase'),
        'V': ('VEL', 'velocity'),
        'W': ('WID', 'spectrum_width'),
        'CLT': ('CLT', 'clutter_exit_code'),
        'ST1': ('ST1', 'stat_test_lag1'),
        'ST2': ('ST2', 'stat_test_lag2'),
        'WBN': ('WBN', 'wide_band_noise'),
        'MPH': ('MPH', 'mean_phase'),
    }
    try:
        datatype_metranet, field_name = mapping[datatype]
    except KeyError:
        raise ValueError(
            'ERROR: Metranet fields do not contain datatype '+datatype)
    return {datatype_metranet: field_name}
bf48063305b9815d28c3037e5d499687215de09f
242,982
def _val_to_byte_list(number, num_bytes, big_endian=True): """ Converts an integer into a big/little-endian multi-byte representation. Similar to int.to_bytes() in the standard lib but returns a list of integers between 0 and 255 (which allows for bitwise arithmetic) instead of a bytearray. """ if number > (2**(8*num_bytes))-1: raise ValueError("Unsigned integer %d does not fit into %d bytes!" % (number, num_bytes)) byte_list = [] for b in range(num_bytes): val = (number >> (8*b)) & 0xff if big_endian: byte_list.insert(0, val) else: byte_list.append(val) return byte_list
e4788217bdc6cfabc57d4c6d2bf290c887eaf19a
151,223
import torch


def get_ndc_rays(H, W, focal, near, rays_o, rays_d):
    """
    Transform rays from world coordinate to NDC.
    NDC: Space such that the canvas is a cube with sides [-1, 1] in each axis.
    For detailed derivation, please see:
    http://www.songho.ca/opengl/gl_projectionmatrix.html
    https://github.com/bmild/nerf/files/4451808/ndc_derivation.pdf

    In practice, use NDC "if and only if" the scene is unbounded (has a large depth).
    See https://github.com/bmild/nerf/issues/18

    Inputs:
        H, W, focal: image height, width and focal length
            (focal is indexed as focal[0]/focal[1], i.e. a per-axis pair —
            TODO confirm (x, y) ordering against callers)
        near: (N_rays) or float, the depths of the near plane
        rays_o: (N_rays, 3), the origin of the rays in world coordinate
        rays_d: (N_rays, 3), the direction of the rays in world coordinate

    Outputs:
        rays_o: (N_rays, 3), the origin of the rays in NDC
        rays_d: (N_rays, 3), the direction of the rays in NDC
    """
    # Shift ray origins to near plane
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d

    # Store some intermediate homogeneous results
    ox_oz = rays_o[..., 0] / rays_o[..., 2]
    oy_oz = rays_o[..., 1] / rays_o[..., 2]

    # Projection
    o0 = -1. / (W / (2. * focal[0])) * ox_oz
    o1 = -1. / (H / (2. * focal[1])) * oy_oz
    o2 = 1. + 2. * near / rays_o[..., 2]

    d0 = -1. / (W / (2. * focal[0])) * (rays_d[..., 0] / rays_d[..., 2] - ox_oz)
    d1 = -1. / (H / (2. * focal[1])) * (rays_d[..., 1] / rays_d[..., 2] - oy_oz)
    # Chosen so origin + direction lands on the far plane in NDC depth.
    d2 = 1 - o2

    rays_o = torch.stack([o0, o1, o2], -1)  # (B, 3)
    rays_d = torch.stack([d0, d1, d2], -1)  # (B, 3)

    return rays_o, rays_d
6bba680569bbe2401b0c3462f28faaa2867240ad
611,591
def nrlist(sequence):
    """Python 2.7 compatible list deduplication.

    Keeps the first occurrence of each item, preserving order. Works for
    unhashable items too (membership test against a list).
    """
    unique = []
    # Plain loop: a comprehension used only for its side effect builds and
    # discards a throwaway list.
    for item in sequence:
        if item not in unique:
            unique.append(item)
    return unique
24af53cb426898c48cacc7772f582899dca9b4d1
347,425
def find_two_entry_product(numbers):
    """
    Given the list of numbers, return the product of two that sum to 2020.

    numbers (list): A list of integers.

    Return (int): The product of two entries in numbers that sum to 2020.

    Raises:
        ValueError if no two entries in numbers sums to 2020.

    >>> l = [1721, 675, 299]
    >>> find_two_entry_product(l)
    514579
    """
    for position, first in enumerate(numbers):
        for second in numbers[position + 1:]:
            if first + second == 2020:
                return first * second
    raise ValueError('No two entries in numbers sums to 2020')
abf225ec9f676ecb18d7663830f60232c92c25f7
320,270
import re


def total_balls(overs_str, str_ignore=('DNB', 'TDNB'), ignore_value=0):
    """
    Calculate total balls bowled from overs formatting.

    Parameters
    ----------
    overs_str: string
        Overs bowled in traditional format overs.balls
    str_ignore: iterable of str
        Strings to ignore - indicators that the player didn't bowl
        (tuple default avoids the mutable-default-argument pitfall)
    ignore_value: int
        Value returned for strings matched in str_ignore

    Returns
    -------
    int: The number of balls bowled as an int
    """
    if overs_str in str_ignore:
        return ignore_value
    # Raw string: '\.' in a plain string relies on an invalid escape.
    match = re.search(r'^([0-9]*)\.([0-5]*)$', overs_str)
    if match is None:
        # Whole overs only: six balls per over.
        return int(overs_str) * 6
    overs = int(match.group(1)) * 6
    balls = int(match.group(2))
    return overs + balls
084be59818abac177269fdce1a6911cd8b9d130a
322,805
def class_as_descriptor(name):
    """Return the JVM descriptor for the class `name`"""
    if name.endswith(';'):
        return name
    return 'L' + name + ';'
128c344a73920d32dbbec65afd87cf4733920298
585,636
def week_day_on_first_auroran(dek_year: int) -> int:
    """Returns the Gregorian week day for the first Auroran of a given year

    Args:
        dek_year (int): Year.

    Return:
        int: The week day. Example: 1 = Sunday; 2 = Monday; 3 = Tuesday ...
        7 = Saturday.
    """
    offset = (1
              + 5 * (dek_year % 4)
              + 4 * (dek_year % 100)
              + 6 * (dek_year % 400))
    return offset % 7 + 1
3efb77275cf76fc8a03334ddce574b483e454d67
551,597
def count_digits(value):
    """
    Count of digits of given number.

    :param value: integer value (can also be negative)
    :returns: count of digits of given number

    >>> count_digits(0)
    1
    >>> count_digits(1024)
    4
    >>> count_digits(1234567890)
    10
    >>> count_digits(-1234567890)
    10
    """
    # str() of a non-negative int has exactly one character per digit and
    # handles 0 without a special case — replaces the division loop.
    return len(str(abs(value)))
0ec0b8d6f7d4ea81b4f570370a0418f239e13b9b
356,443
def get_ends(bed3):
    """Get the BED coordinates of the last nucleotide of each record."""
    last_nt = bed3.copy()
    # BED end is exclusive, so the last nucleotide starts at end - 1.
    last_nt['chrom_start'] = last_nt['chrom_end'] - 1
    return last_nt.sort_values(by=['chrom', 'chrom_start'])
c67b4b1a220fa9c0ed71191d5cd7da29acf429f0
614,133
import yaml


def render_manifest(key_value_pairs):
    """Returns formatted cloud-init from dictionary k:v pairs"""
    body = yaml.dump(key_value_pairs,
                     default_flow_style=False,
                     default_style='\'')
    return "#cloud-config\n" + body
5a2a5f6adf9d10c93dc2761f759cfc45377ffd41
185,594
import torch
import math


def cal_GauProb(mu, sigma, x):
    """
    Return the probability of "data" given MoG parameters "mu" and "sigma".

    Arguments:
        mu (BxGxC) - The means of the Gaussians.
        sigma (BxGxC) - The standard deviation of the Gaussians.
        x (BxC) - A batch of data points (coordinates of position).

    Return:
        probabilities (BxG): The probability of each point under each
            Gaussian's distribution for the corresponding mu/sigma index.
            (Assume the dimensions of the output are independent of each
            other, so the joint density is a product over C.)
    """
    x = x.unsqueeze(1).expand_as(mu)  # BxC -> Bx1xC -> BxGxC
    # Per-dimension normal density: exp(-((x-mu)/sigma)^2 / 2) / (sigma * sqrt(2*pi)).
    prob = torch.rsqrt(torch.tensor(2*math.pi)) * torch.exp(-((x - mu) / sigma)**2 / 2) / sigma
    # Independence across C: multiply the per-dimension densities.
    return torch.prod(prob, dim=2)
3237d9dbc89968804dfe66722ef62f86f5000c4d
656,970
import re


def parseLatLon(text):
    """Degrees/minutes/seconds notation (including cardinal direction)
    parsed and converted to a float value; returned in a two-element tuple
    along with the single-character cardinal direction.

    :param text: e.g. ``"49 30 36 N"`` — degrees, minutes, seconds and a
        cardinal direction separated by non-word characters
    :return: (value, cardinal)
    """
    # Raw string: '[^\w\.]' in a plain literal relies on invalid escapes;
    # inside a character class '.' needs no escaping.
    d, m, s, c = re.split(r'[^\w.]', text)
    value = float(d) + float(m) / 60 + float(s) / 3600
    return value, c
2b2e6698a4d0849b731c6b2e01127691731d7afb
191,146
def _public_attrs(obj): """Return a copy of the public elements in ``vars(obj)``.""" return { key: val for key, val in vars(obj).copy().items() if not key.startswith('_') }
a492f2fb9fc60f6551f376be468a0b88bbc490d0
141,073
import importlib


def fix_module_name(module, cls):
    """
    Turns a.b._C.C into a.b.C if possible.

    :param module: the module
    :type module: str
    :param cls: the class name
    :type cls: str
    :return: the (potentially) updated tuple of module and class name
    """
    parts = module.split(".")
    if parts[-1].startswith("_"):
        candidate = ".".join(parts[:-1])
        try:
            # Only accept the shorter module if it exposes the class.
            getattr(importlib.import_module(candidate), cls)
        except Exception:
            pass
        else:
            module = candidate
    return module, cls
7d84b2fde58ed6b581f48088dfb30c9f08794508
538,851
def hydraulic_resistance_coefficient(Eg, Re, ke, d):
    """
    :param Eg: coefficient of hydraulic efficiency. If no data: Eg=0.95 for
        pipelines with a scraper system; Eg=0.92 for pipelines without a
        scraper system. Dimensionless
    :param Re: Reynolds number. Dimensionless
    :param ke: equivalent pipe roughness, m
    :param d: pipe inner diameter, m
    :returns: hydraulic resistance coefficient, dimensionless
    """
    roughness_term = 158 / Re + 2 * ke / d
    return 1.05 / Eg ** 2 * 0.067 * roughness_term ** 0.2
d67c3feaf882b2e583491d30de5733ce11852324
116,099
def unNormalizeImage(image, mean=(0.485, 0.456, 0.406), STD=(0.229, 0.224, 0.225)):
    """
    Unnormalizes a numpy array given mean and STD.

    NOTE: the input array is modified in place and also returned.

    :param image: image to unnormalize, channels-first (C, H, W)
    :param mean: per-channel mean (tuple default avoids the
        mutable-default-argument pitfall)
    :param STD: per-channel standard deviation
    :return: the unnormalized image (same array object as the input)
    """
    for channel in range(image.shape[0]):
        image[channel, :, :] = (image[channel, :, :] * STD[channel]) + mean[channel]
    return image
9fabf2a2114362cf751bb99b578f69940bfe594b
426,772
import torch


def hsv2rgb(hsv):
    """Convert a batch of HSV images to RGB.

    H, S and V input range = 0 ÷ 1.0
    R, G and B output range = 0 ÷ 1.0

    :param hsv: tensor of shape (B, 3, H, W) with channels H, S, V
    :return: tensor of shape (B, 3, H, W) with channels R, G, B
    """
    eps = 1e-7  # NOTE(review): unused in this implementation
    bb, cc, hh, ww = hsv.shape
    H = hsv[:, 0, :, :]
    S = hsv[:, 1, :, :]
    V = hsv[:, 2, :, :]
    # Grayscale: zero saturation everywhere means R = G = B = V.
    # NOTE(review): this branch only triggers when *every* pixel has S == 0;
    # mixed images go through the chromatic path.
    if (S == 0).all():
        R = V
        G = V
        B = V
    # Chromatic data
    else:
        var_h = H * 6
        var_h[var_h == 6] = 0  # H must be < 1
        var_i = var_h.floor()
        var_1 = V * (1 - S)
        var_2 = V * (1 - S * (var_h - var_i))
        var_3 = V * (1 - S * (1 - (var_h - var_i)))
        # Defaults correspond to the last hue sector (var_i == 5):
        # r = V, g = var_1, b = var_2.
        var_r = V
        var_g = var_1
        var_b = var_2
        # Select per-pixel channel values for each of the six hue sectors.
        # var_i == 0: r = V, g = var_3, b = var_1
        var_r = torch.where(var_i == 0, V, var_r)
        var_g = torch.where(var_i == 0, var_3, var_g)
        var_b = torch.where(var_i == 0, var_1, var_b)
        # var_i == 1: r = var_2, g = V, b = var_1
        var_r = torch.where(var_i == 1, var_2, var_r)
        var_g = torch.where(var_i == 1, V, var_g)
        var_b = torch.where(var_i == 1, var_1, var_b)
        # var_i == 2: r = var_1, g = V, b = var_3
        var_r = torch.where(var_i == 2, var_1, var_r)
        var_g = torch.where(var_i == 2, V, var_g)
        var_b = torch.where(var_i == 2, var_3, var_b)
        # var_i == 3: r = var_1, g = var_2, b = V
        var_r = torch.where(var_i == 3, var_1, var_r)
        var_g = torch.where(var_i == 3, var_2, var_g)
        var_b = torch.where(var_i == 3, V, var_b)
        # var_i == 4: r = var_3, g = var_1, b = V
        var_r = torch.where(var_i == 4, var_3, var_r)
        var_g = torch.where(var_i == 4, var_1, var_g)
        var_b = torch.where(var_i == 4, V, var_b)
        R = var_r
        G = var_g
        B = var_b
    return torch.stack([R, G, B], 1)
4035d22ab6dcbf8fe62e6352dd15b0b5a1a60b5e
502,932
import random


def pick_event(event_intervals):
    """
    Returns a randomly selected index of an event from a list of intervals
    proportional to the events' probabilities

    :param event_intervals: list of event intervals
    """
    cumulative = []
    running_total = 0.0
    for interval in event_intervals:
        running_total += interval
        cumulative.append(running_total)
    # Draw a point along the cumulative distribution.
    target = random.random() * running_total
    last_index = len(event_intervals) - 1
    chosen = 0
    while target > cumulative[chosen] and chosen < last_index:
        chosen += 1
    return chosen
40856e66a272ce9a86b49b647bd85148cfd33703
663,989
def find_idx(words, idx):
    """
    Looks for the named index in the list.

    Tests for the index name surrounded by quotes first; if it is not
    present, any resulting error will report the index name without quotes.
    """
    quoted = '"' + idx + '"'
    try:
        return words.index(quoted)
    except ValueError:
        return words.index(idx)
4c76fcd752de0545cea77def672d1555c071815f
19,415
def find_in_list_of_lists(list_of_lists, value):
    """
    Find value in list of lists and return the index of the first list
    containing it.

    :param list_of_lists:
    :param value:
    :return: index of the first matching sublist, or None when absent
    """
    return next(
        (index for index, sublist in enumerate(list_of_lists)
         if value in sublist),
        None,
    )
99116e77e7bab189aaf4b04784d1145e56940d45
130,011
import re


def get_contents(fname):
    """
    Return the contents of the given file (or, for a list input, the lines
    joined with newlines). Strips comments (text from ';' to end of line)
    and surrounding whitespace.
    """
    if isinstance(fname, list):
        contents = '\n'.join(fname)
    else:
        # Context manager guarantees the file is closed even if read fails.
        with open(fname, "r") as fp:
            contents = fp.read()
    # Raw string for the regex — '\s' in a plain literal is an invalid
    # escape. Drops ';' comments together with preceding whitespace.
    return re.sub(r"\s*;(.*?)\n", "\n", contents).strip()
1ec8cce0c8e4a719691b8c7f802eb3a9152e34cc
633,318
def _int_bin_length(x): """Determine how many bits are needed to represent an integer.""" # Rely on the fact that Python's "bin" method prepends the string # with "0b": https://docs.python.org/3/library/functions.html#bin return len(bin(x)[2:])
84a5bbc6653931f3f158549ae99e194a4e6654b4
239,717
def render_name_to_key(name: str) -> str:
    """Convert the name of a render target into a valid dictionary key."""
    return "_".join(name.split("."))
e95135f659bc1700b102e51115e2be0955e7cb89
276,521
import csv


def read_csv(csvfile):
    """ Read CSV file and index its rows by the unique 'id' column. """
    with open(csvfile, 'r') as data:
        return {row['id']: row for row in csv.DictReader(data)}
f3da5f6075349a378fe49fe03b12005085b88e47
174,473
def _edge_func(G): """Returns the edges from G, handling keys for multigraphs as necessary. """ if G.is_multigraph(): def get_edges(nbunch=None): return G.edges(nbunch, keys=True) else: def get_edges(nbunch=None): return G.edges(nbunch) return get_edges
ca488c5bde3b193ec308a99f66eb184bee6acd5c
111,676
def descendants(term_id, ont_id_to_og, ont_id="17"):
    """
    Get the descendant terms for a given input term.

    Parameters
    ----------
    term_id: The ontology term ID.
    ont_id_to_og: The ontology graph ID map
    ont_id: The ontology ID

    Returns
    -------
    The term IDs of the descendant terms in the ontology.
    """
    graph = ont_id_to_og[ont_id]
    return graph.recursive_relationship(
        term_id,
        recurs_relationships=['inv_is_a', 'inv_part_of'],
    )
2900edc89a50fe4dd27db34dcf68da3688e924e6
494,669
from pathlib import Path
import re
import json


def word_counter(json_flag, filename):
    """Counts unique words in a file; outputs common or all words to stdout.

    Word occurrences are counted case-insensitively. By default only the
    "most common" words are printed, one per line as ``word - count``,
    where common means the word makes up > 0.1% of the words in the file.
    With ``json_flag`` set, all words and their frequencies are printed as
    a single JSON object instead.

    This program will produce incorrect results for text containing
    letters with diacritics; input files are assumed to be English text
    (ASCII or UTF-8 encoded).

    Args:
        json_flag (bool): When true, print a JSON object of all word
            frequencies (e.g. ``{"bleed": 1, ...}``). When false, print
            only the common words as repeated ``word - count`` lines.
        filename (str): The text file to parse.

    Returns:
        True for success
    """
    from collections import Counter  # stdlib; local import keeps module imports untouched

    contents = Path(filename).read_text().lower()
    # Words containing "'" or "-", or plain alphabetic runs (raw string:
    # '\S' in a plain literal is an invalid escape).
    words = re.findall(r"(?=\S*['-])[a-z'-]+|[a-z]+", contents)

    # Counter preserves first-occurrence order, matching the original
    # hand-rolled dict accumulation.
    counts = Counter(words)
    total = len(words)

    if json_flag:
        if counts:
            print(json.dumps(dict(counts)))
    elif total:
        # Common = occurrence share above 0.1% of the file's words.
        for word, count in counts.items():
            if count / total > 0.001:
                print("%s - %d" % (word, count))
    return True
6954295d4bb6c3672b117db600d2cc5643be197b
127,751
def join_col(col):
    """Converts an array of arrays into an array of strings, using ';' as
    the sep.

    :param col: iterable of iterables; items are stringified with str()
    :return: list of ';'-joined strings, one per inner iterable
    """
    # Comprehension instead of an append loop (PERF401).
    return [";".join(map(str, item)) for item in col]
f6386d99e69e3a8c04da2d7f97aa7fb34ac9044c
25,386
def is_number(s):
    """
    Check if a string s represents a number

    Parameters
    ----------
    s : `str`
        String to check

    Returns
    -------
    `bool`
        ``True`` if a string represents an integer or floating point number
    """
    # float() accepts every string int() does, plus floats, 'inf' and
    # 'nan' — a single conversion attempt replaces the two parallel
    # try/except blocks.
    try:
        float(s)
    except ValueError:
        return False
    return True
192dc02300d4d614a12a9cb9282b8d35332c07e7
561,042
def formatAbn(abn):
    """Formats a string of numbers (no spaces) into an ABN."""
    if len(abn) != 11:
        # Not a valid ABN length; return unchanged.
        return abn
    chunks = (abn[0:2], abn[2:5], abn[5:8], abn[8:11])
    return u'{0} {1} {2} {3}'.format(*chunks)
2746c206ee5156fa7939ed11f04af1865824ef8c
47,193
def table_exists(db_cur, table_name):
    """
    Return whether a table with the given name exists in a sqlite database.

    Parameters:
        db_cur(Cursor): A sqlite cursor connection.
        table_name(str): Name of a table in the sqlite database.
    """
    # Parameterized query instead of str.format — the formatted version
    # was vulnerable to SQL injection via table_name.
    query = "SELECT name FROM sqlite_master WHERE type='table' AND name=?"
    return db_cur.execute(query, (table_name,)).fetchone() is not None
1d11f196d5175e28617a4c56e80a1a3878e6239f
124,287
def drop(list_a: list, period: int):
    """Problem 16: Drop every N'th element from a list

    Parameters
    ----------
    list_a : list
        The input list
    period : int
        The number of every N elements to drop

    Returns
    -------
    list
        A list without a number of dropped elements.

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    ValueError
        If the given `period` is greater than the list's length or smaller
        than 1
    """
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')
    if not 1 <= period <= len(list_a):
        raise ValueError('The value of `period` is not valid.')
    kept = []
    for position, element in enumerate(list_a, start=1):
        if position % period:
            kept.append(element)
    return kept
10d8c6e1d968a4bd6890439a6200eabd75d5c3a9
197,487
def chunk(lst, n_chunks):
    """
    Split ``lst`` into ``n_chunks`` parts of approximately equal length.

    https://stackoverflow.com/questions/2130016/
    splitting-a-list-into-n-parts-of-approximately-equal-length

    Parameters
    ----------
    lst : list
    n_chunks : int

    Returns
    -------
    list
        chunked list
    """
    base, extra = divmod(len(lst), n_chunks)
    chunks = []
    # The first ``extra`` chunks receive one additional element.
    for i in range(n_chunks):
        start = i * base + min(i, extra)
        stop = (i + 1) * base + min(i + 1, extra)
        chunks.append(lst[start:stop])
    return chunks
418fa1599676821bb897efd139d3c17c5facebb8
31,622
def _apriori_gen(frequent_sets): """ Generate candidate itemsets :param frequent_sets: list of tuples, containing frequent itemsets [ORDERED] >>> _apriori_gen([('A',), ('B',), ('C',)]) [('A', 'B'), ('A', 'C'), ('B', 'C')] >>> _apriori_gen([('A', 'B'), ('A', 'C'), ('B', 'C')]) [('A', 'B', 'C')] >>> _apriori_gen([tuple(item) for item in ['ABC', 'ABD', 'ABE', 'ACD', 'BCD', 'BCE', 'CDE']]) [('A', 'B', 'C', 'D'), ('A', 'B', 'C', 'E'), ('A', 'B', 'D', 'E'), ('B', 'C', 'D', 'E')] >>> cc = [('55015', '55314'), ('55015', '55315'), ('55314', '55315'), ('57016', '57017'), ('57043', '57047'), ('581325', '582103')] >>> _apriori_gen(cc) [('55015', '55314', '55315')] """ # Sanity check for the input errors = [freq for freq in frequent_sets if sorted(list(set(freq))) != sorted(list(freq))] assert not errors, errors assert sorted(list(set(frequent_sets))) == sorted(frequent_sets), \ set([(x, frequent_sets.count(x)) for x in frequent_sets if frequent_sets.count(x) > 1]) new_candidates = [] for index, frequent_item in enumerate(frequent_sets): for next_item in frequent_sets[index + 1:]: if len(frequent_item) == 1: new_candidates.append(tuple(frequent_item) + tuple(next_item)) elif frequent_item[:-1] == next_item[:-1]: new_candidates.append(tuple(frequent_item) + (next_item[-1],)) else: break return new_candidates
9a5465b9a7bc26f15506c221e3187bbcd4910205
140,513
def hamming_dist(a, b):
    """Compute the Hamming distance between two equal-length strings:
    the number of positions at which they differ.
    """
    assert len(a) == len(b)
    # Booleans sum as 0/1, so this counts the mismatching positions.
    return sum(x != y for x, y in zip(a, b))
97b8060030dc3dd542dba6ab1225fc26da6ff347
187,659
def normalize_From(node):
    """
    Return a list of strings of Python 'from' statements, one import on
    each line.
    """
    children = node.getChildren()
    # node.level is the count of leading dots for relative imports.
    module = '.' * node.level + node.modname
    statements = []
    for name, asname in children[1]:
        if asname is None:
            statements.append('from %s import %s\n' % (module, name))
        else:
            statements.append('from %s import %s as %s\n' % (module, name, asname))
    return statements
133405ceb331582aa023f78ac71e9c6fed8760f8
92,125
def extract_issues_html(html):
    """
    Extract the part of the HTML page which contains the list of issues.

    Testing only the relevant part avoids false positives/negatives due
    to certain texts also being present in other parts of the page.
    """
    marker = 'id="issues"'
    # Everything between the first and second occurrence of the marker
    # (or up to the end of the page if it occurs only once).
    return html.split(marker)[1]
56304effc096bb5007d7f3c04bc87860a602c10d
598,578
def is_container(obj):
    """Check whether *obj* is a list or a tuple.

    Args:
        obj:

    Returns:
        True if it is a *container*, otherwise False
    """
    return isinstance(obj, list) or isinstance(obj, tuple)
09db5b1d136b6db1969097d19f7d63d1750971ab
70,805
import json def _read(config_filename): """Read a comment-augmented json file from the given filename.""" with open(config_filename) as f: lines = f.readlines() content_lines = [l for l in lines if not l.lstrip().startswith('//')] content = ''.join(content_lines) return json.loads(content)
ccfb9b36b88c5e15d1224be75f6dce03e4e1dd78
490,480
def wrap(x, m, M):
    """Wraps ``x`` so m <= x <= M; but unlike ``bound()`` which truncates,
    ``wrap()`` wraps x around the coordinate system defined by m,M.\n
    For example, m = -180, M = 180 (degrees), x = 360 --> returns 0.

    Args:
        x: a scalar
        m: minimum possible value in range
        M: maximum possible value in range

    Returns:
        x: a scalar, wrapped
    """
    span = M - m
    # Repeatedly shift by the range width until x lands inside [m, M].
    while x > M:
        x -= span
    while x < m:
        x += span
    return x
eb86baba697a0890ff90e176fffa0e7cd904d0bd
668,931
def get_trunc_minute_time(obstime):
    """Truncate *obstime* (seconds) down to the nearest whole minute.

    Uses floor division: the original ``/`` was a Python 2 idiom which in
    Python 3 performs true division and therefore no longer truncates.
    """
    return (int(obstime) // 60) * 60
1a1a6ba47573442f0e98ca9aeaa8a5506e7ab081
15,942
def filter_to_be_staged(position):
    """Position filter for Experiment.filter() to include only worms that
    still need to be stage-annotated fully."""
    stages = [tp.annotations.get('stage') for tp in position]
    # True only when every timepoint has a non-None, non-empty 'stage'.
    fully_annotated = all(stages)
    return not fully_annotated or stages[-1] != 'dead'
df771b0856c91c8663ad06cacf86bf3389df04c5
699,246
def prune_wv(df, vocab, extra=["UUUNKKK"]):
    """Prune word vectors to the vocabulary (plus the *extra* tokens)."""
    keep = set(vocab) | set(extra)
    return df.filter(items=keep, axis='index')
1e42a8426a5b931f9d611f7773d7d018c85ca507
685,907
import torch


def get_ranks(outputs):
    """
    Return the ranks according to the outputs (1 for the highest grade).
    Draws are resolved by giving the best rank to the first output
    encountered.

    Args:
        outputs: torch.Tensor, (batch_size, 1 ou 2) tensors outputs.

    Returns:
        torch.Tensor, ranks corresponding to the grades in a line Tensor.
    """
    scores = outputs[:, -1]
    count = len(scores)
    order = torch.argsort(scores, descending=True)
    ranks = torch.zeros(count, dtype=torch.long)
    # Scatter 1..count into rank positions given by the descending sort.
    ranks[order] = torch.arange(1, count + 1)
    return ranks
8e586938ce2addc91bdcb52b34e09be813bf211a
415,157
def lr_update(
    num_updates: int,
    warmup_updates: int,
    warmup_init_lr: float,
    lr_step: float,
    decay_factor: float,
) -> float:
    """InverseSquareRootSchedule multiplicative factor.

    https://github.com/pytorch/fairseq/blob/master/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py#L32

    During warm up the learning rate grows linearly from *warmup_init_lr*;
    afterwards it decays with the inverse square root of the update count.

    Args:
        num_updates: number of batches already used.
        warmup_updates: number of batch steps for warm up.
        warmup_init_lr: initial learning rate.
        lr_step: step for increasing learning rate during warm up.
        decay_factor: factor for decreasing learning rate after warm up.

    Returns:
        learning rate multiplicative factor (0 when *warmup_init_lr* is
        not positive).
    """
    if num_updates < warmup_updates:
        current_lr = warmup_init_lr + num_updates * lr_step
    else:
        current_lr = decay_factor * num_updates ** -0.5
    if warmup_init_lr <= 0:
        return 0
    return current_lr / warmup_init_lr
bcc5f1b26e26f1e683893095ad11870868d888a1
338,368
def input_volume(input_mass, input_conc):
    """
    Compute the volume required for a given mass and concentration.

    Silently assumes that the units are:
    - input_mass: ng (nanograms)
    - input_conc: ng/µL (nanograms per microlitre)
    """
    volume = input_mass / input_conc
    return volume
f23799616c3583d03e2c47739a038e1a24182755
136,261
def calculate_time_from_stop(segment_df, dist_along_route, prev_record, next_record):
    """
    Calculate the time from stop within the tuple (prev_record, next_record)

    Algorithm:
    If prev_record equals next_record, the bus is parked at the stop: return 0.
    Otherwise interpolate: the ratio of (distance from the bus to the next
    stop) over (distance between the two stops) is applied to the segment's
    travel duration.

    :param segment_df: dataframe for the preprocessed segment data
    :param dist_along_route: distance between the initial stop and the current location of the bus
    :param prev_record: single record of the route_stop_dist.csv file
    :param next_record: single record of the route_stop_dist.csv file
    :return: total seconds of the time_from_stop
    """
    if prev_record.get('stop_id') == next_record.get('stop_id'):
        return 0.0
    distance_stop_stop = next_record.get('dist_along_route') - prev_record.get('dist_along_route')
    distance_bus_stop = next_record.get('dist_along_route') - dist_along_route
    ratio = float(distance_bus_stop) / float(distance_stop_stop)
    assert ratio < 1
    mask = (segment_df.segment_start == prev_record.get('stop_id')) & \
           (segment_df.segment_end == next_record.get('stop_id'))
    # Narrowed from a bare `except:` which silently swallowed every error
    # (including KeyboardInterrupt). IndexError: no matching segment row;
    # KeyError: the expected column is missing from the record.
    try:
        travel_duration = segment_df[mask].iloc[0]['travel_duration']
    except (IndexError, KeyError):
        # Fall back to the average duration over all segments.
        travel_duration = segment_df['travel_duration'].mean()
    time_from_stop = travel_duration * ratio
    return time_from_stop
3e441b76aff11db8850cd90e695b4d8edc7ea01a
357,694
def SubstTemplate(contents, values):
    """
    Return the template with substituted values from the given dictionary.

    Keywords to be substituted are surrounded by '@': @KEYWORD@.

    No attempt is made to avoid recursive substitution. The order of
    evaluation follows the dictionary's iteration order, so do NOT
    substitute a value that contains any @KEYWORD@ strings expecting them
    to be recursively substituted.
    """
    for keyword, replacement in values.items():
        token = '@' + keyword + '@'
        try:
            contents = contents.replace(token, replacement)
        except TypeError:
            # Non-string replacement: report the offending pair and move on.
            print(repr(keyword), repr(replacement))
    return contents
ff7fd400c9502af1a8e7a9e1dde81dd6d6d411ae
425,062
def map_to_range(x: int, from_low: int, from_high: int, to_low: int, to_high: int) -> int:
    """
    Re-map a number from one range to another.

    A value of from_low gets mapped to to_low, a value of from_high to
    to_high, and values in-between proportionally. Out-of-range values are
    NOT constrained, because they are sometimes intended and useful.

    Inspired by https://www.arduino.cc/reference/en/language/functions/math/map/

    :param x: The number to map
    :param from_low: The lower bound of the value’s current range
    :param from_high: The upper bound of the value’s current range
    :param to_low: The lower bound of the value’s target range
    :param to_high: The upper bound of the value’s target range
    :return: The re-mapped value
    :rtype: int
    """
    source_span = from_high - from_low
    target_span = to_high - to_low
    scaled = (x - from_low) * target_span / source_span
    return int(scaled + to_low)
ab69a069c9544b8a2546f849f8544e81631a6870
46,890
def c_array(py_array, c_type):
    """Make a C array from a python list or array and a C datatype.

    Arguments
    ----------
    py_array: array-like
        data to convert
    c_type: C datatype to which elements of py_array will be converted

    Returns
    -------
    C array of the chosen datatype
    """
    values = py_array if isinstance(py_array, list) else list(py_array)
    array_type = c_type * len(values)
    return array_type(*values)
c015fdd6d308d3956f94a2c33493e280b8aad2a3
380,813
def split_fqdn(fqdn):
    """
    Unpack fully qualified domain name parts into a list of length 3,
    left-padded with None when fewer than three labels are present.
    """
    if not fqdn:
        return [None, None, None]
    parts = fqdn.split(".")
    padding = [None] * (3 - len(parts))
    return padding + parts[:3]
6615456641d09e8e1f2d4f38517204b3db1e1d7c
691,045
import uuid


def get_uuid(data):
    """Compute a deterministic (namespace/MD5-based, version 3) UUID for data.

    :param data: byte array
    """
    # NOTE(review): in Python 3 uuid.uuid3 expects `data` to be a str, not
    # bytes, despite the ":param data: byte array" wording -- confirm what
    # callers actually pass.
    return str(uuid.uuid3(uuid.NAMESPACE_URL, data))
f91e6e76c14736c1678bc000b7246ac6b518171f
22,656
def scale_series(ds):
    """
    Scale values in a pandas data-series by the minimum value in the series.

    :params ds: pandas data-series
    """
    minimum = ds.min()
    return ds / minimum
9ace3db3b869a59bd2113da50413d1ba73990ec9
240,282
def read_map_file(file_name):
    """
    Read a map file generated by TI's ARM compiler and return a dict
    mapping addresses (int) to symbol names (str).

    The function expects a file of a form similar to:
        .
        .
        .
        GLOBAL SYMBOLS: SORTED BY Symbol Address

        address   name
        -------   ----
        00000000  __TI_static_base__
        00000000  g_pfnVectors
        00000200  __STACK_SIZE
        00000209  _Z10set_pinoutv
        .
        .
        .
        ffffffff  __c_args__
        ffffffff  binit

        [85 symbols]
    """
    # Use a context manager so the file handle is always closed
    # (the original left the file open).
    with open(file_name) as handle:
        lines = handle.readlines()
    [start] = [i for i, line in enumerate(lines)
               if "GLOBAL SYMBOLS: SORTED BY Symbol Address" in line]
    start += 4  # skip the lines separating the header from the start of the list
    map_dict = {}
    for line in lines[start:]:
        # A blank line or the trailing "[N symbols]" line ends the table.
        if line[0] == '\n' or line[0] == '[':
            break
        addr, symbol = line.split()
        map_dict[int(addr, 16)] = symbol
    return map_dict
553fb838402220424a94335d3ad566c2a93a224f
665,244
def tool_enabled(feature):
    """Generate the name of a feature to test whether it is enabled."""
    return 'enable_{}'.format(feature)
a5707995ce3f726a16ecbb16b6edf3b2ffc2bebd
357,616
import functools


def cached(f):
    """A simple caching decorator for instance methods taking no arguments.

    The computed value is stored on the instance under a name derived from
    the wrapped function, so several ``@cached`` methods on the same class
    do not clobber each other's cache (the original implementation stored
    every result under one shared attribute name).
    """
    attr = "_cached_" + f.__name__

    @functools.wraps(f)
    def g(self):
        if not hasattr(self, attr):
            setattr(self, attr, f(self))
        return getattr(self, attr)
    return g
1ccf54d4523727b485f17dcbaf9ac0ea88b1a2cf
162,722
def serializeEdge(edge):
    """Return the Edge as native Python datatypes, fit for json.

    Edges only reference Knobs, so the owning Node ids are stored here to
    allow correct re-association once reconstructed.
    """
    source = edge.source
    target = edge.target
    serialized = {}
    serialized["source_nodeId"] = source.node().uuid
    serialized["source_name"] = source.name
    serialized["target_nodeId"] = target.node().uuid
    serialized["target_name"] = target.name
    return serialized
58662cdbfd1bc22e0dc4276e5240961dc0d5672a
628,121
def is_palindrome(num):
    """Check whether a number is a palindrome.

    Single digit numbers count as palindromes.
    """
    digits = str(num)
    return digits == digits[::-1]
eb85bf8e54cbae59d81783e1917b04ebb2239239
479,472
def contains(str1, str2):
    """Return True if str1 contains str2.

    Returns False (rather than raising) when either argument is None.
    """
    # The `in` operator replaces the find()/-1 dance; it raises TypeError
    # when either operand is None, which we map to "not found" just like
    # the original's exception handlers did.
    try:
        return str2 in str1
    except (TypeError, AttributeError):
        return False
de7cf6136e29fb7f05d11ca9ce1765b1f467be83
228,770
import itertools def _flip(items, ncol): """The items in `items` are listed in column order and displayed across `ncol` columns. Re-order the list so that the items will be displayed across rows rather than down columns.""" return list(itertools.chain(*[items[i::ncol] for i in range(ncol)]))
266dc2d5758a2bcc3416ca67af4f9d2ee3422ab0
527,791
def escape_forward_slashes(s):
    """
    Escape forward slashes for our custom escape scheme where '/' escapes
    to backslash-f and a backslash escapes to backslash-b.

    Note that http://flask.pocoo.org/snippets/76/ recommends against doing
    this, but the problem is that URL variable boundaries can be ambiguous
    in some cases if we don't do a simple escaping like this.

    :param s: the string
    :return: the escaped string
    """
    # str.translate maps each ORIGINAL character exactly once, which is
    # equivalent to the replace-backslash-first chaining of the naive form.
    return s.translate({ord('\\'): '\\b', ord('/'): '\\f'})
17ee9b799c6561d408c956e555ce286664fee94e
317,278
import random
import string


def create_random_letters(digit):
    """
    Generate *digit* random ASCII letters (mixed case) as one string.

    :param digit: number of letters (length of the result)
    :return: the random letter string
    """
    # string.ascii_letters = abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
    return "".join(random.choice(string.ascii_letters) for _ in range(digit))
64fe5920bacc8cfd4b29ae5ae1f9f7551745644a
603,255
def to_loxone_level(level):
    """Convert the given HASS light level (0-255) to Loxone (0.0-100.0)."""
    scaled = (level * 100) / 255
    return float(scaled)
9004c0eaa2ade4c664d30a0c79288d2f11a1362a
656,894
import hashlib def _hash(bs: bytes) -> bytes: """ Generic hash function for hashing keys. """ return hashlib.sha256(bs).digest()
a1059b37d579ee5d1700780fe9ed58f1a6637abc
142,785
def is_palindrome(n):
    """
    Check whether a non-negative integer reads the same forwards and
    backwards, by building the digit-reversed value arithmetically.

    >>> is_palindrome(12321)
    True
    >>> is_palindrome(42)
    False
    >>> is_palindrome(2015)
    False
    >>> is_palindrome(55)
    True
    """
    remaining, reverse = n, 0
    while remaining > 0:
        # Peel the lowest digit off `remaining` and push it onto `reverse`.
        remaining, reverse = remaining // 10, reverse * 10 + remaining % 10
    return reverse == n
a20e69502c7609131e2fe27d1b92fe1563558c38
664,496
import re


def parse_remote_url(url):
    """Extract (user, repo) from a git remote URL."""
    known_prefixes = [
        'https://gitlab.audeering.com/',
        'http://gitlab.audeering.local/',
        'http://gitlab2.audeering.local/',
        'git@srv-app-01.audeering.local:',
        r'^.*@gitlab.audeering.com/',
        'https://github.com/',
        'git@github.com:',
    ]
    # Strip every known host prefix; at most one will actually match.
    for prefix in known_prefixes:
        url = re.sub(prefix, '', url)
    *user_parts, repo = url.split('/')
    if repo.endswith('.git'):
        repo = repo[:-4]
    return ('/'.join(user_parts), repo)
821a212fa43af01b956fbe226c388cb0b86ab71e
558,655
def _extract_defines_from_option_list(lst): """Extracts preprocessor defines from a list of -D strings.""" defines = [] for item in lst: if item.startswith("-D"): defines.append(item[2:]) return defines
a8ecbe7a01052c313bc21f945edf4ff6e97ba35e
515,439
def inaction(x):
    """Trivial policy: never intervene, regardless of the input."""
    return 0
8c719efc4210ce9b7a698b595f64e906f1482fe9
554,672
from typing import Tuple


def string_from_sentence(sentence: Tuple[Tuple]) -> str:
    """Get a sentence as a string from its list-of-lists representation.

    Args:
        sentence (Tuple[Tuple]): Sentence in list of list representation,
            where the last element of each edge is a text fragment.

    Returns:
        str: Sentence as string
    """
    fragments = (edge[-1] for edge in sentence)
    return ''.join(fragments)
c7abf8ff452835b6bbc5706e5e3718453dc5162c
38,563
import re


def get_wiki_links(text: str) -> list:
    """
    Get all wiki links from the given text and return a list of them.
    Each list item is a dictionary with the following keys:
        - wiki_link: the exact match
        - link: the extracted link
        - text: the possible extracted text (defaults to the link itself)
    """
    wiki_links = []
    wiki_link_regex = r'\[\[(.*?)\]\]'
    for match in re.finditer(wiki_link_regex, text):
        inner = match.group(1)
        out = {
            "wiki_link": match.group(),
        }
        if '|' in inner:
            # Split on the FIRST '|' only: the original unrestricted split
            # raised ValueError for titles containing a second '|'.
            out['link'], out['text'] = inner.split('|', 1)
        else:
            out['link'] = inner
            out['text'] = inner
        # if the link ends with `_index` remove it
        if out['link'].endswith('_index'):
            out['link'] = out['link'][:-6]
        wiki_links.append(out)
    return wiki_links
133c94d532cde1ba95ecf310c2ec64923a3cc9b9
87,988
def to_sci_not(f):
    """Convert float `f` to a scientific notation LaTeX string (not
    including the `$`) by using string formats `e` and `g`.
    """
    # `e` format always contains an 'e' separating mantissa and exponent.
    mantissa_str, _, exp_str = f"{f:.4e}".partition("e")
    mantissa = f"{float(mantissa_str):.4g}"  # trim trailing zeros
    exponent = int(exp_str)
    return f"{mantissa} \\times 10^{{ {exponent} }}"
d46b7ec7a7a87a2c444d1fee528b6bc44902f4b8
494,427
def index_data(word_index, data):
    """Convert a dataset to indexed instances based on a given word index.

    Each instance vector starts with the <START> index; unknown words map
    to the <UNK> index.

    :param word_index: given mapping of words to indices
    :param data: list of text instances
    :return: list of indexed instances
    """
    start = word_index["<START>"]
    unk = word_index["<UNK>"]
    return [[start] + [word_index.get(word, unk) for word in instance]
            for instance in data]
f10dca89e859ba2a642de1fd4bd6c375174fd6e4
381,172
def best_time(time1, time2):
    """Return the better (lower) of two given times.

    A time of 0 means "no time recorded yet", so when the minimum is 0 the
    other (higher) value is returned instead.
    """
    lower, higher = sorted((time1, time2))
    if lower == 0:
        return higher
    return lower
f9fc7274dfe34aeb6a480daf8aa68646748d5d19
501,837
def rename_labels_to_internal(x):
    """Shorten labels ("Experience" -> "exp") and convert to lower-case."""
    shortened = x.replace("Experience", "exp")
    return shortened.lower()
9b39a5626768226492a200c0198115e432db0c78
519,395