content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def time2seconds(time):
    """Convert a delta-time string to a number of seconds.

    time: e.g. '2d' for 2 days; supported units: d-day, h-hour,
          m-minute, s-second.
    return: integer number of seconds.

    Note: month and year durations vary, so they are deliberately
    unsupported for the sake of simplicity.
    """
    # Bug fix: the original character class was [s|m|h|d], which also
    # accepted a literal '|' as a "unit" and then crashed on the tmap lookup.
    assert re.fullmatch(r'\d+[smhd]', str(time)) is not None, 'Invalid time string format.'
    tmap = {'s': 1, 'm': 60, 'h': 3600, 'd': 3600 * 24}
    return int(time[:-1]) * tmap[time[-1]]
6cde282b6b6b1f3ff435bfe6d46279a780dafa85
42,737
import torch


def calc_au(model, test_loader, delta=0.01):
    """Count the number of active units in the encoder's posterior means.

    A unit is considered active when the sample variance of its posterior
    mean across the test set is at least ``delta``.

    Returns a tuple ``(num_active_units, per_unit_variance)``.
    """
    collected = []
    for batch_data, _ in test_loader:
        mu, _ = model.encode_stats(batch_data)
        collected.append(mu)
    all_means = torch.cat(collected, dim=0)

    # Centre the means, then compute the per-unit sample variance. (batch_size, nz)
    centered = all_means - all_means.mean(0, keepdim=True)
    n_samples = centered.size(0)
    unit_var = (centered ** 2).sum(dim=0) / (n_samples - 1)
    return (unit_var >= delta).sum().item(), unit_var
33887036354c48d202baeff82690e4ee6eb44a64
42,738
import sys


def get_dataset_vars(dataset, met_vars, in_file=False):
    """Map generic met variable names to dataset-specific names.

    :dataset: gridded dataset name
    :met_vars: list of met variables (must match known variables)
    :in_file: for CRUNCEP, use the long in-file variable names
    :returns: dict mapping each met variable to its name in the dataset file
    """
    princeton = dict(
        LWdown="dlwrf", PSurf="pres", Wind="wind", SWdown="dswrf",
        Tair="tas", Qair="shum", Rainf="prcp")
    cruncep_in_file = dict(
        LWdown="Incoming_Long_Wave_Radiation", PSurf="Pression",
        Wind="U_wind_component", SWdown="Incoming_Short_Wave_Radiation",
        Tair="Temperature", Qair="Air_Specific_Humidity",
        Rainf="Total_Precipitation")
    cruncep = dict(
        LWdown="lwdown", PSurf="press", Wind="*wind", SWdown="swdown",
        Tair="tair", Qair="qair", Rainf="rain")

    if dataset == "PRINCETON":
        var_dict = princeton
    elif dataset == "CRUNCEP":
        var_dict = cruncep_in_file if in_file else cruncep
    elif dataset in ["WATCH_WFDEI", "GSWP3"]:
        # These datasets already use the generic names.
        return met_vars
    else:
        sys.exit("Unknown dataset %s - more coming later" % dataset)

    return {v: var_dict[v] for v in met_vars}
b64a909bb6c325fcf44053447706a52b651a2d9a
42,739
import os
import json


def mock_threat_intel_query_results():
    """Load Threat Intel test fixtures and return a mock query function.

    Fixture files should be in the following JSON format:
        [
          {
            "ioc_value": "1.1.1.2",
            "ioc_type": "ip",
            "sub_type": "mal_ip"
          }
        ]
    """
    ioc_lookup = {}
    fixtures_root = 'tests/integration/fixtures/threat_intel/'
    for dirpath, _, filenames in os.walk(fixtures_root):
        for name in filenames:
            with open(os.path.join(dirpath, name), 'r') as handle:
                for entry in json.load(handle):
                    ioc_lookup[entry['ioc_value']] = entry

    def _query(values):
        # Stand-in for ThreatIntel._query: return the fixture entries
        # for any of the supplied values that are known IOCs.
        return [ioc_lookup[value] for value in values if value in ioc_lookup]

    return _query
530c39e41babf8631479738aa9e5fd8a50740483
42,740
def initial_subtract(pixels, width, height):
    """Background-subtract an image given as a flat pixel list.

    Find the mean and sample standard deviation of the border pixels,
    subtract the mean intensity from every pixel in place (clamping
    negative results to zero), and return the sample standard deviation.

    :param pixels: flat row-major list of intensities, mutated in place
    :param width: image width in pixels
    :param height: image height in pixels
    :return: sample standard deviation (sigma) of the border pixels
    """
    boundary = [0.] * (height + height + width + width - 4)

    # top and bottom boundaries
    boundary[:width] = pixels[:width]
    boundary[width:2 * width] = pixels[-width:]

    # left and right boundaries (interior rows only)
    for r in range(height - 2):
        boundary[(2 * width) + (r * 2)] = pixels[width * (r + 1)]
        boundary[(2 * width) + (r * 2) + 1] = pixels[(width * (r + 2)) - 1]

    # mean of boundary pixels
    mean = sum(boundary) / len(boundary)

    # sample standard deviation (sigma) of intensity for boundary pixels
    stddev = (sum((p - mean) ** 2 for p in boundary) / (len(boundary) - 1)) ** 0.5

    # Subtract the mean from every pixel, rounding negatives up to zero.
    # (Clearer than the original "(abs(x) + x) / 2" trick; same result.)
    for i, p in enumerate(pixels):
        pixels[i] = max(p - mean, 0.0)
    return stddev
956f0f9e509ee80e974163a384f801ab02008d98
42,743
def filter_dict(field, dict, level = 2):
    """Filter through nested dictionaries like one would do with arrays.

    Only works if the sub-dictionaries all carry the key named by
    ``field``. Returns a list of ``(key, sub_dict[field])`` pairs.
    """
    pairs = []
    for key, sub in dict.items():
        pairs.append((key, sub[field]))
    return pairs
83c8226c83dafad277a4defe78080e3087d56f01
42,744
def convert_little_endian(string):
    """Reverse the byte order of a space-separated hex string.

    >>> convert_little_endian('C0 00')
    '00 C0'
    """
    return " ".join(reversed(string.split(" ")))
052edfde996ed022b5a0f5076dcfb52866716456
42,745
def getPointIndex(x, y, pointsPerWidth, pointsPerHeight):
    """Returns the index of the given point in a pointsPerWidth*pointsPerHeight grid.

    pointsPerWidth and pointsPerHeight should be odd numbers. The (0;0)
    origin is the centre of the grid; x grows rightwards and y grows
    upwards, while grid rows are numbered downwards from the top-left.
    """
    # Centre of the grid, measured from the top-left corner.
    centerRow = pointsPerHeight // 2
    centerCol = pointsPerWidth // 2
    # Positive y moves up, which *decreases* the row number;
    # positive x simply increases the column number.
    row = centerRow - y
    col = centerCol + x
    return row * pointsPerWidth + col
bbf375689dabb9407d9eca3dcd517f5eab4fe0f8
42,746
def prepare_input(input_str, from_item):
    """Prepare user input for validation against a graph.

    Parameters:
        input_str: A string containing node or group identifiers.
        from_item: Start processing only after this item.

    Returns:
        A list of node identifiers, with identifier ranges such as
        'g1:5' unpacked into individual identifiers.
    """
    # Tokenise, drop the leading items, then strip commas and whitespace.
    tokens = [tok.strip(',').strip() for tok in input_str.lower().split()[from_item:]]

    identifiers = []
    for token in tokens:
        if ':' not in token:
            identifiers.append(token)
            continue
        # Ranges look like 'g1:5' - a prefix (I, B, T or G), start and end.
        prefix = token[0]
        head, _, tail = token.partition(':')
        try:
            # Ignore the prefix character when parsing the start number.
            start, end = int(head[1:]), int(tail)
        except ValueError:
            print("[ERROR] Check syntax for identifier range: do not add "
                  "identifier prefix to the second part, i.e. g1:g5.")
            # Keep the erroneous identifier so the caller can catch the error.
            identifiers.append(token)
            continue
        identifiers.extend(prefix + str(n) for n in range(start, end + 1))
    return identifiers
b0545f0d58ad9788fa8a1538e163d0959965dc8a
42,747
def read_word():
    """Prompt for the word to be tested (or '-1' to quit).

    Keeps prompting until the input is purely alphabetical and at most
    16 characters long, or until the sentinel '-1' is entered.

    Returns:
        (word, sentinel): the lower-cased word, and a bool that is True
        when the user asked to quit with '-1'.
    """
    while True:
        word = input("Input the word to be tested or \'-1\' to quit: \n")
        # Idiom fix: len(word) instead of calling word.__len__() directly;
        # the pointless "word = str()" pre-initialisation is also gone.
        if (word.isalpha() and len(word) <= 16) or word == "-1":
            break
        print("The word must contain only alphabetical characters and be a maximum of 16 characters long")
    sentinel = word == "-1"
    return word.lower(), sentinel
4167e20499f94094ed631cf610b14c05814a9c71
42,748
import uuid


def get_uuid_from_query_params(request, param):
    """Return the query parameter ``param`` parsed as a UUID.

    Returns None when the parameter is absent, empty, or not a valid UUID.
    """
    raw = request.query_params.get(param, None)
    if not raw:
        return None
    try:
        return uuid.UUID(raw)
    except ValueError:
        return None
1c8e3b2ffbe7020f6d1d796aeae55d53d0b39867
42,749
def sanitize(s: str) -> str:
    """Something like b64encode; sanitize a string to a path-friendly version."""
    # Lower-case, then strip every separator/punctuation char in one pass.
    return s.lower().translate(str.maketrans('', '', ' ;:_-/\\.'))
813b3c743f57443dcebddef987bf71b58f94f73c
42,751
import subprocess


def calledprocesserror_helper(*popenargs, **kwargs):
    """Helper for the calledprocesserror test.

    Lambdas can't raise exceptions, so this logic gets its own function.
    Version probes succeed; any other command raises CalledProcessError.
    """
    # Workaround so that the --version check doesn't throw a CalledProcessError
    if "--version" not in popenargs[0]:
        raise subprocess.CalledProcessError(2, "", output="mocked error")
    return "1.2.3"
045617b4478ebbdeca13fa07c9ba3e4001689519
42,752
import pickle def _pickle(pickle_file): """ Loads a pickle file that works in both py 2 and 3 Parameters ------ pickle_file : str path to pickle file to load """ try: with open(pickle_file.as_posix(), "rb") as f: return pickle.load(f) except UnicodeDecodeError: with open(pickle_file.as_posix(), "rb") as f: return pickle.load(f, encoding="latin1")
f34f7649d0c0b0480e86fc1cb76eae74b1099113
42,753
def _aihub_coord_to_coord(coords): """Covert aihub-style coords to standard format. >>> _aihub_coord_to_coord({ ... "X좌표1": 602.004, ... "X좌표2": 571.004, ... "X좌표3": 545.004, ... "X좌표4": 531.004, ... "Y좌표1": 520.004, ... "Y좌표2": 505.004, ... "Y좌표3": 465.004, ... "Y좌표4": 428.004, ... }) [(602.004, 520.004), (571.004, 505.004), (545.004, 465.004), (531.004, 428.004)] """ max_num = max(int(item[3:]) for item in coords) return [(coords[f"X좌표{n}"], coords[f"Y좌표{n}"]) for n in range(1, max_num + 1) if f"X좌표{n}" in coords]
fed881fe532442ccb2c13d44262a5ef12f5c5143
42,754
def even(n : int) -> bool:
    """Return True when ``n`` is an even integer."""
    return n % 2 == 0
2b468707945d17cdf4bd3e6842a2cc2642e66e61
42,755
def f1_from_roc(fpr, tpr, pos, neg):
    """Calculate f1 score from roc values.

    Parameters
    ----------
    fpr : float
        The false positive rate.
    tpr : float
        The true positive rate.
    pos : int
        The number of positive labels.
    neg : int
        The number of negative labels.

    Returns
    -------
    float
        The f1 score.
    """
    false_pos = fpr * neg
    false_neg = (1 - tpr) * pos
    true_pos = pos - false_neg
    return true_pos / (true_pos + (false_neg + false_pos) / 2)
d682ef92c8f3a43f1e88ab98125cb3f3d33cf189
42,756
def datetime_to_pretty_str(dt):
    """
    Convert datetime object to string similar to ISO 8601 but more compact.

    Arguments:
    ----------
    dt: datetime object
        ... for which the string will be generated.

    Returns:
    --------
    dt_str: string
        The pretty string representation, e.g. '2020-01-31 13:45:00'.
    """
    return dt.strftime('%Y-%m-%d %H:%M:%S')
24508950d6a2995247a0dd305ddb82086285bd18
42,757
def recursive_convert_sequences(data):
    """Recursively applies ``convert_sequences``.

    Mappings whose keys are all stringified integers are converted into
    lists ordered by the integer key; other mappings are recursed into,
    and non-mappings are returned unchanged.
    """
    if not hasattr(data, 'keys'):
        return data
    if len(data.keys()) == 0:
        return data
    try:
        # Bug fix: dict.keys() is not subscriptable in Python 3, so the
        # original data.keys()[0] raised TypeError; peek via iter() instead.
        int(next(iter(data.keys())))
    except ValueError:
        tmp = {}
        for key, value in data.items():
            tmp[key] = recursive_convert_sequences(value)
        return tmp
    intkeys = sorted(int(key) for key in data.keys())
    return [recursive_convert_sequences(data[str(key)]) for key in intkeys]
a9ddea313d0f22413af37f411001c7db6f54d087
42,758
def convert_string_to_bool(string_value):
    """
    simple method used to convert a string to a bool

    :param string_value: True or False string value
    :type string_value: string - required
    :return: bool True or False (True only for the exact string 'True')
    :rtype bool
    """
    return string_value == 'True'
3e4113721df399408719ae7737136691f904ae78
42,762
def has_finite_length(obj):
    """
    Return ``True`` if ``obj`` is known to have finite length.

    This is mainly meant for pure Python types, so we do not call any
    Sage-specific methods.

    Objects whose ``len()`` overflows are still considered finite;
    objects with no usable length (e.g. plain iterators) are not.
    """
    try:
        len(obj)
    except OverflowError:
        # len() overflowed, but a (finite) length does exist.
        return True
    except Exception:
        # No usable length at all.
        return False
    return True
483a5cbb69f197622373c224de41f9e0ddd149ec
42,763
import numpy


def MakePoint(*args):
    """Makes a column-vector point (numpy matrix) from a set of arguments."""
    column = [[coordinate] for coordinate in args]
    return numpy.matrix(column)
19e165a357f0b8e6933459aca65fdc62714fd878
42,764
import csv


def cluster_keywords(input_file_path):
    """
    Cluster keywords based on the shortened version of the keywords read
    from the input file.

    Args:
        input_file_path: the path to the tsv file containing keywords and
            their shortened version (one column per model).

    Returns:
        shortened_keywords_list: one dict per model, with shortened keywords
            as keys and the list of keywords in that cluster as values
        total_keyword_counts: total number of keywords being clustered
        model_name_list: names of the LaserTagger models used for shortening
            keywords, as indicated in the tsv header
    """
    with open(input_file_path) as tsv_file:
        rows = csv.reader(tsv_file, delimiter="\t")
        model_name_list = next(rows)[1:]
        clusters = [{} for _ in model_name_list]
        total_keyword_counts = 0
        for row in rows:
            total_keyword_counts += 1
            keyword = row[0]
            for index, short in enumerate(row[1:]):
                short = short.lower()
                if not short:
                    continue
                clusters[index].setdefault(short, []).append(keyword)
    return clusters, total_keyword_counts, model_name_list
fb000bc9d36f901e09f3a958a42e555b90c9ae56
42,765
def find_percentage(lines, num_ahead):
    """
    Function: find_percentage

    Parameter: lines -> lines of current output file opened,
        num_ahead -> number of lines ahead where we get the percentage of
        edge similarity

    Returns: edge similarity percentage in that file (or 0.0 when the
        expected marker lines are not present)
    """
    base = 3 + num_ahead
    percentage = 0.0
    # The offset of the "...: <percentage>" line depends on which marker
    # ('Following' vs 'graph') appears on the base line and its follow-ups.
    if 'Following' in lines[base]:
        if 'Following' in lines[base + 2]:
            percentage = lines[base + 5].split(':')[1]
        elif 'graph' in lines[base + 2]:
            percentage = lines[base + 4].split(':')[1]
    elif 'graph' in lines[base]:
        if 'Following' in lines[base + 1]:
            percentage = lines[base + 4].split(':')[1]
        elif 'graph' in lines[base + 1]:
            percentage = lines[base + 3].split(':')[1]
    return percentage
6fa880d1fff40bfb86b8eedb03085e6b4ffe978e
42,766
def float_div(num1, num2):
    """Function: float_div

    Description: Takes two numbers and does floating division. Returns
        zero if the divisor is zero.

    Arguments:
        (input) num1 number -> First number.
        (input) num2 number -> Second number.
        (output) Return results of division or 0.
    """
    if num2 == 0:
        return 0
    return float(num1) / num2
372c1eb0fda84d066d7ed5c6a7990869380fffb8
42,767
from datetime import datetime


def parse_date(seconds):
    """Parse a (naive UTC) datetime from the epoch value received from fullcalendar."""
    epoch_seconds = int(seconds)
    return datetime.utcfromtimestamp(epoch_seconds)
5c6d690ed3fb7b47602be38bde333c5b3b75f4d8
42,768
def analyze(sample_paragraph, typed_string, start_time, end_time):
    """Return [words per minute, accuracy percentage] for a typing test.

    Word count is defined as characters typed divided by five. Accuracy
    is the percentage of whitespace-separated words in typed_string that
    match the corresponding word of sample_paragraph (extra words in
    either string are ignored). start_time and end_time are in seconds.

    Idiom fix: the reduce/lambda "catamorphism" is replaced by a plain
    counting loop, dropping the functools/operator imports.
    """
    num_of_words = len(typed_string) / 5
    elapsed = end_time - start_time
    if elapsed == 0:
        # Instantaneous typing: infinite speed rather than a crash.
        words_per_minute = float('Inf')
    else:
        words_per_minute = num_of_words / elapsed * 60

    matches = 0
    compared = 0
    for expected, typed in zip(sample_paragraph.split(), typed_string.split()):
        compared += 1
        matches += (expected == typed)
    accuracy = matches / compared * 100 if compared else 0.0

    return [words_per_minute, accuracy]
ac797542fc90cc800deec731209fa336a7181739
42,769
def filter_by_hardware_interface(ctrl_list, hardware_interface, match_substring=False):
    """
    Filter controller state list by controller hardware interface.

    @param ctrl_list: Controller state list
    @type ctrl_list: [controller_manager_msgs/ControllerState]
    @param hardware_interface: Controller hardware interface
    @type hardware_interface: str
    @param match_substring: Set to True to allow substring matching
    @type match_substring: bool
    @return: Controllers matching the specified hardware interface
    @rtype: [controller_manager_msgs/ControllerState]
    """
    if match_substring:
        matches = lambda iface: hardware_interface in iface
    else:
        matches = lambda iface: iface == hardware_interface

    filtered = []
    for ctrl in ctrl_list:
        if any(matches(res.hardware_interface) for res in ctrl.claimed_resources):
            filtered.append(ctrl)
    return filtered
ce1dc94543b0fde61944f8a730fd4717b3c83da7
42,771
def convolve(X, Y):
    """
    Convolves two series.

    Entry n of the result is sum over l of conj(X[l]) * Y[(n + l) % len(Y)].
    """
    return [
        sum(X[l].conjugate() * Y[(n + l) % len(Y)] for l in range(len(X)))
        for n in range(len(Y))
    ]
8a9f5d37d25cea175228a87428c516f2dec1f022
42,773
def _columnspace(M, simplify=False):
    """Returns a list of vectors (Matrix objects) that span columnspace of ``M``

    The pivot columns of ``M`` (identified from its echelon form) are
    returned as column vectors.

    See Also
    ========

    nullspace
    rowspace
    """
    _, pivot_cols = M.echelon_form(simplify=simplify, with_pivots=True)
    return [M.col(position) for position in pivot_cols]
4bc7b18b6781426ff4e0cb9b587836b46aef23b7
42,774
def _bounce(open, close, level, previous_touch): """ did we bounce above the given level :param open: :param close: :param level: :param previous_touch :return: """ if previous_touch == 1 and open > level and close > level: return 1 elif previous_touch == 1 and open < level and close < level: return -1 else: return 0
cc4ca803325fcdf70976f5032c747e8b19d2acc0
42,775
import hashlib


def get_md5_hash(path):
    """Calculate the md5 hash for a specific file.

    :param path: path of the file to hash
    :return: hex digest string of the file contents
    """
    md5_hash = hashlib.md5()
    # Bug fix: use a context manager so the file handle is always closed
    # (the original left it open until garbage collection).
    with open(path, 'rb') as file_obj:
        md5_hash.update(file_obj.read())
    return md5_hash.hexdigest()
30120003948d334a11a0ca45fb6d22125e4b85ce
42,776
def _error_function(param, x_vals, y_vals): """ The error function to use with the lestsq method in scipy.optimize. :param list of floats param: current estimates of the intercept and slope for the least squares line. :param list of floats x_vals: the x-values at which to calculate the residuals. :param list of floats y_vals: the observed y-value at x. :return: _residuals :rtype: list of floats """ _residuals = (param[0] + param[1] * x_vals) - y_vals return _residuals
7c144fde2974ba911c9b78829023e1c6b0e56f45
42,779
from datetime import datetime


def string_to_date(string):
    """
    Convert a 'YYYY-MM-DD' string into a datetime object.

    :param string: date string such as '2020-01-31'
    :return: the parsed datetime
    """
    date_format = "%Y-%m-%d"
    return datetime.strptime(string, date_format)
6b554a2b9b23bc2e3a86809cd30e225f53db6d76
42,780
def mode_property(cls):
    """Generates a property that

    - when read, determines whether a recipient mode of type <cls> is
      present in context.recipient_modes
    - when set, updates the recipient_modes tuple by adding (True) or
      removing (False) the mode specified by <cls>
    """
    def _get_recipient(self):
        return any(isinstance(mode, cls) for mode in self.context.recipient_modes)

    def _set_recipient(self, value):
        if value == _get_recipient(self):
            # This case is kind of implicitly handled by the formlib.
            # Catching it here anyway keeps the algorithm obvious.
            return
        if value:
            self.context.recipient_modes += (cls(),)
        else:
            self.context.recipient_modes = tuple(
                mode for mode in self.context.recipient_modes
                if not isinstance(mode, cls))

    return property(fget=_get_recipient, fset=_set_recipient)
414dd4a05dbcd6bdf211acc95dd81e069ab9f4b4
42,781
def get_indexes(cursor, table_name):
    """
    Returns a dictionary of fieldname -> infodict for the given table,
    where each infodict is in the format:
        {'primary_key': boolean representing whether it's the primary key,
         'unique': boolean representing whether it's a unique index}
    """
    # This query retrieves each index on the given table, including the
    # first associated field name
    # "We were in the nick of time; you were in great peril!"
    sql = """
    WITH primarycols AS (
        SELECT user_cons_columns.table_name, user_cons_columns.column_name, 1 AS PRIMARYCOL
        FROM user_cons_columns, user_constraints
        WHERE user_cons_columns.constraint_name = user_constraints.constraint_name
          AND user_constraints.constraint_type = 'P'
          AND user_cons_columns.table_name = %s),
    uniquecols AS (
        SELECT user_ind_columns.table_name, user_ind_columns.column_name, 1 AS UNIQUECOL
        FROM user_indexes, user_ind_columns
        WHERE uniqueness = 'UNIQUE'
          AND user_indexes.index_name = user_ind_columns.index_name
          AND user_ind_columns.table_name = %s)
    SELECT allcols.column_name, primarycols.primarycol, uniquecols.UNIQUECOL
    FROM (SELECT column_name FROM primarycols UNION SELECT column_name FROM uniquecols) allcols,
         primarycols, uniquecols
    WHERE allcols.column_name = primarycols.column_name (+)
      AND allcols.column_name = uniquecols.column_name (+)
    """
    cursor.execute(sql, [table_name, table_name])
    # NOTE(review): multi-column indexes are not distinguished here; each
    # row maps a single column name to its primary/unique flags.
    return {
        row[0]: {'primary_key': row[1], 'unique': row[2]}
        for row in cursor.fetchall()
    }
7f71b0f2c493deb4d3ef6e0b73021575ba1e2ce1
42,782
def para_lda():
    """GridSearchCV parameters for LDA (one grid per solver family)."""
    return [
        {
            'solver': ['svd'],
            'n_components': [5, 10, 15, 20, 25, 30],
            'tol': [1e-2, 1e-3, 1e-4],
        },
        {
            'solver': ['lsqr', 'eigen'],
            'shrinkage': ['auto'],
            'n_components': [5, 10, 15, 20, 25, 30],
            'tol': [1e-2, 1e-3, 1e-4],
        },
    ]
c1986a67000d4fd36a952a3642ae1b56e8909411
42,784
def make_chunks(l, n):
    """
    Chunks a list into ``n`` parts. The order of ``l`` is not kept.

    Useful for parallel processing when a single call is too fast, so the
    overhead from managing the processes is heavier than the calculation
    itself.

    Parameters
    ----------
    l : list
        Input list.
    n : int
        Number of parts.

    Examples
    --------
    .. code-block:: python

        make_chunks(range(13), 3)
        # -> [[0, 3, 6, 9, 12], [1, 4, 7, 10], [2, 5, 8, 11]]
    """
    chunks = []
    for offset in range(n):
        # Every n-th element starting at this offset.
        chunks.append(l[offset::n])
    return chunks
df0c3ddf67ed892ce47cd073f67003ed9e85b6d6
42,785
def pr(vp, vs):
    """
    Computes the Poisson ratio

    Parameters
    ----------
    vp : array
        P-velocity.
    vs : array
        S-velocity.

    Returns
    -------
    pr : array
        Poisson ratio.
    """
    ratio_sq = (vp / vs) ** 2
    return 0.5 * ((ratio_sq - 2) / (ratio_sq - 1))
bec82f868b847b85e39c90016f6787e20faa91ae
42,788
def load_specman_exp(path):
    """
    Import parameter fields of specman data

    Args:
        path (str) : Path to either .d01 or .exp file

    Returns:
        params (dict) : dictionary of parameter fields and values
    """
    with open(path, encoding="utf8", errors="ignore") as exp_file:
        file_contents = exp_file.read().splitlines()

    params = {}
    section = ""
    for line_number, raw_line in enumerate(file_contents):
        line = str(raw_line)
        parts = line.split(" = ")
        if "[" in line and "]" in line and "=" not in line:
            # Section header such as "[general]" - remember its name.
            section = parts[0].replace("[", "").replace("]", "")
        elif line == "":
            # Blank lines reset the section to the generic "param" bucket.
            section = "param"
        elif len(parts) > 1:
            params[section + "_" + parts[0]] = parts[1]
        elif len(parts) == 1 and line != "":
            # Line without " = ": keyed by its line number, value kept as list.
            params[section + "_" + str(line_number)] = parts
    return params
59191161c30c274c35899b47bdec96128aca7102
42,789
import time


def timestamp_to_gmtime(ts):
    """Return a string formatted for GMT

    >>> print(timestamp_to_gmtime(1196705700))
    2007-12-03 18:15:00 UTC (1196705700)
    """
    if not ts:
        return "******* N/A ******* ( N/A )"
    formatted = time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime(ts))
    return "%s (%d)" % (formatted, ts)
7e0dd51d2811c361c301ee92e62eb5b271539bf1
42,790
def to_positive_int(int_str):
    """
    Tries to convert `int_str` string to a positive integer number.

    Args:
        int_str (string): String representing a positive integer number.

    Returns:
        int: Positive integer number.

    Raises:
        ValueError: If `int_str` could not be converted to a positive integer.
    """
    try:
        value = int(int_str)
    except ValueError:
        raise ValueError("argument must represent an integer number")
    if value < 1:
        raise ValueError("argument must be a positive integer number")
    return value
322942f257ca390e5d7ae5d9f74c33d6b8623baf
42,791
def get_module_url(module_items_url):
    """
    Extracts the module direct url from the items_url.

    Example:
        items_url https://canvas.instance.com/api/v1/courses/course_id/modules/module_id/items
        becomes   https://canvas.instance.com/courses/course_id/modules/module_id
    """
    without_api = module_items_url.replace('api/v1/', '')
    return without_api.replace('/items', '')
bf03e0139c07e1d43be8123e1966fac5fd68239a
42,792
def horizontal_unfold(A):
    """
    For a 3D tensor A(a,i,b), we unfold like: A(a,ib)
    """
    dim_a, dim_i, dim_b = A.shape
    return A.reshape(dim_a, dim_i * dim_b)
59caaa3db71c868d08264c64a88401e85ce6136c
42,793
def _excel_col(col): """Covert 1-relative column number to excel-style column label.""" quot, rem = divmod(col - 1, 26) return _excel_col(quot) + chr(rem + ord('A')) if col != 0 else ''
85488c0ef6594483e88e6d644189e4ffceaaea46
42,795
import re def _verify_ip(ip): """Return True if ip matches a valid IP pattern, False otherwise.""" if not ip: return False ip_pattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}') return ip_pattern.match(ip) is not None
e828c03726d09480dcc3a7caa509a465b9a7a97e
42,796
def p_displace(bin_str):
    """
    DES P permutation.

    :param bin_str: 32-bit binary ciphertext string
    :return: the permuted 32-bit binary string
    :raises ValueError: if the input is not exactly 32 characters long
    """
    if len(bin_str) != 32:
        raise ValueError("二进制字符串长度必须是 32")
    displace_table = (16, 7, 20, 21, 29, 12, 28, 17,
                      1, 15, 23, 26, 5, 18, 31, 10,
                      2, 8, 24, 14, 32, 27, 3, 9,
                      19, 13, 30, 6, 22, 11, 4, 25)
    # Table entries are 1-based bit positions into the input.
    return "".join(bin_str[position - 1] for position in displace_table)
3efb44f6d2667b65906dbce9af360107700fb8e1
42,797
import re


def match_dummies(item:str, rep_word:bool=True):
    """Replaces profanity with dummy word

    Words starting with a known bad prefix are replaced with
    ``expletive_<i>`` (when ``rep_word`` is True) or removed entirely.
    """
    bad_prefixes = ['nig', 'bitc', 'puss', 'fuc', 'shit', 'hoe', 'ass', 'cock',
                    'fag', 'tits', 'cunt', 'dick', 'piss']
    for index, prefix in enumerate(bad_prefixes):
        pattern = r'({bad_prefix})(.*?)\b'.format(bad_prefix=prefix)
        replacement = "expletive_{}".format(index) if rep_word else ''
        item = re.sub(pattern, replacement, item)
    return item
b55180e49efdb40b5919a518f08c61396751a2fb
42,798
def available_moves(hex_board):
    """
    Get all empty positions of the HexBoard = all available moves.

    :param hex_board: HexBoard class object
    :return: list of all empty (row, col) positions on the current HexBoard.
    """
    size = hex_board.size
    moves = []
    for row in range(size):
        for col in range(size):
            if hex_board.is_empty((row, col)):
                moves.append((row, col))
    return moves
4813cdc69c64c4260390bb8ee47f541eb274ae4d
42,799
from typing import Dict


def parse_group_id(group_id: str) -> Dict:
    """
    Parse a plugin group ID.

    :param group_id: e.g. "sub_1234_host_1"
    :return: {
        "subscription_id": "1234",
        "object_type": "host",
        "id": "1",
    }
    (values are kept as the strings split out of the ID)
    """
    _, subscription_id, object_type, instance_id = group_id.split("_")
    return {
        "subscription_id": subscription_id,
        "object_type": object_type,
        "id": instance_id,
    }
8e5e773993b7bea728d85133794b246901bc8c66
42,801
def textContent(node):
    """Return the text in `node`, including that in the child-nodes.

    This is the equivalence of the text_content() method for HTML nodes,
    but works for both HTML and XML nodes.
    """
    fragments = list(node.itertext())
    return ''.join(fragments)
a6760b4855b4c674f38a4ad360faf9e1dd924f71
42,803
def compile_source(filepath):
    """
    Compile the source file at *filepath* into a code object.

    Code objects can be executed by an exec statement or evaluated by a
    call to ``eval()``.
    """
    # line endings must be represented by a single newline character ('\n'),
    # and the input must be terminated by at least one newline character.
    # Bug fix: the 'U' open mode was removed in Python 3.11 (universal
    # newlines are the default for text mode), and `with` guarantees the
    # handle is closed even if compile() raises.
    with open(filepath) as fp:
        return compile(fp.read() + '\n', filepath, 'exec')
8eef8201590b14832e6e617de10adb3d2b5ff3b7
42,804
def extract_impression_id(line, assert_first_line=False):
    """
    Extracts the impression_id (text before the first '|') from a line.

    Accepts either str or bytes input; bytes are decoded first.
    """
    if type(line) is bytes:
        line = line.decode()
    separator_at = line.index("|")
    return line[:separator_at].strip()
09f67f24e4e517c1ac66df5cc1fb8d7d359ad3c9
42,806
def file_exists (filename):
    """
    Determine if a file with the given name exists in the current
    directory and can be opened for reading.

    Returns: True iff it exists and can be opened, False otherwise.

    :param filename: Filename to check.
    :return True if file exists.
    """
    try:
        with open(filename, 'r'):
            return True
    except IOError:
        return False
f8319ccbec0b3dc75d0428cbf7f1a65782d98005
42,808
def check_number_threads(numThreads):
    """Checks whether or not the requested number of threads has a valid value.

    Parameters
    ----------
    numThreads : int or str
        The requested number of threads, should either be a strictly
        positive integer or "max" or None

    Returns
    -------
    numThreads : int
        Corrected number of threads (-1 stands for "use the maximum")
    """
    wants_max = numThreads is None or (
        isinstance(numThreads, str) and numThreads.lower() == 'max')
    if wants_max:
        return -1
    if not isinstance(numThreads, int) or numThreads < 1:
        raise ValueError('numThreads should either be "max" or a strictly positive integer')
    return numThreads
a8d683d5c265f43567031e8c10314efad2411ec9
42,809
def permutations(arr, position, end, res):
    """
    Permutate the array.

    Generates all permutations of arr[position:end] in place, appending
    each complete permutation (as a tuple of the whole array) to res.
    Returns res for convenience.
    """
    if position == end:
        res.append(tuple(arr))
        return res
    for index in range(position, end):
        # Swap the candidate element into place, recurse, then swap back.
        arr[position], arr[index] = arr[index], arr[position]
        permutations(arr, position + 1, end, res)
        arr[position], arr[index] = arr[index], arr[position]
    return res
f101b5073052d394b402d8eb6c4d144047036b77
42,812
import os
import hashlib


def hash_file(filename):
    """
    Calculate the md5 hash of a file. Used to check for stale files.

    :param filename: The name of the file to check
    :type str:
    :return: md5 hex digest string, or the integer 0 when the file does
        not exist (kept for callers that test `== 0`)
    :rtype: str
    """
    if not os.path.exists(filename):
        return 0
    digest = hashlib.md5()
    with open(filename, "rb") as file_to_hash:
        digest.update(file_to_hash.read())
    return digest.hexdigest()
0c317a92d8a94a96128a4be9dc86bd94980e135f
42,813
def read_maze(maze_file):
    """
    (file open for reading) -> list of list of str

    Return the contents of maze_file in a list of list of str, where
    each character is a separate entry in the list.
    """
    return [list(line.strip()) for line in maze_file]
2084ac891012932774d46d507f550e8070e3cc47
42,814
import os


def normPath(path):
    """
    Convert to an normalized path (remove redundant separators and
    up-level references).
    """
    normalized = os.path.normpath(path)
    return normalized
88f5ed57ffc823f8de6b547dfd5b84cf25242813
42,815
def can(obs, action_id):
    """Returns True if the specified action is available."""
    available = obs.observation.available_actions
    return action_id in available
509e7baa411529114881d95c38684d232d71db5a
42,816
def removeengineeringpids(pids):
    """Removing propcodes that are associated with engineering and
    calibration proposals"""
    return [pid for pid in pids if 'ENG_' not in pid and 'CAL_' not in pid]
18a3f14f6645a2d27727192b045cfb7b64f959f3
42,818
def make_pin_name(port, index):
    """Formats a pin name of a multi-bit port, e.g. ('data', 3) -> 'data_b3'."""
    return f"{port}_b{index}"
e3e7c3476583bd80a68b53e077399b278f501573
42,821
import os


def patch_mypy_prefix_module_discovery() -> bool:  # no cov
    """
    pip does not use a virtual environment for builds so we need to patch
    Mypy in order for it to recognize installed build requirements.

    Returns True when the patch was applied, False when the environment
    does not look like a pip build; raises OSError/ValueError when the
    environment looks like pip but patching is impossible.
    """
    # pip always sets this, but we cannot merely set MYPYPATH with it b/c we encounter:
    # https://github.com/python/mypy/issues/10829
    python_path = os.environ.get('PYTHONPATH', '')
    if not python_path:
        return False
    paths = python_path.split(os.pathsep)
    # pip only sets one location, see:
    # https://github.com/pypa/pip/blob/21.3.1/src/pip/_internal/build_env.py#L137
    if len(paths) > 1:
        return False
    temp_build_dir = os.path.dirname(paths[0])
    # https://github.com/pypa/pip/blob/21.3.1/src/pip/_internal/build_env.py#L74
    # https://github.com/pypa/pip/blob/21.3.1/src/pip/_internal/utils/temp_dir.py#L164
    if not os.path.basename(temp_build_dir).startswith('pip-build-env-'):
        return False
    wheel_requirements_install_path = os.path.join(temp_build_dir, 'normal', 'lib', 'site-packages')
    if not os.path.isdir(wheel_requirements_install_path):
        return False
    # At this point, assume we are indeed being built by pip and error out if patching fails
    #
    # https://github.com/python/mypy/issues/5701#issuecomment-751494692
    patch_file = os.path.join(wheel_requirements_install_path, 'mypy', 'pyinfo.py')
    if not os.path.isfile(patch_file):
        raise OSError('Cannot find Mypy file to patch')
    with open(patch_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    # for/else: break on the getsitepackages definition; if it is never
    # found, the file layout changed and the patch cannot be applied.
    for _patch_start_index, line in enumerate(lines, 1):
        if line.startswith('def getsitepackages():'):
            break
    else:
        raise ValueError('Cannot apply patch to Mypy file')
    # Insert a replacement body (reversed so the final order is preserved)
    # that makes module discovery simply return sys.path.
    for line in [
        '    # type: () -> List[str]',
        '    return sys.path',
        '',
        '',
        'def _get_site_packages():',
    ][::-1]:
        lines.insert(_patch_start_index, f'{line}\n')
    with open(patch_file, 'w', encoding='utf-8') as f:
        f.writelines(lines)
    return True
5f2ad005660207a2bdce497ac0a96f356be6ce24
42,822
import os


def grab_next_session(session_directory):
    """
    Create the next sequential session folder (session_01, session_02, ...)
    inside *session_directory* and return its path.

    :param session_directory: directory you want to put these folders in
    :type session_directory: str
    :return: path of the newly created session directory
    :rtype: str
    """
    existing_suffixes = []
    for file_name in os.listdir(session_directory):
        file_path = os.path.join(session_directory, file_name)
        # Only session_NN sub-directories count toward the next suffix;
        # plain files and unrelated folder names are ignored.
        if os.path.isdir(file_path) and '_' in file_name:
            try:
                existing_suffixes.append(int(file_name.split('_')[1]))
            except ValueError:
                continue
    # Bug fix: max() on an empty list raised ValueError whenever the
    # directory was non-empty but contained no session folders.
    suffix = max(existing_suffixes) + 1 if existing_suffixes else 1
    session = os.path.join(session_directory, "session_{:02d}".format(suffix))
    os.makedirs(session)
    return session
41b92f14b17bbac5e13456eca656e16fae2243d6
42,823
import os
import hashlib


def calc_hashes(file_path, hash_names, start=0, end=None):
    """
    Calculate several hashes over a byte range of a file.

    :param file_path: file to hash
    :param hash_names: list of hashlib algorithm names to compute
    :param start: starting file offset (inclusive)
    :param end: ending file offset (exclusive); defaults to the file size
    :return: list of hexadecimal digests, in the same order as hash_names
    """
    if end is None:
        end = os.path.getsize(file_path)
    hashers = [hashlib.new(name) for name in hash_names]
    remaining = end - start
    with open(file_path, "rb") as stream:
        stream.seek(start)
        # Feed the range to every hasher in 64 KiB chunks.
        while remaining > 0:
            data = stream.read(min(65536, remaining))
            if not data:
                break
            for hasher in hashers:
                hasher.update(data)
            remaining -= len(data)
    return [hasher.hexdigest() for hasher in hashers]
0c7a576ec25085a1601f2797642643c00dbd38d6
42,824
def getTime(t):
    """
    Return a human-readable string for a duration given in seconds.

    Parameters:
        t (float): time in seconds

    Returns:
        s (str): hours when t >= 1 hour; minutes and seconds when
            t >= 1 minute; seconds otherwise
    """
    if t >= 3600:
        return f"{round(t // (3600), 2)} hours.\n"
    if t >= 60:
        return f"{t // 60} mins, {t % 60} secs.\n"
    return f"{t} secs.\n"
8448c6f3d5216ab6585d2367e9bac07170ecd08b
42,825
def make_tag_decorator(known_tags): """ Create a decorator allowing tests to be tagged with the *known_tags*. """ def tag(*tags): """ Tag a test method with the given tags. Can be used in conjunction with the --tags command-line argument for runtests.py. """ for t in tags: if t not in known_tags: raise ValueError("unknown tag: %r" % (t,)) def decorate(func): if (not callable(func) or isinstance(func, type) or not func.__name__.startswith('test_')): raise TypeError("@tag(...) should be used on test methods") try: s = func.tags except AttributeError: s = func.tags = set() s.update(tags) return func return decorate return tag
80a97f0db5198629aa1f48163d14b2eae463e933
42,827
def variance(rv, *args, **kwargs):
    """
    Return the variance of *rv*.

    In general computed using `mean` but may be overridden.

    :param rv: RandomVariable
    """
    variance_method = rv.variance
    return variance_method(*args, **kwargs)
479175f7c101612ea14cef7caf3c5876c2714987
42,828
def param_nully(value) -> bool:
    """Determine null-like values."""
    nullish = (None, '', 'undefined', 'none', 'null', 'false')
    if isinstance(value, str):
        value = value.lower()
    return value in nullish
243ab7fdbd08f236a3382cc5e545f035557d8c53
42,829
def formatChapter(chapter):
    """A chapter (or section or whatever) number."""
    try:
        number = int(chapter)
    except ValueError:
        raise ValueError("Chapter %s is not a number" % chapter)
    return str(number)
4b7bffe56ca3e3db07f946a2a2e96f76372625b1
42,830
def assemble_cla_status(author_name, signed=False):
    """
    Helper function to return the text that will display on a change
    request status.

    For GitLab there isn't much space here - we rely on the user hovering
    their mouse over the icon. For GitHub there is a 140 character limit.

    :param author_name: The name of the author of this commit.
    :type author_name: string
    :param signed: Whether or not the author has signed an signature.
    :type signed: boolean
    """
    name = author_name if author_name is not None else 'Unknown'
    if not signed:
        return name, 'Missing CLA Authorization.'
    return name, 'EasyCLA check passed. You are authorized to contribute.'
9ea59337f1d3d04531c6fe3457a6c769327ab767
42,831
def has_optional(al):
    """Return True if any argument in *al* has a (truthy) initializer."""
    return any(a.init for a in al)
6c2bf7836afc34fa47408cbdb94c94335fa21b26
42,832
import struct
import math


def _sc_decode(soundcheck):
    """Convert a Sound Check string value to a (gain, peak) tuple as used
    by ReplayGain.

    :param soundcheck: iTunes SoundCheck tag: 10 numbers, each represented
        by 8 characters of ASCII hex preceded by a space.
    :return: (gain in dB rounded to 2 places, peak fraction of full scale
        rounded to 6 places); (0.0, 0.0) when the tag is malformed.
    """
    # SoundCheck tags consist of 10 numbers, each represented by 8
    # characters of ASCII hex preceded by a space.
    try:
        if isinstance(soundcheck, bytes):
            soundcheck = soundcheck.decode('ascii')
        # Bug fix: str.decode('hex') is Python 2 only; bytes.fromhex is
        # the Python 3 equivalent.
        packed = bytes.fromhex(soundcheck.replace(' ', ''))
        soundcheck = struct.unpack('!iiiiiiiiii', packed)
    except (struct.error, TypeError, ValueError, UnicodeDecodeError):
        # SoundCheck isn't in the format we expect, so return default
        # values.
        return 0.0, 0.0

    # SoundCheck stores absolute calculated/measured RMS value in an
    # unknown unit. We need to find the ratio of this measurement
    # compared to a reference value of 1000 to get our gain in dB. We
    # play it safe by using the larger of the two values (i.e., the most
    # attenuation).
    maxgain = max(soundcheck[:2])
    if maxgain > 0:
        gain = math.log10(maxgain / 1000.0) * -10
    else:
        # Invalid gain value found.
        gain = 0.0

    # SoundCheck stores peak values as the actual value of the sample,
    # and again separately for the left and right channels. We need to
    # convert this to a percentage of full scale, which is 32768 for a
    # 16 bit sample. Once again, we play it safe by using the larger of
    # the two values.
    peak = max(soundcheck[6:8]) / 32768.0

    return round(gain, 2), round(peak, 6)
fcdce4c73241e9de00f1a658b0635fc626ac6774
42,834
import pathlib


def stringify_path(filepath):
    """Attempt to convert a path-like object to a string.

    Parameters
    ----------
    filepath: object to be converted

    Returns
    -------
    filepath_str: maybe a string version of the object

    Notes
    -----
    Objects supporting the fspath protocol (Python 3.6+) are coerced via
    their __fspath__ method. For backwards compatibility with older
    Python versions, pathlib.Path objects are specially coerced. Any
    other object (bytes, strings, buffers, non-path-likes) is returned
    unchanged.
    """
    fspath = getattr(filepath, "__fspath__", None)
    if fspath is not None:
        return fspath()
    if isinstance(filepath, pathlib.Path):
        return str(filepath)
    return filepath
83fca05a40e3b0f518d6bed454848a4ba6ed14f9
42,835
def renderFKPs(landmarks) -> dict:
    """Extract facial keypoints.

    Args:
        landmarks: iterable of points exposing ``.x`` and ``.y``

    Returns:
        dict: {index: [x, y]} in iteration order
    """
    return {i: [point.x, point.y] for i, point in enumerate(landmarks)}
c9c8a9efaa3f78fdba0bdad4bb22cd6d503ca2ad
42,838
def validate_coverage(
    route,
    metric="node_coverage",
    max_gap_sec=3 * 60 * 60,
    min_node_coverage_percent=0.75,
):
    """
    Make sure there is sufficient coverage of the planned route with live data.

    :param route: iterable of route-segment dicts; each may carry
        "updates" (live observations) and "stations" (planned stops)
    :param metric: "time" (no gap between live updates may exceed
        max_gap_sec) or "node_coverage" (fraction of planned stations
        seen live must reach min_node_coverage_percent)
    :param max_gap_sec: maximum allowed gap between consecutive live
        updates, in seconds (used by the "time" metric)
    :param min_node_coverage_percent: required fraction of planned
        stations with live data (used by the "node_coverage" metric)
    :return: True when coverage is sufficient (or metric is unknown),
        False otherwise
    """
    if metric == "time":
        last_update, live = None, []
        # Gather every update that carries a timestamp across all segments.
        for r in route:
            live += [l for l in r.get("updates", []) if l.get("eventTime")]
        # Walk updates chronologically and reject any over-long gap.
        # NOTE(review): eventTime values are assumed to be comparable
        # datetime-like objects supporting subtraction — confirm upstream.
        for l in sorted(live, key=lambda l: l.get("eventTime")):
            if last_update is None:
                last_update = l.get("eventTime")
            if (l.get("eventTime") - last_update).total_seconds() > max_gap_sec:
                return False
            last_update = l.get("eventTime")
        if last_update is None:
            # Did not see any updates
            return False
    elif metric == "node_coverage":
        # Do not allow less than 75% node coverage with live data
        live_stations, planned_stations = [], []
        for r in route:
            live_stations += [
                l.get("stationId") for l in r.get("updates", []) if l.get("stationId")
            ]
            planned_stations += [
                p.get("stationId") for p in r.get("stations", []) if p.get("stationId")
            ]
        # Fraction of distinct planned stations that were seen live.
        intersection = set(planned_stations).intersection(set(live_stations))
        if (
            float(len(intersection)) / float(len(set(planned_stations)))
            < min_node_coverage_percent
        ):
            return False
    return True
0257b7a746141425194c69ed50ac4e554cccb980
42,839
def schedule_bus_number(bus_stop_code, bus_selected):
    """
    Message that will be sent when user wants to choose buses for their
    scheduled messages.

    :param bus_stop_code: bus stop code shown in the message header
    :param bus_selected: buses already chosen, echoed back to the user
    :return: HTML-formatted message text
    """
    return 'Bus Stop Code <b>{}</b>\nYou can select the bus numbers that you want to receive their arrival timings.' \
           '\n\nIf you did not select any, all bus timings will be shown on the scheduled message.\n\n' \
           'You can select up to 5 buses per message.\n\nClick confirm after selecting your bus numbers.\n\n' \
           'Click/Type /exit to stop scheduling message.\n\n<b>Bus Selected:{}</b>'.format(bus_stop_code, bus_selected)
0b23cb5b1ba8d92b623488459e794bf212904ff1
42,840
def alphabetic_score(name, list_position):
    """Compute an alphabetic score for a name from its letters' ASCII
    values (A=1, B=2, ...) multiplied by its 1-based list position.
    """
    # ord('A') is 65, so subtracting 64 maps uppercase letters to 1..26.
    letter_total = sum(ord(ch) - 64 for ch in name)
    return letter_total * (list_position + 1)
e828d1bce66ea1a74515a64c3aecadc03a4e733b
42,841
def mean_center_utilmat(U, axis=1, fillna=True, fill_val=None):
    """Get the mean-centered utility matrix.

    Parameters:
        U (DataFrame): utility matrix (rows are users, columns are items)
        axis (int): axis along which the mean is evaluated,
            {0/'index', 1/'columns'}, default 1
        fillna (bool): whether missing/null values are to be filled
        fill_val (None/float): value used to fill nulls when fillna is
            True; None means fill with 0

    Returns:
        DataFrame: mean-centered utility matrix
    """
    centered = U.sub(U.mean(axis=axis), axis=1 - axis)
    if not fillna:
        return centered
    return centered.fillna(0 if fill_val is None else fill_val)
dad6239843aa47e8894a04b49f87ef34e4bc2e7a
42,842
def as_list(val):
    """Return *val* unchanged if it is already a list, else wrap it in one."""
    return val if isinstance(val, list) else [val]
484c4163ea8e3dd17c9c4372554b54f16434b995
42,843
def is_placeholder(x):
    """Returns whether `x` is a placeholder.

    # Arguments
        x: A candidate placeholder.

    # Returns
        Boolean-like: the `_mxnet_placeholder` marker when present,
        False otherwise.
    """
    # Missing attribute -> False; otherwise return the marker itself,
    # matching `hasattr(...) and x._mxnet_placeholder`.
    return getattr(x, '_mxnet_placeholder', False)
4c5974c66f196ff6ba7e62fff05c91d59d6e14ba
42,846
def to_pixel_units(l, pixelwidth):
    """Scale length *l* to pixel units using the supplied pixel width
    (in arbitrary length units). On a zero or incompatible pixel width
    the length is returned unchanged.
    """
    try:
        scaled = l / pixelwidth
    except (TypeError, ZeroDivisionError):
        return l
    return scaled
4e8883cf3ff007858f122ac13433c5346c851211
42,847
import tempfile


def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on the
    specified compiler.
    """
    with tempfile.NamedTemporaryFile("w", suffix=".cpp") as src:
        print("Testing for flag %s" % (flagname))
        src.write("int main (int argc, char **argv) { return 0; }")
        # A compile failure (any exception) means the flag is unsupported.
        try:
            compiler.compile([src.name], extra_postargs=[flagname])
        except Exception as exc:
            print(exc)
            return False
        return True
68467b6424ac8eb7f3049f21812d877f0fbe7cd3
42,848
def comparer(ses_hash, usr_hash):
    """This provides a function to compare initially hashed flight times
    with doubly hashed stored flight times.

    :param ses_hash: mapping of keys to session-side timing values
    :param usr_hash: mapping of keys to user-side timing values
    :return: a similarity score in [0, 1]
    """
    total = 0
    kmax = 0
    # Accumulate per-key deviation over the keys both mappings share.
    # NOTE(review): the constant 24 appears to be a per-key tolerance
    # subtracted from each absolute deviation — confirm its origin.
    for k in usr_hash.keys():
        if k in ses_hash:
            kmax += 1
            total += (abs(int(ses_hash[k]) - int(usr_hash[k])) - 24)
    # NOTE(review): if the mappings share no keys, kmax is 0 and this
    # raises ZeroDivisionError — confirm callers guarantee overlap.
    score = 4.8 - (total/kmax)  # 4 ms minus average deviation off normal
    # Clamp/normalize into [0, 1]; 4.8 acts as the full-score ceiling.
    if score > 4.8:
        score = 1
    elif score < 0:
        score = 0
    else:
        score = abs(score)/4.8
    return(score)
de184fce02712427afd7156dea5f9fc19804ffb7
42,849
def metadata(data):
    """Convert a dictionary of strings into an RST metadata block."""
    field_lines = [":%s: %s\n" % (key, data[key]) for key in data]
    return ''.join(field_lines)
8d47746df2a232ff043b5a60527917f6f75329ee
42,850
def cli(ctx, group_id):
    """Get information about a group

    Output:

        a dictionary containing group information
    """
    groups_api = ctx.gi.groups
    return groups_api.show_group(group_id)
5bb99f5d76ab7a4dd1e471ca39339f7082105849
42,852
from typing import Union
from typing import Tuple
import secrets
import hashlib


def wep_encypher_pw(password: str, salt: Union[None, str] = None) -> Tuple[str, str]:
    """
    Hash and salt a password string and return the SHA-512 digest of the
    hashed and salted string.

    Args:
        password: A password inputted by the user
        salt: The salt to apply to the password before hashing; a fresh
            random salt is generated when omitted

    Returns:
        The salt, and the SHA-512 digest (hex) of password + salt.
    """
    salt = salt or secrets.token_hex(16)
    digest = hashlib.sha512((password + salt).encode("utf-8")).hexdigest()
    return salt, digest
ffbb5ec08b2e9f8c8c9567f254bcc90180f9d7f5
42,855
import sys
from datetime import datetime


def datetime_from_isoformat(value: str):
    """Return a datetime object parsed from an isoformat string.

    Args:
        value (str): Datetime string in isoformat.
    """
    if sys.version_info < (3, 7):
        # datetime.fromisoformat only exists from Python 3.7 on.
        return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
    return datetime.fromisoformat(value)
ffa0616d1896ef049b811206a7296438a000c150
42,856
import os


def find_in_dirs(filename, directories):
    """Find the file under given directories.

    Args:
        filename: File/Folder name to find.
        directories: List of directories to search.

    Returns:
        The first combined directory/filename path that exists, or None.
    """
    candidates = (os.path.join(directory, filename) for directory in directories)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
477e514d1309ad8f9a0c51ce11cb9da302208a00
42,857
def varint_to_blob_length(l):
    """
    Invert the blob-length encoding: stored lengths are doubled and have
    12 added so that they are even and at least 12.

    :param l: encoded length (0, or an even integer >= 12)
    :return: the decoded blob length as an int
    """
    if l == 0:
        return 0
    # Bug fix: use floor division — plain `/` returns a float on
    # Python 3, while callers expect an integral byte count (encoded
    # lengths are always even, so `//` loses nothing).
    return (l - 12) // 2
8638250edb42e6ee5e51f097104d4787f36c88ad
42,858
def patient_form(first_name, last_name, patient_id, gender, birthdate):
    """OpenMRS Short patient form for creating a new patient.

    Parameters               OpenMRS form field               Note
    first_name               personName.givenName             N/A
    last_name                personName.familyName            N/A
    patient_id               identifiers[0].identifier        N/A
    gender                   patient.gender                   M or F
    birthdate                patient.birthdate                single digits must be padded
    N/A                      identifiers[0].identifierType    use "2"
    N/A                      identifiers[0].location          use "1"
    """
    form = {
        "personName.givenName": first_name,
        "personName.familyName": last_name,
        "identifiers[0].identifier": patient_id,
        # Fixed identifier type (2) and location (1) per the form spec above.
        "identifiers[0].identifierType": 2,
        "identifiers[0].location": 1,
        "patient.gender": gender,
        "patient.birthdate": birthdate,
    }
    return form
98e41b828b1de828bf6925b24d9c9e321c4c4cfa
42,859
import os
import subprocess


def show_annotated_feed():
    """ Runs script to show annotated feed. Returns PID of process.

    Launches scripts/show_feed.py (relative to this module) as a detached
    child process and returns its PID without waiting for it.
    """
    # Directory containing this module; the feed script lives beside it.
    pwd = os.path.split(os.path.abspath(__file__))[0]
    loc = os.path.join(pwd, 'scripts', 'show_feed.py')
    # Discard stdout, append stderr to logfile.log, and start the child in
    # its own process group (os.setpgrp) so it keeps running independently.
    # NOTE(review): relies on `nohup` and /dev/null, i.e. POSIX only.
    process = subprocess.Popen(['nohup', loc],
                               stdout=open('/dev/null', 'w'),
                               stderr=open('logfile.log', 'a'),
                               preexec_fn=os.setpgrp)
    return process.pid
c1dd718622fd3e7152a03beae221afdd35f38825
42,860
def __check_flag__(all_vars, flag, requirements):
    # type: (dict, str, list) -> list
    """
    Checks the given flag against the requirements looking for issues.

    :param all_vars: All variables.
    :param flag: Flag to check.
    :param requirements: Flag requirements: [allowed_types] or
        [allowed_types, allowed_values].
    :returns: A list of issues (empty if none).
    """
    flag_header = "Flag "
    is_not = " is not "
    issues = []
    if requirements:
        allowed_types = requirements[0]
        value = all_vars[flag]
        # Exact type membership (deliberately not isinstance, so
        # subclasses do not pass).
        if type(value) not in allowed_types:
            issues.append(flag_header + flag + is_not + str(allowed_types))
        elif len(requirements) == 2:
            allowed_values = requirements[1]
            # Type is fine; also check the value is one of the options.
            if value not in allowed_values:
                issues.append(flag_header + flag + "=" + value +
                              " is not supported. Available values: " +
                              str(allowed_values))
    return issues
0a19f0ffa55b95a7c4ee62e5ee5010a1e52c7848
42,861
def test_site_redirects(old_links, success_messages, error_messages):
    """
    For each old link, verify that it gets a permanent redirect header and
    that the subsequent page is 200
    """
    # NOTE(review): the redirect checks described in the docstring are not
    # implemented here — the accumulated messages are returned unchanged
    # and `old_links` / `error_messages` are unused. Confirm whether the
    # verification lives elsewhere or is still TODO.
    return success_messages, error_messages
f73c7aa4b8c41fa06eb58afffee6a4dac1c68dda
42,862
def assemble_custom_shelflist(assemble_shelflist_test_records):
    """ Pytest fixture. Returns a utility function for creating a custom
    shelflist at the given location code. Uses the
    `assemble_shelflist_test_records` fixture to add the records to the
    active Solr environment for the duration of the test. Returns a
    tuple: environment records (erecs), location records (lrecs), and
    item test records (trecs).
    """
    def _assemble_custom_shelflist(lcode, sl_item_data, id_field='id'):
        # One location record for the requested location code.
        test_locdata = [(lcode, {})]
        test_itemdata = []
        # Stamp each item with the location code before indexing.
        for item_id, data in sl_item_data:
            new_data = data.copy()
            new_data['location_code'] = lcode
            test_itemdata.append((item_id, new_data))
        # Index the location under the 'location' profile, keyed by 'code';
        # the location call's environment records are discarded.
        _, lrecs = assemble_shelflist_test_records(test_locdata,
                                                   id_field='code',
                                                   profile='location')
        erecs, trecs = assemble_shelflist_test_records(test_itemdata,
                                                       id_field=id_field)
        return erecs, lrecs, trecs
    return _assemble_custom_shelflist
fb432cce9c79997fba3f7670ff4b3bf97c26846a
42,863
import random


def mutate_path(path):
    """
    Return a copy of *path* with one randomly chosen pair of adjacent
    nodes swapped; the input path is left untouched.
    """
    # Choose from 0..len-2 so the swap partner (idx + 1) stays in range.
    idx = random.randint(0, len(path) - 2)
    mutated = list(path)
    mutated[idx], mutated[idx + 1] = mutated[idx + 1], mutated[idx]
    return mutated
07484de90755aa275f3874ace247f3f8f8d8b4f6
42,864
import random


async def flip_coin():
    """Flips a coin."""
    rand = random.randint(0, 1)
    side = random.randint(1, 20)
    # A 1-in-20 chance of landing on the edge, otherwise heads/tails.
    if side == 20:
        message = "The coin landed on... its side?"
    elif rand == 0:
        message = "The coin landed on heads."
    else:
        message = "The coin landed on tails."
    return {"message": message}
fc86cd2f7d8af48ce2a098df98ef639ca5a55c3b
42,865
def middle(lst):
    """
    Takes a list and returns a new list that contains all but the first
    and last elements.

    Input:
        lst -- a list
    Output:
        new -- new list with first and last elements removed; empty for
        lists with fewer than two elements

    Bug fix: the previous slice-then-delete implementation raised
    IndexError on empty and single-element lists; slicing handles those
    edge cases gracefully.
    """
    return lst[1:-1]
ee065663b7ace7a8f582a6967096862585b9f599
42,868
def cluster_set_name(stem, identity):
    """Get a setname that encodes the %identity value.

    Identity 1.0 becomes "10000"; any other value is formatted to four
    decimal places with the leading "0." stripped.
    """
    digits = "10000" if identity == 1.0 else f"{identity:.4f}"[2:]
    return f"{stem}-nr-{digits}"
c23a71ef98cbaf7b5b11fc57893f3f0d4fa589b8
42,869
def find_underscores(edffilename):
    """
    Utility function

    Scan *edffilename* for underscore characters.

    :param edffilename: file name string to scan
    :return: index (int) of the LAST underscore when the name contains
        more than one underscore, otherwise None
    """
    str_copy = list(edffilename).copy()
    indices = []
    ind = len(str_copy) - 1
    # find indices of the '_'
    # Pop characters from the end; `ind` tracks the index of the
    # character just popped, decrementing once per iteration.
    while len(str_copy) > 0:
        char = str_copy.pop()
        if char == '_':
            indices.append(ind)
        ind -= 1
    # Indices were collected back-to-front, so indices[0] is the last
    # underscore in the original string.
    if len(indices) > 1:
        return indices[0]
    else:
        # NOTE(review): a name with exactly one underscore returns None —
        # confirm that skipping single-underscore names is intentional.
        return None
4494389d1b4c9eede4e54cdb8febf18ffb64ed7f
42,870