content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def deltav(V, R, r0, r1, r2, r3):
    """Return the differential output voltage for the 4x load cell.

    V is the excitation voltage, R the nominal bridge resistance and
    r0..r3 the per-element resistance deviations.
    """
    branch_a_current = V / (4.0 * R + r0 - r3)
    branch_b_current = V / (4.0 * R - r0 + r3)
    node_a = branch_a_current * (2.0 * R - r1 - r3)
    node_b = branch_b_current * (2.0 * R + r2 + r3)
    return node_a - node_b
a4eec41b788709e45ab6eab7420e059642449dce
27,214
def get_project_ids(config, modeling_application):
    """ Get a sorted list of the ids of models in the source database for a
    particular modeling application (e.g., XPP)

    Args:
        config (:obj:`dict`): configuration with ``source_session`` and
            ``source_api_endpoint`` entries
        modeling_application (:obj:`str`): modeling application (e.g., ``XPP``)

    Returns:
        :obj:`list` of :obj:`int`: ids of projects
    """
    url = '{}/models?modeling_application={}'.format(
        config['source_api_endpoint'], modeling_application)
    response = config['source_session'].get(url)
    response.raise_for_status()
    project_ids = response.json()
    project_ids.sort()
    return project_ids
c3ebe7f74a1eb27dbbcd316002ad91d2ebaa4bf6
27,216
from subprocess import CalledProcessError, check_output

def get_pid(name):
    """ Get the PID of a running process by name.

    :param name: process name passed to ``pidof``
    :return: raw ``pidof`` output (bytes, newline-terminated)
    :raises ValueError: if no process with that name is running
    """
    try:
        pid = check_output(["pidof", name])
    except CalledProcessError as err:
        # pidof exits non-zero when nothing matches, so check_output raises
        # instead of returning empty output; the original `if not pid` check
        # could never fire and callers saw an opaque CalledProcessError.
        raise ValueError(f"Pid of {name} not found. "
                         f"Likely {name} background process is not running.") from err
    if not pid:
        # defensive: keep the original guard for an empty-but-successful result
        raise ValueError(f"Pid of {name} not found. "
                         f"Likely {name} background process is not running.",
                         f"pid: {pid}")
    return pid
61785dc67ca93e5a7cd48914eabc94726f2699ff
27,217
import os
import mimetypes

def extract(fname_from, fname_to=None):
    """Build a doit-style task that decompresses ``fname_from``.

    :param fname_from: String; the input file to decompress
    :keyword fname_to: String; optional name of the resulting decompressed
        file. Defaults to the original file name with the outermost file
        extension removed.
    :return: the task dict, or ``None`` for unrecognized compression types.

    External dependencies:
    - gunzip: should come with gzip
    - bunzip2: Should come with the bzip2 package
    """
    if fname_to:
        target = fname_to
    else:
        target = os.path.splitext(fname_from)[0]
    task = {
        "name": "decompress:" + fname_from,
        "targets": [target],
        "file_dep": [fname_from],
    }
    # guess_type returns (type, encoding); the *encoding* identifies gzip/bzip2
    _, encoding = mimetypes.guess_type(fname_from)
    actions_by_encoding = {
        'gzip': ["gzip -d < " + fname_from + " > " + target],
        'bzip2': ["bzip2 -d < " + fname_from + " > " + target],
    }
    if encoding not in actions_by_encoding:
        return None
    task['actions'] = actions_by_encoding[encoding]
    return task
8e718ca64d036a2f6dadd4569846f765d2a1e9f8
27,220
def construct_filter_based_on_destination(reimbursable_destination_type: str):
    """Construct the ORM filter dict for a reimbursable destination type.

    :param reimbursable_destination_type: Reimbusable Destination Type
    :return: Filter dict (empty for unknown types)
    """
    null_filter_by_type = {
        'EXPENSE_CATEGORY': 'destination_expense_head__isnull',
        'ACCOUNT': 'destination_account__isnull',
    }
    field = null_filter_by_type.get(reimbursable_destination_type)
    return {field: True} if field else {}
73116ce3e6398e98c1540380094e70cc1620867b
27,221
import os
from datetime import datetime

def get_modification_date(filename):
    """Return the modification time of a file as a datetime.

    input: string filename
    output: datetime modification time of the file
    """
    return datetime.fromtimestamp(os.path.getmtime(filename))
3f5057ed5bc3bba84bdc170ba0d002a812327f45
27,222
def fastMaxVal(oraSet, leftRoom, memo=None):
    """0/1-knapsack solved with memoized recursion (dynamic programming).

    The problem has overlapping subproblems (suffix of items x remaining
    capacity) and optimal substructure, so results are cached in ``memo``
    keyed by ``(len(oraSet), leftRoom)``.

    :param oraSet: list of items exposing ``getWeight()`` and ``getValue()``
    :param leftRoom: remaining capacity
    :param memo: internal cache; created fresh per top-level call. The
        original used the mutable default ``memo={}``, which silently shared
        (and corrupted) the cache across independent calls with different
        item lists.
    :return: tuple ``(best_value, taken_items)``
    """
    if memo is None:
        memo = {}
    key = (len(oraSet), leftRoom)
    if key in memo:
        result = memo[key]
    elif not oraSet or leftRoom == 0:
        result = (0, ())
    elif oraSet[0].getWeight() > leftRoom:
        # first item cannot fit: solve for the rest
        result = fastMaxVal(oraSet[1:], leftRoom, memo)
    else:
        nextItem = oraSet[0]
        # branch 1: take the item
        withValue, withTaken = fastMaxVal(
            oraSet[1:], leftRoom - nextItem.getWeight(), memo)
        withValue += nextItem.getValue()
        # branch 2: skip the item
        withoutValue, withoutTaken = fastMaxVal(oraSet[1:], leftRoom, memo)
        if withValue > withoutValue:
            result = (withValue, withTaken + (nextItem,))
        else:
            result = (withoutValue, withoutTaken)
    memo[key] = result
    return result
a2952ee74aa52765c952800bcaffaf770ffe9b4b
27,224
import random

def natural(values, classes=5, maxsize=1000, samples=3):
    """Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes. For very large datasets (larger
    than maxsize) calculates on random subsamples (repeated ``samples``
    times, averaging the break values; lower/higher bounds kept intact).
    """
    # too few values: a breakpoint per unique value, ignoring classes
    if len(values) <= classes:
        return list(values) + [values[-1]]

    def getbreaks(values, classes):
        # the original algorithm by Carson Farmer
        n = len(values)
        mat1 = [[0 for _ in range(classes + 1)] for _ in range(n + 1)]
        mat2 = [[0 for _ in range(classes + 1)] for _ in range(n + 1)]
        for i in range(1, classes + 1):
            mat1[1][i] = 1
            mat2[1][i] = 0
            for j in range(2, n + 1):
                mat2[j][i] = float('inf')
        v = 0.0
        for l in range(2, n + 1):
            s1 = 0.0
            s2 = 0.0
            w = 0.0
            for m in range(1, l + 1):
                i3 = l - m + 1
                val = float(values[i3 - 1])
                s2 += val * val
                s1 += val
                w += 1
                v = s2 - (s1 * s1) / w
                i4 = i3 - 1
                if i4 != 0:
                    for j in range(2, classes + 1):
                        if mat2[l][j] >= (v + mat2[i4][j - 1]):
                            mat1[l][j] = i3
                            mat2[l][j] = v + mat2[i4][j - 1]
            mat1[l][1] = 1
            mat2[l][1] = v
        k = n
        kclass = [0 for _ in range(classes + 1)]
        kclass[classes] = float(values[n - 1])
        kclass[0] = float(values[0])
        countNum = classes
        while countNum >= 2:
            id = int((mat1[k][countNum]) - 2)
            kclass[countNum - 1] = values[id]
            k = int((mat1[k][countNum] - 1))
            countNum -= 1
        return kclass

    # Automatic sub sampling for large datasets: individual random samples
    # give unstable results, so average the breaks over several samples.
    if len(values) > maxsize:
        allrandomsamples = []
        for _ in range(samples):
            randomsample = sorted(random.sample(values, maxsize))
            # keep lower and higher bounds so the whole range is covered
            randomsample[0] = values[0]
            randomsample[-1] = values[-1]
            allrandomsamples.append(getbreaks(randomsample, classes))
        jenksbreaks = [sum(breakvalues) / float(len(breakvalues))
                       for breakvalues in zip(*allrandomsamples)]
    else:
        jenksbreaks = getbreaks(values, classes)
    return jenksbreaks
8ba4085847bb1dbb70433a0af1491c83dccc39d9
27,225
def check_new_str(methods, new_dict_def, new_dict_att, old_dict_def, old_dict_att):
    """Report which meta-strategy solvers produced a new strategy.

    :param methods: different meta-strategy solver names.
    :param new_dict_def: defender's current str dict.
    :param new_dict_att: attacker's current str dict.
    :param old_dict_def: defender's old str dict.
    :param old_dict_att: attacker's old str dict.
    :return: per-method dicts of new defender and attacker strategies.
    """
    new_def = {}
    new_att = {}
    for method in methods:
        added_def = new_dict_def[method] - old_dict_def[method]
        added_att = new_dict_att[method] - old_dict_att[method]
        new_def[method] = added_def
        new_att[method] = added_att
        if not added_def and not added_att:
            print(method + " has not produced a new strategy.")
        else:
            print(method + ":defender's new str is ", added_def)
            print(method + ":attacker's new str is ", added_att)
    return new_def, new_att
75e6761d76d46228f3ab0d9377357600d75c6964
27,227
def calc_url_by_from_date(url, period1_sec, period2_sec):
    """Rewrite the period1/period2 query parameters of a history URL.

    Keeps everything before ``history?`` and everything from the second
    ``&`` onward, replacing the first two parameters with the given periods.
    """
    prefix = url.split('history?', 1)[0] + 'history?'
    suffix = '&' + url.split('&', 2)[2]
    periods = 'period1={}&period2={}'.format(period1_sec, period2_sec)
    return prefix + periods + suffix
404c7a7259c2edf1dbc565ea21b8158d33b8b1ba
27,228
def _simplename(name): """create simple version of the font name""" # return alphanumeric characters of a string (converted to lowercase) return ''.join(c.lower() for c in name if c.isalnum())
471eadb1277dc750fea00db4975763e05eb5a177
27,229
def containsNonAlphaNum(word_list):
    """Does the list of words contain any special characters?

    Characters '-', '_', '.' are allowed in addition to alphanumerics.

    Parameters:
        word_list: list of words
    Returns:
        bool: whether any word contains a special character
    """
    allow_chars = {"-", "_", "."}
    return any(
        not char.isalnum() and char not in allow_chars
        for word in word_list
        for char in word
    )
7a87c706eaec5cd4ee10fa0ccc027a4850430eb3
27,230
def graph_codes(dataframe, code):
    """Plot a NAICS code column next to its LQ counterpart with hvplot.

    Takes the dataframe and a NAICS code (string) and returns the combined
    plot of the raw column and of the ``LQ_``-prefixed column.
    """
    lq_code = 'LQ_' + code
    raw_plot = dataframe.hvplot(c=code, tiles='OSM', title=code, alpha=0.6)
    lq_plot = dataframe.hvplot(c=lq_code, tiles='OSM', title=lq_code, alpha=0.6)
    return raw_plot + lq_plot
ad403862e136f83dd4e334cd0cdf47fa8cc5df4c
27,231
def _get_user_id_from_session(session): """ Get user's identifier from the session. It could be their id or username since both are unique. """ user_sess_id = session.session_token.get("sub") if user_sess_id: try: user_sess_id = int(user_sess_id) except ValueError: # if we can't cast to an int, don't. could be username pass return user_sess_id
759249581ea54660968958ceed1dd29c5cf1c247
27,232
def str_fsize(sz):
    """ Format a file size as a string (e.g., '1.2 MB'). """
    for unit in ('bytes', 'KB', 'MB', 'GB'):
        if sz < 1024:
            return '%.1f %s' % (sz, unit)
        sz /= 1024.0
    return '%.1f TB' % sz
6e42389ab96678d595c3c0bd11441e6fa5c96810
27,234
def get_position_of_one(min: float, max: float) -> float:
    """Map the value 1.0 from range [min, max] onto range [0, 1].

    :param min: minimum
    :param max: maximum
    :return: relative position of 1.0 inside the range
    """
    span = max - min
    return (1.0 - min) / span
773810b54e8e84b108185468c6e1e9c416ee08ce
27,235
def subsystemNamespace(functionName, subsystemName):
    """ Prepend the subsystem name to a function to act as a namespace.

    This avoids different subsystems with the same function names
    overwriting each other. Returned names have the form
    ``SYS_functionName`` (an ``_`` instead of ``.``, since ``.`` would be
    treated as an attribute access when generating the docs).

    Args:
        functionName (str): Name of the function.
        subsystemName (str): Current subsystem as a three letter, all
            capital name (ex. ``EMC``).

    Returns:
        str: Function name with the subsystem prepended as a namespace.
    """
    return "{}_{}".format(subsystemName, functionName)
12e640c028cc28918d23dca06e8a268a62790884
27,236
import json

def to_json(datadict, encoder=json.JSONEncoder):
    """ Serialize python objects using json.

    params:
        datadict -> dict with the python objects to serialize,
            e.g. {'obj_name': obj, ..}
        encoder -> json encoder class; replace with e.g. a NumpyEncoder to
            serialize datatypes not supported by the default
    return: a json string
    """
    serialized = json.dumps(datadict, cls=encoder)
    return serialized
f564bf27aa52361a6ccfc223c132a5de0ce1dfda
27,237
import re

def extrapolate_args(contents):
    """Reconstruct the dirb command line from a dirb result file.

    dirb's result output does not record the command used, so this works
    backwards from the report text. Heavily dependent on the dirb version
    in use: if any of the expected report strings change, the patterns
    below must be updated too. Not the best, but what are ya gonna do?

    :param contents: String value of output file
    """
    # (pattern, flag template), in the order the flags are emitted;
    # templates containing %s consume the first captured group.
    specs = [
        (re.compile('USER_AGENT: (.+)'), '-a %s'),
        (re.compile('COOKIE: (.+)'), '-c "%s"'),
        (re.compile('OPTION: Fine tunning of NOT_FOUND detection'), '-f'),
        (re.compile('ADDED_HEADERS:.+\n--\n(.+)\n--'), '-H "%s"'),
        (re.compile('OPTION: Using Case-Insensitive Searches'), '-i'),
        (re.compile('OPTION: Printing LOCATION header'), '-l'),
        (re.compile('OPTION: Ignoring NOT_FOUND code -> (\\d+)'), '-N %s'),
        (re.compile('OUTPUT_FILE: (.+)'), '-o %s'),
        (re.compile('PROXY: (.+)'), '-p %s'),
        (re.compile('PROXY AUTHORIZATION: (.+)'), '-P %s'),
        (re.compile('OPTION: Not Recursive'), '-r'),
        (re.compile('OPTION: Silent Mode'), '-S'),
        (re.compile('OPTION: NOT forcing an ending'), '-t'),
        (re.compile('AUTHORIZATION: (.+)'), '-u %s'),
        (re.compile('OPTION: Show Not Existant Pages'), '-v'),
        (re.compile('OPTION: Not Stoping on warning message'), '-w'),
        (re.compile('EXTENSIONS_LIST: \\((.+)\\) \\|'), '-X %s'),
        (re.compile('EXTENSIONS_FILE: (.+)'), '-x %s'),
        (re.compile('SPEED_DELAY: (\\d+) miliseconds'), '-z %s'),
    ]
    command_args = []
    for pattern, template in specs:
        matches = pattern.findall(contents)
        if not matches:
            command_args.append(None)
        elif '%s' in template:
            command_args.append(template % matches[0])
        else:
            command_args.append(template)
    return 'dirb %s' % ' '.join(filter(None, command_args))
5edf02320b990a5eeec48d77363434e38ab836d7
27,238
def vector(a, b):
    """ Return the vector from ``a`` to ``b`` (i.e. ``b - a``). """
    displacement = b - a
    return displacement
8d55f178bf67c7dfd48d1e8def8af798c6593203
27,239
def uppaal_system_declaration(printer, ast):
    """Prints the Uppaal system declaration: all declarations followed by
    the system declaration, newline-separated."""
    decls = [printer.ast_to_string(decl) for decl in ast["decls"]]
    system_decl = printer.ast_to_string(ast["systemDecl"])
    return '{}\n{}'.format('\n'.join(decls), system_decl)
4c910742595ee28fa1aae1ebe093d6391f7745bc
27,240
def in_time_period(current_datetime, start_week_day, end_week_day, start_time, end_time):
    """Test whether the given time lies inside a weekly time window.

    :param current_datetime: a datetime.datetime object
    :return: True if inside the window, False otherwise
    """
    weekday = current_datetime.weekday()
    clock = current_datetime.time()
    if not (start_week_day <= weekday <= end_week_day):
        return False
    # on the first day the window starts at start_time ...
    if weekday == start_week_day and clock < start_time:
        return False
    # ... and on the last day it ends (exclusive) at end_time
    if weekday == end_week_day and clock >= end_time:
        return False
    return True
4e4f7ed817583354e9ff7292d083d6b5d4ea1106
27,243
import time

def past_millisecond(time_started: int):
    """How many milliseconds have passed since ``time_started``.

    time_started: posix timestamp (seconds).
    """
    elapsed_seconds = time.time() - time_started
    return int(elapsed_seconds * 1000)
e35ddf6c9a826e3bfc5800e64a26feb4f0a5022c
27,244
def decode_integer_big_endian_4(frame, start):
    """ Decode a big-endian integer from 4 bytes of a byte array.

    Args:
        frame: Source byte array
        start: Read start index

    Returns:
        Decoded integer value
    """
    value = 0
    for offset in range(4):
        value = (value << 8) | frame[start + offset]
    return value
00b7809b3f759d57628f1f70af801f6f7f4ccb41
27,245
def current_user_files(user, modelFile):
    """Return the files created by the given user, excluding removed ones."""
    owned = modelFile.objects.filter(owner=user)
    return owned.exclude(removed=1)
93c2374c0f7e8c204ba7e92e4d91da843fd1eb84
27,246
def undo_transforms(y, transformers):
    """Undo all y-transformations applied.

    Transformers must be unwound in reverse of the order they were applied;
    only those with ``transform_y`` set actually touch ``y``.
    """
    for tfm in reversed(transformers):
        if tfm.transform_y:
            y = tfm.untransform(y)
    return y
84c5d7b73e3aebeeb04d683048e29e01cc8e940b
27,248
def adda(a, b, c=0, d=0, e=0):
    """Add number b to number a, plus any of the optional numbers c, d, e.

    Logs the call (with only the non-zero optional arguments) to stdout.
    """
    parts = [f"Function `adda` called with arguments a={a} b={b}"]
    for label, value in (("c", c), ("d", d), ("e", e)):
        if value:
            parts.append(f" {label}={value}")
    print("".join(parts))
    return a + b + c + d + e
50a1a923f2dd046114bf92caffe9ba770d062f43
27,249
def findNewParameters(parameters, bngParameters):
    """ Find parameters added in the xml definition that were not in the
    original bngl file, formatted as bngl parameter lines. """
    # first whitespace-delimited token of each existing line is its name
    known = [entry.strip().split(" ")[0] for entry in parameters]
    return [
        "\t {0} {1}\n".format(name, value)
        for name, value in bngParameters.items()
        if name not in known
    ]
87f954f642f1105de87b78b180b71d85f2b7bf67
27,250
def _looks_like_url(name): """ Function tries to determine if input argument looks like URL and not like S3 bucket name. :param name - input name :return: True, if name looks like URL; False otherwise. """ if name.endswith('.com'): return True if name.find(':') >= 0: # Assume it is port number return True return False
d65eff615612fd087ec48a847347be6a5cefef91
27,251
import pickle

def load_from_file(fn):
    """
    Load data previously saved by save_to_file.

    :param fn: file name.
    :return: the unpickled data.
    """
    # context manager guarantees the handle is closed even on error
    # (the original `open(fn, "rb").read()` never closed the file);
    # pickle.load streams from the file directly instead of reading it all
    with open(fn, "rb") as fh:
        return pickle.load(fh)
b25a8063b0e7d77bf2725b2657fc7e65473317bd
27,253
def yesno(boolean):
    """Map True to 'yes' and anything else to 'no'.

    Boost-based C++ programs can accept 'yes' and 'no' on command line
    arguments mapped to bools."""
    return 'yes' if boolean is True else 'no'
a7e1c4c590a87c9d0c04b6be619e8ccf93754bda
27,254
def battery() -> int:
    """Get battery voltage.

    Stub implementation: always reports 0.
    """
    voltage = 0
    return voltage
ffab9868759257418b5ae9f115cea9f5c3e14c9d
27,255
def multi_replace(s, rep_dict):
    """ Replace multiple substrings in a string.

    Parameter
    ---------
    s: string
        The string to transform
    rep_dict: dict
        The replace patterns, {old: new}, applied in iteration order

    Return
    ------
    s: string
        The replaced string
    """
    for old, new in rep_dict.items():
        s = s.replace(old, new)
    return s
01ce4d83b12d60252b01681adc89c12a2f8b3e91
27,257
import subprocess

def get_current_app_properties() -> bytes:
    """ Return the window properties of the currently connected device.

    Requires a device connected to the machine (or an emulator running).

    :return: Bytes that are the response of the adb command
    """
    # primary query: the currently focused window
    properties = subprocess.run('adb shell "dumpsys window windows | grep -E "mCurrentFocus"', stdout=subprocess.PIPE)
    # some devices report nothing for mCurrentFocus; fall back to mObscuringWindow
    if str(properties.stdout) == "b''":
        properties = subprocess.run('adb shell "dumpsys window windows | grep -E "mObscuringWindow"', stdout=subprocess.PIPE)
    return properties.stdout
571de877bedd8ab371d2cf8354f6e01c1afbac49
27,259
from typing import Union

def unit_translate_to_B(size: int, unit: str) -> Union[int, float]:
    """Convert a size with unit B/K/M/G to bytes.

    :return: number of byte
    :raises ValueError: for any unit other than B, K, M, G
    """
    multipliers = {'B': 1, 'K': 1024, 'M': 1048576, 'G': 1073741824}
    if unit not in multipliers:
        raise ValueError(f'Unit ({unit}) is not available.')
    return size * multipliers[unit]
62590f36235e75801eccac0203ecbfb539f1a34f
27,262
def generate_gene(base, indexes) -> str:
    """ Construct a gene from a base string by repeatedly inserting the
    accumulated string into itself after each of the given indices. """
    gene = base
    for idx in indexes:
        cut = idx + 1
        gene = gene[:cut] + gene + gene[cut:]
    return gene
a5cdca4d70e111003defe8286733ccc36325a66d
27,263
import math

def get_meansd(values):
    """Return mean and (population) standard deviation of the values;
    ``(nan, nan)`` for an empty input."""
    if not values:
        return float('nan'), float('nan')
    n = len(values)
    mean = sum(values) / n
    variance = sum((v - mean) ** 2 for v in values) / n
    return mean, math.sqrt(variance)
77f69a86a886f3ea7f814f91a02a362fee2e3588
27,265
def exclude_records(
    cluster_id_from_params: str,
    cluster_id_from_instance: str,
    cluster_type_from_params: str,
    cluster_type_from_instance: str,
) -> bool:
    """Decide whether a record should be excluded.

    :param cluster_id_from_params: cluster ID from the request parameters,
        used to filter resources under one cluster
    :param cluster_id_from_instance: cluster ID carried by the instance
    :param cluster_type_from_params: cluster environment from the request
        (production or test)
    :param cluster_type_from_instance: cluster environment of the instance
    :returns: True/False, where True means the record can be excluded
    """
    # instances without a cluster id are always excluded
    if not cluster_id_from_instance:
        return True
    # when a specific cluster is requested, keep only exact id matches;
    # otherwise fall back to comparing environment types
    if cluster_id_from_params:
        return cluster_id_from_instance != cluster_id_from_params
    return str(cluster_type_from_params) != str(cluster_type_from_instance)
b4ea964474b41d26db4acaf7163b35a6c4b462bd
27,267
def string_to_float(in_str):
    """Turn a Fortran-formatted number string into a float.

    Handles 'D' exponents ('1.0D-3') and Fortran's exponent-less negative
    exponents ('1.234-5' meaning 1.234e-5). A leading sign is left alone,
    fixing the original's crash on plain negatives like '-1.5' (which
    became 'E-1.5'); strings already carrying an 'E'/'e' exponent pass
    straight through instead of being mangled.
    """
    s = in_str
    if "D" not in s and "E" not in s.upper():
        # only an *embedded* minus is a dropped exponent marker;
        # s[0] may legitimately be a sign character
        if "-" in s[1:]:
            s = s[0] + s[1:].replace("-", "D-")
    return float(s.replace("D", "E"))
f108b29a766fefcfd2fa974563c7c194bf1e1ade
27,268
def _GetCountDict(arr): """ *Internal Use Only* """ res = {} for v in arr: res[v] = res.get(v, 0) + 1 return res
ac5d3f91249423bb942ab3271e0f6cea5b67bbd7
27,271
def get_short_object_id(cid):
    """return shortened contribution ID (ObjectId) for `cid`.

    >>> get_short_object_id('5a8638add4f144413451852a')
    '451852a'
    >>> get_short_object_id('5a8638add4f1400000000000')
    '5a8638a'
    """
    SHORT_LEN = 7
    text = str(cid)
    tail = text[-SHORT_LEN:]
    # an all-zero tail carries no information; use the head instead
    if tail == '0' * SHORT_LEN:
        return text[:SHORT_LEN]
    return tail
8e39f877f3b8914401d898aa5423b4fa2360aacf
27,272
def _make_scales(notes): """ Utility function used by Sound class for building the note frequencies table """ res = dict() for note, freq in notes: freq = round(freq) for n in note.split('/'): res[n] = freq return res
21ebc91750f231fa3d25f10f59d55c921940db4b
27,273
def is_win(board, symbol):
    """ Return True when ``symbol`` occupies a full row, column or diagonal
    of the 3x3 tic-tac-toe ``board`` (a flat 9-element sequence). """
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    return any(all(board[i] == symbol for i in line) for line in lines)
eaba893bc7c4d4dd842fd339e3e53059d64ea061
27,276
import os

def get_size(file_path):
    """Return the size of the file at ``file_path`` in bytes."""
    return os.stat(file_path).st_size
03c8ca827985fafbccf8079fb75884def010e715
27,277
import torch

def interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """Interpolate ``x`` tensor according to ``xp`` and ``fp`` as in ``np.interp``.

    This implementation cannot reproduce numpy results identically, but reasonable.
    Code referred to `here <https://github.com/pytorch/pytorch/issues/1552#issuecomment-926972915>`_.

    Args:
        x: the input tensor that needs to be interpolated.
        xp: the x-coordinates of the referred data points.
        fp: the y-coordinates of the referred data points, same length as ``xp``.

    Returns:
        The interpolated values, same shape as ``x``.

    Raises:
        ValueError: if any of ``x``, ``xp``, ``fp`` is not 1D.
    """
    # the original chained comparison (a != b != c != 1) only catches some
    # mismatches; check each tensor explicitly
    if x.dim() != 1 or xp.dim() != 1 or fp.dim() != 1:
        raise ValueError(
            f"Required 1D vector across ``x``, ``xp``, ``fp``. Got {x.dim()}, {xp.dim()}, {fp.dim()}."
        )
    slopes = (fp[1:] - fp[:-1]) / (xp[1:] - xp[:-1])
    locs = torch.searchsorted(xp, x)
    locs = locs.clip(1, len(xp) - 1) - 1
    # line through (xp[i], fp[i]): the original mistakenly used xp[locs]
    # as the intercept, returning x-coordinates instead of y-values
    return slopes[locs] * (x - xp[locs]) + fp[locs]
b18c373d8d1bfc8da736a0322d6abcc41af3cde0
27,279
import json def _communicate(ws, message: dict) -> dict: """Sends a JSON-formatted python dict on the supplied websocket connection""" ws.send(json.dumps(message)) return json.loads(ws.recv())
670f084cb78fe656c0dc769369d0e0982ce8d4be
27,280
def readCurrency(txt):
    """ -> Number validation.

    Repeatedly prompts with ``txt`` until a valid number is entered,
    accepting ',' as the decimal separator.

    :txt: message printed before number validation.
    :return: a float number.
    """
    while True:
        try:
            raw = input(txt).strip().replace(',', '.')
            if '.' in raw or raw.isnumeric():
                return float(raw)
            print('ERROR! This is not a valid number.')
        except ValueError:
            print('ERROR! Try a number.')
4082e8698bd3de74c85e67658cbf224a99219003
27,281
def get_pr_labels(pull):
    """Gets PR label names as a set"""
    return {label.name for label in pull.labels}
be807aeddddfd642412ba9e1455085af1c67ca7d
27,282
def all_image_ids(images):
    """Given images data, return a dict of all image IDs therein.

    :param images: images data from Twistlock, decoded from json
    :return: dict whose keys are the IDs of the images and values are the
        tag for that image
    """
    return {
        image['info']['id']: image['info']['repoTag']['tag']
        for image in images
    }
829614ea917521b6292754a6aa29be0486360a31
27,284
def addNodeMI(nm_dict, node, value):
    """ Accumulate motif information for a node.

    Input:
        nm_dict: (dictionary) node motif degree dictionary
        node: (int) the id of the node
        value: (int) the change value for the node
    Output:
        nm_dict: (dictionary) the updated dictionary (mutated in place)
    """
    nm_dict[node] = nm_dict.get(node, 0) + value
    return nm_dict
67fe86aefcf8c54feaa72e07a15f2be65b916fc1
27,286
def calc_vwap(self, session_close_price, session_high_price, session_low_price, session_volume):
    """Return the session VWAP (volume-weighted average of the typical
    price), rounded to 2 decimals, as ``{"price": ...}``.

    Have to write the code for standard deviation too.
    Code from https://gist.github.com/jxm262/449aed7f3ce0919e57a1f0ad8c18a9d9
    """
    typical_price = (session_low_price + session_high_price + session_close_price) / 3
    vwap = (typical_price * session_volume).cumsum() / session_volume.cumsum()
    return {"price": round(vwap.tolist()[-1], 2)}
6d19824483cc032be06b433e57ecfff32e50167b
27,288
import re def _create_matcher(utterance): """Create a regex that matches the utterance.""" # Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL # Pattern matches (GROUP|OPTIONAL): Change light to [the color] {item} parts = re.split(r"({\w+}|\[[\w\s]+\] *)", utterance) # Pattern to extract name from GROUP part. Matches {item} group_matcher = re.compile(r"{(\w+)}") # Pattern to extract text from OPTIONAL part. Matches [the color] optional_matcher = re.compile(r"\[([\w ]+)\] *") pattern = ["^"] for part in parts: group_match = group_matcher.match(part) optional_match = optional_matcher.match(part) # Normal part if group_match is None and optional_match is None: pattern.append(part) continue # Group part if group_match is not None: pattern.append(fr"(?P<{group_match.groups()[0]}>[\w ]+?)\s*") # Optional part elif optional_match is not None: pattern.append(fr"(?:{optional_match.groups()[0]} *)?") pattern.append("$") return re.compile("".join(pattern), re.I)
152f5d12f9db3552c6113825d9151b8a235c6790
27,289
def dict_without_keys(d, *omitkeys):
    """ Returns a copy of a dict without the specified keys

    Args:
        d (dict): A dict to omit keys from
        *omitkeys: Variable length list of keys to omit

    Returns:
        dict: A dict with omitted keys
    """
    omitted = set(omitkeys)
    return {key: value for key, value in d.items() if key not in omitted}
75b04a1ef3f2848e390b065f2df12eedec8919ea
27,290
def get_root_nodes(config_yaml):
    """ List the root nodes defined in the given taxonomy config.

    Args:
        config_yaml: configuration content

    Returns:
        list of root nodes
    """
    return [entry['Node'] for entry in config_yaml['Root_nodes']]
8f336e87ec0688ab15ca91c032f6921175ab021b
27,292
def hard_dedupe(s):
    """Keep only the first occurrence of each character of ``s``.

    So the string 'abccba' goes to 'abc'. The empty string returns the
    empty string.
    """
    seen = set()
    kept = []
    for ch in s:
        if ch not in seen:
            seen.add(ch)
            kept.append(ch)
    return ''.join(kept)
58104d46d19c1de713fc86ff08ea36383dc5598e
27,294
def rgb2(r, g, b):
    """Format an RGB triple as a 6-digit uppercase hex string, clamping
    each channel into [0, 255].

    >>> rgb2(0, 0, 0)
    '000000'
    >>> rgb2(1, 2, 3)
    '010203'
    >>> rgb2(-20, 275, 125)
    '00FF7D'
    """
    clamped = [max(0, min(255, channel)) for channel in (r, g, b)]
    r, g, b = clamped
    return "{0:06X}".format((r << 16) | (g << 8) | b)
c96ee448889fa1c9bef3c5e57ec835f95df54c5e
27,295
def their_nth_to_last(head, i):
    """Return the ``data`` of the i-th node from the end of a singly linked
    list (i=1 is the last node).

    Returns None for i < 1 or when the list is shorter than i. The
    original crashed with AttributeError when the list length was exactly
    i-1 (the lead pointer reached None just as the advance loop ended, and
    ``p2.next`` then dereferenced None).
    """
    if i < 1:
        return None
    lead = trail = head
    # advance the lead pointer i-1 nodes ahead of trail
    for _ in range(i - 1):
        if lead is None:
            return None
        lead = lead.next
    if lead is None:
        # list has exactly i-1 nodes (or is empty): no i-th-to-last element
        return None
    # walk both pointers until lead reaches the final node
    while lead.next is not None:
        trail = trail.next
        lead = lead.next
    return trail.data
e210affa4230b6fc83b683caacdb94edc84f27f8
27,296
import re

def standardize_whitespace(string: str) -> str:
    """
    Collapse runs of whitespace in *string* to single spaces and strip
    leading/trailing whitespace.
    """
    stripped = string.strip()
    return re.sub(r"\s+", " ", stripped)
499f361101d45eea82928dafef8b18a87d910841
27,297
from numpy import isnan

def same(x, y):
    """Are two Python objects considered the same?

    Equal values are the same; NaN is considered the same as NaN.
    Comparison failures (e.g. incomparable types) are treated as
    'not the same'.
    """
    try:
        if x == y:
            return True
    except:  # deliberately best-effort: == may raise for exotic types
        pass
    try:
        if isnan(x) and isnan(y):
            return True
    except:  # isnan rejects non-numeric input
        pass
    return False
1bb599350f1e2ba240df23b3119c12fac9ac8718
27,300
def privacy_decoder(encoded_message, key):
    """ Decodes an encoded string.

    NOT SECURE -- obfuscation only.

    The original computed ``ord(c) - ord(k) % 256`` which, due to operator
    precedence, never wrapped the *difference*; ``chr()`` then raised
    ValueError whenever the key byte was larger than the message byte.
    Wrapping the whole difference modulo 256 matches the usual paired
    encoder (``chr((ord(c) + ord(k)) % 256)``) and agrees with the old
    result on every input the old code handled without crashing.

    :param encoded_message: text to decode
    :param key: key applied cyclically over the message
    :return: decoded string
    """
    decoded_chars = []
    for i, ch in enumerate(encoded_message):
        key_c = key[i % len(key)]
        decoded_chars.append(chr((ord(ch) - ord(key_c)) % 256))
    return "".join(decoded_chars)
d0e9bc874134a846cea220e75824ee64ebd404a7
27,301
def snake_to_camel(snake):
    """convert snake_case to camelCase"""
    first, *rest = snake.split("_")
    return first + "".join(part.title() for part in rest)
2c3f67f05d82505b45b2199dead1077700c1a3ef
27,302
import torch
def utterances_tensors_to_utterance_ints(utterances, utterances_lens):
    """
    Convert a batch of zero-padded utterance tensors into plain Python ints,
    e.g. the row [1, 5, 2, 3, 5, 0] becomes 152350.

    The encoding treats each token as a decimal digit: a running tensor is
    built column by column where column t holds ``prev * 10 + token``.
    Since 0 terminates an utterance there is no leading-zero ambiguity; the
    empty utterance encodes as 0.

    Assumes *utterances* is 2-D and padded on the right with zeros, and
    *utterances_lens* is a 1-D tensor of per-row lengths.
    """
    assert utterances.dim() == 2
    assert utterances_lens.dim() == 1
    batch_size, max_len = utterances.size()

    running = torch.LongTensor(batch_size, max_len)
    if utterances.is_cuda:
        running = running.cuda()

    # running[:, t] = decimal encoding of tokens 0..t for every row.
    running[:, 0] = utterances[:, 0]
    for pos in range(1, max_len):
        running[:, pos] = running[:, pos - 1] * 10 + utterances[:, pos]

    result = []
    for row in range(batch_size):
        # Clamp the lookup index so it never runs off the padded row.
        end = min(utterances_lens[row], max_len - 1)
        result.append(running[row, end].item())
    return result
5d54033b5b86e5db338c453f86c5815c8bdccd86
27,303
def agregar_letra(lugares_usados_total,backup_text,event,escribir,save,lugares_usados_temp,palabra,boton_de_la_letra,window,pos_atril_usadas):
    """Place a letter on the board and apply the window update.

    Records the clicked coordinates in ``save`` and refreshes the
    used-positions bookkeeping.  (Translated from the original Spanish
    docstring.)

    Returns False when a letter was actually placed, True otherwise.
    NOTE(review): the flag name ("a letter was placed") suggests the
    opposite polarity — confirm with the callers before changing it.
    """
    se_puso_una_letra = True
    # Only act when the clicked position lies inside the 15x15 board.
    if event[0] >= 0 and event[0] <= 14 and event[1] >= 0 and event[1] <= 14:
        # Save the text previously shown on the button so it can be restored.
        backup_text.insert(0,window.Element(event).GetText())
        # event = position of the touched button (given by key=(i,j)); the
        # letter `escribir` comes from the rack.
        window[event].update(escribir)
        save[event] = escribir
        lugares_usados_total.append(event)
        # Insert at index 0, shifting the rest: the latest move is always
        # at position 0.
        lugares_usados_temp.insert(0,event)
        palabra.append(escribir)
        pos_atril_usadas.append(boton_de_la_letra)
        se_puso_una_letra = False
    return se_puso_una_letra
b2087dd786cf4d5c7c121f8df6f57097f8998cb8
27,304
import sys
import inspect
import six
def CreateCulledCallable(func):
    """\
    Returns a method that will be called with a subset of the potential
    parameters based on its implementation.

    Example:
        def MyMethod(a): ....

        culled_method = CreateCulledCallable(MyMethod)

        culled_method(2, a=1, c=3) -> MyMethod(a=2)
    """
    # Python 2 lacks getfullargspec; pick the introspection API accordingly.
    if sys.version_info[0] == 2:
        arg_spec = inspect.getargspec(func)
    else:
        arg_spec = inspect.getfullargspec(func)
    # All declared argument names, plus the subset that has no default value.
    arg_names = { arg for arg in arg_spec.args }
    positional_arg_names = arg_spec.args[:len(arg_spec.args) - len(arg_spec.defaults or [])]
    # Handle perfect forwarding scenarios
    if not arg_names and not positional_arg_names:
        if getattr(arg_spec, "varkw", None) is not None:
            # func(**kwargs): forward the whole mapping untouched.
            def Invoke(kwargs):
                return func(**kwargs)
        elif arg_spec.varargs is not None:
            # func(*args): forward the values positionally.
            # NOTE(review): relies on the mapping's iteration order — verify
            # that callers pass an ordered mapping.
            def Invoke(kwargs):
                return func(*tuple(six.itervalues(kwargs)))
        else:
            # func(): the callable accepts nothing, so drop every argument.
            def Invoke(kwargs):
                return func()
    else:
        def Invoke(kwargs):
            # Split the supplied values into ones matching declared names
            # and leftovers usable as positional fillers.
            potential_positional_args = []
            invoke_kwargs = {}
            for k in list(six.iterkeys(kwargs)):
                if k in arg_names:
                    invoke_kwargs[k] = kwargs[k]
                else:
                    potential_positional_args.append(kwargs[k])
            # Fill still-missing no-default parameters from the leftovers,
            # in declaration order.
            for name in positional_arg_names:
                if name not in kwargs and potential_positional_args:
                    invoke_kwargs[name] = potential_positional_args.pop(0)
            return func(**invoke_kwargs)
    return Invoke
ed5197fb9ac6825c0a64fed95db545f861e0fdda
27,306
def mround(match):
    """Format the number captured by a regex *match* with two decimals."""
    value = float(match.group())
    return "%.2f" % value
c2ad7dd90ec8269672e7b66263b5772d27eec562
27,307
def get_compatibility_list_from_parser(parser):
    """
    Extract compatible-version information (if any) from a parser docstring.

    Looks for a line containing "compatibility" followed by a colon and a
    comma-separated list, e.g. ``Compatibility: 1.0, 2.0``.

    :param parser: a parser function
    :return: list of version strings, or None when nothing was found
    """
    docstring = parser.__doc__
    if docstring is None:
        return None
    for line in docstring.split('\n'):
        if 'compatibility' not in line.lower():
            continue
        pieces = line.split(':')
        if len(pieces) < 2:
            # "compatibility" without a colon — keep scanning.
            continue
        return [version.strip() for version in pieces[1].split(',')]
    return None
25f83a29f684739c9989ffa03119ca3f880514f3
27,309
def trim_axes(axes, n):
    """Trim an array of axes down to exactly *n*, removing the surplus ones
    from their figure, and return the kept axes."""
    flat = axes.flat
    for surplus in flat[n:]:
        surplus.remove()
    return flat[:n]
89190dbcfa7eee867bbe9ac2eb3453042bf436d9
27,311
def map_SWAS_var2GEOS_var(var, invert=False):
    """
    Map a SWAS species name to its GEOS tracer name (or the reverse).

    Several species map onto the same lumped GEOS tracer: ALK4 (>= C4
    alkanes) and PRPE (>= C3 alkenes); inverting therefore keeps only the
    last SWAS name per GEOS tracer.
    """
    d = {
        'acetaldehyde': 'ALD2',
        'acetone': 'ACET',
        'benzene': 'BENZ',  # GEOSChem, but not GEOS-CF output
        'cis_2_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'cyclo_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'dms': 'DMS',  # GEOSChem, but not GEOS-CF output
        'ethane': 'C2H6',
        'iso_butane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'iso_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'iso_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'isoprene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'methanol': 'MOH',
        'mp_xylene': 'XYLE',
        'n_butane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_heptane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_hexane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_octane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'o_xylene': 'XYLE',
        'pent_1_ene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'propane': 'C3H8',
        'propene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'toluene': 'TOLU',
        'trans_2_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'trans_2_pentene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
    }
    if invert:
        # Reverse the mapping; for lumped tracers the last entry wins.
        d = dict((geos, swas) for swas, geos in d.items())
    return d[var]
89ee80ff698aa17c04b589c3068b5b584adea95b
27,312
from datetime import datetime
def timespan(date, current=None, reach=2):
    """Render the time elapsed since *date* as human-readable text.

    *reach* limits how many consecutive units are shown (e.g. reach=2
    yields "1 day, 1 hour ago").  Returns "just now" for zero or negative
    spans.
    """
    if current is None:
        current = datetime.now()
    units = [
        ('millennium', 'millennia', 60*60*24*365*1000),
        ('century', 'centuries', 60*60*24*365*100),
        ('decennium', 'decennia', 60*60*24*365*10),
        ('year', 'years', 60*60*24*365),
        ('month', 'months', 60*60*24*30),
        ('week', 'weeks', 60*60*24*7),
        ('day', 'days', 60*60*24),
        ('hour', 'hours', 60*60),
        ('minute', 'minutes', 60),
        ('second', 'seconds', 1)
    ]
    remaining = int((current - date).total_seconds())
    parts = []
    first_unit = None
    for index, (singular, plural, seconds) in enumerate(units):
        # Skip units the remaining span is not even one of.
        if remaining < seconds:
            continue
        count, remaining = divmod(remaining, seconds)
        if first_unit is None:
            first_unit = index
        parts.append('{n} {noun}'.format(n=count, noun=plural if count >= 2 else singular))
        # Stop once the requested precision (number of units) is reached.
        if reach is not None and index - first_unit + 1 >= reach:
            break
    if not parts:
        return 'just now'
    return ', '.join(parts) + ' ago'
7ac57111815190e6c212207dff67194061a7a450
27,313
import six
def byteify(value):
    """
    Convert ``value`` into a byte-string.

    Text values are UTF-8 encoded; anything else is returned untouched.
    """
    if not isinstance(value, six.text_type):
        return value
    return value.encode('utf-8')
d1192564eb9206f28ffd803ab75324d78ef73156
27,314
def is_valid_bdf_multiplier(multiplier):
    """Return True when the BFD detect multiplier lies within [3, 50]."""
    return 3 <= multiplier <= 50
d6c9b96611f42d2004f00fd334d5ae8773aecb94
27,315
def m(request):
    """Parametrized fixture with all possible data sources."""
    # `request.param` holds the factory supplied by the fixture's `params`
    # list (the decorator is not visible in this chunk); call it to build
    # the concrete data source for the current parametrization.
    return request.param()
8237a784702c80502d6ba2a9e8faad70aaa7f369
27,316
def rawtext(s):
    """Compile raw text to the appropriate instruction.

    Multi-line text compiles to a "rawtextColumn" instruction carrying the
    column after the final newline; single-line text compiles to
    "rawtextOffset" carrying the total length.
    """
    last_newline = s.rfind("\n")
    if last_newline >= 0:
        return ("rawtextColumn", (s, len(s) - (last_newline + 1)))
    return ("rawtextOffset", (s, len(s)))
474101797a703ccdfd16ffb606b0321bbb3fcdcd
27,317
def need_generate_func(func_line):
    """Return True unless *func_line* is a declaration that needs no
    generated body: `= default`/`= delete` definitions, typedefs, or
    using-declarations.
    """
    stripped = func_line.strip()
    if stripped.endswith(("default", "delete")):
        return False
    if stripped.startswith(("typedef", "using")):
        return False
    return True
38e7df423c2cc7fc9acb284ffe686f97dd6690d4
27,318
def findall(manager, *args, **kwargs):
    """
    Find all items with attributes matching ``**kwargs``.

    This isn't very efficient: it loads the entire list then filters on
    the Python side.  Items missing a requested attribute are skipped.
    """
    matches = []
    wanted = kwargs.items()
    for candidate in manager.list():
        try:
            is_match = all(getattr(candidate, attr) == value
                           for (attr, value) in wanted)
        except AttributeError:
            # Candidate lacks one of the attributes — never a match.
            continue
        if is_match:
            matches.append(candidate)
    return matches
b31675c55837e6652effd28bc814ed9cc98a5788
27,319
import re def _can_skip_tests(file_names, triggers): """ Determines if tests are skippable based on if all files do not match list of regexes :param file_names: list of changed files generated by _get_changed_files() :param triggers: list of regexes matching file name that indicates tests should be run :return: safe to skip tests """ for file_name in file_names: if any(re.match(trigger, file_name) for trigger in triggers): return False return True
de3f9c66332b8b6cc1c788233a6db0462fb38470
27,322
def extract_used_params(model_param_names, grid_model_params, params_dict, algo="GLM"):
    """
    Build the name/value dict of parameters a gridsearch model was actually
    built with, so the same model can be rebuilt by hand.

    :param model_param_names: parameter names we are interested in
    :param grid_model_params: dict mapping parameter name to a dict holding
        its 'default' and 'actual' values
    :param params_dict: extra parameters to merge in (e.g. family='gaussian')
    :param algo: algorithm name; for GLM the 'lambda' key is renamed to
        'Lambda' as required by the model constructor
    :return: dict of parameter name -> actual value
    """
    used = {}
    for pname in model_param_names:
        if str(pname) in grid_model_params.keys():
            used[str(pname)] = grid_model_params[pname]['actual']
    # Merge in the extra parameters (distribution family etc.).
    if params_dict:
        used.update(params_dict)
    # GLM constructors expect 'Lambda' (capitalized) instead of 'lambda'.
    if algo == "GLM" and 'lambda' in used:
        used['Lambda'] = used.pop('lambda')
    return used
8116971d9c00f82322ae319df8fcba5c424f71a9
27,324
def format_puzzle(grid):
    """Format the puzzle grid for printing: cells joined by spaces, one
    newline-terminated row per line."""
    rows = (" ".join(row) for row in grid)
    return "".join(row + "\n" for row in rows)
0b6017d0e9d8a2d29cfe83ef3c20c4cdf535d035
27,326
def inspect_positional_arguments(parameters, args):
    """Bind *args* to the positional parameters described by *parameters*.

    POSITIONAL_OR_KEYWORD parameters consume arguments in order; a
    VAR_POSITIONAL parameter (``*args``) absorbs the remainder (bound to
    ``Parameter.empty`` when nothing is left).  Raises TypeError when extra
    arguments cannot be absorbed.
    """
    pos_or_kw = [(name, param) for name, param in parameters.items()
                 if param.kind == param.POSITIONAL_OR_KEYWORD]
    var_positional = [(name, param) for name, param in parameters.items()
                      if param.kind == param.VAR_POSITIONAL]

    bound = {}
    for (name, _param), value in zip(pos_or_kw, args):
        bound[name] = value

    if var_positional:
        star_name, star_param = var_positional[0]
        # Everything beyond the plain positionals goes into *args.
        bound[star_name] = tuple(args[len(bound):]) or star_param.empty
    elif len(bound) < len(args):
        error = 'function takes at most {} positional arguments but {} were given'.format(
            len(pos_or_kw), len(args)
        )
        raise TypeError(error)
    return bound
a9083d01696fb1f83f5f9dd10253834711adf0ce
27,327
def _make_pointer_increments(name, nargs, sig): """Increments array pointers following each function call.""" inclist = "" for i in range(nargs): if sig[i] in "vpf": inclist += "++%s%d; " % (name, i,) return inclist
e8ab53559beca771177fcf1adca0fe2610f3793f
27,328
def validate_login(content, validator, default):
    """
    Check the response text against the validator mapping.

    *validator* maps a substring expected in *content* to its
    ``[status, message]`` result; the first key found wins.

    :param content: text to inspect
    :param validator: dict of ``substring -> [status, message]``
    :param default: value returned when no substring matches
    :return: the matched login-status value, or *default*
    """
    for marker, result in validator.items():
        if marker in content:
            return result
    return default
e6ef488f4b5d07705a8f3b58b4aed2102efe04c5
27,331
import os
import glob
import sys
def examine_data_directory(input: str, include_only: list, exclude: list):
    """
    Recursively collect data-file paths under *input*.

    Only files whose basename is exactly 28 characters long are treated as
    records; *include_only* / *exclude* further filter by basename.

    :param input: root of the data dump
    :param include_only: if non-empty, only return these files
    :param exclude: if non-empty, don't return these files
    :return: list of file paths as strings
    :raises FileNotFoundError: when *input* is not a directory
    """
    if not os.path.isdir(input):
        raise FileNotFoundError(f"Cannot find {input}.")
    print(f"Looking for records in {input}")

    including = len(include_only) > 0
    if including:
        print(f"Will only include the specified {len(include_only)} file(s).")
    excluding = len(exclude) > 0
    if excluding:
        print(f"Will exclude the specified {len(exclude)} file(s).")

    data_filepaths = []
    # Walk everything below the root; lone directory names fail the
    # 28-character basename test and are skipped.
    for filepath in glob.iglob(input + '**/**', recursive=True):
        base = os.path.basename(filepath)
        if len(base) != 28 or filepath in data_filepaths:
            continue
        if including and base not in include_only:
            continue
        if excluding and base in exclude:
            continue
        data_filepaths.append(filepath)

    if len(data_filepaths) > 0:
        print(f"{len(data_filepaths)} files found.")
    else:
        sys.exit("No files found at this path!")
    return data_filepaths
abda72dadd5f91c0240856eed21621f8cd10c6e6
27,332
import random
def roll(add=0):
    """Roll a d20 and apply *add* as a modifier to the result."""
    return add + random.randint(1, 20)
e8afe08b270ac2488ded0e6c9fa9574d406d9005
27,334
import sys
def get_sp_dir():  # pragma: no cover
    """
    Get the absolute path of the ``site-packages`` directory.

    Scans ``sys.path`` from the end and returns the first entry ending in
    "site-packages"; raises when none is found.
    """
    for candidate in reversed(sys.path):
        if candidate.endswith("site-packages"):
            return candidate
    raise Exception("'site-package' directory not found!")
077007dc23207703afe712b1eb419b2dcfdaa81f
27,335
def tree_2_0_from_tree_0_1(tree):
    """Convert a CIX 0.1 ElementTree (root <codeintel> Element) to CIX 2.0,
    in place, and return it.

    Fixes vs. the original:
    - ``Element.getiterator`` was removed in Python 3.9; use ``iter``.
    - Element truthiness (``if not node``) is deprecated; use explicit
      ``len(node) == 0`` checks.
    """
    # - update some of the no longer used <file> attributes:
    #   drop "generator", "md5" and "mtime" on the <file> tag.
    for obsolete in ("generator", "md5", "mtime"):
        try:
            del tree[0].attrib[obsolete]
        except KeyError:
            pass

    # - move "language" attribute on <file> to "lang" and to "lang" on
    #   <module> (New multi-lang CIX output will allow one file to
    #   have modules of different langs.)
    for file_node in tree.iter("file"):
        lang = file_node.get("language")
        if lang is not None:
            file_node.set("lang", lang)
            for module in file_node.iter("module"):
                if module.get("lang") is None:
                    module.set("lang", lang)
        try:
            del file_node.attrib["language"]
        except KeyError:
            # Be tolerant of transitional CIX.
            pass

    # - move <doc> and <signature> optional sub tags into parent attributes.
    # PERF: This could be done better.
    for tag in ("variable", "function", "class", "module", "interface",
                "argument", "classref", "interfaceref"):
        for node in tree.iter(tag):
            for child in reversed(node):  # reversed() so we can modify while iterating
                if child.tag == "signature":
                    if child.text:  # be tolerant of <signature />
                        node.set("signature", child.text)
                    node.remove(child)
                elif child.tag == "doc":
                    if child.text:  # be tolerant of <doc />
                        node.set("doc", child.text)
                    node.remove(child)
            if len(node) == 0:  # no children now
                node.text = None

    # - move non-variable tags to attributes
    #   (XXX currently <classref> and <interfaceref> tags are not moved)
    for tag in ("variable", "argument", "classref", "interfaceref"):
        for node in tree.iter(tag):
            for child in reversed(node):
                if child.tag == "type":
                    node.set("citdl", child.get("type"))
                    node.remove(child)
            if len(node) == 0:  # no remaining children
                node.text = None
            if tag == "argument":
                # Arguments become variables with ilk="argument".
                node.tag = "variable"
                node.set("ilk", "argument")

    # - move <returns> to a <function> attribute
    for node in tree.iter("function"):
        for child in reversed(node):  # PERF: could just check last child
            if child.tag == "returns":
                assert child[0].tag == "type"
                node.set("returns", child[0].get("type"))
                node.remove(child)

    # - move classrefs and interfacerefs to attributes
    #   Note: <classref attribute="__mixin__"> => "mixinrefs" attribute.
    #   This is used by Ruby (though not used for eval, yet).
    for scope_ilk in ("class", "interface"):
        for node in tree.iter(scope_ilk):
            interfacerefs = []
            classrefs = []
            mixinrefs = []
            for child in reversed(node):
                if child.tag == "classref":
                    if "__mixin__" in child.get("attributes", ""):
                        mixinrefs.append(child.get("citdl")
                                         or child.attrib["name"])
                    else:
                        classrefs.append(child.get("citdl")
                                         or child.attrib["name"])
                    node.remove(child)
                elif child.tag == "interfaceref":
                    interfacerefs.append(child.get("citdl")
                                         or child.attrib["name"])
                    node.remove(child)
            if classrefs:
                classrefs.reverse()
                assert not [c for c in classrefs if ' ' in c]
                node.set("classrefs", ' '.join(classrefs))
            if interfacerefs:
                interfacerefs.reverse()
                assert not [i for i in interfacerefs if ' ' in i]
                node.set("interfacerefs", ' '.join(interfacerefs))
            if mixinrefs:
                mixinrefs.reverse()
                assert not [m for m in mixinrefs if ' ' in m]
                node.set("mixinrefs", ' '.join(mixinrefs))
            if len(node) == 0:
                node.text = None

    # - make all scope tags a "scope" tag (easier for elem.find() usage)
    for tag in ("class", "function", "interface", "module"):
        for node in tree.iter(tag):
            node.tag = "scope"
            if tag == "class" and "__namespace__" in node.get("attributes", ""):
                node.set("ilk", "namespace")
                attributes = node.get("attributes").split()
                attributes.remove("__namespace__")
                if not attributes:
                    del node.attrib["attributes"]
                else:
                    node.set("attributes", ' '.join(attributes))
            elif tag == "module":
                node.set("ilk", "blob")
            else:
                node.set("ilk", tag)

    tree.set("version", "2.0")
    return tree
b85696ccf506cef3bf39b89ed67b2ea74b052b75
27,336
def extract_media(request):
    """Extract the media from a flask Request.

    To avoid race conditions when using greenlets we cannot perform I/O in
    the constructor of GcsObjectVersion, or in any of the operations that
    modify the state of the service.  Because sometimes the media is
    uploaded with chunked encoding, we need to do I/O before finishing the
    GcsObjectVersion creation.  If we do this I/O after the
    GcsObjectVersion creation started, the state of the application may
    change due to other I/O.

    :param request:flask.Request the HTTP request.
    :return: the full media of the request.
    :rtype: str
    """
    encoding = request.environ.get("HTTP_TRANSFER_ENCODING", "")
    if encoding == "chunked":
        # Chunked uploads must be drained from the WSGI input stream.
        return request.environ.get("wsgi.input").read()
    return request.data
c66b1d89a6c9039ff4ab814e93250db55d7cb62c
27,337
from subprocess import CalledProcessError, check_output
def shell_call(cmd):
    """
    Run *cmd* through the shell and return its stdout as a str.

    Returns the empty string when the command cannot be started or exits
    with a non-zero status (preserving the original best-effort contract).

    Fix: the bare ``except:`` is narrowed so KeyboardInterrupt/SystemExit
    are no longer swallowed.
    """
    try:
        return str(check_output(cmd, shell=True), "utf-8")
    except (CalledProcessError, OSError):
        return ""
0e7cd724858cb2c878a057dcb9d166fea9a1700e
27,339
def listify( l, no_elements = 1, check_if_list = True ):
    """Return *l* unchanged when it is already a list (and the check is
    enabled); otherwise return a list containing *l* repeated
    *no_elements* times."""
    if check_if_list and isinstance(l, list):
        return l
    return [l] * no_elements
c0b203e4afcdbfaba552b195e6dc414f48af6e47
27,340
def yn_prompt(question: str, yes=None, no=None) -> bool:
    """Ask a yes-no question on stdin until a recognized answer is given.

    Args:
        question: Description of the prompt
        yes: List of strings interpreted as yes
        no: List of strings interpreted as no

    Returns:
        True if yes, False if no.
    """
    yes = yes or ["yes", "ye", "y"]
    no = no or ["no", "n"]

    prompt = question if question.endswith(" ") else question + " "
    prompt += "[{} or {}] ".format("/".join(yes), "/".join(no))
    print(prompt, end="")

    while True:
        answer = input().lower().strip()
        if answer in yes:
            return True
        if answer in no:
            return False
        # Unrecognized input: re-prompt with the accepted spellings.
        print(
            "Please respond with '{}' or '{}': ".format(
                "/".join(yes), "/".join(no)
            ),
            end="",
        )
3338493b42b118d9aacadff70dab3738643b538a
27,341
def html_abstract_check(agr_data, value):
    """
    future: check a database reference does not have html in the abstract

    :param agr_data: reference record (dict) fetched from the database
    :param value: expected abstract text
    :return: a success message string

    NOTE(review): when 'abstract' is present but differs from *value* this
    raises AssertionError instead of returning a message — and under
    ``python -O`` the assert is stripped, silently changing behavior.
    Consider an explicit exception or a failure message instead.
    """
    if 'abstract' in agr_data:
        assert agr_data['abstract'] == value
        # The assert above already guarantees this condition is True, so the
        # final return below is only reachable when 'abstract' is absent.
        if agr_data['abstract'] == value:
            return 'Success: Expected abstract value. Abstracts do have html, have not decided whether they should not'
    return 'Success: Abstracts do have html, have not decided whether they should not'
8b476952aa7242baf3e4a0ab607a57ea9d46c674
27,342
def hlstr(string, color="white"):
    """
    Return HTML highlighting *string* with the given background color.

    Args:
        string (string): The string to render
        color (string): HTML color for background of the string
    """
    return "<mark style=background-color:{}>{} </mark>".format(color, string)
52e9d069d559feec2237d0847e667a1cb53326d8
27,343
import math
def calc_fuel(mass):
    """Calculate the fuel needed to transport a mass: divide by three,
    round down, and subtract 2 (fuel's own mass is not accounted for).

    Fix: use integer floor division instead of ``math.floor(mass / 3)`` —
    float division loses precision for masses above 2**53, while ``//``
    is exact for arbitrarily large ints (and identical for small ones).

    Args:
        mass: mass

    Returns:
        fuel needed (int)
    """
    return mass // 3 - 2
041e88d0e0669f2bab981d0ebab5b0f4ef3bec6d
27,345
import requests
def call_omeka_api(url):
    """Call the Omeka API at *url* and return the decoded JSON body."""
    return requests.get(url).json()
dc4b786f12fdf9de5dd4fb9db78828efb86d468d
27,346
def welcome():
    """List all available api routes.

    NOTE(review): currently returns the placeholder 'hi' rather than an
    actual route listing — confirm intent with the API owner.
    """
    return 'hi'
f0e621aabd03bcfb6d4ab07b70ca60b7d0c5c9c3
27,347
import os
def package_upload_to(instance, filename):
    """
    Build the upload path for a new package file:
    ``packages/<first-hash-char>/<full-hash>/<filename>``.
    """
    digest = instance.file_hash
    return os.path.join('packages', digest[0], digest, filename)
4bfd388b314063a515ffc444328ebf4d7ab9fcb6
27,348
def clean(query_params):
    """clean removes all the shard stuff that's not useful for analysis and
    is usually present.

    Fixes vs. the original:
    - the filter logic was restructured: as written, the
      ``group.topgroups.`` condition guarded the ``fq`` branch, making it
      unreachable and dropping every non-topgroups parameter — the
      opposite of the docstring's intent;
    - ``sorted(cleaned)`` discarded its result; the list is now actually
      sorted before being returned.
    """
    shard_limited = {
        "ids",
        "NOW",
        "cl",
        "ForceShardHandler",
        "group.distributed.second",
        "group.distributed.first",
        "ShardRouter.SHARD_COORDINATOR_IP",
        "ShardRouter.VNODES_ENABLED",
        "ShardRouter.SHARD_COORDINATOR_ID",
        "version",
        "shard.url",
        "isShard",
        "distrib",
        "fsv",
        "shards.purpose",
        "wt",
    }
    cleaned = []
    for t in query_params:
        tokens = t.split("=")
        # Drop shard-internal parameters outright.
        if tokens[0] in shard_limited:
            continue
        # remove token range parts of queries
        if tokens[0].startswith("group.topgroups."):
            continue
        if tokens[0] == "fq":
            # clean out token fqs and _parent fqs as they're nonsense
            value = tokens[1]
            if value == "-_parent_:F" or value.startswith(
                "{!caching_lucene}(_token_long"
            ):
                continue
        cleaned.append(t)
    cleaned.sort()
    return cleaned
fb44c187d52fef345af596bd059f40e4382d81a4
27,349
def share_command(mwdb, file_or_hash, group):
    """Share object with another group and return a summary payload."""
    target = mwdb.query(file_or_hash)
    target.share_with(group)
    return {
        "message": "Shared {object_id} with {group}",
        "object_id": target.id,
        "group": group,
    }
c44d2f8ba805f3f793913ea7bf48ed9d21518f0d
27,350
def make_valid_did(lfn_dict):
    """
    When managing information about a LFN (such as in `rucio upload` or the
    RSE manager's upload), we add the `filename` attribute to record the
    name of the file on the local disk in addition to the remainder of the
    DID information.

    This function takes that dictionary and strips out the additional
    `filename` key (promoting it to `name` when no `name` is present), so
    the result passes the DID JSON schema validation.  The input dict is
    not mutated.
    """
    did = dict(lfn_dict)
    local_name = did.pop('filename')
    did.setdefault('name', local_name)
    return did
10e5777e5727e835683752aac197b43808d48fb4
27,353
def get_response(move) -> int:
    """
    Prompt the operator to enter the response to a move.

    :param move: The move to which to respond.
    :return: 1: hit, 2: hit and ship destroyed, 3: game over, you win, -1: miss
    """
    print("Move made:", move)
    answer = input('What response to return?')
    return int(answer)
cb115447fe5cf244b7a0bee424edeedb45b5ea42
27,354
import gzip
def load_fasttext_class_probabilities(probability_file_path):
    """
    Load positive-class probabilities from a gzipped fastText prediction
    file.

    Each line holds alternating label/probability tokens; the value
    following ``__label__1`` (the last occurrence, if repeated) is taken as
    the positive-class probability for that example.

    :param probability_file_path: str, path to the gzipped output file with
        class probabilities for the test dataset
    :return: list of float, one probability per example
    """
    probabilities = []
    with gzip.open(probability_file_path, 'rt') as fin:
        for line in fin:
            tokens = line.rstrip().split()
            prob = None
            for idx, token in enumerate(tokens):
                if token == '__label__1':
                    prob = float(tokens[idx + 1])
            # Every line is expected to carry a positive-class score.
            assert prob is not None
            probabilities.append(prob)
    return probabilities
3ddff33c878b6ec013b2bfb8ff1e978f602a7d91
27,355