content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def FormatScoreTimes(score_times):
    """Provide a color-coded formatting of the output of ScoreLogFile.

    Args:
        score_times: mapping of name -> score time in seconds.

    Returns:
        A newline-joined string, one ANSI-colored row per entry:
        blue (< 0.2s), yellow (< 2.0s), red (otherwise).
    """
    ok = '\033[94m'
    warn = '\033[93m'
    fail = '\033[91m'
    result = []
    # .iteritems() was Python 2 only; .items() is the Python 3 equivalent.
    for name, score_time in score_times.items():
        if score_time < 0.2:
            color = ok
        elif score_time < 2.0:
            color = warn
        else:
            color = fail
        result.append(color + '%80s | %5.3f\033[0m' % (name, score_time))
    return '\n'.join(result)
67c7456974d907f8f87eb9cde409083693a08118
12,713
def isWithin(rect1, rect2):
    """Return True if rectangle 1 lies entirely inside rectangle 2.

    Parameters:
        rect1: list of coordinates [minx, maxy, maxx, miny]
        rect2: list of coordinates [minx, maxy, maxx, miny]

    Returns:
        True if rect1 is within rect2, else False.
    """
    minx1, maxy1, maxx1, miny1 = rect1
    minx2, maxy2, maxx2, miny2 = rect2
    # Inside means every edge of rect1 is bounded by the matching edge of rect2.
    return (minx1 >= minx2 and maxx1 <= maxx2
            and miny1 >= miny2 and maxy1 <= maxy2)
d7adca34b7cfb4294316090e18c164db0a34e818
12,714
def doolittle(matrix_a):
    """Doolittle's Method for LU-factorization.

    :param matrix_a: Input matrix (must be a square matrix)
    :type matrix_a: list, tuple
    :return: a tuple containing matrices (L, U)
    :rtype: tuple
    """
    n = len(matrix_a)
    matrix_u = [[0.0] * n for _ in range(n)]
    matrix_l = [[0.0] * n for _ in range(n)]
    for i in range(n):
        for k in range(i, n):
            # Upper triangular entry.
            matrix_u[i][k] = float(
                matrix_a[i][k]
                - sum(matrix_l[i][j] * matrix_u[j][k] for j in range(i))
            )
            if i == k:
                # Doolittle fixes the L diagonal at 1.
                matrix_l[i][i] = 1.0
            else:
                # Lower triangular entry.
                matrix_l[k][i] = float(
                    matrix_a[k][i]
                    - sum(matrix_l[k][j] * matrix_u[j][i] for j in range(i))
                )
                # A zero pivot would divide by zero; fall back to 0.0.
                try:
                    matrix_l[k][i] /= float(matrix_u[i][i])
                except ZeroDivisionError:
                    matrix_l[k][i] = 0.0
    return matrix_l, matrix_u
03ba90c29dfb67ffe1edf939b49f3ab537931831
12,715
import random


def randFunc():
    """Return a random integer drawn uniformly from 1 to 3 inclusive.

    :return: int
    """
    return random.randint(1, 3)
f2b2501dcc1acede3d99d36b55587b6e97f8ae5a
12,716
import subprocess


def shell(cmd, cwd=None, closeFds=True):
    """Executes a shell command.

    :param cmd: Command string
    :param cwd: Current working directory (Default value = None)
    :param closeFds: If True, all file descriptors except 0, 1 and 2 will be
        closed before the child process is executed. Defaults to True.
    :returns: a (return code, std out, std error) triplet
    :rtype: tuple of int, str, str
    """
    # Popen accepts cwd=None (meaning "inherit"), so the former duplicated
    # if/else branches collapse into a single call.
    # pylint: disable=consider-using-with
    process = subprocess.Popen(
        cmd,
        shell=True,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        cwd=cwd,
        close_fds=closeFds,
    )
    stdout, stderr = process.communicate()
    return (process.returncode, stdout, stderr)
9c7600bd32c2f3d15f02269754261ea3548ef1ba
12,718
def getRequest(request):
    """Fetch the openid request stored on the session, or None if absent."""
    session = request.session
    return session.get('openid_request')
f6ebcb13e631365f53ae3a362eaf2dba614d7344
12,719
def filter_staged(position_name, position_annotations, timepoint_annotations):
    """Filter-function for filter_annotations(): keep worms whose every
    timepoint carries a non-empty 'stage' annotation and whose final stage
    is 'dead'."""
    stages = [annotations.get('stage')
              for annotations in timepoint_annotations.values()]
    # all(stages) is True iff every stage is non-None and non-empty.
    if not all(stages):
        return False
    return stages[-1] == 'dead'
14e7f12fa100da4ef9a26496f7f9fc83e2988d3d
12,720
import math


def parse_sizes(raw: dict) -> dict:
    """Convert raw byte counts into MB figures and pull out entity sizes.

    Disk-free rounds down, disk-used rounds up (conservative estimates).
    """
    mb = 1024 * 1024
    entity_sizes = raw["entity_sizes"]
    return {
        "disk_free_mb": math.floor(raw["disk_free"] / mb),
        "disk_used_mb": math.ceil(raw["disk_used"] / mb),
        "historical_sizes_devices": entity_sizes.get("Devices", {}),
        "historical_sizes_users": entity_sizes.get("Users", {}),
    }
491169f74ff6393da33b3c436e2aba2a87c26a1f
12,721
def flatten_df(df):
    """Flatten a DataFrame into a 1-D array.

    Args:
        df (pd.DataFrame): a dataframe

    Returns:
        A flat NumPy array of all values in row-major order.
    """
    return df.to_numpy().flatten()
4faedf1059fa60c2ee3af95cd6f01e7d3cadd97e
12,723
import random


def get_random_num(number):
    """Return a random integer in [1, int(number)]."""
    upper = int(number)
    return random.randint(1, upper)
9ad5a62ed510f4153b91f2a5c982ef83373dfca3
12,724
def fastdance(progs, compiled_moves):
    """Dance faster by applying pre-parsed moves in order.

    Each move is a (callable, args) pair; the callable receives the current
    program line-up plus the stored args and returns the new line-up.
    """
    for move, move_args in compiled_moves:
        progs = move(progs, *move_args)
    return progs
a3d0f68b3a75c79cc489d17aa8eefaf64a351d2b
12,726
from collections import Counter


def duplicated(v):
    """Return a generator of the values in *v* that appear more than once.

    NOTE: typing.Counter (used previously) is deprecated since Python 3.9
    in favor of collections.Counter (PEP 585).
    """
    counts = Counter(v)
    return (value for value, count in counts.items() if count > 1)
bf854dfde83c9d998f3f0b5f5553254be6347a4c
12,727
def _readonly_array_copy(x): """Return a copy of x with flag writeable set to False.""" y = x.copy() y.flags["WRITEABLE"] = False return y
ffd973bb354a642362a7382934f879c9dfa95c3b
12,728
import re


def str_list_to_list_str(str_list, regex_pattern=r'[A-Z]\d+'):
    """Turn the string form of a list into a list of string tokens.

    Tokens are determined by regex_pattern (default: one uppercase letter
    followed by digits).

    NOTE: the default is now a raw string; the former '[A-Z]\\d+' literal
    relied on an invalid escape sequence (SyntaxWarning on Python 3.12+).
    """
    pattern = re.compile(regex_pattern)
    return pattern.findall(str_list)
99d3bbccadc0676dac6854ee717f8aef3bb878a6
12,729
import os


def check_file(filename, expected_bytes):
    """Verify that *filename* exists and has exactly *expected_bytes* bytes.

    Prints a warning if the file is missing, raises if the size is wrong,
    and returns the filename on success.
    """
    if not os.path.exists(filename):
        print("please make sure {0} exists in the current directory".format(filename))
    statinfo = os.stat(filename)
    if statinfo.st_size != expected_bytes:
        print(statinfo.st_size)
        raise Exception(
            "File {0} didn't have the expected size. Please ensure you have downloaded the assignment files correctly".format(filename))
    print('Found and verified', filename)
    return filename
2db9e5378182feed7b4c692184a0eadc84c4e9cc
12,730
import yaml


def get_configuration(config: str) -> dict:
    """Open and parse a YAML configuration file at path *config*."""
    with open(config) as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
c275b80363bca4a8587b7f09ead17a85f9b8dfda
12,731
def flagger(value):
    """Conversion routine for flags.  Accepts ints or comma-separated strings.

    :param str value: The value to convert.
    :returns: An int when the value parses as one, otherwise a list of
        stripped comma-separated items.
    """
    try:
        return int(value)
    except ValueError:
        # Not an integer: treat it as a comma-separated list.
        return [part.strip() for part in value.split(',')]
cdc3fab338fd7f25499e25593302fb209f9be2a4
12,732
def get_accuracy(testSet, predictions):
    """Calculate accuracy as the percentage of test rows whose label
    (last column) appears in the corresponding prediction."""
    correct = sum(
        1 for idx, row in enumerate(testSet) if row[-1] in predictions[idx]
    )
    return correct / float(len(testSet)) * 100
afb8b781fff25373c764d49457422fd3d676019d
12,733
import time


def time_func(func, kwargs):
    """Time a function call.

    Returns the elapsed wall-clock seconds and the function's result.
    """
    started = time.time()
    result = func(**kwargs)
    elapsed = time.time() - started
    return elapsed, result
78f73058a90048f8de24915f1f8ba56bda79cc6a
12,734
def gen_end(first_instruction_address):
    """Generate an end record: 'E' followed by the first instruction
    address as 6 uppercase hex digits, zero padded."""
    col2_size = 6
    # hex() yields e.g. '0x3000'; strip the prefix, pad, uppercase.
    address = hex(first_instruction_address)[2:].upper().zfill(col2_size)
    return "E" + address
d5df69b33cfb4f99aa9dbc5e003b713637fa1eff
12,738
def is_equal_2_refactorings_list(a, b):
    """Return True iff two Individual instances hold the same refactoring
    sequence (same length, names and params, in order).

    Duplicate instances are removed from the population at each generation;
    keeping only one speeds up the search algorithm.
    """
    ops_a = a.X[0]
    ops_b = b.X[0]
    if len(ops_a) != len(ops_b):
        return False
    return all(
        x.name == y.name and x.params == y.params
        for x, y in zip(ops_a, ops_b)
    )
155248bb60ac560af03edd533d4573c9a5801b78
12,739
import re


def filter_subject(subj, cfg):
    """Function:  filter_subject

    Description:  Strip each configured regex pattern out of the message
        subject line, trimming surrounding whitespace after each removal.

    Arguments:
        (input) subj -> Message subject line.
        (input) cfg -> Configuration settings module for the program.
        (output) subj -> Filtered message subject line.
    """
    for pattern in cfg.subj_filter:
        subj = re.sub(pattern, "", subj).strip()
    return subj
db3e2dcff95ef91b0190e4a3c9a0c9c72113403a
12,740
def filter_remove_background(rgb, red_lower_thresh=235, green_lower_thresh=210,
                             blue_lower_thresh=235, output_type="bool"):
    """Create a mask that filters out pixels pre-defined as background.

    A pixel is background when all three channels exceed their thresholds.

    Args:
        rgb: RGB image as a NumPy array.
        red_lower_thresh: Red channel lower threshold value.
        green_lower_thresh: Green channel lower threshold value.
        blue_lower_thresh: Blue channel lower threshold value.
        output_type: Type of array to return (bool, float, or uint8).

    Returns:
        NumPy array mask where background pixels are masked out (False/0).
    """
    red_high = rgb[:, :, 0] > red_lower_thresh
    green_high = rgb[:, :, 1] > green_lower_thresh
    blue_high = rgb[:, :, 2] > blue_lower_thresh
    mask = ~(red_high & green_high & blue_high)
    if output_type == "float":
        return mask.astype(float)
    if output_type != "bool":
        return mask.astype("uint8") * 255
    return mask
0320af3ce1a20501e772a2feec4a4268b5a6d059
12,741
def get_json(item):
    """nasty hack to get round requests api change"""
    # The attribute may be a method (newer requests) or a plain value.
    return item() if callable(item) else item
13b549063503fc1bf8857a23af38d905735c901e
12,743
def variantMetadata_object(processed_request):
    """Builds the variantAnnotation object.

    Since we only have one model for this object we keep it simple and
    return a fixed skeleton with empty fields.
    """
    default_value = {
        "geneId": "",
        "HGVSId": "",
        "transcriptId": "",
        "alleleId": "",
        "variantClassification": "",
        "variantType": "",
        "disease": "",
        "proteinChange": "",
        "clinVarId": "",
        "pubmedId": "",
        "timestamp": "",
        "info": {},
    }
    return {
        "default": {
            "version": "beacon-variant-metadata-v1.0",
            "value": default_value,
        },
        "alternativeSchemas": [],
    }
4bf6dc519fcca02ea63d0f94af55adffe29ad9f8
12,744
def _is_filtering_only_by_id(options): """ Funcao que checa se a query tem como unico filtro o ID de uma entidade. Args: options (str): As opções da query. Returns: (bool): Verdadeiro, se o unico filtro é na propriedade ID. False, do contrário. """ if "$select" in options: return False filter_ = options.get("$filter") try: prop, operator, _ = filter_.split() if prop == "id" and operator == "eq": return True return False except (AttributeError, ValueError): return False
984074de92f3dc2fd9b300cdaf7f500c77e96269
12,745
def to_unicode_repr(_letters):
    """Return repr() of the input.

    Helpful where a browser/app recognizes Unicode encoding in the
    ``\\u0b8e``-style syntax but not the actual unicode glyph/code-point.
    """
    return repr(_letters)
379331356ea4f4b3763a16c3d0e531e984ec7a85
12,746
def getExperimentalDataPaths():
    """Prompt the user for experimental data file paths.

    Multiple paths may be entered separated by spaces; returns them as a
    list of strings.
    """
    banner_lines = (
        "------------------------------------------------------------",
        "PLEASE ENTER PATHS OF EXPERIMENTAL DATA",
        "",
        "IF THERE ARE MULTIPLE DATA FILE YOU WANT TO TOMOGRAPHY,",
        "ENTER ALL PATHS SEPARATED WITH SPACE.",
        "LIKE THIS >> .\\datadirectory\\ex1.txt .\\datadirectory\\ex2.txt ...",
        "------------------------------------------------------------",
        ">>",
    )
    for line in banner_lines:
        print(line)
    return list(input().split())
f9a63ca89de6c636fc8d6d7499c9eaf9933c9b4c
12,747
def find_closest_stores(friends, stores):
    """Find the closest store to each friend by absolute distance.

    Ties are broken alphabetically by store name (tuple comparison).

    Parameters:
        friends: Dictionary with friend names as keys and point locations
            as values.
        stores: Dictionary with store names as keys and point locations
            as values.

    Returns:
        Dictionary mapping each friend to the closest store's name.

    >>> find_closest_stores({'rob': 10, 'bob': 12}, {'walmart': 11, 'costco': 12})
    {'rob': 'walmart', 'bob': 'costco'}
    >>> find_closest_stores({'bob': 12}, {'target': 12, 'costco': 12})
    {'bob': 'costco'}
    """
    closest = {}
    for friend, location in friends.items():
        # min over (distance, name) tuples reproduces the alphabetical
        # tie-break of comparing tuples directly.
        _, best_store = min(
            (abs(distance - location), store)
            for store, distance in stores.items()
        )
        closest[friend] = best_store
    return closest
ca879f6f442a4d734bf9e3c7b0313cd31ea2a026
12,748
def sum_nth_i(seq, n):
    """Sum every n-th element of *seq* (positions n-1, 2n-1, ...).

    Requires n > 0; otherwise a message is printed and None is returned.
    """
    if n > 0:
        # The per-index debug print that used to live here was leftover
        # noise and has been removed; the computed sum is unchanged.
        return sum(seq[i] for i in range(n - 1, len(seq), n))
    else:
        print("n needs to be bigger then 0 \n n > 0")
3ee7eec74b73449038d735355f29b3b57c9402b5
12,749
def _func_and(current, checks): """Implementation of And.""" return all(check(current) for check in checks)
a778d9193c958729f40e09feb3903350b2d3054b
12,750
def _extra_gas_dndfeh(s, feh, p, M): """ Not normalized! """ z = (10**feh)/p return z * (1 + s*(1-1/M)) / (1/(1-s/M) - 2*z*(1-1/M))
485880edfc10be58716a31f25edd4d1c1188867e
12,751
def bash_quote(*args):
    """Quote the arguments so that bash understands each as a single word."""
    safe_chars = '@%_-+=:,./'

    def quote_word(word):
        # Empty words still need explicit quoting.
        if not word:
            return "''"
        # Words made only of safe characters need no quoting at all.
        if all(c.isalpha() or c.isdigit() or c in safe_chars for c in word):
            return word
        # Single-quote, escaping embedded single quotes the bash way.
        return "'{0}'".format(word.replace("'", "'\"'\"'"))

    return ' '.join(quote_word(word) for word in args)
cb25adb60ea98cb6887e89a6a4f5097bb8f4843f
12,752
def get_area(root):
    """Extract the ash cloud total area.

    Values returned, in order:
    1. Total ash area
    2. Total ash area unit
    """
    attrib = root.alert.total_area.attrib
    return attrib.get('value'), attrib.get('units')
ea5a908e4226359aeb962f66c4ed4691d8d93a1b
12,754
def mid_point(p1, p2):
    """Compute the midpoint of two 2-D points using floor division."""
    return [(p1[0] + p2[0]) // 2, (p1[1] + p2[1]) // 2]
2630e4158ea6414242b05ada987f2cf020442339
12,755
def extract_task(tsk, tsk_lst):
    """Search for a task in the task list; if found, return the
    corresponding entry with its keywords and values.

    Only works correctly if the task appears in the list once.

    :param tsk: task to extract information for
    :type tsk: str
    :param tsk_lst: list of tasks to run for some driver
    :type tsk_lst: tuple(tuple(str/dict))
    :rtype: tuple(str/dict)
    """
    for candidate in tsk_lst:
        # Looks across every element of the entry, not just the name slot.
        if any(entry == tsk for entry in candidate):
            return candidate
    return None
b8c3e68ca5d00227e51cbd0b869cbe09ddd3123f
12,756
def question_data_path(instance, filename):
    """Returns Questions data path.

    Question data is uploaded to
    MEDIA_ROOT/question_<title-with-underscores>/<filename>.
    """
    folder = instance.title.replace(" ", "_")
    return 'question_{0}/{1}'.format(folder, filename)
dc268bb4e3ff0d00ac3bb8ba8299ce0629951487
12,757
def _compscale(data): """ Automatically computes a scaling for the wavefunctions in the plot. Args: data (dict): The data neccesarry for computing the scale factor. Needs to contain 'wfuncs' and 'energies'. Returns: scale (float): The computed scale. """ wfuncs = data['wfuncs'].T energies = data['energies'] scale = 1e6 # choose an arbitray large number for index in range(len(energies) - 1): new_scale = (energies[index + 1] - energies[index]) / ( abs(min(wfuncs[index + 1])) + max(wfuncs[index])) if new_scale < scale: scale = new_scale return scale
0c33313c86c38568a1713de28e5f5b44b2b0207c
12,758
def get_nodes(G, **kwargs):
    """Search for nodes having the given attributes.

    :param G: graph where nodes are searched
    :param kwargs: attribute filters, e.g. atom=..., in_cycle=...
    :return: a list of matching nodes
    :rtype: list
    """
    # `G is None` replaces the former `G != None`, which could invoke a
    # custom __ne__ on graph classes; the boolean-accumulator loop is
    # replaced by all() with identical semantics (no kwargs -> match all).
    if G is None:
        return []
    return [
        n for n in G.nodes()
        if all(k in G.nodes[n] and G.nodes[n][k] == v
               for k, v in kwargs.items())
    ]
058475407fd0ad7104fb9eb90a8118ee130ae98f
12,759
from functools import reduce


def equal(list_):
    """
    Returns True iff all the elements in a list are equal.

    >>> equal([1,1,1])
    True
    """
    adjacent_pairs = zip(list_[1:], list_[:-1])
    return reduce(
        lambda acc, pair: acc and (pair[0] == pair[1]),
        adjacent_pairs,
        True,
    )
65b8a9b5652ecd6b3cc9709dd7e56b4e1258caf4
12,760
def yields_from_leung_nomoto_2020(feh):
    """Return supernova yields for the metallicity bin containing *feh*.

    Supernova data source: Leung & Nomoto, 2020, ApJ, Vol 888, Issue 2,
    Id 80.  The seven datasets are provided for Z/Zsun values of 0, 0.1,
    0.5, 1, 2, 3 and 5.  Using Zsun = 0.0169 the corresponding FeH values
    are -1, -0.301, 0.0, 0.301, 0.4771 and 0.69897.  We use seven
    intervals delimited by midpoints of those values.

    :param feh: [Fe/H] metallicity used to select a yield table row.
    :return: list of 11 yield values for the matching metallicity bin.
    """
    if feh <= -1.65:
        return [0.0, 3.39e-3, 3.33e-10, 1.16e-8, 1.14e-1, 3.98e-3, 1.70e-2, 1.17e-1, 5.40e-2, 1.11e-2, 6.73e-1]
    elif -1.65 < feh <= -0.65:
        return [0.0, 3.38e-3, 1.22e-10, 4.53e-9, 1.14e-1, 3.96e-3, 1.60e-2, 1.22e-1, 5.28e-2, 9.73e-3, 6.69e-1]
    elif -0.65 < feh <= -0.15:
        return [0.0, 3.38e-3, 3.41e-10, 1.37e-8, 1.16e-1, 4.40e-3, 1.11e-2, 1.36e-1, 6.14e-2, 9.38e-3, 6.34e-1]
    elif -0.15 < feh <= 0.15:
        return [0.0, 3.35e-3, 1.25e-9, 3.80e-8, 1.17e-1, 4.00e-3, 8.26e-3, 1.35e-1, 6.80e-2, 8.49e-3, 6.10e-1]
    elif 0.15 < feh <= 0.39:
        return [0.0, 3.29e-3, 4.59e-9, 9.63e-8, 1.19e-1, 3.86e-3, 5.46e-3, 1.32e-1, 5.69e-2, 7.14e-3, 5.83e-1]
    elif 0.39 < feh <= 0.59:
        return [0.0, 2.62e-3, 2.90e-8, 6.40e-7, 1.12e-1, 3.42e-3, 3.77e-3, 1.30e-1, 5.33e-2, 6.79e-3, 5.57e-1]
    # NOTE(review): `0.59 <= feh` overlaps the previous branch at exactly
    # 0.59; since elif ordering resolves it, this behaves as `feh > 0.59`.
    elif 0.59 <= feh:
        return [0.0, 2.20e-3, 1.20e-8, 9.43e-8, 1.40e-1, 3.16e-3, 2.66e-3, 1.40e-1, 3.91e-2, 5.45e-3, 5.15e-1]
21ad4bf1eab2193a97f98594c60a5e3db18e4a74
12,761
def stocking_event_dict(db):
    """Return a dictionary representing a complete, valid upload event.

    This dictionary is used directly to represent a stocking event, or is
    modified to verify that invalid data is handled appropriately.
    """
    return dict(
        stock_id=None,
        lake="HU",
        state_prov="ON",
        year=2015,
        month=4,
        day=20,
        site="Barcelona",
        st_site=None,
        latitude=44.5,
        longitude=-81.5,
        grid="214",
        stat_dist="NC2",
        species="LAT",
        strain="SLW",
        no_stocked=18149,
        year_class=2014,
        stage="y",
        agemonth=18,
        mark="ADCWT",
        mark_eff=99.5,
        tag_no=640599,
        tag_ret=99,
        length=107.44,
        weight=563.8153159,
        condition=1,
        lot_code="LAT-SLW-13",
        stock_meth="b",
        agency="MNRF",
        notes="FIS ID = 73699",
        # new fields
        hatchery="CFCS",
        agency_stock_id="P1234",
    )
bed85f03438700754d52a0d858d7a41ee5079406
12,762
import socket


def get_device_description():
    """Describe this client session for the "Open sessions" overview.

    Should be something so the user knows where this session is coming
    from.

    :return:
    :rtype:
    """
    hostname = socket.gethostname()
    return 'Console Client ' + hostname
2881b58e5854156adc57fb834284d55fc961cc55
12,763
import time
import os
import logging
import signal


def terminate(end, proc, kill):
    """Terminate or kill *proc* once the deadline *end* has passed.

    Returns False when no deadline is set or it has not yet elapsed;
    otherwise terminates (or kills, with its whole process group) and
    returns True.
    """
    if not end or time.time() <= end:
        return False
    if not kill:
        logging.info(
            'Terminate %d on timeout',
            proc.pid)
        proc.terminate()
        return True
    # Process will not die, kill everything in its process group.
    pgid = os.getpgid(proc.pid)
    logging.info(
        'Kill %d and process group %d', proc.pid, pgid)
    os.killpg(pgid, signal.SIGKILL)
    proc.kill()
    return True
7cf36dc08597d19a1d6bb7cd55b5cbb8a71fa4c1
12,765
import os


def make_dir_safe(path):
    """Make a directory without any race conditions.

    @return - True if newly created, False if it already exists; re-raises
    the OSError when the path exists but is not a directory.
    """
    try:
        os.makedirs(path)
        return True
    except OSError:
        if os.path.isdir(path):
            return False
        raise
afcffc88039bcef0e83d90bf4ae96fd7ee261e38
12,767
def convert_dir_regex_to_dir_prefix_regex(dir_regex):
    """Convert a directory-name regex into one matching file paths under it.

    Directory patterns are allowed to match a name prefix (retained for
    compatibility), so 'photos' matches 'photos' and 'photos2' and must
    exclude 'photos/kitten.jpg' and 'photos2/puppy.jpg' — but not
    'photos.txt'.  An anchored 'photos$' matches only 'photos/...'.

    If the original regex is valid there are only two cases: it either
    ends in '$' or it does not.

    :param dir_regex: a regular expression string or literal
    :type dir_regex: str
    """
    if dir_regex.endswith('$'):
        # Anchored: exact directory name followed by a slash.
        return dir_regex[:-1] + '/'
    # Unanchored: any prefix continuation up to the next slash.
    return dir_regex + '.*?/'
bb9843984d9a724380eab2509095fccc03e280e1
12,768
async def consume_aiter(iterable):
    """Consume an async iterable and return its items as a list."""
    collected = []
    async for element in iterable:
        collected.append(element)
    return collected
897c8e9380f9c631f2dd8721884ec1ccbe462d48
12,769
import hashlib


def cluster_name(cluster, build):
    """Return or select a cluster name.

    An explicit cluster name wins; otherwise a name is derived from the
    build identifier, hashed when it is too long.
    """
    if cluster:
        return cluster
    if len(build) < 20:
        return 'e2e-%s' % build
    # hashlib.md5 requires bytes on Python 3; a str build used to raise
    # TypeError here.
    digest_input = build.encode('utf-8') if isinstance(build, str) else build
    return 'e2e-%s' % hashlib.md5(digest_input).hexdigest()[:10]
d2f2fe3d434357237ba9c645f3253447fc4ac953
12,770
def task_returner(task) -> dict:
    """Build a response dict for *task*, shaped by its state.

    Args:
        task: object exposing ``state`` and ``info`` attributes.

    Returns:
        A dict describing the task's progress, result, or failure.
    """
    state = task.state
    if state == "PENDING":
        return {
            "state": "empty task",
            "info": {"current": 0, "total": 4, "status": "empty task"},
        }
    if state == "FILL DATA":
        return {"state": state, "info": task.info}
    if state == "SUCCESS":
        response = {"state": state, "info": task.info, "msg": "success"}
        if "result" in task.info:
            response["result"] = task.info["result"]
        return response
    if state != "FAILURE":
        response = {"state": state, "info": task.info}
        if "result" in task.info:
            response["result"] = task.info["result"]
        return response
    # Something went wrong in the background job.
    return {
        "state": state,
        "info": {
            "current": 4,
            "total": 4,
        },
        "status": str(task.info),  # this is the exception raised
    }
9d020f70d2f0a00e1a0042e9cba86e58d41f41e0
12,771
import ipaddress


def validate_ip(ip):
    """Check whether an IP address is valid for lookup.

    Returns False for private/reserved addresses and for strings that do
    not parse as an IP address at all.
    Resource: https://en.wikipedia.org/wiki/Reserved_IP_addresses
    """
    try:
        address = ipaddress.ip_address(ip)
    except ValueError:
        return False
    return not address.is_private
5d10a41e6bc6d645cbef24e8e4128baaf67aa997
12,772
def init_symbol_table():
    """Return a dict mapping symbol names to the addresses they refer to,
    initialized with all of the Hack assembly predefined symbols."""
    table = {'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}
    # Virtual registers R0..R15 map straight onto addresses 0..15.
    for n in range(16):
        table['R{}'.format(n)] = n
    table['SCREEN'] = 0x4000
    table['KBD'] = 0x6000
    return table
7a64be8add2565d3dff5302df82088af7b3383e6
12,773
def _path_to_lca(doc, idx, lca):
    """Collect tokens from doc[idx] up to and including the LCA token.

    Returns just [doc[idx]] when the LCA is missing (-1) or is the token
    itself.
    """
    node = doc[idx]
    path = [node]
    if lca != -1 and lca != idx:
        while node.head.i != lca:
            node = node.head
            path.append(node)
        path.append(node.head)
    return path


def get_sdp_path(doc, subj: int, obj: int):
    """Get the shortest dependency path between two tokens without networkx.

    'Convulsions that occur after DTaP are caused by a fever, ...'
    ----> [Convulsions, caused, by, fever]

    Uses spacy's LCA (lowest common ancestor) matrix.
    Adapted from: https://towardsdatascience.com/find-lowest-common-ancestor-subtree-and-shortest-dependency-path-with-spacy-only-32da4d107d7a

    The former duplicated subj/obj walking loops are factored into
    _path_to_lca; behavior is unchanged.
    """
    lca = doc.get_lca_matrix()[subj, obj]
    subj_path = _path_to_lca(doc, subj, lca)
    obj_path = _path_to_lca(doc, obj, lca)
    # Reverse the object path and drop its duplicate LCA token.
    return subj_path + obj_path[::-1][1:]
23f994d17e3b496fe6d8d8dcd06527675c9f0795
12,774
def is_integer(val):
    """Returns whether the given value is an integer.

    Args:
        val (object): value to check

    Returns:
        ``True`` if the given value is an integer, otherwise ``False``.

    The previous ``val += 1`` duck-test also accepted floats, complex and
    Decimal values, contradicting this contract; numbers.Integral covers
    int, bool and third-party integer types (e.g. numpy ints) only.
    """
    import numbers
    return isinstance(val, numbers.Integral)
ba038448c04b3072c08547c963aa24bf3cbc97b2
12,775
def add_links(self, value):
    """Create a Link header from the schema object's links.

    Parameters
    ----------
    self : lib.schema.riak.objects.Link
        The Link schema object
    value : dict
        The headers of the request to add the links to
    """
    links = getattr(self, 'links', [])
    if not links:
        return value
    locations = [link._as_dict()['location'] for link in links]
    value['Link'] = ', '.join(locations)
    return value
3d507ce928b227399ca325d5a55cab6b5ae07791
12,777
def children(level, idx):
    """Return the four children of Healpix pixel *idx* at *level* (nested
    format) as (level + 1, child_index) tuples.

    :param level: Resolution level
    :param idx: Pixel index
    :return: The children of the pixel
    """
    base = 4 * idx
    return [(level + 1, base + offset) for offset in range(4)]
ef1a127270a689fc7c05eb2f2b40abb7e6efc2ce
12,778
def knapsack_iterative(items, bag_capacity):
    """Solve the 0/1 knapsack problem (take or leave each item) with DP.

    :param items: tuple of (space_it_takes_up, value) pairs
    :param bag_capacity: integer capacity of the bag
    :return: the DP table and an (unpopulated) list of taken items

    NOTE(review): the docstring of the original promised the maximised
    value and chosen items, but the implementation returns the full table
    and an empty list (its find_chosen call was commented out); that
    behavior is preserved here.
    """
    n_items = len(items)
    table = [[0] * (bag_capacity + 1) for _ in range(n_items)]

    # Seed row 0: with only the first item available, every capacity that
    # can hold it gets its value (column 0 stays 0).
    first_weight, first_value = items[0]
    for capacity in range(1, bag_capacity + 1):
        if first_weight <= capacity:
            table[0][capacity] = first_value

    # Each later row considers one more item, at most once.
    for row in range(1, n_items):
        weight, value = items[row]
        for capacity in range(1, bag_capacity + 1):
            without_item = table[row - 1][capacity]
            if weight <= capacity:
                # Best of leaving the item vs. taking it plus the best
                # use of the remaining space from the previous row.
                with_item = value + table[row - 1][capacity - weight]
                table[row][capacity] = max(without_item, with_item)
            else:
                table[row][capacity] = without_item

    # took_items = find_chosen(table, items)
    return table, []
65872a23762d314dac2c357ba0941142237eaedb
12,780
import json


def getUrnJson(data):
    """Returns URN to entity in JSON data"""
    decoded = json.loads(data)
    return decoded["urn:lri:property_type:id"]
af1761acb9322f295af3500cc4ecc519da16d7b1
12,781
def type_to_str(_type : type | tuple, sep : str =" or "): """ Converts type or tuple of types to string e. g. <class 'bin_types.functions.Function'> -> function For tuples will separate types with argument sep, standard sep is " or " """ if isinstance(_type, tuple): types = [] for i in _type: types.append(type_to_str(i)) return sep.join(types) if _type is None: return "none" res = str(_type) main_part = str(_type).find("'")+1 end_main_part = str(_type).rfind("'") res = res[main_part:end_main_part] res = res.split(".")[-1] # removes modules names return res.lower()
04589e24aef9db1d302b8d4d439d7d5e1ba70e49
12,782
def get_total_pages(items):
    """Return the number of pages needed for a list of serialized items,
    at 12 items per page (ceiling division)."""
    items_per_page = 12
    full_pages, remainder = divmod(len(items), items_per_page)
    return full_pages if remainder == 0 else full_pages + 1
8e2b0a33f6c462e3f38d24643968bd5ed265d1a9
12,783
import time


def file_upload_to(instance, filename):
    """Build the relative upload path for a file.

    :param instance: model instance; must expose a ``user_id`` attribute
    :param filename: original file name
    :return: path of the form ``file/<YYYY/mm>/<user_id>_<filename>``
    """
    # The debug print of instance.user_id that used to run here was
    # leftover noise and has been removed; the returned path is unchanged.
    return 'file/{}/{}_{}'.format(
        time.strftime('%Y/%m'), instance.user_id, filename)
a24fc844c6d469cd66af4a5ade3e526781fc53c7
12,784
def dot(v, u):
    """Return the dot product of 2-D vectors v and u (lists)."""
    return v[0] * u[0] + v[1] * u[1]
b9a8794ff98787b7ae8e8c1420a375360abb34fa
12,785
def inv_entry_to_path(data):
    """Determine the path from an intersphinx inventory entry.

    Discard the anchors between head and tail to make it compatible with
    situations where extra meta information is encoded.
    """
    target = data[2]
    parts = target.split("#")
    if len(parts) > 1:
        # Keep only the first segment and the final anchor.
        return "#".join((parts[0], parts[-1]))
    return target
2c36c3a5755d7f82da534d417cc8350c96e0360a
12,786
def Imu_I0c1c2c3c4(mu, c):
    """ I(mu, c) where c = (I0, c1, c2, c3, c4) """
    I0, c1, c2, c3, c4 = c[0], c[1], c[2], c[3], c[4]
    return I0 * (1
                 - c1 * (1 - mu ** 0.5)
                 - c2 * (1 - mu)
                 - c3 * (1 - mu ** 1.5)
                 - c4 * (1 - mu ** 2.))
6553deaff09f4a3e00364b271095b5e80472b3a1
12,789
def isinstance_qutip_qobj(obj):
    """Check if the object is a qutip Qobj without importing qutip.

    Args:
        obj (any): Any object for testing.

    Returns:
        Bool: True if obj is a qutip Qobj.
    """
    # Duck-typed check on the class name and its backing sparse matrix,
    # so qutip never has to be imported here.
    return (
        type(obj).__name__ == "Qobj"
        and hasattr(obj, "_data")
        and type(obj._data).__name__ == "fast_csr_matrix"
    )
3c140e012d0df97852e84c50a37e6464e107fe2e
12,790
def get_color_specifier(basecolor, number):
    """Build an OpenGL color array for the number of specified vertices.

    Returns a ("c3d"/"c4d", repeated-color-list) pair for 3- or 4-channel
    base colors; implicitly returns None for any other channel count.
    """
    color = [float(channel) for channel in basecolor]
    count = int(number)
    if len(color) == 3:
        return ("c3d", color * count)
    if len(color) == 4:
        return ("c4d", color * count)
b7c14272aa393c66fdedc109b90f9c8fce6118b0
12,791
import os


def list_files_in_folder(path, extension):
    """Recursively list files under *path* whose name contains *extension*.

    Uses os.walk, so subdirectories are included; the match is a simple
    substring test on the file name.
    """
    matches = []
    for root, _dirs, filenames in os.walk(path):
        matches.extend(
            os.path.join(root, fname)
            for fname in filenames
            if extension in fname
        )
    return matches
337277d7d03c8a48c61436e62155e6c1f59c639c
12,792
def finddir(o, match, case=False):
    """Return all attributes of *o* whose name contains the string *match*.

    If *case* is True an exact case match is required; otherwise the
    comparison is case-insensitive.

    NOTE: the former ``six.string_types`` check is just ``str`` on
    Python 3, so the six dependency has been dropped.
    """
    if case:
        names = [(name, name) for name in dir(o)
                 if isinstance(name, str)]
    else:
        names = [(name.lower(), name) for name in dir(o)
                 if isinstance(name, str)]
        match = match.lower()
    return [orig for name, orig in names if name.find(match) >= 0]
d2eff57dfc0366a125c3079851985765ceada96d
12,793
def CreateAdvancedOptionsConfig(client, args, existing_advanced_options_config):
    """Returns a SecurityPolicyAdvancedOptionsConfig message.

    Starts from the existing config when one is given, otherwise from a
    fresh message, then applies only the flags the user specified.
    """
    messages = client.messages
    config = existing_advanced_options_config
    if config is None:
        config = messages.SecurityPolicyAdvancedOptionsConfig()
    if args.IsSpecified('json_parsing'):
        json_enum = (
            messages.SecurityPolicyAdvancedOptionsConfig
            .JsonParsingValueValuesEnum)
        config.jsonParsing = json_enum(args.json_parsing)
    if args.IsSpecified('log_level'):
        log_enum = (
            messages.SecurityPolicyAdvancedOptionsConfig
            .LogLevelValueValuesEnum)
        config.logLevel = log_enum(args.log_level)
    return config
b242b32ba6733c41b1a53f269825c0f54bcf3f96
12,794
def smooth(s, p0, p1):
    """
    Do a boxcar average on a 1D dataset.

    :param s: 1-D array-like supporting slicing and in-place arithmetic
        (presumably a numpy array — the += on a slice requires it;
        TODO confirm).
    :param p0: window start offset (negative indexing is used, so this is
        presumably a negative-side offset; verify against callers).
    :param p1: window end offset.
    :return: array of the same shape, averaged in the interior and copied
        unchanged at the edges.
    """
    # Zero array with the same shape/dtype as the input.
    result = s * 0
    count = 0
    # Accumulate shifted slices of s over the window [p0, p1); each pass
    # adds one shifted copy into the interior region result[-p0:-p1].
    for p in range(p0, p1):
        result[-p0:-p1] += s[-p0 + p:len(s) - p1 + p]
        count += 1
    # Divide by the number of accumulated slices to get the mean.
    result /= count
    # Edges cannot be averaged with a full window: copy them verbatim.
    result[:-p0] = s[:-p0]
    result[-p1:] = s[-p1:]
    return result
355252c5bd71447d043095de0d955d0f601dfd6d
12,795
def _unlParseNodePart(nodePart): """ Parse the Node part of a UNL Arguments: nodePart: The node part of a UNL Returns: unlList: List of node part parameters in root to position order. """ if not nodePart: if nodePart is None: return None else: return [''] return nodePart.split('-->')
443c7f67b5deae47448bf93ff874acfb77cb83c3
12,796
def postproc_y_hat(pred_flows, adapt_info=None):
    """Postprocess network predictions produced in test mode.

    If the samples were padded to fit the network's requirements
    (adapt_info is given, (N, H, W, 2) format), crop the flows back to
    their original spatial size; otherwise return them untouched.

    Args:
        pred_flows: predicted flows (actual data, not a TF tensor)
        adapt_info: adaptation information in (N, H, W, 2) format

    Returns:
        Postprocessed flows.
    """
    if adapt_info is None:
        return pred_flows
    return pred_flows[:, 0:adapt_info[1], 0:adapt_info[2], :]
d93109e0c6a902dafb79dc1a9d6dbe73f29dc970
12,797
import re
import sys


def prompt(prompt_text):
    """Repeatedly show prompt_text until the user types a valid choice.

    Valid choices are the single letters wrapped in square brackets inside
    prompt_text (e.g. "[y]es / [n]o" accepts 'y' and 'n').  Matching is
    case-insensitive and only the first character of the reply is used.
    """
    valid = {m.group(1) for m in re.finditer(r"\[(.)\]", prompt_text)}
    while True:
        sys.stderr.flush()
        sys.stdout.write(prompt_text)
        sys.stdout.flush()
        reply = sys.stdin.readline().strip().lower()
        if reply and reply[0] in valid:
            return reply[0]
1df7ea9797b72e07b31f9eef4d2095bb8b591c8d
12,798
def calc_raid_partition_sectors(psize, start):
    """Convert a RAID partition's start/size into parted-compatible bounds.

    :param psize: size of the raid partition (GiB), or -1 meaning "use the
        rest of the disk"
    :param start: start of the partition; an int (GiB) or, for the first
        partition, an already-formatted parted string such as '2048s'
    :return: (start_str, end_str, end) where start_str/end_str are parted
        compatible strings and end is the numeric end (or the string '-1')
    """
    start_str = '%dGiB' % start if isinstance(start, int) else start

    if psize == -1:
        # Grow the partition to the end of the device.
        return start_str, '-1', '-1'

    # First-partition case: start is a sector string, so the end is just
    # the requested size.
    end = start + psize if isinstance(start, int) else psize
    return start_str, '%dGiB' % end, end
a96e442d5915a108fbbf18838e2c4a311677eced
12,799
def chunks(sentences, number_of_sentences):
    """Partition a list into consecutive chunks of at most N items.

    A non-positive chunk size is clamped to 1.
    """
    size = max(1, number_of_sentences)
    return [sentences[start:start + size]
            for start in range(0, len(sentences), size)]
a11c06c60e2230b611fd669a1342e17970e27ba5
12,800
import torch


def pos_def(ws, alpha=0.001, eps=1e-20):
    """Diagonal loading of a packed Hermitian matrix.

    ``ws`` stores the upper triangle of a C x C complex Hermitian matrix in
    (*, 2, C+P) layout (real/imag channels, triangle flattened).  The trace
    times ``alpha`` plus ``eps`` is added to the real part of every
    diagonal entry.  Output has the same (*, 2, C+P) format.

    Arguments
    ---------
    ws : tensor
        Input matrix in (*, 2, C+P) format.
    alpha : float
        Coefficient multiplying the trace.  Default 0.001.
    eps : float
        Small constant added to the real diagonal.  Default 1e-20.
    """
    ndim = ws.dim()
    packed = ws.shape[ndim - 1]
    # Recover the matrix order C from the triangle length C*(C+1)/2.
    order = int(round(((1 + 8 * packed) ** 0.5 - 1) / 2))

    # Diagonal entries sit where row == col in the packed upper triangle.
    triu = torch.triu_indices(order, order)
    diag_mask = torch.eq(triu[0, :], triu[1, :])

    # Trace of the real part, broadcast back over the C diagonal slots.
    trace = torch.sum(ws[..., 0, diag_mask], ndim - 2)
    trace = trace.view(trace.shape + (1,))
    trace = trace.repeat((1,) * (ndim - 2) + (order,))

    loaded = ws.clone()
    loaded[..., 0, diag_mask] += alpha * trace + eps
    return loaded
f18cc9087d8ad0cef859b57d524b453ad11dd429
12,801
import statistics


def ping_statistics(data):
    """Render per-host ping result statistics.

    :param data: mapping hostname -> result dict with keys ``error``
        (truthy when resolution failed), ``seq`` (packets transmitted),
        ``lost`` (packets lost) and ``rtts`` (round-trip times in ms of
        the replies actually received)
    :return: str result string, one statistics section per host
    """
    TEMPLATE = """--- {hostname} ping statistics ---
{packet} packets transmitted, {packet_received} packets received, {packet_lost:.1f}% packet loss"""
    RTT_TEMPLATE = """\nround-trip min/avg/max/stddev = {min:3.2f}/{avg:3.2f}/{max:3.2f}/{stddev:3.2f} ms"""
    ERROR_TEMPLATE = """--- {hostname} ping statistics ---
ping: cannot resolve {hostname}: Unknown host"""
    results = []
    for hostname, value in data.items():
        if value.get("error"):  # I could use PEP572 here
            results.append(ERROR_TEMPLATE.format(hostname=hostname))
            continue
        rtts = value["rtts"]
        if value["seq"] == 0:
            packet, packet_received, packet_lost = 0, 0, 0
        else:
            packet = value["seq"]
            packet_received = int(value["seq"]) - int(value["lost"])
            packet_lost = value["lost"] / value["seq"] * 100
        packets_info = TEMPLATE.format(
            hostname=hostname,
            packet=packet,
            packet_received=packet_received,
            packet_lost=packet_lost,
        )
        rtt_info = ""
        if rtts:
            # stdev needs at least two samples; with fewer report 0.
            # (The original required more than two, wrongly skipping the
            # two-sample case that statistics.stdev supports.)
            stdev = statistics.stdev(rtts) if len(rtts) >= 2 else 0
            # BUG FIX: average over the replies actually received, not
            # over the number of packets sent -- lost packets used to
            # deflate the reported average RTT.
            rtt_info = RTT_TEMPLATE.format(
                min=min(rtts),
                avg=sum(rtts) / len(rtts),
                max=max(rtts),
                stddev=stdev,
            )
        results.append(packets_info + rtt_info)
    return "\n".join(results)
1ba4977f6c70dd0719bb5b1d5301795b3ca0ad6a
12,802
def dot_s(inputa, inputb):
    """Row-wise dot product of two arrays of 3D space vectors.

    :param inputa: array of shape (N, 3) -- columns 0..2 are used
    :param inputb: array of shape (N, 3) -- columns 0..2 are used
    :return: length-N array of per-row dot products
    """
    total = inputa[:, 0] * inputb[:, 0]
    total = total + inputa[:, 1] * inputb[:, 1]
    total = total + inputa[:, 2] * inputb[:, 2]
    return total
7991192fa07b953cd2e8e3d5439769eebf0e1608
12,803
import time


def utc2datetime(_utc, _format="%Y-%m-%d %H:%M:%S"):
    """Format a Unix timestamp as a datetime string.

    NOTE(review): despite the name this formats in *local* time via
    time.localtime, not UTC -- confirm that is the intent.

    :param _utc: Unix timestamp (anything int() accepts)
    :param _format: strftime format string
    :return: formatted datetime string
    """
    local = time.localtime(int(_utc))
    return time.strftime(_format, local)
992304ee88c99bf81734b8bf473fe8359810a843
12,804
import pickle


def pickle_serialize(data):
    """Serialize ``data`` to a bytes object with pickle.

    Args:
        data: any picklable value.

    Returns:
        bytes produced by ``pickle.dumps``.
    """
    payload = pickle.dumps(data)
    return payload
9c8809ab7bbd0375e9d94d6baae95e26166e6c56
12,805
from typing import Iterable
import os


def create_line(array: Iterable, sep: str) -> str:
    """Join items into one results-file row terminated by os.linesep.

    None items are rendered as empty fields; everything else via str().
    """
    fields = ("" if item is None else str(item) for item in array)
    return sep.join(fields) + os.linesep
b855f1aed5960a3114c8c0f72d3446116e83d800
12,806
def edit_diff(start, goal, limit):
    """A diff function that computes the edit distance from START to GOAL.

    >>> edit_diff("ash", "hash")
    1
    >>> edit_diff("roses", "arose")  # roses -> aroses -> arose
    2
    >>> edit_diff("tesng", "testing")  # tesng -> testng -> testing
    2
    >>> edit_diff("rlogcul", "logical")  # rlogcul -> logcul -> logicul -> logical
    3
    """
    if limit < 0:
        # Edit budget exhausted: report an unreachable cost.
        return float('inf')
    if not start or not goal:
        # Only pure insertions or deletions remain.
        return abs(len(goal) - len(start))
    if start[0] == goal[0]:
        # Matching heads cost nothing.
        return edit_diff(start[1:], goal[1:], limit)
    # Try add, remove and substitute; keep the cheapest.
    candidates = (
        edit_diff(start, goal[1:], limit - 1),      # add
        edit_diff(start[1:], goal, limit - 1),      # remove
        edit_diff(start[1:], goal[1:], limit - 1),  # substitute
    )
    return 1 + min(candidates)
4016eca481c208fd39b028a4d267d5afb014bbe0
12,808
def time2milliseconds(the_time):
    """Convert the time-of-day part of a datetime/time to milliseconds.

    :param the_time: object with hour/minute/second attributes, or a falsy
        value (e.g. None) which yields 0; microseconds are ignored
    :return: milliseconds since midnight
    """
    if not the_time:
        return 0
    total_seconds = (the_time.hour * 60 + the_time.minute) * 60 + the_time.second
    # BUG FIX: the original computed this value and then returned 0.
    return total_seconds * 1000
5825fed1c9d64db5f145a73a158a5cb79d6065f7
12,809
def strip_cstring(data: bytes) -> str:
    """Decode ``data`` as ASCII, keeping only the part before the first NUL.

    The CmdSeq files appear to often have junk data in the unused sections
    after the null byte, where C code doesn't touch, so everything from the
    first null onward is discarded.  Data without a null decodes whole.
    """
    head, _, _ = data.partition(b'\0')
    return head.decode('ascii')
c315e84debe7eef239afd4c856d45f4d2a776af2
12,810
def model_format_args(model, pars):
    """Format the model and parameter args to save in output file.

    model is [temp, logg, fe/h, alpha]; pars is (R, band, vsini, sample)
    with an optional 5th element RV that defaults to 0.0 when absent.

    Returns:
        (temp, logg, fe_h, alpha, band, res, vsini, sample, rv) coerced
        to int/float/str as appropriate (res is R divided by 1000).
    """
    temp = int(model[0])
    logg, fe_h, alpha = (float(v) for v in model[1:4])
    res = int(pars[0] / 1000)
    band = pars[1]
    vsini = float(pars[2])
    sample = float(pars[3])
    # RV is optional; fall back to 0.0 when pars has only four entries.
    rv = float(pars[4]) if len(pars) > 4 else 0.0
    return temp, logg, fe_h, alpha, band, res, vsini, sample, rv
118a10c3670399c365c608d9ebfce7d48edb84fb
12,811
def abs_diff(a, b, base=0):
    """Absolute value of difference between scalars.

    abs_diff is symmetric, i.e. `a` and `b` are interchangeable.

    Args:
        a: First argument. An int.
        b: Second argument. An int.
        base: Dummy argument so that the argument signature matches other
            scalar diff functions. abs_diff is the same in all bases.

    Returns:
        abs(a - b).
    """
    del base  # Unused.
    return a - b if a >= b else b - a
77d4d4231cd50a6e4cc527979fa806e139759d8f
12,812
def getTitles(db):
    """Gets all the books titles in the whole database.

    :param db: database object exposing find(collection_name)
    :return: alphabetically sorted list of book titles
    """
    return sorted(book['Title'] for book in db.find('books'))
f63d2138cef4d2ad51767ad62abacce5169bb3a2
12,813
def calc_auc(scored_labels, true_label):
    """Calculates area under the (precision-recall) curve.

    NOTE(review): this counts, for each non-true-label item, the number of
    true-label items seen before it, normalised by (#pos * #neg) -- which
    presumes scored_labels is already sorted by score; confirm with callers.
    """
    positives_seen = 0
    ranked_pairs = 0
    for _, label in scored_labels:
        if label == true_label:
            positives_seen += 1
        else:
            ranked_pairs += positives_seen
    negatives = len(scored_labels) - positives_seen
    return float(ranked_pairs) / (positives_seen * negatives)
f1e907548d3f5897197e97f7340cf3d7f6645e89
12,815
def give_coordinates(data):
    """Extract the ground-truth receiver coordinates from a measurement.

    Reads the (X, Y, optionally Z, optionally room label) of where the
    measurement was taken, given the parsed JSON structure as input.

    :param data: parsed JSON with
        data['raw_measurement'][1]['receiver_location']
    :return: dict with true_coordinate_x/true_coordinate_y and, when the
        source provides them, true_coordinate_z and true_room
    """
    location = data['raw_measurement'][1]['receiver_location']
    message = {
        'true_coordinate_x': location['coordinate_x'],
        'true_coordinate_y': location['coordinate_y'],
    }
    # Z coordinate and room label are optional in the schema.  BUG FIX:
    # catch only the missing-key case instead of a bare except that
    # silently swallowed every error (typos, type errors, KeyboardInterrupt).
    try:
        message['true_coordinate_z'] = location['coordinate_z']
    except KeyError:
        pass
    try:
        message['true_room'] = location['room_label']
    except KeyError:
        pass
    return message
d412afbe08d4b1660ab9a12d1e567e4c4e4258fc
12,816
def filter_comment_lines(line):
    """Return True when ``line`` (bytes) is a '#' comment line.

    BUG FIX: the original fell off the end and implicitly returned None
    for non-comment lines; now an explicit bool is always returned
    (truthiness of the result is unchanged for existing callers).
    """
    return line.startswith(b"#")
6f0441adb79ddf7aebb6466cb6dfdb74c74e1d24
12,817
def orders_by_dow(data_frame):
    """Gives orders_count by day of week.

    NOTE(review): assumes all seven order_dow values (0..6) are present in
    the data and that 0 maps to Saturday -- confirm against the dataset docs.

    :param data_frame: DataFrame containing orders data.
    :return: DataFrame of order_ids count by day of week.
    """
    counts = (data_frame
              .groupby(['order_dow'], as_index=False)
              .agg({'order_id': 'count'})
              .rename(columns={'order_id': 'order_id_count'}))
    counts['week_day'] = ['Saturday', 'Sunday', 'Monday', 'Tuesday',
                          'Wednesday', 'Thursday', 'Friday']
    return counts
bfca97ced614fb8c309f8edc482fc2136914759b
12,818
def is_user_logged(app):
    """Check for auth_tkt cookies being set to see if a user is logged in."""
    cookies = app.cookies
    # True only when the cookie exists AND carries a non-empty value.
    return 'auth_tkt' in cookies and bool(cookies['auth_tkt'])
67e670fd70787b35bc9b8d9a724512bfc4e7bac8
12,819
def create_get_service_interface_point_list_by_id():  # noqa: E501
    """Create get-service-interface-point-list by ID.

    Create operation of resource: get-service-interface-point-list  # noqa: E501

    :rtype: GetServiceInterfacePointListRPCOutputSchema
    """
    # Auto-generated placeholder: the real implementation is still to come.
    return 'do some magic!'
2f54434f1ecfe77d50034e50b2c4333f24371a08
12,820
def test_cache_memoize(cache):
    """
    Test that cache.memoize() caches the return value of a function using
    a key based on function arguments used.
    """
    marker = 1

    @cache.memoize()
    def func(a, b, c, d):
        return ((a, b, c, d), marker)

    # First call computes and populates the cache.
    args, first_mark = func(1, 2, 3, 4)
    assert args == (1, 2, 3, 4)
    assert first_mark == marker

    # Same arguments -> cached result, even though marker has changed.
    marker += 1
    args, second_mark = func(1, 2, 3, 4)
    assert args == (1, 2, 3, 4)
    assert second_mark != marker
    assert second_mark == first_mark

    # Different arguments -> fresh computation with the current marker.
    args, third_mark = func(5, 6, 7, 8)
    assert args == (5, 6, 7, 8)
    assert third_mark == marker
5b93bcab00b334b1c38114f8b700b6ef0d9c3178
12,821
def obey_reporting_preference(function):
    """Decorator: do not execute ``function`` when reporting is disabled.

    The wrapped method returns None without calling ``function`` unless
    ``self.reporting_enabled`` is truthy, even if someone tells you to!
    """
    # Local import keeps this block self-contained in the snippet file.
    from functools import wraps

    @wraps(function)  # preserve the wrapped method's name and docstring
    def inner(self, *args, **kwargs):
        if not self.reporting_enabled:
            return None
        return function(self, *args, **kwargs)

    return inner
c0b38babc999b7bf1ace6821d6079f1afb53e540
12,822
import re


def mqtt_wildcard(topic, wildcard):
    """Returns True if topic matches the wildcard string.

    '+' matches a single topic level, '#' matches any remaining levels.

    BUG FIX: every regex metacharacter in the wildcard is now escaped via
    re.escape (the original only handled '.'), so wildcards containing
    characters like '(', '[' or '$' no longer raise re.error or corrupt
    the generated pattern.
    """
    # Escape everything, then re-open the two MQTT wildcard characters.
    regex = re.escape(wildcard).replace(r'\#', '.*').replace(r'\+', '[^/]*')
    return re.fullmatch(regex, topic) is not None
fd7a3a5e8af1e6172decd62f2ba8294dfe07124c
12,824
def distinct_brightness(dictionary):
    """Given the brightness dictionary returns the dictionary that has no
    items with the same brightness (the first occurrence wins)."""
    seen = set()
    result = {}
    for char, level in dictionary.items():
        if level in seen:
            continue
        seen.add(level)
        result[char] = level
    return result
7f7bb5dba9bab113e15cc4f90ddd4dfda7bb5f01
12,826
import torch


def norm(x):
    """
    Normalize a tensor to unit L2 norm per sample (first dim is batch dim).

    :param x: tensor of shape (batch, ...)
    :return: x scaled so each sample has unit Euclidean norm
    """
    batch = x.size()[0]
    n = torch.norm(x.view(batch, -1), p=2, dim=1)
    # Append singleton dims so n broadcasts against x in the division.
    while len(n.size()) < len(x.size()):
        n = n.unsqueeze(1)
    # BUG FIX: the original called n.expand_as(x) and discarded the result
    # (expand_as is not in-place); broadcasting makes it unnecessary.
    return x / n
b40ba939c80db85e2ac8377b21ea7a17589b1c0f
12,828
import time


def __date_since_epoch__(value):
    """Internal helper: split a Unix timestamp into (day, month, year) strings
    using local time."""
    local = time.localtime(value)
    day = time.strftime('%d', local)
    month = time.strftime('%m', local)
    year = time.strftime('%Y', local)
    return day, month, year
34e8ee15f7fa5d6f662a7850b3252a37a3926371
12,829
def get_unobs_nd_names(gname):
    """
    For a graph named gname, this method returns a list of the names of
    its unobserved nodes (i.e., either [], or ["U"] or ["U1", "U2"]).

    Parameters
    ----------
    gname : str

    Returns
    -------
    list[str]
    """
    single_u = {"G2", "G3", "G5", "G6", "G10", "G11u", "G15", "G16"}
    double_u = {"G7", "G7up"}
    if gname in single_u:
        return ["U"]
    if gname in double_u:
        return ["U1", "U2"]
    return []
3450293f464b1e7cc7ab343888a606bd96d0f094
12,830
import inspect


def all_links(cls):
    """
    Collect the ``links`` declared on cls and on every class in its MRO.

    Each object declares attributes directly in its ``links`` list, and
    parent classes may declare more.  This walks the class hierarchy and
    returns the combined list, preserving order and skipping duplicates.
    """
    combined = list(cls.links)
    for ancestor in inspect.getmro(cls):
        for link in getattr(ancestor, 'links', []):
            if link not in combined:
                combined.append(link)
    return combined
2e3b1e4e527d339b0972345c81b6967857e54298
12,831