content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def get_package_names(pyproject):
    """
    Get package names

    :param dict pyproject: pyproject.toml body.
    :return: Package names
    :rtype: list
    """
    package_names = []
    for pkg in pyproject["package"]:
        if pkg["category"] == "main":
            package_names.append(pkg["name"])
    return package_names
9d12deab1613c4780a7b53d6f13233b72b51cf23
27,356
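A minimal usage sketch for get_package_names above; the "package"/"category" layout mirrors a poetry.lock-style structure and is an assumption, not taken from the record.

# Hypothetical input; the "package"/"category" layout is assumed.
pyproject = {
    "package": [
        {"name": "requests", "category": "main"},
        {"name": "pytest", "category": "dev"},
    ]
}
assert get_package_names(pyproject) == ["requests"]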
import random


def sim_detections(gt, tpr, fpr):
    """Simulates detection data for a set of ground truth cluster labels and an
    annotator with a specified TPR and FPR. Returns an array with the same
    length as the input gt, where 1 indicates the simulated annotator detected
    a cluster and 0 indicates an undetected cluster.

    Args:
        gt (array): Array of ground truth cluster labels. 1 indicates a true
            detection and 0 indicates a false detection.
        tpr (float): The true positive rate of the annotator. For a ground
            truth value of 1, it is the probability that the function will
            output 1, indicating that the simulated annotator detected the
            true cluster.
        fpr (float): The false positive rate of the annotator. For a ground
            truth value of 0, it is the probability that the function will
            output 1, indicating that the simulated annotator falsely detected
            the cluster.

    Returns:
        array: Array of detected cluster labels. A value of 1 indicates that a
            cluster was detected by the annotator, and 0 indicates that the
            cluster was not detected by the annotator.
    """
    assert 0 <= tpr <= 1, "TPR must be between 0 and 1"
    assert 0 <= fpr <= 1, "FPR must be between 0 and 1"
    det_list = []
    for item in gt:
        rand = random.random()
        if item == 1:
            if rand < tpr:
                det_list.append(1)
            else:
                det_list.append(0)
        elif item == 0:
            if rand < fpr:
                det_list.append(1)
            else:
                det_list.append(0)
    return det_list
ad8d0ac4423333c64ab1db8b838cba4ed7da4291
27,357
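A usage sketch: with tpr=1.0 and fpr=0.0 the simulated annotator is perfect, so the output must echo the ground truth exactly.

gt = [1, 0, 1, 1, 0]
assert sim_detections(gt, tpr=1.0, fpr=0.0) == gt  # perfect annotator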
def update_csv_data_dict(csv_data, first_column, *other_columns):
    """
    Update a csv dictionary for a given first column

    :param csv_data: The csv data dictionary to add the row to.
    :param first_column: The first column of the row to add.
    :param *other_columns: The further columns of the row to add.
    """
    line = first_column
    for column in other_columns:
        line += ',%s' % ('' if column is None else column)
    csv_data[first_column] = line
    return csv_data
34abbef2c026bc520a7f4048cd00b6710414294d
27,361
import unittest


def run_student_tests(print_feedback=True, show_traces=True,
                      success_required=True):
    """Run a suite of student submitted tests.

    Tests must be located in /autograder/source/student_tests/

    Args:
        print_feedback (bool): Print success or failure message
        show_traces (bool): Show failure/error stack traces
        success_required (bool): If True, this function will raise an
            AssertionError if any student tests fail.

    Returns:
        bool: True if all tests pass, False otherwise

    Raises:
        AssertionError if success_required is true and any test fails.
    """
    suite = unittest.defaultTestLoader.discover('student_tests',
                                                top_level_dir="./")
    result = unittest.TestResult()
    suite.run(result)
    succeeded = len(result.failures) == 0 and len(result.errors) == 0
    if not succeeded:
        if print_feedback:
            print("It looks like your submission is not passing your own tests:")
            if len(result.errors) > 0:
                print("Errors:")
                for error in result.errors:
                    print(error[0]._testMethodName)
                    if show_traces:
                        print(error[1])
            if len(result.failures) > 0:
                print("Failures:")
                for failure in result.failures:
                    print(failure[0]._testMethodName)
                    if show_traces:
                        print(failure[1])
        if success_required:
            raise AssertionError("Student tests failed.")
    else:
        if print_feedback:
            print("Submission passes student tests.")
    return succeeded
e262b6d5e8c74ca9085aa943a5d58670314d781d
27,362
import torch


def rotated_box_to_poly(rotated_boxes: torch.Tensor):
    """
    Transform rotated boxes to polygons

    Args:
        rotated_boxes (Tensor): (x, y, w, h, a) with shape (n, 5)
    Return:
        polys (Tensor): 4 corner points (x, y) of polygons with shape (n, 4, 2)
    """
    cs = torch.cos(rotated_boxes[:, 4])
    ss = torch.sin(rotated_boxes[:, 4])
    w = rotated_boxes[:, 2] - 1
    h = rotated_boxes[:, 3] - 1

    x_ctr = rotated_boxes[:, 0]
    y_ctr = rotated_boxes[:, 1]
    x1 = x_ctr + cs * (w / 2.0) - ss * (-h / 2.0)
    x2 = x_ctr + cs * (w / 2.0) - ss * (h / 2.0)
    x3 = x_ctr + cs * (-w / 2.0) - ss * (h / 2.0)
    x4 = x_ctr + cs * (-w / 2.0) - ss * (-h / 2.0)
    y1 = y_ctr + ss * (w / 2.0) + cs * (-h / 2.0)
    y2 = y_ctr + ss * (w / 2.0) + cs * (h / 2.0)
    y3 = y_ctr + ss * (-w / 2.0) + cs * (h / 2.0)
    y4 = y_ctr + ss * (-w / 2.0) + cs * (-h / 2.0)
    polys = torch.stack([x1, y1, x2, y2, x3, y3, x4, y4], dim=-1)
    polys = polys.reshape(-1, 4, 2)  # to (n, 4, 2)
    return polys
52f0aa5f225006162bbbd676d1319477802cb49e
27,363
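A usage sketch for the conversion above with an axis-aligned box (angle 0); note the implementation's "- 1" convention shrinks width and height by one pixel.

import torch
boxes = torch.tensor([[10.0, 10.0, 5.0, 3.0, 0.0]])  # (x, y, w, h, a)
polys = rotated_box_to_poly(boxes)
print(polys.shape)  # torch.Size([1, 4, 2])
print(polys[0, 0])  # tensor([12., 9.]) -- first corner under the -1 convention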
import torch


def maybe_cuda(what, use_cuda=True, **kw):
    """
    Moves `what` to CUDA and returns it, if `use_cuda` and it's available.

    Args:
        what (object): any object to possibly move to the GPU
        use_cuda (bool): whether we want to use the GPU or the CPU.

    Returns:
        object: the same object, possibly moved to the GPU.
    """
    if use_cuda is not False and torch.cuda.is_available():
        what = what.cuda()
    return what
ad9f8aa37c4d32000690768d5c18f327ff3bc76c
27,364
import logging


def get_logger(
    log_level=logging.INFO,
    msg_format="%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s:%(lineno)s"
               "- %(message)s",
    cls_name=__name__,
):
    """
    Instantiate a new logger.

    Args:
        log_level: One of the log reporting levels defined in the logging
            module (Default: logging.INFO)
        msg_format: Format string for log messages
        cls_name: Class name for this logger (Default: __name__)
    """
    logging.basicConfig(format=msg_format, level=log_level)
    return logging.getLogger(cls_name)
154518079694a57f44ebe2c83a5e19ff8c4b2396
27,365
import os
import base64
import re


def gen_oauth_nonce():
    """
    Generate an oauth nonce for the Twitter API

    - Get 32 random bytes
    - Base64 encode
    - Keep only word characters
    """
    random = os.urandom(32)
    encoded = base64.b64encode(random)
    # decode rather than str() so the b'...' repr prefix doesn't leak into the nonce
    words = re.sub(r'[^\w]', '', encoded.decode('ascii'))
    return words
cc8ec7209727caed58acac56936df2db0318689a
27,367
def load_RIRE_ground_truth(file_name):
    """
    Load the point sets defining the ground truth transformations for the RIRE
    training dataset.

    Args:
        file_name (str): RIRE ground truth file name. File format is specific
            to the RIRE training data, with the actual data expected to be in
            lines 15-23.

    Returns:
        Two lists of tuples representing the points in the "left" and "right"
        coordinate systems.
    """
    with open(file_name, "r") as fp:
        lines = fp.readlines()
        l = []
        r = []
        # Fiducial information is in lines 15-22, starting with the second entry.
        for line in lines[15:23]:
            coordinates = line.split()
            l.append(
                (float(coordinates[1]), float(coordinates[2]), float(coordinates[3]))
            )
            r.append(
                (float(coordinates[4]), float(coordinates[5]), float(coordinates[6]))
            )
    return (l, r)
9c7747b6fad1a10fb8cbb32162a3423e31fa40f3
27,368
def subdivide(x_1, x_2, n):
    """Performs the n-th cantor subdivision of the interval (x_1, x_2),
    a subset of [0, 1]"""
    if n == 0:
        return []
    new_x_1 = 2 * (x_1 / 3) + x_2 / 3
    new_x_2 = x_1 / 3 + 2 * (x_2 / 3)
    return (
        subdivide(x_1, new_x_1, n - 1)
        + [new_x_1, new_x_2]
        + subdivide(new_x_2, x_2, n - 1)
    )
ee2cc0ba214d363555224e4b70c10976c63d7dec
27,369
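A usage sketch: the first subdivision of [0, 1] returns the endpoints of the removed middle third, and deeper levels interleave the sub-intervals.

assert subdivide(0, 1, 1) == [1/3, 2/3]
print(subdivide(0, 1, 2))  # [1/9, 2/9, 1/3, 2/3, 7/9, 8/9]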
import functools


def graph_conv_wrapper(func):
    """ Applies graph convention to a function using pytorch convention """
    @functools.wraps(func)
    def wrapped_func(*args, **kwargs):
        new_args = [x.permute(0, 3, 1, 2) for x in args]
        ret = func(*new_args, **kwargs)
        return ret.permute(0, 2, 3, 1)
    return wrapped_func
945babc661cf121fcfa2563a997c2f72f5fb44e3
27,370
import os


def txt_file_path_list(folder):
    """Returns sorted list of paths to *.txt files found in specified folder."""
    filenames = os.listdir(folder)
    txt_files = []
    for file in filenames:
        if file[-3:] == 'txt':
            txt_files.append(os.path.join(folder, file))
    txt_files.sort()
    return txt_files
246b7771915210e64e649bcfeafdda3df5a1cd03
27,371
def _get_ref(info: list) -> str:
    """Get the workspace reference from an info tuple"""
    return f"{info[6]}/{info[0]}/{info[4]}"
fbd7bb479abc090b643fa1f1ecfcdd84dee18f62
27,373
import re
import inspect


def parse_pyvars(code: str, frame_nr: int = 2):
    """Looks through call stack and finds values of variables.

    Parameters
    ----------
    code : str
        SuperCollider command to be parsed
    frame_nr : int, optional
        on which frame to start, by default 2 (grandparent frame)

    Returns
    -------
    dict
        {variable_name: variable_value}

    Raises
    ------
    NameError
        If the variable value could not be found.
    """
    matches = re.findall(r"\s*\^[A-Za-z_]\w*\s*", code)
    pyvars = {match.split("^")[1].strip(): None for match in matches}
    missing_vars = list(pyvars.keys())

    stack = inspect.stack()
    frame = None
    try:
        while missing_vars and frame_nr < len(stack):
            frame = stack[frame_nr][0]
            for pyvar in pyvars:
                if pyvar not in missing_vars:
                    continue
                # check for variable in local variables
                if pyvar in frame.f_locals:
                    pyvars[pyvar] = frame.f_locals[pyvar]
                    missing_vars.remove(pyvar)
                # check for variable in global variables
                elif pyvar in frame.f_globals:
                    pyvars[pyvar] = frame.f_globals[pyvar]
                    missing_vars.remove(pyvar)
            frame_nr += 1
    finally:
        del frame
        del stack

    if missing_vars:
        raise NameError("name(s) {} not defined".format(missing_vars))

    return pyvars
381d698d3905ee306f75ad888d07c1107e398251
27,374
def cli(ctx, q, page=1, page_size=10):
    """Search for tools in a Galaxy Tool Shed.

    Output:

        dictionary containing search hits as well as metadata for the search.
        For example::

            {u'hits': [{u'matched_terms': [],
                        u'score': 3.0,
                        u'tool': {u'description': u'convert between various FASTQ quality formats',
                                  u'id': u'69819b84d55f521efda001e0926e7233',
                                  u'name': u'FASTQ Groomer',
                                  u'repo_name': None,
                                  u'repo_owner_username': u'devteam'}},
                       {u'matched_terms': [],
                        u'score': 3.0,
                        u'tool': {u'description': u'converts a bam file to fastq files.',
                                  u'id': u'521e282770fd94537daff87adad2551b',
                                  u'name': u'Defuse BamFastq',
                                  u'repo_name': None,
                                  u'repo_owner_username': u'jjohnson'}}],
             u'hostname': u'https://testtoolshed.g2.bx.psu.edu/',
             u'page': u'1',
             u'page_size': u'2',
             u'total_results': u'118'}
    """
    return ctx.gi.tools.search_tools(q, page=page, page_size=page_size)
793e2149274e4d84e8884f17b0163a5aa28d3119
27,375
def P_f(x):
    """ Projection onto the free cone (x in R) """
    return x
a24e0636747becf32b321c0b3539a996a8acd63b
27,377
import itertools


def get_mismatches_from_one(sequence, list_let, nb_mis):
    """ Return all the sequences possible from sequence with mismatch = nb_mis"""
    nb_let = len(list_let)
    list_seq_final = []
    len_seq = len(sequence)
    nb = range(len_seq)
    list_mismatch_place = list(itertools.combinations(nb, nb_mis))
    list_letters_place = list(itertools.combinations(range(nb_let - 1), nb_mis))
    for i in range(len(list_mismatch_place)):
        list_seq = [sequence[0:list_mismatch_place[i][0]]
                    for k in range(len(list_letters_place))]
        for j in range(len(list_mismatch_place[i])):
            letter = sequence[list_mismatch_place[i][j]]
            letters_to_use = [let for let in list_let if let != letter]
            for m in range(len(list_seq)):
                list_seq[m] += letters_to_use[list_letters_place[m][j]]
                if j != len(list_mismatch_place[i]) - 1:
                    list_seq[m] += sequence[list_mismatch_place[i][j] + 1:
                                            list_mismatch_place[i][j + 1]]
                else:
                    list_seq[m] += sequence[list_mismatch_place[i][j] + 1:len_seq]
        list_seq_final += list_seq
    return list_seq_final
3341cfb936a6a52df8fcd4eb1c2786f1f588bbb1
27,378
def build_lcp_lr(LCP, n):
    """Builds LCP-LR dictionary from LCP array"""
    LCP_LR = {}

    def _build_lcp_lr(left, right):
        if left + 1 == right:
            common_prefix = LCP[right]
        else:
            mid = (left + right) // 2
            common_prefix = min(_build_lcp_lr(left, mid),
                                _build_lcp_lr(mid, right))
        LCP_LR[(left, right)] = common_prefix
        return common_prefix

    _build_lcp_lr(1, n)
    return LCP_LR
3b9e3d3c91e96901d6d7a3e4965d15acbca25476
27,379
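A usage sketch; the convention that LCP[i] holds the common-prefix length of suffix-array entries i-1 and i (1-indexed) is an assumption about the caller, and a dict stands in for the array since only item lookup is used.

LCP = {1: 0, 2: 1, 3: 3, 4: 0}  # hypothetical LCP values, 1-indexed
lcp_lr = build_lcp_lr(LCP, 4)
print(lcp_lr[(1, 4)])  # 0 -- the range minimum, usable during binary search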
def _get_lines(file_obj):
    """Return all the lines in file_obj."""
    return [line.strip() for line in file_obj.readlines()]
07b2bcab4ad9f4f48e5c633752c088791eee4c2d
27,380
def binary(e):
    """
    @purpose: Returns a binary representation of the integer passed
    @parameter: e - The integer to be converted to binary
    @precondition: A positive integer value is passed
    @postcondition: A list of integers representing the binary value of the integer
    @complexity: Best Case: O(1) if e is 0
                 Worst Case: O(e + log e) because the algorithm cuts e in half
                 in the second loop
    """
    if e > 0:
        rev_binary = [0] * e
        length = 0
        while e > 0:
            rev_binary[length] = int(e % 2)
            e = int((e - e % 2) / 2)
            length += 1
        return rev_binary[0:length]
    else:
        return [0]
5412cb4647de46acfe010862c0771bdc88e941e7
27,381
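A usage sketch: the returned list is the reversed binary expansion, least-significant bit first.

assert binary(6) == [0, 1, 1]  # 6 = 0b110, reversed
assert binary(0) == [0]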
def cov(C):
    """
    Attributes:
        C (array-like, dtype=float, shape=(n,n)): Confusion matrix

    Returns:
        cons (float): Coverage constraint function value
    """
    return 1 - C[:, 0].sum()
f8cbaafaeb2003687b3344d683b4f3555cba58e6
27,383
import os


def load_expectation(expectation_file_name, strip=True):  # pragma: no cover
    """Return (unicode) str containing text in *expectation_file_name*.

    Expectation file path is rooted at tests/expectations.
    """
    thisdir = os.path.dirname(__file__)
    expectation_file_path = os.path.abspath(
        os.path.join(thisdir, "expectations", "%s.txt" % expectation_file_name)
    )
    with open(expectation_file_path, "rb") as f:
        expectation_bytes = f.read()
    if strip:
        return expectation_bytes.decode("utf-8").strip()
    return expectation_bytes.decode("utf-8")
c3b7c68939b9802a4cf71d4b3472cf74127470cc
27,385
def discovery_topic_is_device_config(topic):
    """Return True if the discovery topic is device configuration."""
    return topic.endswith("config")
ffb313d9a35312bf8dd6f9c5ad97a9b1dc9ab2fd
27,387
def get_monoalphabetic_decryption(data: str, key: str) -> dict:
    """
    Monoalphabetic cipher. Part for decryption.

    For decryption (operating on unicode code points):
        c[i] = c[i] - key_code
    Where key_code = key[0] + key[1] + ... + key[len(key) - 1]
    (operating on unicode code points)

    The structure of the function is similar to the encryption function
    described above.
    """
    key_code = 0
    for char_iterator in key:
        key_code += ord(char_iterator)

    decrypted_data_unicode_list = []
    decrypted_data_string = ""
    for char_iterator in data:
        # subtract key_code from the character code, modulo the unicode range
        decrypted_data_unicode_list.append((ord(char_iterator) - key_code) % 1114112)
        decrypted_data_string += chr((ord(char_iterator) - key_code) % 1114112)
    return {"decrypted_string": decrypted_data_string,
            "decrypted_character_codes": decrypted_data_unicode_list}
2ded86d0ed9c51a23c80db8342458965ef0d1b3c
27,391
def replace_value_with_key(s, replace_dict):
    """In string s values in the dict get replaced with their key"""
    for key in replace_dict:
        s = s.replace(replace_dict[key], key)
    return s
6e9c173d6121ac2c90838fa98a9f5d8e88da4ac6
27,393
def markDuplicate(key, idea, oldest_idea_id):
    """
    Mark duplicate. Mark is for statistics purpose.

    :return: marked key, IDEA
    """
    # If idea is present
    if idea:
        # Equality of ID's in tuple and idea, if true mark will be added
        if oldest_idea_id != idea.id:
            # Add True mark for duplicate event
            idea.aida_duplicate = 'True'
    # Return tuple: key for next deduplication phase and IDEA
    return (key[0:4], idea)
8ee5ceb8df6931642732b3f039a18f818b82713a
27,395
def METARtemp(val):
    """convert temp to METAR"""
    f_temp = float(val)
    i_temp = int(round(f_temp, 0))
    f1_temp = int(round(f_temp * 10., 0))
    if i_temp < 0:
        i_temp = 0 - i_temp
        m_temp = "M%02i" % (i_temp,)
    else:
        m_temp = "%02i" % (i_temp,)
    if f1_temp < 0:
        t_temp = "1%03i" % (0 - f1_temp,)
    else:
        t_temp = "0%03i" % (f1_temp,)
    return m_temp, t_temp
5fcfa315109911a0061908960c05521840614abd
27,396
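A usage sketch: negative temperatures gain the METAR "M" prefix, and the T-group encodes tenths with a leading sign digit.

print(METARtemp(10.6))  # ('11', '0106')
print(METARtemp(-2.4))  # ('M02', '1024')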
def format_class_name(name):
    """
    Formats a string to CapWords.

    :param name: string to format
    :type name: str
    :return: string with the name in CapWords
    :rtype: str
    """
    fixed_name = name.title().replace("_", "")
    return fixed_name
064369048205aaf5afe872844ceff4605b6f1498
27,397
import json


def bot_factory(app_client, admin_authorization_header):
    """
    Generate a bot with the specified name and protocol, return its id
    (user_id) and token.
    """
    def gen(name, protocol):
        data = {
            'name': name,
            'protocol': protocol
        }
        r = app_client.post('/api/bots', data=json.dumps(data),
                            content_type='application/json',
                            headers={'Authorization': admin_authorization_header})
        assert r.status_code == 201, r.data
        j = json.loads(r.data.decode('utf-8'))
        bot_id = j['id']
        r = app_client.get('/api/bots/{}/token'.format(bot_id),
                           headers={'Authorization': admin_authorization_header})
        assert r.status_code == 200, r.data
        j = json.loads(r.data.decode('utf-8'))
        bot_token = j['token']
        return bot_id, bot_token
    return gen
5ea30e56c958aaae7d6b05b46941a40ed1ec0c7c
27,399
def check_results(candidate_answers, acceptable_answers):
    """
    Return a tuple: the 1-based rank of the first candidate answer that
    exactly matches an acceptable answer (0 if none does), and a score that
    credits an exact match with 1/rank and each substring match with
    1/len(candidate_answers).
    """
    total = 0
    for i, answer in enumerate(candidate_answers):
        for acceptable_answer in acceptable_answers:
            if answer == acceptable_answer:
                return i + 1, total + 1 / (i + 1)
            elif answer in acceptable_answer:
                total += 1 / len(candidate_answers)
    return 0, total
4e75b82f72532d7797792fe8da4a4e3dd9d7c974
27,400
def get_attr(d: dict, t: str):
    """Get the value for key `t` in dict `d`, or None if it is missing."""
    try:
        return d[t]
    except Exception:
        return None
eba3fd6c7bb3eeae453db64b1260fdf7c0ed7f6f
27,401
from typing import Union
from pathlib import Path
import subprocess


def is_git_ignored(filename: Union[str, Path]) -> bool:
    """
    Check if file is git-ignored.

    Supports nested .gitignore files.
    """
    folder = Path(filename).parent
    filename = Path(filename).name
    try:
        p = subprocess.run(
            ["git", "check-ignore", filename],
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            encoding="utf-8",
            cwd=folder,
        )
        # Will return exit code 1 if not gitignored
        is_ignored = not bool(p.returncode)
    except subprocess.CalledProcessError as exc:
        raise OSError(exc.stderr)
    return is_ignored
b5ac515ad98e140d6a95ec82915c2876093918dc
27,403
def split_seconds(seconds):
    """Split seconds into [day, hour, minute, second, ms]

    `divisor: 1, 24, 60, 60, 1000`
    `units: day, hour, minute, second, ms`

    >>> split_seconds(6666666)
    [77, 3, 51, 6, 0]
    """
    ms = seconds * 1000
    divisors = (1, 24, 60, 60, 1000)
    quotient, result = ms, []
    for divisor in divisors[::-1]:
        quotient, remainder = divmod(quotient, divisor)
        if divisor == 1:
            result.append(quotient)
        else:
            result.append(remainder)
    return result[::-1]
a37493b93130274f25e722d324a84dbdb7f5d03c
27,404
import time


def wait_on_job(job):
    """Block until the async job is done."""
    while job.status() != 'done':
        time.sleep(.05)  # pragma: no cover
    return job
e08190d6a8dee960e7e3f3490ed934203f0aa7ff
27,405
def pkcs7_pad(plaintext: bytes, block_size: int = 0x10) -> bytes:
    """
    Pad a message using the byte padding algorithm described in PKCS#7

    This padding scheme appends n bytes with value n, with n the amount of
    padding bytes. The specification describing the padding algorithm can be
    found here: https://tools.ietf.org/html/rfc2315#section-10.3
    """
    assert 0 < block_size < 0x100
    # If the plaintext is an exact multiple of block_size,
    # we need to append a whole block.
    remainder = block_size - (len(plaintext) % block_size)
    return plaintext + bytes([remainder] * remainder)
fc6e910b428622ac6a9d59b46f04b6c0f7b2a783
27,406
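A usage sketch: 13 input bytes need 3 bytes of \x03 padding, and an exact multiple of the block size gains a whole extra block.

assert pkcs7_pad(b"yellow submar") == b"yellow submar" + b"\x03" * 3
assert len(pkcs7_pad(b"\x00" * 16)) == 32  # exact multiple -> extra block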
def convert_null_to_none(value):
    """
    Convert '' or nan entries to None for a list; non-list input becomes None.

    :param value: list or value
    :return: value
    """
    if isinstance(value, list):
        # x != x is only true for NaN, so this also catches nan entries
        value = [None if x == '' or x != x else x for x in value]
    else:
        value = None
    return value
cb33abd018420165b16818bf9054bf702b4f3a2c
27,407
from datetime import datetime
import pytz


def get_appropriate_object_from_model(object_set, is_queryset=False):
    """
    Tools:
        - publish_status = {1: always on, 0: conditionally on, -1: always off,
          NULL: never published}

    OK - this is how the game is played:

    Rule 0: only objects that COULD be in play can play
    Rule 1: if your date is in the future, then you can't play
    Rule 2: pick from the ones with "date set" that's in the past who have
        been published (i.e., live_as_of is not None)
    Rule 3: Barring that - pick the most-recently modified page with
        publish_status = 1 (this is because it IS possible for an "always on"
        page to have never gone through the publish step with a publish date -
        it's just FORCED TO BE ON)
    Rule 4: Barring THAT - pick the most-recently modified page with
        publish_status != -1 that has default_live = True.
    Rule 5: Barring THAT - return the Page that is the default home page
        (is that even possible)? or None

    RAD 13-Feb-2019: I've added an optional arg that allows processing of
    already-created querysets. That way you can have a model that groups
    instances by a foreign key, and then use the gatekeeper on each clump.
    """
    now = datetime.now(pytz.utc)

    # Use the whole model by default.
    # Otherwise if is_queryset is True, treat it as a queryset.
    if is_queryset:
        qs = object_set.exclude(publish_status=-1)
    else:
        qs = object_set.objects.exclude(publish_status=-1)

    # anything that is not available to anyone is ignored
    qs = qs.exclude(live_as_of__gt=now)

    # Send most-recent live_as_of
    qs1 = qs.exclude(live_as_of__isnull=True)  # For some reason this does NOT WORK
    qs1 = qs.filter(publish_status=0).order_by('-live_as_of').first()
    if qs1:
        return qs1

    # Send the most recently updated permanent on
    try:
        qs2 = qs.filter(publish_status=1).order_by('-date_modified').first()
    except:
        qs2 = qs.filter(publish_status=1).first()
    if qs2:
        return qs2

    # Send the most-recent "default"
    try:
        qs3 = qs.filter(default_live=True).order_by('-date_modified').first()
    except:
        qs3 = qs.filter(default_live=True).first()
    if qs3:
        return qs3

    # Nothing is available - this will likely result in a 404 page being returned.
    return None
3b0bc31423c3f3eb71dab792c579db703826abce
27,408
def countSelectedPairs(all_selected_idx, print_msg=True,
                       string='Major merger cut: '):
    """
    Function to count selected pairs from the list of lists outputted by the
    ball tree

    @all_selected_idx :: list of lists of neighbour indices returned by the
        ball tree query
    """
    count_selected_pairs = 0
    for j, mm in enumerate(all_selected_idx):
        if len(mm) >= 1:
            for m in mm:
                if j != m:
                    count_selected_pairs += 1
    if print_msg:
        print(string + '%d selected pairs' % (count_selected_pairs / 2))
    return count_selected_pairs
a8e7b66c218c833d91f8660032ec9715eaa76862
27,409
def get_arrival_from_pick(arrivals, pick):
    """
    return arrival corresponding to pick

    :param arrivals: list of arrivals
    :type arrivals: list of either obspy.core.event.origin.Arrival
        or microquake.core.event.origin.Arrival
    :param pick: P or S pick
    :type pick: either obspy.core.event.origin.Pick
        or microquake.core.event.origin.Pick
    :return arrival
    :rtype: obspy.core.event.origin.Arrival or
        microquake.core.event.origin.Arrival
    """
    arrival = None
    for arr in arrivals:
        if arr.pick_id == pick.resource_id:
            arrival = arr
            break
    return arrival
4b4e1df275601aa3990a22dc471541808a1bbbef
27,410
def delete_prefix(prefix: dict, session, fqdn):
    """Call the API to delete the prefix and return the response."""
    # avoid shadowing the builtin `id`; the original `-> int` annotation was
    # wrong, since session.delete() returns a response object
    prefix_id = prefix["id"]
    result = session.delete(f"http://{fqdn}/api/ipam/prefixes/{prefix_id}/")
    return result
96cf4d689636ac3117ed38ac39d39612257761a6
27,411
def is_pc_router_interface(config_db, pc):
    """Check if portchannel is a router interface"""
    pc_interface_table = config_db.get_table('PORTCHANNEL_INTERFACE')
    for intf in pc_interface_table:
        if pc == intf[0]:
            return True
    return False
35da3dce6a4ca4ad3b9dfde3723dede6dd4ecef3
27,412
import gzip
import json


def load_json(filename):
    """ Load JSON from text file or gzip """
    # Use a context manager so the file handle is closed after reading.
    opener = gzip.open if filename.endswith("gz") else open
    with opener(filename, "rt") as f:
        return json.load(f)
3601d835d394c00f79cf6c5900df810b33b2f11d
27,415
def get_events(sc):
    """
    Get all event_logs and reverse order to make ascending.
    """
    events = sc.getstatus('event_logs')
    events.reverse()
    return events
3b8387fd96183b0dd5279891ab15e3f69af579ce
27,416
import json


def load_evaluation(filename):
    """Load single evaluation file. Adds the filename for reference"""
    data = None
    with open(filename) as fin:
        try:
            data = json.loads(fin.read())
            data['filename'] = filename
        except json.JSONDecodeError:
            print(f"Could not JSON decode {filename}")
    return data
472392989f96185d5071d51f10d7bcaa234cdf1e
27,417
import torch


def giou_loss(pred, target, weight, avg_factor=None):
    """GIoU loss.

    Computing the GIoU loss between a set of predicted bboxes and target bboxes.
    """
    pos_mask = weight > 0
    weight = weight[pos_mask].float()
    if avg_factor is None:
        avg_factor = torch.sum(pos_mask).float().item() + 1e-6
    bboxes1 = pred[pos_mask].view(-1, 4)
    bboxes2 = target[pos_mask].view(-1, 4)

    lt = torch.max(bboxes1[:, :2], bboxes2[:, :2])  # [rows, 2]
    rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:])  # [rows, 2]
    wh = (rb - lt + 1).clamp(min=0)  # [rows, 2]
    enclose_x1y1 = torch.min(bboxes1[:, :2], bboxes2[:, :2])
    enclose_x2y2 = torch.max(bboxes1[:, 2:], bboxes2[:, 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1 + 1).clamp(min=0)

    overlap = wh[:, 0] * wh[:, 1]
    ap = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (bboxes1[:, 3] - bboxes1[:, 1] + 1)
    ag = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (bboxes2[:, 3] - bboxes2[:, 1] + 1)
    u = ap + ag - overlap

    ious = overlap / u
    enclose_area = enclose_wh[:, 0] * enclose_wh[:, 1]  # i.e. C in paper
    gious = ious - (enclose_area - u) / enclose_area
    iou_distances = 1 - gious
    return torch.sum(iou_distances * weight)[None] / avg_factor
5a65efeff6244d1691fabf85c3d110e66a2cc43a
27,418
import torch


def quaternion_linear_rotation(input, r_weight, i_weight, j_weight, k_weight,
                               bias=None, quaternion_format=False):
    """
    Applies a quaternion rotation transformation to the incoming data:

    The rotation W*x*W^t can be replaced by R*x following:
    https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation

    Works for unitary and non unitary weights.

    The initial size of the input must be a multiple of 3 if
    quaternion_format = False and 4 if quaternion_format = True.
    """
    square_r = (r_weight * r_weight)
    square_i = (i_weight * i_weight)
    square_j = (j_weight * j_weight)
    square_k = (k_weight * k_weight)

    norm = torch.sqrt(square_r + square_i + square_j + square_k)
    norm_factor = 2.0 * norm

    square_i = norm_factor * (i_weight * i_weight)
    square_j = norm_factor * (j_weight * j_weight)
    square_k = norm_factor * (k_weight * k_weight)

    ri = (norm_factor * r_weight * i_weight)
    rj = (norm_factor * r_weight * j_weight)
    rk = (norm_factor * r_weight * k_weight)

    ij = (norm_factor * i_weight * j_weight)
    ik = (norm_factor * i_weight * k_weight)

    jk = (norm_factor * j_weight * k_weight)

    if quaternion_format:
        zero_kernel = torch.zeros(r_weight.shape)
        rot_kernel_1 = torch.cat((zero_kernel, 1.0 - (square_j + square_k),
                                  ij - rk, ik + rj), dim=0)
        rot_kernel_2 = torch.cat((zero_kernel, ij + rk,
                                  1.0 - (square_i + square_k), jk - ri), dim=0)
        rot_kernel_3 = torch.cat((zero_kernel, ik - rj, jk + ri,
                                  1.0 - (square_i + square_j)), dim=0)

        zero_kernel2 = torch.zeros(rot_kernel_1.shape)
        global_rot_kernel = torch.cat((zero_kernel2, rot_kernel_1,
                                       rot_kernel_2, rot_kernel_3), dim=1)
    else:
        rot_kernel_1 = torch.cat((1.0 - (square_j + square_k), ij - rk,
                                  ik + rj), dim=0)
        rot_kernel_2 = torch.cat((ij + rk, 1.0 - (square_i + square_k),
                                  jk - ri), dim=0)
        rot_kernel_3 = torch.cat((ik - rj, jk + ri,
                                  1.0 - (square_i + square_j)), dim=0)

        global_rot_kernel = torch.cat((rot_kernel_1, rot_kernel_2,
                                       rot_kernel_3), dim=1)

    if input.dim() == 2:
        if bias is not None:
            return torch.addmm(bias, input, global_rot_kernel)
        else:
            return torch.mm(input, global_rot_kernel)
    else:
        output = torch.matmul(input, global_rot_kernel)
        if bias is not None:
            return output + bias
        else:
            return output
bec280bda0b2138514d3fa9ebd879e3ee25fb541
27,420
def parse_program(line):
    """Parse line to tuple with name, weight and list of programs above."""
    program, *above = line.split(' -> ')
    name, weight = program.split()
    return name, int(weight[1:-1]), above[0].split(', ') if above else []
0ebcec6f32602d0bfcbfe857d49bcd46a20f528a
27,422
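A usage sketch on an Advent of Code day-7 style line.

line = "fwft (72) -> ktlj, cntj, xhth"
assert parse_program(line) == ("fwft", 72, ["ktlj", "cntj", "xhth"])
assert parse_program("qoyq (66)") == ("qoyq", 66, [])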
import click


def networkx_to_dict(ngraph, verbose=True):
    """Turn the networkx data into a dict structure that can be consumed by
    the dataviz.

    Note
    --------
    The network data is extracted as follows

    > xx = G.nodes(data=True)
    > for x in xx: print(x[1])
    ('respiratory tract infections', {'frequency': 145, 'freq_normalized': 92.77215189873418, 'score_avg': 0.63237, 'score_bucket': 2, 'size': 10})
    ('acute lung injury', {'frequency': 9, 'freq_normalized': 6.69620253164557, 'score_avg': 0.62226, 'score_bucket': 2, 'size': 10})
    .....

    > yy = G.edges(data=True)
    > for y in yy: print(y)
    ('respiratory tract infections', 'MERS-CoV infection', {'weight': 2})
    ('respiratory tract infections', 'lower respiratory tract infections', {'weight': 53})
    ....

    Parameters
    ----------
    ngraph : networkx.Graph
        DSL data turned into a graph

    Returns
    ----------
    tuple
        A tuple containing two dictionaries ready to be turned into visjs JSON
        data sources. For example

        nodes = [
            {'id': 1, 'label': 'Knowledge Graphs'},
            {'id': 2, 'label': 'RDF'},
            {'id': "3 3", 'label': 'Linked Data'}
        ]
        edges = [
            {'from': 1, 'to': "3 3"},
            {'from': 1, 'to': 2},
            {'from': "3 3", 'to': 2}
        ]
    """
    if not ngraph:
        return [], []  # nodes, edges

    NODES = []
    if verbose:
        click.secho("Creating Dict for visjs dataviz.. ", fg="green")
    if verbose:
        click.secho("..nodes.. ", dim=True)

    def safe_id(_id):
        return _id.replace(" ", "_").strip()

    # px.colors.diverging.Temps
    TEST_COLORS = ['rgb(0, 147, 146)', 'rgb(57, 177, 133)',
                   'rgb(156, 203, 134)', 'rgb(233, 226, 156)',
                   'rgb(238, 180, 121)', 'rgb(232, 132, 113)',
                   'rgb(207, 89, 126)']

    for x in ngraph.nodes(data=True):
        # id and label, the same; freqn = size [TBC]
        _id, label = safe_id(x[0]), x[0].capitalize()
        freq, freqn = x[1]['frequency'], x[1]['freq_normalized']
        score_avg, score_bucket = x[1]['score_avg'], x[1]['score_bucket']
        temp = {'id': _id, 'label': label, 'group': 1}
        temp['value'] = int(freqn)
        temp['freq'] = int(freq)
        temp.update(x[1])  # add all other features too
        # TEST COLORS hardcoded
        temp['color'] = TEST_COLORS[3 * score_bucket]
        # HTML titles
        temp['title'] = f"<h4>Concept: {label}</h4><hr>Frequency Norm: {freqn}<br>Frequency: {freq}<br>Score avg: {score_avg}<br>Score bucket: {score_bucket}"
        # temp['title'] = json.dumps(x[1], sort_keys=True, indent=4)  # title = original JSON contents
        NODES.append(temp)

    EDGES = []
    if verbose:
        click.secho("..edges.. ", dim=True)
    for x in ngraph.edges(data=True):
        # id and label, the same
        temp = {'from': safe_id(x[0]), 'to': safe_id(x[1])}
        temp['value'] = int(x[2]['weight'])
        temp.update(x[2])  # add all other features too
        temp['title'] = f"Strength: {x[2]['weight']}"
        EDGES.append(temp)

    if verbose:
        click.secho("Done", dim=True)
    return NODES, EDGES
678a214c86b2810c79d3223dc5832b922a9fbfc4
27,423
def namespaceable(request, namespaces, abc):
    """Return either helper class."""
    if request.param == 'main':
        return namespaces.Namespaceable
    else:
        return abc.NamespaceableABC
014c6f763296aa9de22ab534d8e06c655959f9b5
27,424
def entry_check(entry):
    """
    check if entry is a dict and has a key entry.
    If list return dict {'entry': entry}

    :param entry
    :return: entry_dict
    """
    if isinstance(entry, dict):
        if 'entry' in entry:
            return entry
    else:
        # not a dict, so wrap as list in dict with key=entry
        return {'entry': entry}
41e788a02edbf56938878fda045ebf65b7bb5df7
27,425
from typing import Tuple


def is_tuple_list_type(t) -> bool:
    """
    Allowed only `Tuple[Type1, ...]`

    :param t:
    :return:
    """
    if not hasattr(t, '__origin__'):
        return False
    # Python 3.6: Tuple, Python 3.7+: tuple
    if not t.__origin__ in (Tuple, tuple):
        return False
    return t.__args__[1] == Ellipsis
0be0fe167b0934e46d5b315da3552eb8a534ff05
27,426
import re


def check_state_keys(state, keys_regex):
    """check if keys exists in state using full python paths"""
    regex = re.compile(keys_regex)
    for k, v in state.items():
        if regex.findall(k):
            return True
    return False
423bbd9a01c7240d0c73f1f7370e38164d3ae63f
27,427
import subprocess


def BuildBinary(build_dir, binary_name):
    """Builds the given binary in the given directory with Ninja.

    Returns True on success."""
    return subprocess.call(["ninja", "-C", build_dir, binary_name]) == 0
91545bee67981e7c14707f6e09f1f1370d268ae7
27,429
import re


def is_drive_letter(path):
    """Check whether the specified path is a drive letter"""
    # return a bool rather than a Match object / None
    return re.match('^[a-zA-Z]{1}:$', path) is not None
bbef9719ce0a0a4910fa021003238aecbe0810f8
27,430
def convert_fasta_to_string(filename):
    """Takes a genome FASTA and outputs a string of that genome

    Args:
        filename: fasta file

    Returns:
        string of the genome sequence
    """
    assert filename.split('.')[-1] == 'fasta'  # assert correct file type
    with open(filename) as f:
        # split by lines, remove the header line, join the rest
        sequence = ''.join(f.read().split('\n')[1:]).lower()
    return sequence
4bb2a8228a08b2debfb5d8fab9cf08c05390f24f
27,431
def harmean(series, n):
    """
    Harmonic mean: the harmonic mean of `series` over `n` periods.

    Formula:
        harmean(x, 5) = 1 / [(1 / x(1) + 1 / x(2) + 1 / x(3) + 1 / x(4) + 1 / x(5)) / 5]

    Example:
        harmean = tafunc.harmean(df["close"], 5)  # 5-period harmonic mean of the close

    Notes:
        1. n includes the current bar.
        2. The harmonic mean and the simple mean of the reciprocals are
           reciprocals of each other.
        3. If n is valid but fewer than n bars are available, NaN is returned.
        4. If series or n is 0 or empty, NaN is returned.
    """
    harmean_data = n / ((1 / series).rolling(n).sum())
    return harmean_data
2152568e3cb75296cad5c4ef117779d850467a17
27,432
import os


def get_files_to_push(local_path, device_path):
    """Get a list of the file(s) to push.

    Parameters
    ----------
    local_path : str
        A path to a local file or directory
    device_path : str
        A path to a file or directory on the device

    Returns
    -------
    local_path_is_dir : bool
        Whether or not ``local_path`` is a directory
    local_paths : list[str]
        A list of the file(s) to push
    device_paths : list[str]
        A list of destination paths on the device that corresponds to
        ``local_paths``
    """
    local_path_is_dir = os.path.isdir(local_path)
    local_paths = [local_path] if not local_path_is_dir else os.listdir(local_path)
    device_paths = [device_path] if not local_path_is_dir \
        else [device_path + '/' + f for f in local_paths]
    return local_path_is_dir, local_paths, device_paths
819a4c5890c95783b060ed7a1a126ad8be088042
27,433
def unauthorized(e):
    """ custom 401 """
    return '<h1>hey, you there, no wandering around here</h1>', 401
2243fb48421026fee1fde66096d80a3d09fa88fb
27,434
def calculate_gc(read):
    """Returns GC content as a percentage."""
    return (read.lower().count("g") + read.lower().count("c")) / len(read) * 100.0
aeae6346abbfb634ccd871e9ab191a404916961f
27,436
def identity(x):
    """Identity function, useful as a semantic action for rules."""
    return x
359ef0c72e231f9b815e928a3f908d3cf5772f81
27,437
def tree_struct_sep(lines):
    """Separates tree structure from rest of the data.

    This is done to get an expected format for the column_parser
    """
    indx = None
    for line in lines:
        if line.startswith('; ********'):
            indx = lines.index(line) + 1
            break
    return lines[:indx], lines[indx:]
c896fb01d9565569dc4d5b9a1c43b84173892287
27,438
def nplus1loader(loadopt, attrs, nested=True):
    """ N+1 loader for attributes, be it a column or a relationship

    Give it a list of attributes, or '*' to handle them all.

    Args:
        attrs: Currently, only supports '*'.
            See `nplus1loader_cols()` and `nplus1loader_rels()` for fine-tuning.
        nested: Whether to automatically put the nplus1loader('*') on loaded
            relationships
    """
    assert tuple(attrs) == ('*',), 'nplus1loader() only supports "*" yet'
    loadopt = loadopt.nplus1loader_cols('*', nested=nested)
    loadopt = loadopt.nplus1loader_rels('*', nested=nested)
    return loadopt
7fa5b44ebe8db9fa578d208b2b788e326d0bbb1f
27,439
import sys


def get_evaluator(method, parent):
    """
    Get an evaluator.

    Parameters
    ----------
    method : str
        The name of the evaluator.
    parent : Selector
        The selector using the evaluator.

    Returns
    -------
    Evaluator
        The initialized evaluator.
    """
    return getattr(sys.modules[__name__], method)(parent)
5168e75c902440685dcde1c46f40481f02a23496
27,440
def pascal_triangle(n):
    """
    Returns a list of lists of integers representing Pascal's triangle of n.
    """
    if n <= 0:
        return []
    result = [[1]]
    # use != rather than `is not`: identity comparison on ints is unreliable
    while len(result) != n:
        tmp = [1]
        for i in range(len(result[-1]) - 1):
            tmp.append(result[-1][i] + result[-1][i + 1])
        tmp.append(1)
        result.append(tmp)
    return result
1175816ec7b7e657543decf4be4478424bfabb79
27,443
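A usage sketch.

assert pascal_triangle(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
assert pascal_triangle(0) == []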
def dsr_bc(D_eq):
    """Beard and Chuang drop shape relationship function.

    Arguments:
        D_eq: Drop volume-equivalent diameter (mm)

    Returns:
        r: The vertical-to-horizontal drop axis ratio.

    Note: the Scatterer class expects horizontal to vertical,
    so you should pass 1/dsr_bc
    """
    return 1.0048 + 5.7e-04 * D_eq - 2.628e-02 * D_eq**2 + \
        3.682e-03 * D_eq**3 - 1.677e-04 * D_eq**4
1687a0f762fa4b846563c0c327f0a93296c2a8f8
27,444
def apps(request):
    """Return the comma-separated --apps option as a list."""
    apps = request.config.option.apps
    return apps.split(',')
aa571a60803634dfcad61994a4af086fe14c5763
27,445
def validate_email_destination(os_mailbox, os_email):
    """
    return True iff O.S. mailbox record is a destination linked to given
    O.S. email record
    """
    # guard against a missing 'destinations' key (get() would return None)
    destinations = os_email.get('destinations') or []
    return os_mailbox['id'] in destinations
e230dbc91aad7b62025361818ed4ffe61b3df33b
27,446
def units_map(param, mm=False):
    """ Units of parameters """
    L = "cm"
    if mm:
        L = "mm"
    if param in ("temps", "Tl", "Tt", "T"):
        unit = " [mK]"
    elif param[0] == "I":
        unit = " [A]"
    elif (
        param[:2] == "r0"
        or param[0] in ("L", "W", "R", "A")
        or param in ("centers", "sigmas", "D_ph", "width", "d")
    ):
        unit = " [" + L + "]"
    elif param[:2] in ("dt", "t0") or param in ("tcharge", "delay", "tmax", "tau"):
        unit = r" [$\mathrm{\mu s}$]"
    elif param in ("v0", "vrecoil"):
        unit = r" [$\mathrm{" + L + r"~\mu s^{-1}}$]"
    elif param in ("meanKs", "thermKs", "kinetics"):
        unit = r" [$\mathrm{kg~" + L + r"^2~\mu s^{-2}}$]"
    elif param in ("ts", "t", "times", "time"):
        unit = r" [$\mathrm{\mu s}$]"
    else:
        unit = ""
    return unit
357fb51de327e15d7fceb23023916036a48e128b
27,449
def get_sections_in_range(sections, time_range):
    """get sections in a certain time range (e.g., in the train_ranges)

    Args:
        sections: list of tuples (<onset>, <offset>) or
            (<label>, <onset>, <offset>)
        time_range: (list of) tuple(s) (<onset>, <offset>)

    Returns:
        list of (<onset>, <offset>) tuples clipped to the given range(s)
    """
    if isinstance(time_range[0], (list, tuple)):
        return [
            section
            for time_range_ in time_range
            for section in get_sections_in_range(sections, time_range_)
        ]
    else:
        return [
            (
                max(time_range[-2], section[-2]),
                min(time_range[-1], section[-1])
            )
            for section in sections
            if (
                section[-1] >= time_range[-2]
                and section[-2] <= time_range[-1]
            )
        ]
044ca7ddc3f4cf71077bc6ef236fcb396f73da62
27,450
def snap(point, shape, snapRange=20):
    """ snap 'point' if within 'snapRange' from the border defined by 'shape' """
    snapped = list(point)
    if snapped[0] < snapRange:
        snapped[0] = 0
    if snapped[0] > shape[1] - snapRange:
        snapped[0] = shape[1]
    if snapped[1] < snapRange:
        snapped[1] = 0
    if snapped[1] > shape[0] - snapRange:
        snapped[1] = shape[0]
    return tuple(snapped)
42c0b6e124944f327489767179fc4fc422cbaedc
27,451
def make_front_matter(title, weight):
    """Makes the front matter needed for Hugo."""
    return f"""---
title: {title}
linkTitle: {title}
weight: {weight+1}
type: docs
---

"""
8d51b39fae382a579fdbeedcc28fd9955368fe01
27,452
def collide_parent_tree(widget, x, y):
    """Returns whether (x, y) collide with the widget and all its parents."""
    if not widget.collide_point(x, y):
        return False

    parent = widget.parent
    while parent and hasattr(parent, 'to_parent'):
        # transform into the parent's coordinate space
        x, y = parent.to_parent(x, y)
        if not parent.collide_point(x, y):
            return False
        parent = parent.parent
    return True
5019692635194b9313811ed6a8caffe891965bb8
27,453
import os
import csv


def load_auto_reply_settings(auto_reply_for_non_player_file):
    """Read the keyword CSV file and return a keyword dict."""
    if os.path.exists(auto_reply_for_non_player_file):
        # {number: [reply type, reply file name or content, video description]}
        auto_reply_dict = dict()
        with open(auto_reply_for_non_player_file, 'r') as f:
            # rows: [id, reply type, reply file name or content, video description]
            auto_reply_list = csv.reader(f)
            # use an auto-generated counter, in case ids in the CSV are duplicated
            number = 0
            for auto_reply_row in auto_reply_list:
                content_type = auto_reply_row[1]
                content_data = auto_reply_row[2]
                video_desc = auto_reply_row[3]
                # '图片' means "image"; make sure image entries carry a .jpg suffix
                if content_type == '图片' and content_data[-4:] != '.jpg':
                    content_data += '.jpg'
                auto_reply_dict[number] = [content_type, content_data, video_desc]
                number += 1
        return auto_reply_dict
    else:
        return False
c7cf7d7ea8d054ddfd3551725687b33af6ff5959
27,455
def get_median(distribution):
    """
    Median lies exactly midway of your distribution, when arranged in an order.

    Parameter: a list containing the distribution of the sample or population

    Returns: the median of the distribution
    """
    n = len(distribution)  # distribution size

    # for median, first sort the list
    distribution.sort()

    # next, compute the median based on `n`
    mid = int(n / 2)
    if not n % 2:
        median = (distribution[mid] + distribution[mid - 1]) / 2
    else:
        median = distribution[mid]
    return median
06c8f3f5735d80a406e9db72aca3db73812f92aa
27,456
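A usage sketch: even-sized samples average the two middle values (note that the function sorts its argument in place).

assert get_median([3, 1, 2, 4]) == 2.5
assert get_median([3, 1, 2]) == 2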
import sys
import traceback


def extract_function_name():
    """
    Extracts failing function name from Traceback

    by Alex Martelli
    http://stackoverflow.com/questions/2380073/how-to-identify-what-function-call-raise-an-exception-in-python
    """
    tb = sys.exc_info()[-1]
    stk = traceback.extract_tb(tb, 1)
    fname = stk[0][3]
    return fname
4559ee47370089c72225aa76b6dc089de835bdac
27,458
def concat(s):
    """This function takes a more user friendly regex (E.g: 'abc'), and
    inserts '.' concat operators where appropriate. (E.g: 'a.b.c')"""
    my_list = list(s)[::-1]  # convert regex string to a reverse ordered list
    # characters with special rules
    special_characters = ['*', '|', '(', ')', '+']
    output = []  # the compiler friendly regular expression (E.g: 'a.b.c')
    while my_list:  # iterate over the user friendly regex
        c = my_list.pop()
        if len(output) == 0:  # always append the first character from the list
            output.append(c)
        elif c not in special_characters:  # if c is a normal character
            # if the previous character is non-special, *, or +
            if output[-1] not in special_characters or output[-1] == '*' \
                    or output[-1] == '+':
                output.append('.')  # preface c with a . operator
                output.append(c)
            else:
                output.append(c)
        elif c == '*' or c == '|' or c == '+':
            output.append(c)
        elif c == '(':
            if output[-1] != '|' and output[-1] != '(' and output[-1] != '.':
                output.append('.')
                output.append(c)
            else:
                output.append(c)
        else:
            output.append(c)
    return ''.join(output)
6d413e3f00f000d6fca3083bf116ea14dbc7b962
27,459
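A usage sketch: literals get explicit '.' concatenation operators, while alternation and repetition operators are left alone.

assert concat("ab|c*") == "a.b|c*"
assert concat("a(bc)") == "a.(b.c)"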
def get_src_path_root(src_path: str) -> str:
    """returns the root directory of a path (represented as a string)"""
    if "\\" in src_path:
        return src_path.split("\\", 1)[0]
    elif "/" in src_path:
        return src_path.split("/", 1)[0]
    return src_path
8df673721aa505f1647871b8df25ccabd0402fd9
27,460
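A usage sketch covering both separator styles.

assert get_src_path_root("src/pkg/mod.py") == "src"
assert get_src_path_root("src\\pkg\\mod.py") == "src"
assert get_src_path_root("setup.py") == "setup.py"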
def posok(state, x):
    """Check whether column x is a valid position for the next queen, given
    the columns of the queens already placed in `state` (no shared column or
    diagonal)."""
    y = len(state)
    for i in range(y):
        if abs(x - state[i]) in (0, y - i):
            return False
    return True
5d5e99876b6c8dbf7bb4618c23839432f3a9edb3
27,461
def numpow(series, n, m):
    """
    Power-weighted sum over natural numbers.

    Formula:
        numpow(x, n, m) = n^m * x + (n - 1)^m * x.shift(1)
                          + (n - 2)^m * x.shift(2) + ...
                          + 2^m * x.shift(n - 2) + 1^m * x.shift(n - 1)

    Note: if n is valid but the series currently has fewer than n elements,
    the function returns a NaN series.

    Args:
        series (pandas.Series): data series
        n (int): natural number
        m (int): real number

    Returns:
        pandas.Series: the power-weighted sum series

    Example::

        from tqsdk import TqApi, TqSim, tafunc
        api = TqApi(TqSim())
        klines = api.get_kline_serial("CFFEX.IF1908", 24 * 60 * 60)
        numpow = tafunc.numpow(klines.close, 5, 2)
        print(list(numpow))
    """
    numpow_data = sum((n - i) ** m * series.shift(i) for i in range(n))
    return numpow_data
6dbd88cbe06f03bf32a3beaef6c41dcf39ec1a1e
27,463
def clean_nan(data, data_column):
    """Takes pandas dataframe of data and removes all rows containing NaN"""
    data.dropna(subset=[data_column, 'Quality'], inplace=True)
    return data
3b41fad6b60951f57dd309f5bc5920ca467652f1
27,464
import torch


def calc_pairwise_distance(X, Y):
    """
    computes pairwise distance between each element

    Args:
        X: [N, D]
        Y: [M, D]

    Returns:
        dist: [N, M] matrix of euclidean distances
    """
    rx = X.pow(2).sum(dim=1).reshape((-1, 1))
    ry = Y.pow(2).sum(dim=1).reshape((-1, 1))
    dist = rx - 2.0 * X.matmul(Y.t()) + ry.t()
    return torch.sqrt(dist)
3a8c502163ea2788cd4fc79d44d6f2d2f04d24d8
27,465
def response_timeout():
    """Server timeout response."""
    return b"SPAMD/1.0 79 Timeout: (30 second timeout while trying to CHECK)\r\n"
92d1a467821919cb2d1aa1f23e64dd2df783bd9e
27,466
def str_to_address(v):
    """
    :type v: str
    """
    if not v:
        return None
    host, port = v.split(":", 1)
    return host, int(port)
843d324b159808dc7addecc2a6b19eef48c555c9
27,467
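A usage sketch.

assert str_to_address("localhost:8080") == ("localhost", 8080)
assert str_to_address("") is None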
def get_size(driver, size_id):
    """
    Return a ``NodeSize`` corresponding to a given id.

    :param driver: The libcloud driver to query for sizes.
    """
    try:
        return [s for s in driver.list_sizes() if s.id == size_id][0]
    except IndexError:
        raise ValueError("Unknown size.", size_id)
197d0bcec335d5d03aa4f1a71e856b027ed7db3a
27,468
def samefile(path1, path2):
    """Return True if both pathname arguments refer to the same file or directory.

    :type path1: bytes | unicode | os.PathLike
    :type path2: bytes | unicode | os.PathLike
    :rtype: bool
    """
    # Stub: always reports the paths as different.
    return False
16212c294b76cc0bfe335fde5b060204953e03ef
27,469
from typing import Any


def non_english_lang_code(request: Any) -> str:
    """
    Get all non-English language codes, but one per request.

    NOTE This is not currently used. Once a set of non-English language codes
    which are known to pass the test are identified, this can be used to test
    all of them one at a time.
    """
    return request.param
80ad26b3ded55e34b61b3b9fdb0b83b62c34dc21
27,470
import time


def createDateTimeString(now=None):
    """Return datetime as string (e.g. for saving results)."""
    if now is None:
        now = time.localtime()
    return str(now.tm_year) + str(now.tm_mon).zfill(2) + \
        str(now.tm_mday).zfill(2) + '-' + \
        str(now.tm_hour).zfill(2) + '.' + \
        str(now.tm_min).zfill(2)
340d4c2e044622d3602b454bf601fcf1bd12ae12
27,471
def pssm_recovery_range(struct, pssm_map, min, max):
    """return percent pssm recovery for residues within a range of b factors"""
    pssm_recovery = 0.0
    struct_size = 0.0
    for residue in struct.get_residues():
        score = residue.get_list()[1].get_bfactor()
        if score >= min and score <= max:
            residue_name = residue.get_resname()
            residue_num = residue.get_id()[1]
            status = pssm_map.conserved(residue_num, residue_name)
            if status:
                pssm_recovery += 1.0
            struct_size += 1.0
    return pssm_recovery / struct_size
f2a0157d4b561bf782d87cf8c8f41abf68fb188c
27,473
def fully_qualified_name(cls):
    """Return Fully Qualified name along with module"""
    return ".".join([cls.__module__, cls.__name__])
118e1acae269cae23f848ed0f983f0e58c2b2359
27,474
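A usage sketch against a stdlib class.

import collections
assert fully_qualified_name(collections.OrderedDict) == "collections.OrderedDict"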
def check_navn(navn, limit=2,
               remove='Ja Nei Nå Dem De Deres Unnskyld Ikke Ah Hmm Javel Akkurat Jaja Jaha'.split()):
    """Removes all items in navn with frequency below limit, all-uppercase
    words, and all words in the list 'remove'"""
    r = {x: navn[x] for x in navn
         if navn[x] > limit and x.upper() != x and x not in remove}
    return r
de70a4e7b38e85cfcd794c55030464243ca76304
27,475
def map_cursor_to_dict(cursor, columns):
    """ Map Cursor results to the dict """
    results = []
    for row in cursor.fetchall():
        results.append(dict(zip(columns, row)))
    return results
b388a03e9bc2b2e99b8277f4ec16ab379cc4cb64
27,476
def point2str(pnt):
    """
    point2str( pnt )

    format a 3d data point (list of 3 floating values) for output to a .scad
    file. Also used to do equality comparison between data points.

    @param pnt - list containing the x,y,z data point coordinates
    @returns '[{x}, {y}, {z}]' with coordinate values formatted by specifications
    """
    # IDEA use command line parameter to set the precision
    # IDEA have call time precision, so internal use (for point comparison) can be higher
    return ''.join(['[', ', '.join(['%.9g' % c for c in pnt]), ']'])
8d47a8d3c29be082ec3a45250f845f1e00633fab
27,477
def collect_params(model):
    """Collect optim params for use with dent."""
    params = []
    for p in model.parameters():
        if p.requires_grad:
            params.append(p)
    return params
50245b8f331625ea2d68e37b70ba3a2a8bbe6350
27,479
def _get_extra_configmaps_and_metrics(resource_name, resource_qsets):
    """ configmap used for logbeat """
    extra_configmaps, metrics = [], []
    for res in resource_qsets:
        cms, mts = res.get_extra_configmaps_and_metrics(resource_name)
        extra_configmaps.extend(cms)
        metrics.extend(mts)
    return extra_configmaps, metrics
9ba3f2b65de06fda84464a567c8fa320d0abd007
27,480
import hashlib


def sha384(s):
    """Compute the SHA384 of a given string."""
    return hashlib.sha384(s.encode("utf-8")).hexdigest()
e62256c30c781f85f6ed56a1086747b83637229f
27,481
def compute_momentum(df, window=1):
    """Return momentum."""
    df['Momentum'] = (df['Price'] / df['Price'].shift(window) - 1)
    return df.fillna(0)
fb74bbd365cae4eaa3b35d1dd6c7c6ee5b9211eb
27,482
def compute_center(detections):
    """Compute the center for each detection.

    Args:
        detections: A matrix of shape N * 4, recording the pixel coordinate
            of each detection bounding box (inclusive).

    Returns:
        center: A matrix of shape N * 2, representing the (x, y) coordinate
            for each input bounding box center.
    """
    center = detections[:, [0, 1]] + detections[:, [2, 3]]
    center /= 2.
    return center
794b64de7ef7b1327bb0bb90982ac8d67c0a0fea
27,484
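A usage sketch with NumPy; the function only relies on fancy indexing and in-place division, so any array library with that interface should work.

import numpy as np
boxes = np.array([[0.0, 0.0, 10.0, 20.0]])  # (x1, y1, x2, y2)
print(compute_center(boxes))  # [[ 5. 10.]]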
import re
import os


def _get_version():
    """Read the __version__ value from src/spark_validation/version.py.

    We can't import the package because we're the installation script for the
    package, so we use regex and read the python file as a raw text file.
    """
    version_regex = re.compile(
        r"""^__version__\s=\s['"](?P<version>.*?)['"] """,
        re.MULTILINE | re.VERBOSE
    )
    version_file = os.path.join("src", "spark_validation", "version.py")
    with open(version_file) as handle:
        lines = handle.read()
    result = version_regex.search(lines)
    if result:
        return result.groupdict()["version"]
    raise ValueError("Unable to determine __version__")
dea500da7a425ec20b6534cb07894904d6a63f93
27,485
def subdict(d, nested_keys=None):
    """:return the dict nested hierarchically indicated by nested_keys,
    or {} if the key list is incorrect

    :param nested_keys: list of keys or a single key
    """
    if not isinstance(nested_keys, (tuple, list)):
        nested_keys = [nested_keys]
    for k in nested_keys:
        try:
            d = d[k]
        except Exception:
            return {}
    return d
ceabc9ebfaccfe857e827698192a1c231db7bcde
27,486
import re


def LegalizeName(name):
    """
    Return a file name suitable for uploading to Google Storage.

    The names of such files cannot contain dashes or other non-identifier
    characters.
    """
    return re.sub(r'[^A-Za-z0-9_/.]', '_', name)
1f5028b93b919beeeb2c2b25c8d29e0e4ac00e7a
27,488