content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def normalize_dir(dir_name):
    """Strip a single trailing path separator from ``dir_name``, if present.

    :param dir_name: directory path to normalize.
    :return: the normalized directory name.

    Bug fix: the original indexed ``dir_name[-1]`` and raised IndexError
    on an empty string; ``endswith`` handles that case safely.
    """
    if dir_name.endswith(os.sep):
        dir_name = dir_name[:-1]
    return dir_name
c69b79ad31156742f8c22ffc59b4436d738a6ba5
24,845
def pbc(rnew, rold):
    """Apply periodic boundary conditions for an MSD calculation.

    Args:
        rnew (float): New fractional atomic position.
        rold (float): Previous fractional atomic position.

    Returns:
        tuple: ``(cross, rnew)`` where ``cross`` (bool) flags whether the
        atom crossed a periodic boundary and ``rnew`` (float) is the
        unwrapped position.
    """
    # Integer number of cell lengths spanned by the apparent displacement.
    jump = int(round(abs(rold - rnew), 0))
    displacement = rnew - rold
    crossed = False
    if jump < 2:
        # Ordinary single-boundary crossing: shift by one cell length.
        if displacement > 0.5:
            rnew -= 1.0
            crossed = True
        elif -displacement > 0.5:
            rnew += 1.0
            crossed = True
    else:
        # Multi-cell jump: shift by the full number of cells spanned.
        if displacement > 0.5:
            rnew -= jump
            crossed = True
        elif -displacement > 0.5:
            rnew += jump
            crossed = True
    return crossed, rnew
67260f98371fbb95d2eca5958f75d80c76b89371
24,847
import requests


def submit_task(task: dict, username: str, password: str) -> str:
    """Submit a task using the AppEEARS API and return its task ID.

    Parameters
    ----------
    task: dictionary following the AppEEARS API task object format
    username: Earthdata username
    password: Earthdata password

    Returns
    -------
    Task ID

    Notes
    -----
    For the task object convention and all the properties that can be
    specified, see:
    https://lpdaacsvc.cr.usgs.gov/appeears/api/#task-object
    """
    api_url = "https://lpdaacsvc.cr.usgs.gov/appeears/api"
    try:
        # get an authorization token and build the headers
        r = requests.post(f"{api_url}/login", auth=(username, password))
        r.raise_for_status()
        token = r.json()["token"]
        headers = {"Authorization": f"Bearer {token}"}
        # submit the task, then log out to dispose of the authentication
        r = requests.post(f"{api_url}/task", json=task, headers=headers)
        requests.post(f"{api_url}/logout", headers=headers)
        # Bug fix: surface an HTTP error from the task submission itself
        # instead of failing later with a KeyError on "task_id".
        r.raise_for_status()
        return r.json()["task_id"]
    except requests.HTTPError as err:
        # chain the original error for debuggability
        raise Exception(f"Error submitting task. {err}") from err
fd75b86b5258f9b4b0abd02f8fb289a7467149df
24,848
from typing import Callable
import math


def get_sigmoid_annealing_function(epoch_checkpoint: int, temperature: float) -> Callable:
    """Build a sigmoid annealing schedule.

    The turning point (middle) sits at ``epoch_checkpoint``;
    ``temperature`` controls the speed of change.
    """
    def _annealing(epoch: int, threshold: float) -> float:
        """Fraction of ``threshold`` reached at ``epoch``."""
        z = (epoch - epoch_checkpoint) / temperature
        # ordinary sigmoid, scaled by the threshold
        return 1 / (1 + math.exp(-z)) * threshold

    return _annealing
08b9bf07912a7b79a830acb6896c015aa430c6ee
24,850
def compare_int(entry, num):
    """Return True if the integer value of ``entry`` equals ``num``, False otherwise."""
    # Return the comparison directly instead of if/else returning literals.
    return int(entry) == num
e779829b0d9a8343d3c48e6a66542e8e6ee62494
24,851
def _prepare_params(params): """return params as SmashGG friendly query string""" query_string = '' if len(params) == 0: return query_string prefix = '?expand[]=' query_string = prefix + '&expand[]='.join(params) return query_string
9fc0573961d50536ee28ae576ac030197eae0cf2
24,852
def load_settings(path='api/settings'):
    """Load ``key=value`` user settings (located under `api/`) into a dict.

    Blank lines are skipped; keys and values are whitespace-stripped.
    """
    settings = {}
    with open(path, 'r', encoding='utf-8') as fh:
        for raw in fh:
            if not raw.strip():
                continue
            key, value = raw.split('=')
            settings[key.strip()] = value.strip()
    return settings
1a89c3f80f0a8b4013429fbb060b453f5f447aa5
24,853
def get_mask(source, source_lengths):
    """Build a padding mask for a batch of sequences.

    Args:
        source: [B, C, T] tensor.
        source_lengths: [B] valid lengths per batch element.

    Returns:
        mask: [B, 1, T] tensor — 1 inside each sequence, 0 past its length.
    """
    batch, _, frames = source.size()
    mask = source.new_ones((batch, 1, frames))
    for idx, length in enumerate(source_lengths):
        mask[idx, :, length:] = 0
    return mask
8466ff5113ca22488b4218f86c43bfea248197d1
24,855
import glob


def get_all_html_files(directory):
    """Return the list of ``.html`` files directly inside ``directory`` (non-recursive)."""
    pattern = directory + "/*.html"
    return glob.glob(pattern)
5e92a8b4fc52ea63e5c65c5eb7b2487556b08a3d
24,857
from datetime import datetime
import sys


def iso2time(iso_val: str) -> datetime:
    """Parse an ISO-format datetime string into a datetime.

    Args:
        iso_val: date-time string, e.g. ``2020-03-12T11:49:31.392460``.

    Returns:
        datetime

    Falls back to ``from_iso_datetime`` on Python < 3.7 where
    ``datetime.fromisoformat`` is unavailable (that helper must be
    provided elsewhere in the module).
    """
    if sys.version_info >= (3, 7):
        return datetime.fromisoformat(iso_val)
    return from_iso_datetime(iso_val)
61107a2ae47322a802d8dd3eae7beaa628965bbf
24,858
def fact(n: int) -> int:
    """Return n! computed iteratively.

    >>> fact(0)
    1
    >>> fact(1)
    1
    >>> fact(2)
    2
    >>> fact(3)
    6
    >>> fact(4)
    24

    Raises:
        ValueError: if ``n`` is negative (the original dict-dispatch
        version raised an obscure ``KeyError: True`` instead).
    """
    # The original built a {condition: lambda} dict and recursed; an
    # iterative loop avoids recursion-depth limits and is clearer.
    if n < 0:
        raise ValueError("factorial is undefined for negative n")
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
232ce6d77cf99fedf720d2bf3a14b2c315657619
24,860
def fraction_word_to_num(number_sentence):
    """Transfer an English expression of a fraction to a number.

    Numerator and denominator are not more than 10.

    Args:
        number_sentence (str): english expression, e.g. ``'two-thirds'``.

    Returns:
        (float): number

    Raises:
        KeyError: if the expression is not a recognized fraction word.
    """
    # The original spelled out ~150 dict entries by hand (error-prone);
    # generating them from numerator/denominator word tables produces
    # exactly the same key set and values.
    numerators = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
                  'six': 6, 'seven': 7, 'eight': 8, 'nine': 9}
    # NOTE: 'quarter' and 'forth' deliberately have no plural form,
    # matching the original table.
    denominators = {'third': 3, 'thirds': 3, 'quarter': 4, 'forth': 4,
                    'fourth': 4, 'fourths': 4, 'fifth': 5, 'fifths': 5,
                    'sixth': 6, 'sixths': 6, 'seventh': 7, 'sevenths': 7,
                    'eighth': 8, 'eighths': 8, 'ninth': 9, 'ninths': 9,
                    'tenth': 10, 'tenths': 10}
    fraction = {'{}-{}'.format(num_word, den_word): num_val / den_val
                for num_word, num_val in numerators.items()
                for den_word, den_val in denominators.items()}
    return fraction[number_sentence.lower()]
2b9125589db65c43e768c1fe49fc5f8555ee7b84
24,862
import csv


def load_csv(csv_filename):
    """Load a csv file generated by ``generate_training_testing_csv.py``
    and parse its contents into ingredients and labels lists.

    Parameters
    ----------
    csv_filename : str
        Name of csv file

    Returns
    -------
    list[str]
        List of ingredient strings
    list[dict]
        List of dictionaries, each dictionary the ingredient labels
    """
    ingredients = []
    labels = []
    with open(csv_filename, 'r') as fh:
        rows = csv.reader(fh)
        next(rows)  # discard the header row
        for row in rows:
            ingredients.append(row[0])
            labels.append({
                'quantity': row[1].strip(),
                'unit': row[2].strip(),
                'item': row[3].strip(),
                'comment': row[4].strip(),
            })
    return ingredients, labels
704151f36424f9e72ecb1d0dce9f8f7e8f77c1f1
24,870
def largest_common_substring(query, target, max_overhang):
    """Return the largest common substring between `query` and `target`.

    Finds the longest substring of ``query`` contained in ``target``.
    Returns False when the common substring would leave flanking regions
    of ``query`` larger than ``max_overhang``; otherwise returns the
    location ``(start, end)`` of the substring within ``query``.

    Parameters:
    -----------
    query (str)
        The sequence to be found in target (minus some overhangs possibly).
    target (str)
        The sequence in which to find `query`.
    max_overhang
        Maximal size allowed for the flanking regions of `query` that
        would not be contained in `target`.

    Examples
    --------
    >>> seqA = '-----oooooooo'
    >>> seqB = 'oooooo-----tttt'
    >>> largest_common_substring(seqA, seqA, 80)   # == (0, 12)
    >>> largest_common_substring(seqA, seqB, 80)   # == (5, 11)

    Notes:
    ------
    Intended for finding whether `query` can be extracted from `target`
    using PCR. See the PcrExtractionStation implementation in
    DnaSupplier.py.
    """
    # Start from the central region of `query` (as small as max_overhang
    # allows) and grow it outwards on both sides while it still matches.
    overhang = min(max_overhang, int(len(query) / 2))
    start = overhang
    end = len(query) - overhang
    if query[start:end] not in target:
        return False
    # Grow leftwards until the match breaks (or the start is reached) ...
    while start >= 0 and query[start:end] in target:
        start -= 1
    start += 1
    # ... then rightwards likewise.
    while end < len(query) and query[start:end] in target:
        end += 1
    end -= 1
    return start, end
4e0e1e1ee9d5d37fe5e56601fcedab66621ac9fb
24,871
import os


def only_benchmark_models(path: str) -> bool:
    """Predicate for ``list_config_files``: True for files that are
    benchmarks (named ``eval_*``) or models used in benchmarks
    (living in a ``.../models`` directory)."""
    directory, filename = os.path.split(path)
    if filename.startswith("eval_"):
        return True
    return directory.endswith("models")
470afcc56118a332d01d0b1f164f08f1325f3190
24,872
def clean_predictions(predictions):
    """
    Clean up the predicted labels by changing impossible combinations
    (f.e. 'R-S', 'R-E' should be 'R-B', 'R-E').

    :param predictions: The predicted labels; ``predictions[i][0]`` is the
        list of labels for sentence ``i`` (modified in place).
    :return: The cleaned predicted labels.
    """
    relation_labels = ('R-B', 'R-E', 'R-I', 'R-S')
    for line_index in range(len(predictions)):
        sentence = predictions[line_index][0]
        relation_started_flag = False
        # NOTE(review): the final label is never rewritten (the loop stops
        # at len - 1) — preserved from the original; confirm intentional.
        for label_index in range(len(sentence) - 1):
            cur_label = sentence[label_index]
            if cur_label not in relation_labels:
                continue
            # Perf fix: any() short-circuits on the first later relation
            # label; the original always scanned the whole remainder.
            upcoming = any(label in relation_labels
                           for label in sentence[label_index + 1:])
            if relation_started_flag:
                cur_label = u'R-I' if upcoming else u'R-E'
            else:
                cur_label = u'R-B' if upcoming else u'R-S'
            relation_started_flag = True
            predictions[line_index][0][label_index] = cur_label
    return predictions
dff5f5c0dca463693850fcc0b43aa841c2c3299a
24,874
def normalize_time(timestamp):
    """Normalize a datetime in an arbitrary timezone to naive UTC.

    Naive datetimes (and those with a zero UTC offset) are returned
    unchanged.
    """
    delta = timestamp.utcoffset()
    if not delta:
        return timestamp
    return timestamp.replace(tzinfo=None) - delta
bd8b7f0417afabe8c58eba3393e7d466c35a70be
24,875
def PopulationDynamics(population, fitness):
    """Determine the distribution of species in the next generation.

    Each species' share is weighted by its fitness and renormalized;
    if total fitness-weighted mass is zero, the population is returned
    unchanged.
    """
    weighted = [share * fit
                for share, fit in zip(population, fitness(population))]
    total = sum(weighted)
    if total == 0.0:
        return population
    return tuple(share / total for share in weighted)
b7f928c08e9275c7a314a4be92445588df02abc6
24,876
def extra(column: list, data_property: str, method):
    """Attach a looked-up value to an entity, or to each entity in a list.

    :param column: [source_attr, query_kwarg] — table A[column[0]] maps
        to table B[column[1]]
    :param data_property: table A property name to set on each entity
    :param method: query method, invoked as ``method(**{column[1]: value})``
    :return: enricher function accepting an entity or a list of entities
    """
    src_attr, query_key = column[0], column[1]

    def e(data_entity):
        # Bug fix: the original list branch read the attribute from the
        # *list* (``data_entity``) instead of each individual entity,
        # raising AttributeError for any list input.
        entities = data_entity if isinstance(data_entity, list) else [data_entity]
        for entity in entities:
            value = method(**{query_key: getattr(entity, src_attr)})
            setattr(entity, data_property, value)
        return data_entity

    return e
da3c8a2eaf6df1ca026ea48fd7fd36abf3b8b4a2
24,877
def round_up(n, size):
    """Round integer ``n`` up to the next multiple of ``size``.

    ``size`` must be a power of 2.
    """
    assert size & (size - 1) == 0, "size is not power of 2"
    mask = size - 1
    # Setting the low bits of (n - 1) and adding one lands on the
    # next multiple of size.
    return ((n - 1) | mask) + 1
02f34fd5f2c059a9ee1b657f099b4699d90dfc01
24,878
def recombination(temperature):
    """
    Calculate the helium singlet and triplet recombination rates for a
    gas at a certain temperature.

    Parameters
    ----------
    temperature (``float``):
        Isothermal temperature of the upper atmosphere in unit of Kelvin.

    Returns
    -------
    alpha_rec_1 (``float``):
        Recombination rate of helium singlet in units of cm ** 3 / s.
    alpha_rec_3 (``float``):
        Recombination rate of helium triplet in units of cm ** 3 / s.
    """
    # Power-law fits from Benjamin et al. (1999, ADS:1999ApJ...514..307B)
    t4 = temperature / 1E4
    alpha_rec_1 = 1.54E-13 * t4 ** (-0.486)
    alpha_rec_3 = 2.10E-13 * t4 ** (-0.778)
    return alpha_rec_1, alpha_rec_3
c09e887053794a4c00b0daa3a48a57e563d89992
24,879
import torch


def batch_quadratic_form(x: torch.Tensor, A: torch.Tensor) -> torch.Tensor:
    """Compute the quadratic form x^T * A * x for a batched input x.

    Vectorized equivalent of ``out[i] = x[i].t() @ A @ x[i]``
    (see https://stackoverflow.com/questions/18541851).

    x shape: (B, N); A shape: (N, N); output shape: (B).
    """
    Ax = torch.matmul(x, A)
    return (Ax * x).sum(dim=1)
4e639fc210e944cdc6726c2daab85e486de58134
24,880
def _codepoint_is_ascii(ch): """ Returns true if a codepoint is in the ASCII range """ return ch < 128
931a3a67956bffd28f73e938e5d312951a2b3b80
24,881
import re


def valid_email(email):
    """Check if an entered email address is valid.

    Requires some text, then "@", then a domain containing a literal "."
    (e.g. ``user@example.com``).

    Args:
        email (str): input email address

    Returns:
        bool: True if the input is a valid email and False otherwise
    """
    # Ensure email is a string (isinstance instead of type() ==)
    if not isinstance(email, str):
        return False
    # Bug fix: the '.' was previously unescaped, so it matched *any*
    # character and accepted addresses without a dot in the domain.
    return re.match(r"[^@]+@[^@]+\.[^@]+", email) is not None
a675856a7bb8dae87a77990c9e752b0bb4177c9b
24,882
def _relpath(path, basepath): """Generate path part of relative reference. based on: cpython/Lib/posixpath.py:relpath """ path = [x for x in path.split('/')] basepath = [x for x in basepath.split('/')][:-1] i = 0 for index in range(min(len(path), len(basepath))): if path[index] == basepath[index]: i += 1 else: break parent_dirs = len(basepath) - i relpath = (['..'] * parent_dirs) + path[i:] if relpath == ['']: return '.' # gray zone: # if you want to remove the last slash, you have to climb up one directory. # 'http://h/p'(url), 'http://h/p/'(baseurl) -> '../p' if relpath == []: return '../' + path[-1] # gray zone generalized: # 'http://h/p'(url), 'http://h/p/p2'(baseurl) -> '../../p' if all((p == '..' for p in relpath)): return ('../' * (len(relpath) + 1)) + path[-1] # the first segment of a relative-path reference cannot contain ':'. # change e.g. 'aa:bb' to './aa:bb' if ':' in relpath[0]: relpath.insert(0, '.') return '/'.join(relpath)
6d5b6a24d28791a616de24b2841f0f61ac84e366
24,883
def get_iou(gt_bbx, pred_bbx):
    """
    Calculate the Intersection over Union (IoU) of two bounding boxes.

    Based on: https://stackoverflow.com/questions/25349178/
    calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation

    Parameters
    ----------
    gt_bbx : dict
        Keys: {'xmin', 'xmax', 'ymin', 'ymax'}
        The (xmin, ymin) position is at the top left corner,
        the (xmax, ymax) position is at the bottom right corner
    pred_bbx : dict
        Keys: {'xmin', 'xmax', 'ymin', 'ymax'}
        The (xmin, ymin) position is at the top left corner,
        the (xmax, ymax) position is at the bottom right corner

    Returns
    -------
    (iou, intersection_area) : (float in [0, 1], number)
    """
    assert gt_bbx['xmin'] < gt_bbx['xmax']
    assert gt_bbx['ymin'] < gt_bbx['ymax']
    assert pred_bbx['xmin'] < pred_bbx['xmax']
    assert pred_bbx['ymin'] < pred_bbx['ymax']

    # determine the coordinates of the intersection rectangle
    x_left = max(gt_bbx['xmin'], pred_bbx['xmin'])
    y_top = max(gt_bbx['ymin'], pred_bbx['ymin'])
    x_right = min(gt_bbx['xmax'], pred_bbx['xmax'])
    y_bottom = min(gt_bbx['ymax'], pred_bbx['ymax'])

    # Bug fix: the original set iou = 0.0 for the no-overlap case but then
    # fell through, computed a (possibly negative) "intersection area" and
    # overwrote iou with garbage. Return early instead.
    if (x_right < x_left) or (y_bottom < y_top):
        return 0.0, 0

    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box
    intersection_area = (x_right - x_left) * (y_bottom - y_top)

    # compute the area of both BBs
    gt_bbx_area = (gt_bbx['xmax'] - gt_bbx['xmin']) * \
        (gt_bbx['ymax'] - gt_bbx['ymin'])
    pred_bbx_area = (pred_bbx['xmax'] - pred_bbx['xmin']) * \
        (pred_bbx['ymax'] - pred_bbx['ymin'])

    # intersection over union: intersection divided by the sum of both
    # areas minus the intersection area
    iou = intersection_area / \
        float(gt_bbx_area + pred_bbx_area - intersection_area)
    assert iou >= 0.0
    assert iou <= 1.0
    return iou, intersection_area
20ce6ed931e1a26ed078113191c6de06c4030510
24,884
def directory_get_basename(path: str) -> str:
    """Return the last directory name in ``path``.

    A trailing '/' is dropped; otherwise, if the path contains a '.'
    (looks like it ends in a file), the final segment is stripped first.
    """
    trimmed = path
    if trimmed.endswith('/'):
        trimmed = trimmed[:-1]
    elif '.' in trimmed:
        trimmed = trimmed[:trimmed.rfind('/')]
    return trimmed[trimmed.rfind('/') + 1:]
69af9336142c88cd705162b2e4522aef2ac95403
24,889
def _return_quantity(quantity, return_quantity, units_out=''): """Helper method to return appropriate unit type Parameters ---------- quantity : :class:`~vunits.quantity.Quantity` obj Quantity object to use return_quantity : bool If True, returns :class:`~vunits.quantity.Quantity` obj. Otherwise, return ``quantity.mag`` units_out : str, optional Units to return. Not required if ``return_quantity`` is True. Returns ------- out : :class:`~vunits.quantity.Quantity` obj or float Value to return based on ``return_quantity``. """ if return_quantity: return quantity else: return quantity(units_out)
de68065aad70134e4e7778e099711013a9930e13
24,890
def _get_currency_pair(currency, native): """ Format a crypto currency with a native one for the Coinbase API. """ return '{}-{}'.format(currency, native)
92c3f43d1661f912a6bb63d14df1a7095eb784f3
24,891
import six


def sanitize_command_output(content):
    """Sanitizes the output got from underlying instances.

    Sanitizes the output by only returning unicode characters, any other
    characters will be ignored, and will also strip down the content of
    unrequired spaces and newlines.
    """
    # six.text_type is `str` on Python 3; decoding with errors='ignore'
    # silently drops bytes that are invalid in the default encoding.
    # NOTE(review): this presumably expects `content` to be bytes —
    # text_type(str, errors=...) would raise TypeError; confirm callers.
    return six.text_type(content, errors='ignore').strip()
55a42507b24d2fcb3993cfa8abda0cd04e4c7718
24,892
import os
import argparse


def DirType(d):
    """argparse type-checker for directory arguments.

    Given a string path to a directory, return its absolute path if it
    exists, is a directory, and is readable; otherwise raise
    ``argparse.ArgumentTypeError``.
    """
    resolved = os.path.abspath(d)
    # Guard-clause style: fail fast on each requirement in turn.
    if not os.path.exists(resolved):
        raise argparse.ArgumentTypeError('DirType:%s does not exist' % resolved)
    if not os.path.isdir(resolved):
        raise argparse.ArgumentTypeError('DirType:%s is not a directory' % resolved)
    if not os.access(resolved, os.R_OK):
        raise argparse.ArgumentTypeError('DirType:%s is not a readable dir' % resolved)
    return resolved
a038d70874622a40d36e7e3f6192c156dc4791ed
24,893
def countmatch(str1, str2, countstr):
    """Check whether ``countstr`` occurs the same number of times in
    ``str1`` and ``str2``."""
    occurrences_a = str1.count(countstr)
    occurrences_b = str2.count(countstr)
    return occurrences_a == occurrences_b
a25f77cc6b847ff6f9b81af33836b3e4a715b056
24,894
def get_histogram_limits(filename):
    """Read histogram file ``filename`` and return the smallest and
    largest values in the first column.

    Assumes tab-separated lines sorted ascending: the first line holds
    the minimum, the last line the maximum.
    """
    hmin = 0
    hmax = 0
    with open(filename, "r") as fh:
        first_fields = fh.readline().split("\t")
        hmin = float(first_fields[0])
        # Keep overwriting hmax so it ends up holding the last line's value.
        for raw in fh:
            hmax = raw.split("\t")[0]
        hmax = float(hmax)
    return (hmin, hmax)
8ceed4f939c40f0266afa9f3218073842578f26f
24,897
def decode_dict(d):
    """Recursively decode bytes keys and values of ``d`` into str.

    Nested dicts are decoded recursively; non-bytes values pass through.
    """
    decoded = {}
    for key, value in d.items():
        new_key = key.decode() if isinstance(key, bytes) else key
        if isinstance(value, bytes):
            new_value = value.decode()
        elif isinstance(value, dict):
            new_value = decode_dict(value)
        else:
            new_value = value
        decoded[new_key] = new_value
    return decoded
1d2bc6665692a42b8a5a0efd24c1a7fbeeaaaa0b
24,898
from datetime import datetime


def str2timestamp(s, fmt='%Y-%m-%d-%H-%M'):
    """Convert string ``s`` (format ``fmt``) into a unix timestamp,
    treating the parsed time as UTC."""
    parsed = datetime.strptime(s, fmt)
    unix_epoch = datetime.utcfromtimestamp(0)
    return (parsed - unix_epoch).total_seconds()
69dd680623da8a61837676b540e1d5c053c8e198
24,899
def is_cached(o, name):
    """Whether a cached property is already computed.

    A ``cached_property`` stores its value in the instance ``__dict__``
    on first access, so presence of the attribute name there means the
    value has been computed.

    Parameters
    ----------
    o : object
        The object the property belongs to.
    name : str
        Name of the property.

    Returns
    -------
    bool
        True iff the property is already computed.
    """
    return name in vars(o)
eb7b1356ded56dddb4cd917b27461e9108bd7b76
24,900
def get_food(request):
    """Simple food request handler.

    Ignores ``request`` and always returns the same greeting string.
    """
    return "Hello there, here's some food!"
0c9942b1e26a5399adbc06ece5d90ebd496ab4cc
24,902
def encode_no_auth(**kwargs):
    """Dummy encoder: accepts any keyword arguments and contributes no
    authentication fields."""
    return {}
5912dc656233e32fb4a354a32fc4279d105cf5b7
24,903
import os


def read_file(file_path: str, as_single_line: bool = False) -> str:
    """Read file content.

    :param file_path: path to the file.
    :param as_single_line: when True, strip ``os.linesep`` from each line,
        folding the file into one line.
    :return: file content.
    """
    with open(file_path, "r") as fh:
        if as_single_line:
            return "".join(line.replace(os.linesep, "") for line in fh)
        return "".join(fh)
21e528d90b5ec403c87c0bcee1e702b49787e160
24,904
def inverse_dead_zone(motor_output, dead_zone):
    """Inverted dead-zone mapping (important for Talon motor controllers).

    Rescales a normalized output so that any non-zero command clears the
    controller's dead zone.
    """
    # Floating-point rounding error workaround: treat tiny values as zero.
    if abs(motor_output) < .00001:
        return 0
    scale = 1 - dead_zone
    if motor_output > 0:
        return motor_output * scale + dead_zone
    return motor_output * scale - dead_zone
7129323f47f34a5ae28d91e5b3128af4804aace7
24,905
import click


def no_going_back(confirmation):
    """Show a confirmation prompt to the user.

    :param confirmation str: the string the user has to enter in order to
        confirm their action; falsy values default to 'yes'.
    :return: True if the user typed the confirmation, False otherwise.
    """
    confirmation = confirmation or 'yes'
    prompt = ('This action cannot be undone! Type "%s" or press Enter '
              'to abort' % confirmation)
    answer = click.prompt(prompt, default='', show_default=False)
    return answer.lower() == str(confirmation)
385c665b4b690e9b80473006b6d1e9536310189f
24,906
def minutes_to_human_duration(minutes_duration):
    """Convert a duration in minutes into a human-readable 'Xh Ymin'
    string; return None when the input is not a number."""
    try:
        hours, minutes = divmod(minutes_duration, 60)
    except TypeError:
        # non-numeric input (e.g. None or str)
        return None
    return "%sh %smin" % (hours, minutes)
22197c568505e366d5d4f6b020f8b61466deb43a
24,907
def chart_filter(df, year=None, month=None, neighbourhood=None, crime=None):
    """
    Filter the crime database into the dataframe the graphs need.

    A default value of None for any filter lets the maps display every
    data point for that dimension; a scalar filters on equality and a
    two-element list filters on the inclusive range (year/month) or the
    set of values (neighbourhood/crime).

    Parameters
    ----------
    df : Pandas Data Frame
        Dataframe of crime data
    year : int or list
        year or years of crime committed to be displayed in the graphs
    month : int or list
        month or months of crime commited to be displayed in the graphs
    neighbourhood : string or list
        neighbourhood or neighbourhoods of where crime occurs
    crime : string or list
        crime or crimes commited to be displayed

    Returns
    -------
    Pandas Data Frame
        A filtered data frame of relevant information
    """
    # Idiom fixes vs the original: `is not None` instead of `!= None`,
    # `isinstance(x, list)` instead of `type(x) == list`.
    filtered_df = df
    if year is not None:
        if isinstance(year, list):
            year_list = list(range(year[0], year[1] + 1))
            filtered_df = filtered_df.query('YEAR == %s' % year_list)
        else:
            filtered_df = filtered_df.query('YEAR == %s' % year)
    if month is not None:
        if isinstance(month, list):
            month_list = list(range(month[0], month[1] + 1))
            filtered_df = filtered_df.query('MONTH == %s' % month_list)
        else:
            filtered_df = filtered_df.query('MONTH == %s' % month)
    if neighbourhood is not None:
        if neighbourhood == []:
            neighbourhood = None
        elif isinstance(neighbourhood, list):
            filtered_df = filtered_df.query('DISTRICT == %s' % neighbourhood)
        else:
            filtered_df = filtered_df.query('DISTRICT == "%s"' % neighbourhood)
    if crime is not None:
        if crime == []:
            crime = None
        elif isinstance(crime, list):
            filtered_df = filtered_df.query('OFFENSE_CODE_GROUP == %s' % crime)
        else:
            filtered_df = filtered_df.query('OFFENSE_CODE_GROUP == "%s"' % crime)
    return filtered_df
0958af7abd9d302adc2aff91b081d309a88a505e
24,908
def fibonacciAtIndex(index):
    """Return the Fibonacci number at the given 1-based index
    (1, 1, 2, 3, 5, ...).

    Iterative two-variable implementation to reduce memory usage and
    runtime versus recursion. Any index <= 2 returns 1, matching the
    original behavior.
    """
    # Cleanup: removed the commented-out `position` bookkeeping and the
    # temp-variable shuffle in favor of a tuple swap.
    a1, a2 = 1, 1
    while index > 2:
        a1, a2 = a2, a1 + a2
        index -= 1
    return a2
2dde77a70bfe8d1c32777910521a9b4695ab326e
24,910
def get_lists_moving_avg(lists: list, period: int) -> list:
    """Get the moving average (window ``period``) over the element-wise
    mean of the given lists.

    Notice that all lists must have the same size.
    """
    count = len(lists)
    means = [sum(column) / count for column in zip(*lists)]
    if period > len(means):
        raise Exception(
            "Period can not be bigger than moving average list size.")
    return [sum(means[idx - period:idx]) / period
            for idx in range(period, len(means) + 1)]
f25ed2216a1c7695d74a3f6dc7c8a14bed22af69
24,911
import os
import pickle


def load_object(file_name):
    """Load and return a Python object previously pickled to ``file_name``.

    (Docstring fix: the original said "Save a Python object", but this
    function *loads* one.)

    :param file_name: path to the pickle file; resolved to an absolute
        path before opening.
    :return: the unpickled object.
    """
    file_name = os.path.abspath(file_name)
    # binary mode is required by pickle
    with open(file_name, 'rb') as f:
        return pickle.load(f)
35484a2d794a02970b6f757f1d10374cf646cf00
24,912
from typing import Tuple
from typing import List


def define_2d_correlations_hists() -> Tuple[List[str], List[float], List[Tuple[str, float, float, str]]]:
    """ Define the 2D correlation hists.

    Args:
        None.
    Returns:
        (ep_orientation values, assoc_pt values,
        (ep_orientation, lower_pt_bin_edge, upper_pt_bin_edge, name))
    """
    ep_orientations = ["all", "in", "mid", "out"]
    assoc_pt = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 10.0]
    # Example name: "raw2DratioallAss1015J2040C3050bg0812sig06rbX2rbY2.root"
    names = []
    for orientation in ep_orientations:
        for low, high in zip(assoc_pt[:-1], assoc_pt[1:]):
            hist_name = (f"raw2Dratio{orientation}Ass{int(low * 10)}{int(high * 10)}"
                         f"J2040C3050bg0812sig06rbX2rbY2")
            names.append((orientation, low, high, hist_name))
    return ep_orientations, assoc_pt, names
00a0f59fe7c238a271fa75a83085556fc51f4acc
24,913
def _dict_clean_departments(my_dict): """Remove internal annotations.""" my_dict['department_name'] = my_dict['department_name'].replace('_archive_', '').replace('_obsolete_', '').replace('_Obsolete_', '').replace('Obsolete_', '') return my_dict
c5be3a21491f8b82cabe812c03104f1e92d7baef
24,915
def is_valid_environment(env, nova_creds):
    """
    Check whether the configuration contains a section for the requested
    environment.

    :param env: environment name to look up.
    :param nova_creds: mapping of configured environments.
    :return: ``env`` if present, otherwise False.
    """
    # Idiom fix: membership test on the dict directly instead of .keys().
    if env in nova_creds:
        return env
    return False
c1a020f8b7ea8515d24f5f2547cd3eb5a182e306
24,916
import os
from pathlib import Path


def get_files(path, exts):
    """Return a sorted list of absolute paths to files in the tree rooted
    at ``path`` whose names end (case-insensitively) with one of the
    given extensions."""
    collected = []
    for dirpath, _, filenames in os.walk(path):
        base = Path(dirpath).resolve()
        collected.extend(base / name for name in filenames
                         if name.lower().endswith(exts))
    return sorted(collected)
b629b39f01972924f1b563060ef6703cb7b3b01c
24,917
import numpy as np


def nd_to_3d(ndarray, Rlist, Glist, Blist, G_plus=0):
    """Collapse an n-channel image into a 3-channel RGB image.

    Each output channel is the average of the input channels listed in
    Rlist/Glist/Blist; raising ``G_plus`` enlarges the green divisor,
    dimming the green channel.
    """
    out = np.zeros([ndarray.shape[0], ndarray.shape[1], 3])
    channel_plan = ((0, Rlist, len(Rlist)),
                    (1, Glist, len(Glist) + G_plus),
                    (2, Blist, len(Blist)))
    for target, sources, divisor in channel_plan:
        for src in sources:
            out[:, :, target] += ndarray[:, :, src]
        out[:, :, target] /= divisor
    return out
fcca0a1b168d5f218274b0773854a2713e72ffe0
24,918
def compress(dates, values):
    """Keep the last value recorded for each date.

    inputs:
        dates      | values
        -----------+--------
        25-12-2019 | 5
        25-12-2019 | 6
        25-12-2019 | 7
        25-12-2019 | 8
        25-12-2019 | 9
        25-12-2019 | 10
        29-12-2019 | 11
        29-12-2019 | 12
        29-12-2019 | 13
        03-01-2020 | 14
        03-01-2020 | 15
        03-01-2020 | 16
        03-01-2020 | 17
    returns:
        dates      | values
        -----------+--------
        25-12-2019 | 10
        29-12-2019 | 13
        03-01-2020 | 17

    Dates are returned in first-seen order.
    """
    if len(dates) != len(values):
        raise Exception("Length of dates and values must match.")
    # Bug fix: the original sorted `dates` and `values` *independently*
    # (also mutating the caller's lists), which destroyed the pairing and
    # contradicted the documented example — the lexicographic sort put
    # '03-01-2020' first and reassigned values across dates. Iterating
    # the pairs in their original order keeps each date with its values
    # and the dict naturally retains the last value per date.
    latest = {}
    for date, value in zip(dates, values):
        latest[date] = value
    return list(latest.keys()), list(latest.values())
d6e34a56e6834916f68af491f4b5793e28594685
24,919
def _load_image(client, load): """Load image from local disk """ with open(load, 'rb') as f: # load returns image list return client.images.load(data=f)[0]
c3c4f2cf074707126d93f12d8281da3179ddf0da
24,920
def _ParseClassNode(class_node): """Parses a <class> node from the dexdump xml output. Returns: A dict in the format: { 'methods': [<method_1>, <method_2>] } """ methods = [] for child in class_node: if child.tag == 'method': methods.append(child.attrib['name']) return {'methods': methods, 'superclass': class_node.attrib['extends']}
2d801173230a065e668b89cac155b31991cee656
24,921
import select def _is_readable(socket): """Return True if there is data to be read on the socket.""" timeout = 0 (rlist, wlist, elist) = select.select( [socket.fileno()], [], [], timeout) return bool(rlist)
79b258987171e3a5e4d3bab51a9b8c9a59e2415e
24,922
def add_lists(*args):
    """Concatenate the given lists into a new list.

    This is trivial but it's here for symmetry with add_dicts.
    """
    return [item for seq in args for item in seq]
a7992b388995dbe23ca4b117d0fe94a8197e81da
24,923
def export_sheet(ss, sheet_id, export_format, export_path, sheet_name):
    """
    Export a sheet, given export filetype and location. Allows export
    format 'csv', 'pdf', or 'xlsx'.

    :param ss: initialized smartsheet client instance
    :param sheet_id: int, required; sheet id
    :param export_format: str, required; 'csv', 'pdf', or 'xlsx'
    :param export_path: str, required; filepath to export sheet to
    :param sheet_name: str, required; name of sheet exported
    :return: str, indicating failure or success, with path, filename,
        extension
    """
    exporters = {
        'csv': ss.Sheets.get_sheet_as_csv,
        'xlsx': ss.Sheets.get_sheet_as_excel,
        # get_sheet_as_pdf takes an optional paperSize parameter; default is A1
        'pdf': ss.Sheets.get_sheet_as_pdf,
    }
    exporter = exporters.get(export_format)
    if exporter is None:
        return ('export_format \'{}\' is not valid. Must be \'csv\', '
                '\'pdf\', or \'xlsx\''.format(export_format))
    exporter(sheet_id, export_path)
    return 'Sheet exported to {}{}.{}'.format(export_path, sheet_name, export_format)
76b49fa0904140571eb84526f6021448db54dea9
24,924
def rev_slice(i):
    """Return True iff ``i`` is a slice with a negative (reversing) step."""
    if not isinstance(i, slice):
        return False
    return i.step is not None and i.step < 0
680736407681301f16b93d5871630fcbd9ca9679
24,926
import sys


def usage():
    """Return the usage string shown when called directly."""
    program = sys.argv[0]
    return "{} [state action terminating]".format(program)
846faa01af054fcb2f61ed36daaf34e8cd5f9af6
24,927
def add_tooltips_columns(renderer, tooltips, group):
    """Add any data columns referenced by *tooltips* to the renderer's source.

    Args:
        renderer (GlyphRenderer): renderer for the glyph to be modified.
        tooltips (bool, list(str), list(tuple)): valid tooltips spec as
            defined in the builder class.
        group (DataGroup): group of data containing the missing columns.

    Returns:
        renderer (GlyphRenderer): renderer with missing columns added
    """
    current_columns = renderer.data_source.data.keys()

    # Determine the column names referenced by the tooltips spec.
    # BUG FIX: an empty tooltips list previously raised IndexError on
    # tooltips[0]; treat it the same as an unrecognised spec.
    if not tooltips:
        tooltips_columns = []
    elif isinstance(tooltips[0], tuple):
        # ('label', '@column') pairs -> strip the '@' field marker
        tooltips_columns = [pair[1].replace('@', '') for pair in tooltips]
    elif isinstance(tooltips[0], str):
        tooltips_columns = tooltips
    else:
        tooltips_columns = []

    for column in tooltips_columns:
        if column in current_columns:
            continue  # already present in the data source
        elif '$' in column:
            continue  # '$'-prefixed names are special vars, not data columns
        renderer.data_source.add(group.get_values(column), column)

    return renderer
f13fd71a4288936575c8157bc8829eed6545e004
24,928
def adapt_sample_keys(sample_list, key_type):
    """
    Rewrite samples so each carries a combined "sample_id" key built from
    "scene_id", "object_id" and "ann_id".

    :param key_type:
            'kkk' for {scene_id}-{object_id}_{ann_id}
            'kk'  for {scene_id}-{object_id}
            'k'   for {scene_id}
    :return: new sample list (items are also mutated in place).
    """
    assert key_type in ['kkk', 'kk', 'k']

    spec = {
        'kkk': ('{}-{}_{}', ('scene_id', 'object_id', 'ann_id')),
        'kk': ('{}-{}', ('scene_id', 'object_id')),
        'k': ('{}', ('scene_id',)),
    }
    fmt, fields = spec[key_type]

    updated = []
    for sample in sample_list:
        sample['sample_id'] = fmt.format(*(sample[f] for f in fields))
        updated.append(sample)
    return updated
8845da67ee9627cf377efc1c7b789eaa4cfb2c65
24,930
import torch


def get_distance_measure(x: torch.Tensor, p: int = 1):
    """Compute the pairwise distance matrix of the rows of *x*.

    dist[i, j] = (|x[i] - x[j]| ** p) ** (1 / p), computed element-wise on
    the broadcast difference.

    NOTE(review): the broadcast only lines up for D == 1 inputs (N x 1);
    confirm intended behaviour for D > 1 with callers.

    Arguments:
        x {torch.Tensor} -- input of shape (N, D)

    Keyword Arguments:
        p {int} -- order of the distance (default: {1})

    Returns:
        [dist] -- [NxN distance matrix]
    """
    N, D = x.size()
    # Row i of `dist` holds x[i] replicated N times.
    dist = torch.repeat_interleave(x, N, dim=1)
    # BUG FIX: the original had a bare `dist.permute(1, 0)` here whose result
    # was discarded -- a no-op statement, now removed.
    dist = torch.pow(torch.abs(dist - dist.permute(1, 0)) ** p, 1 / p)
    return dist
716fd603263aae905720058ec698caf8c8d9e5a3
24,931
import types
import inspect


def get_pytorch_model(module, model_settings):
    """
    Instantiate a DeepSphere-Weather model described by *model_settings*.

    The architecture class must be defined in the custom python file imported
    as *module*; only the settings whose names match the class constructor's
    arguments are forwarded.

    Parameters
    ----------
    module : module
        Imported python module containing the architecture definition.
    model_settings : dict
        Dictionary containing all architecture options.
    """
    if not isinstance(module, types.ModuleType):
        raise TypeError("'module' must be a preimported module with the architecture definition.")
    # - Look up the architecture class and the arguments it accepts
    model_class = getattr(module, model_settings['architecture_name'])
    accepted = set(inspect.getfullargspec(model_class.__init__).args)
    kwargs = {key: value for key, value in model_settings.items() if key in accepted}
    # - Instantiate the DeepSphere model
    return model_class(**kwargs)
a32a01163b71b1610c97adad1b3febfa1d4d5a25
24,932
def p_testlist_single(p, el):
    """testlist_plus : test"""
    singleton = [el]
    return singleton, el
0879bcf4c414acf0f01028e80ece2d5ed4db8489
24,933
def get_sample_name_and_column_headers(mpa_table):
    """
    Return a tuple (sample_name, column_headers, database, variant) for an
    mpa-style table.  The variant is an integer that also indicates the
    number of rows to skip when parsing the table.
    """
    with open(mpa_table) as handle:
        lines = [handle.readline().strip() for _ in range(4)]

    if lines[0].startswith("#SampleID"):
        columns = lines[0].split()
        return columns[0], columns, "#Unknown database", 1
    if lines[1].startswith("#SampleID"):
        return lines[1].split()[1], lines[2].split(), lines[0], 2
    if lines[2].startswith("#SampleID"):
        return lines[2].split()[1], lines[3].split(), lines[0], 3
    raise NotImplementedError(
        "No support for table type with first four lines like:\n %s\n %s\n %s\n %s"
        % tuple(lines))
f37d52124cae2d1a893627390cde731b52f87d32
24,936
def lingray(x, a, b):
    """Linear gray-scale mapping of *x* onto [0, 255]; a and b are the cutoffs."""
    span = b - a
    return 255 * (x - float(a)) / span
40562246a2a3ba377344bc593091c49ca290a04b
24,937
def calc_wmark_low_and_totalreserve_pages(zoneinfo_path="/proc/zoneinfo"):
    """Calculate sum of low watermarks and total reserved space over all
    zones, and return those two values.  Values are in pages.

    Args:
        zoneinfo_path: file to parse; defaults to /proc/zoneinfo, overridable
            mainly for testing (backward-compatible addition).

    Returns:
        (wmark_low, totalreserve) tuple of page counts.
    """
    wmark_low = 0
    totalreserve = 0
    # BUG FIX: the file handle was previously never closed; use a context
    # manager.  The while/for pair shares one file iterator: the inner loop
    # consumes one zone's lines and breaks on the next "Node" header.
    with open(zoneinfo_path) as fzoneinfo:
        keep_reading = True
        while keep_reading:
            keep_reading = False
            managed = 0
            high = 0
            max_lowmem_reserve = 0
            #-- loop over lines in zone
            for line in fzoneinfo:
                #-- new zone encountered if line starts with "Node"
                if line.startswith("Node"):
                    keep_reading = True
                    break
                info = line.split(None, 1)
                if len(info) >= 2:
                    if info[0] == 'low':
                        wmark_low += int(info[1])
                    elif info[0] == 'high':
                        high = int(info[1])
                    elif info[0] == 'managed':
                        managed = int(info[1])
                    elif info[0] == 'protection:':
                        lowmem_reserve = list(
                            int(l) for l in info[1].strip('()\n ').split(','))
                        max_lowmem_reserve = max(lowmem_reserve)
            #-- calculate reserved pages for this zone according to algorithm
            # https://github.com/torvalds/linux/blob/6aa303defb7454a2520c4ddcdf6b081f62a15890/mm/page_alloc.c#L6559
            reserve = min(max_lowmem_reserve + high, managed)
            totalreserve += reserve
    return wmark_low, totalreserve
e184a82e9a61002eb883e2c8a1e9063bd53a860e
24,938
import torch


def get_criterion(config):
    """Create the torch loss criterion named by ``config['name']``.

    Raises NotImplementedError for any name other than 'cross_entropy'.
    """
    criterion_name = config["name"]
    if criterion_name != "cross_entropy":
        raise NotImplementedError
    return torch.nn.CrossEntropyLoss()
2d1a9cc5983ab64fe2016637c292de3b9551079b
24,939
import subprocess


def isProgramAvailable(programName):
    """Find if program passed in is available.

    Asks bash's ``type`` builtin whether the name resolves to a command;
    output is discarded and only the exit status is inspected.

    Arg:
        programName: A string containing a program name.

    Returns:
        A boolean specifying whether the program specified is available.
    """
    result = subprocess.run(
        ["bash", "-c", "type " + programName],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
    return result.returncode == 0
28cb7d06175808ee58b8b479cce1f1a14ba30719
24,942
def is_magic(attribute):
    """Check if attribute name matches the magic (dunder) naming convention."""
    return attribute.startswith('__') and attribute.endswith('__')
d392a34902ce22127d92bc4d678228d59b97cba9
24,944
def expand_row(header, row):
    """Parse one sambamba BED row into a dict.

    Args:
        header (dict): key/index header dict
        row (List[str]): sambamba BED row

    Returns:
        dict: parsed sambamba output row
    """
    coverage_at = {
        level: float(row[index])
        for level, index in header['thresholds'].items()
    }
    return {
        'chrom': row[0],
        'chromStart': int(row[1]),
        'chromEnd': int(row[2]),
        'sampleName': row[header['sampleName']],
        'readCount': int(row[header['readCount']]),
        'meanCoverage': float(row[header['meanCoverage']]),
        'thresholds': coverage_at,
        'extraFields': row[header['extraFields']],
    }
79ea16b498f6fd5c7c002bf822a34da65de0d2c9
24,945
import pickle


def retrieve_model(filename='model.pkl'):
    """Load and return the probability model pickled in *filename*."""
    with open(filename, 'rb') as handle:
        model = pickle.load(handle)
    return model
3e05c3c9b3bc2bc3974dab422ced5f53589c2823
24,949
def aggregator(df, column):
    """
    Return aggregate statistics for *column* of *df*, compiled into a list:
    [sum, min, max, mean, median, mode], each rounded to 3 decimals.

    Parameters
    ----------
    df : pandas object
        dataFrame from which to pull the values
    column : string
        column to focus on for creating the aggregate data

    Returns
    -------
    list [summ, minn, maxx, avg, med, mode] -- mode is a Series, since a
    column can have several most-repeated values.

    Usage / Example
    ---------------
    ps4_sum, _, ps4_max, ps4_avg, _, ps4_mode = aggregator(df_ps4, "Global_Sales")
    Values you don't need can be ignored with the underscore placeholder.
    """
    series = df[column]
    return [
        round(series.sum(), 3),     # total
        round(series.min(), 3),     # lowest value
        round(series.max(), 3),     # highest value
        round(series.mean(), 3),    # average
        round(series.median(), 3),  # median
        round(series.mode(), 3),    # most repeated value(s)
    ]
f940270662715859f42e6b9ffc4411c492085651
24,951
def gen_compare_cmd(single_tree, bootstrapped_trees):
    """
    Returns a command list for Morgan Price's "CompareToBootstrap" perl
    script <list>

    Input:
    single_tree <str> -- path to reference tree file (Newick)
    bootstrapped_trees <str> -- path to bootstrapped trees file (Newick)
    """
    script = '/home/alexh/bin/MOTreeComparison/CompareToBootstrap.pl'
    return ['perl', script, '-tree', single_tree, '-boot', bootstrapped_trees]
df9d2d4c21ca28012107b8af69706d45804e5637
24,953
def silent_none(value):
    """Render ``None`` values as empty strings; pass everything else through."""
    return '' if value is None else value
48b9709dc4bffc659b168f625f4f6be5608a645d
24,955
import os


def GetNumCores():
    """Returns the number of cores on the machine.  For hyperthreaded
    machines, this will be double the number of actual processors.  Falls
    back to 1 when the reported count is not a positive integer."""
    count = os.sysconf("SC_NPROCESSORS_ONLN")
    return count if isinstance(count, int) and count > 0 else 1
a03536917ef9401a693d076e1a2cebde807e90ab
24,956
def _get_filename(params): """ Return the filename generated from given configuration params. """ def _mem_to_int(mem_type): return {'local': 0, 'no_local': 1}[mem_type] def _algo_to_int(algo_type): return {'standard': 0, 'naive': 1}[algo_type] def _batch_to_int(batch_type): return {'strided': 0, 'interleaved': 1}[batch_type] def _vec_type_to_int(vec_type): return {'none': 0, 'partial': 1, 'full': 2}[vec_type] def _bool_to_int(val): return 1 if val else 0 int_list = [ _mem_to_int(params.mem_type), _algo_to_int(params.algo_type), _batch_to_int(params.batch_type), _bool_to_int(params.double_buffer), _bool_to_int(params.bank_conf_a), _bool_to_int(params.bank_conf_b), _vec_type_to_int(params.vec_type), params.cache_size, params.vec_size, ] + params.tile.to_list() return "tune_gemm_" + "_".join(map(str, int_list)) + ".cpp"
23fd49a2288d6b5386b7863152950f7e3d751246
24,957
def name(action_name):
    """Decorator assigning a custom action name; does not override prefix."""
    def wrap(func):
        # Tag the function in place and hand it back unchanged.
        func.action_name = action_name
        return func
    return wrap
530e107c0062ee5e5f91d833d0810f6eed27650d
24,958
def determina_netmask_bin(putere):
    """
    Build the dotted binary netmask string with *putere* zero bits.

    :param (int) putere: number of 0 bits in the netmask
    :return (str): dotted binary netmask,
        e.g. '11111111.11111111.11111111.00000000' for putere=8
    """
    ones = 32 - putere
    bits = "1" * ones + "0" * putere
    octets = [bits[i:i + 8] for i in range(0, 32, 8)]
    return ".".join(octets)
93498dc715d18fb8dd6ca593680bc51b648f083d
24,959
def splinter_screenshot_encoding(request):
    """Browser screenshot HTML encoding (always UTF-8)."""
    encoding = "utf-8"
    return encoding
17e6055332e7bc63778e1e80ba01c1631b0c876e
24,960
import math


def rms_mean(elements):
    """
    Calculate the root-mean-square value of a list of numbers.

    :param elements: a list of elements
    :return: sqrt(mean(x^2)) over the elements
    """
    squared_total = sum(value ** 2 for value in elements)
    return math.sqrt(squared_total / len(elements))
a1178e70f210063c6559fa15789bfd15c1f89b79
24,961
import subprocess


def run_cmd(command, wait=True):
    """Execute *command* through the shell and return its exit status (0).

    Raises subprocess.CalledProcessError on non-zero exit.  NOTE(review): the
    *wait* flag is accepted but ignored -- execution always blocks until the
    command finishes.
    """
    completed = subprocess.run(command, shell=True, check=True)
    return completed.returncode
950716af828f14984c0aea5f797fc0cd46bce554
24,963
from typing import Any


def maybebool(value: Any) -> bool:
    """A "maybified" version of the bool() function."""
    return True if value else False
e40d112291ce7bfb58d94208be662cb5370506d9
24,964
import random


def digits() -> str:
    """Generate a random 4 digit number as a string.

    BUG FIX: the previous range randint(1111, 9999) could never produce the
    4-digit values 1000-1110; the full 4-digit range is now covered.
    """
    return str(random.randint(1000, 9999))
8d4f9195e74c2b1b0c31a108a502a028239bea8e
24,965
def parse_probabilities_grep_pos_2_prob(aaseq, pos_2_prob_dict):
    """Extract "(prob)" annotations from *aaseq* into *pos_2_prob_dict*.

    Each parenthesised float is recorded against the index of the residue
    immediately before it (positions are relative to the already-stripped
    sequence), then the annotation is removed from the string.  The dict is
    mutated in place and returned.

    (Iterative form of the original recursion; the function exists because
    ``df[col].apply(parse_probabilities, args=([],))`` did not initialise the
    list as expected.)
    """
    while True:
        try:
            open_idx = aaseq.index("(")
        except ValueError:
            # No more annotations left.
            return pos_2_prob_dict
        close_idx = aaseq.index(")")
        probability = float(aaseq[open_idx + 1:close_idx])
        # The annotated residue sits just before the opening parenthesis.
        pos_2_prob_dict[open_idx - 1] = probability
        aaseq = aaseq[:open_idx] + aaseq[close_idx + 1:]
069e22809c11fede827e89710807a2b0743e486e
24,966
def get_history_object_for(obj):
    """Construct history object for obj, i.e. instantiate history object,
    copy relevant attributes and set a link to obj, but don't save. Any
    customizations can be done by the caller afterwards. Many-to-many fields
    are not copied (impossible without save). The history model must use
    related_name="history_set" for the foreign key connecting to the live
    model for this function to be able to discover it."""
    # Discover the history model through the reverse FK accessor on obj.
    history_model = obj.history_set.model
    h = history_model()
    # copy attributes shared between history and obj
    history_field_names = set(f.name for f in history_model._meta.fields)
    for field in obj._meta.fields:
        # Skip the primary key: the history row must get its own pk on save.
        if field is not obj._meta.pk and field.name in history_field_names:
            setattr(h, field.name, getattr(obj, field.name))
    # try setting foreign key to obj
    # NOTE(review): assumes the FK field on the history model is named after
    # the lowercased live-model name -- confirm for models with custom FK
    # field names.
    key_name = obj._meta.object_name.lower()
    if key_name in history_field_names:
        setattr(h, key_name, obj)
    # we can't copy many-to-many fields as h isn't saved yet, leave
    # that to caller
    return h
233a9b2d80c80803d1f3e9dbaacbc76290d71f33
24,968
from typing import Sequence


def crop_to_bbox_no_channels(image, bbox: Sequence[Sequence[int]]):
    """
    Crops image to bounding box (in spatial dimensions)

    Args:
        image (arraylike): 2d or 3d array
        bbox (Sequence[Sequence[int]]): bounding box coordinates as
            (start, stop) pairs per dimension, e.g. ((x1, x2), (y1, y2))

    Returns:
        arraylike: cropped array
    """
    slices = tuple(slice(pair[0], pair[1]) for pair in bbox)
    return image[slices]
0d6d4a2c77be0343b7557485e06fd8c33a49e72f
24,969
def find_interval(array, value):
    """
    Return indices (idxInf, idxSup) of *array* verifying
    array[idxInf] < value < array[idxSup].

    Returns (-1, -1) when value lies outside [array.min(), array.max()].
    """
    distances = [abs(entry - value) for entry in array]
    nearest = distances.index(min(distances))

    if value < array.min() or value > array.max():
        return -1, -1
    if array[nearest] >= value and nearest != 0:
        return nearest - 1, nearest
    return nearest, nearest + 1
f6399cc1ae0b496ae9d407d38853ba91bcff3640
24,973
def mean_std(feature_space, eps=1e-5):
    """
    Calculates the per-channel mean and standard deviation of a feature map.

    Arguments:
        feature_space (torch.Tensor): Feature space of shape (N, C, H, W)
        eps: small value added to the variance to avoid divide-by-zero.

    Returns:
        (mean, std) tensors, each of shape (N, C, 1, 1)
    """
    shape = feature_space.size()
    assert (len(shape) == 4), "Feature space shape is NOT structured as N, C, H, W!"
    batch, channels = shape[0], shape[1]
    flat = feature_space.view(batch, channels, -1)
    variance = flat.var(dim=2) + eps
    std = variance.sqrt().view(batch, channels, 1, 1)
    mean = flat.mean(dim=2).view(batch, channels, 1, 1)
    return mean, std
2b228edab0397257b1deacdecae95265d955c76c
24,976
import os


def resolve_template_filepath(paths, template_reference):
    """Resolve *template_reference* against a collection of candidate paths.

    Returns the first matching absolute filepath, or None when no candidate
    exists.  If chapter files and their children are ambiguously named, the
    first match wins -- being more specific in the reference guarantees the
    correct file is loaded.
    """
    for base in paths:
        candidate = os.path.abspath(os.path.join(base, template_reference))
        if os.path.exists(candidate):
            return candidate
    return None
7aa28d330c4d1f225265c116b726e2b51e571b2b
24,977
def fake_training_fn(request):
    """Fixture used to generate fake training for the tests."""
    training_loop, loop_args, metrics = request.param
    assert len(metrics) in [1, 3]

    def _run(logdir, **kwargs):
        # Forward the fixture-parameterised loop with its bound arguments.
        return training_loop(logdir=logdir, metrics=metrics, **loop_args, **kwargs)

    return _run
8f44f765126f74bdd06e243015fa2d58259e967b
24,979
def some_text(name):
    """
    Some method that doesn't even know about the decorator

    :param name: string some name
    :return: Some ipsum with a name
    """
    template = "Ipsum {n}, Ipsum!"
    return template.format(n=name)
4f76b67f5b3fb34e1bc634ba5374cc5714a2e387
24,980
import torch


def bbox_transform(boxes, gtboxes):
    """
    Bounding Box Transform from groundtruth boxes and proposal boxes to deltas

    NOTE(review): the original doc labelled boxes as (xyxy) and gtboxes as
    (xywh), but the code below reads boxes as (center_x, center_y, w, h) and
    gtboxes as corner coordinates (x1, y1, x2, y2) -- confirm with callers.

    Args:
        boxes: [N, 4] torch.Tensor, anchors, read as (center_x, center_y, w, h)
        gtboxes: [N, 4] torch.Tensor, read as corners (x1, y1, x2, y2)
    Return:
        delta: [N, 4] torch.Tensor of (dx, dy, dw, dh)
    """
    # Groundtruth width/height from corner coordinates (+1: inclusive pixels).
    gt_w = gtboxes[:, 2] - gtboxes[:, 0] + 1
    gt_h = gtboxes[:, 3] - gtboxes[:, 1] + 1
    # center
    gt_x = gtboxes[:, 0] + 0.5 * gt_w
    gt_y = gtboxes[:, 1] + 0.5 * gt_h
    # Anchors [x,y,w,h]
    anchor_x = boxes[:, 0]
    anchor_y = boxes[:, 1]
    anchor_w = boxes[:, 2]
    anchor_h = boxes[:, 3]
    # Normalised center offsets and log-scale size ratios.
    delta_x = (gt_x - anchor_x) / anchor_w
    delta_y = (gt_y - anchor_y) / anchor_h
    delta_w = torch.log(gt_w / anchor_w)
    delta_h = torch.log(gt_h / anchor_h)
    # [N, 4]
    return torch.stack([delta_x, delta_y, delta_w, delta_h]).transpose(0, 1)
fa2bf83d24206b83508612ac728636905e80ebcc
24,981
import json


def jsonEqual(ja, jb):
    """
    jsonEqual(obj1, obj2) -> Boolean

    Determine whether two JSON-serialisable objects are equal by comparing
    their canonical (key-sorted) JSON dumps.

    args: obj1 = {"a": 1, "b": 2}; obj2 = {"b": 2, "a": 1}
    return: Boolean True
    """
    canonical_a = json.dumps(ja, sort_keys=True)
    canonical_b = json.dumps(jb, sort_keys=True)
    return canonical_a == canonical_b
15affb59b426f7d5b909ec2c999c86a4f3951e22
24,982
import os


def select_path(directories, filename):
    """Find *filename* among several directories; return the first existing
    joined path, or None when no directory contains it."""
    for base in directories:
        candidate = os.path.join(base, filename)
        if os.path.exists(candidate):
            return candidate
    return None
a3f5b3afcb6f00d2bb44d7bdb9f90944d40dce05
24,983
def port_forward(srcport, destport, rule=None):
    """Use firewall rule to forward a TCP port to a different port.

    Useful for redirecting privileged ports to non-privileged ports.

    Args:
        srcport: port to forward from.
        destport: port to forward to.
        rule: optional pre-built firewall rule (currently unused).

    NOTE(review): stub -- returns the ``NotImplemented`` sentinel instead of
    raising NotImplementedError, so callers must check the return value.
    Consider raising instead (changing this would alter the interface).
    """
    return NotImplemented
041056ad58efca38b1f681bacd556f002ff7d1de
24,984
from typing import Union
from pathlib import Path
import pickle


def load_picke(ffp: Union[Path, str]):
    """Load and return the object pickled at *ffp*.

    (Misspelled name kept for backward compatibility; prefer the correctly
    spelled ``load_pickle`` alias below.)
    """
    with open(ffp, "rb") as f:
        return pickle.load(f)


# Correctly spelled alias; existing callers of the misspelled name keep
# working via the definition above.
load_pickle = load_picke
39145f2c1dd51226f19b89aaeb984a9434ebb06c
24,985
def check_average_ROI_overlap(df_overlap, percentage):
    """Return True if the mean of the '%_overlap' column exceeds *percentage*."""
    mean_overlap = df_overlap["%_overlap"].mean()
    return bool(mean_overlap > percentage)
3f1d932ba270f64fec85644c5e3613b082cda160
24,986
import argparse


def set_parser():
    """Set up the custom command-line argument parser.

    Returns:
        argparse.ArgumentParser: parser with data/region/weights/output/device
        options for running predictions.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-d", "--data", type=str, required=True,
                        help='path to a folder containing days to be predicted (e.g. the test folder of the test dataset)')
    parser.add_argument("-r", "--region", type=str, required=False, default='R1',
                        help='Region where the data belongs.')
    parser.add_argument("-f", "--weights_folder", type=str, required=True,
                        help='path to folder containing the model weights')
    parser.add_argument("-w", "--weights", nargs="+", type=str, required=True,
                        help='list of weights to use')
    parser.add_argument("-o", "--output", type=str, required=True,
                        help='path to save the outputs of the model for each day.')
    # BUG FIX: the help text previously claimed "Default is 'cuda'" while the
    # actual default is 'cpu'.
    parser.add_argument("-g", "--device", type=str, required=False, default='cpu',
                        help="which device to use - use 'cuda' for gpu. Default is 'cpu'.")
    return parser
bf4a04954ec83ad55972acec3f72f936d85c657a
24,987
from typing import List


def replace_words_with_prefix_hash(dictionary: List[str], sentence: str) -> str:
    """Replace each word in *sentence* by its shortest matching root from
    *dictionary*.

    A word is replaced when one of its proper prefixes is a root; prefixes
    are tried shortest-first, so the shortest root wins.  Words with no
    matching root are kept unchanged.

    Complexity Analysis:
        - Time: O(sum w_i^2) over word lengths w_i (prefix checks against a
          set).
        - Space: O(N) for the root set, N being the sentence length.

    @param dictionary: List of roots
    @param sentence: words separated by space to perform replacement on
    @return: New sentence with replaced words
    """
    roots = set(dictionary)

    def shortest_root(word: str) -> str:
        # Proper prefixes only, shortest first.
        for prefix_len in range(1, len(word)):
            prefix = word[:prefix_len]
            if prefix in roots:
                return prefix
        return word

    rewritten = [shortest_root(word) for word in sentence.split()]
    return " ".join(rewritten)
bb46b0dc61eab2be358d44f8fb46782f867e3c30
24,990