content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from datetime import datetime
def get_time_at(hour: int, minute: int) -> datetime:
    """Return today's date combined with the given hour and minute.

    Seconds and microseconds are zeroed out.
    """
    today = datetime.now()
    return today.replace(hour=hour, minute=minute, second=0, microsecond=0)
import requests
def _download_one(ticker: str, interval: str = "1d", period: str = "1y", timeout: float = 30.0):
    """
    Download historical data for a single ticker from Yahoo! Finance.

    Parameters
    ----------
    ticker: str
        Ticker for which to download historical information.
    interval: str
        Frequency between data.
    period: str
        Data period to download.
    timeout: float
        Seconds before the HTTP request is aborted (the original call had no
        timeout and could hang forever).

    Returns
    -------
    data: dict
        Scraped dictionary of information.

    Raises
    ------
    RuntimeError
        When Yahoo! Finance reports it is down.
    """
    base_url = 'https://query1.finance.yahoo.com'
    params = dict(range=period, interval=interval.lower(), includePrePost=False)
    url = "{}/v8/finance/chart/{}".format(base_url, ticker)
    response = requests.get(url=url, params=params, timeout=timeout)
    if "Will be right back" in response.text:
        raise RuntimeError("*** YAHOO! FINANCE is currently down! ***\n")
    return response.json()
import configparser
def read_conf(conf_path):
    """Parse the padpt configuration file at *conf_path*.

    Returns a ``configparser.ConfigParser`` loaded with the file's contents.
    """
    parser = configparser.ConfigParser()
    with open(conf_path) as handle:
        parser.read_file(handle)
    return parser
from pathlib import Path
import re
import json
def _get_performance(folder: Path, name_re: re.Pattern, dataset_performance: dict, scores_key: str):
"""Gets the performance of all the dataset of one experiment"""
for fn in folder.glob('*.json'):
info = json.load(open(fn, 'r+'))
m = name_re.match(fn.name)
if m is None:
continue
dataset_name = m.group(1)
performance = [p if p > 0 else 0 for p in info[scores_key]]
try:
dataset_performance[dataset_name].extend(performance)
except KeyError:
dataset_performance[dataset_name] = performance
return dataset_performance | 9a676e2181efd54f43d385f628bb2ae1f591b1ca | 123,830 |
import struct
def convert(rows):
    """Unpack each TCell value in *rows* to an unsigned 64-bit big-endian int.

    Mutates each row's ``columns`` mapping in place and returns *rows*.
    """
    # It may be wiser to do this lazily.
    for row in rows:
        columns = row.columns
        # iteritems() was Python 2 only; items() is the Python 3 equivalent.
        for key, tcell in columns.items():
            columns[key] = struct.unpack('!Q', tcell.value)[0]
    return rows
def clip(lower, val, upper):
    """Clip a value into an inclusive range.

    For lower bound L, upper bound U and value V it guarantees L <= V <= U.

    Args:
        lower: Lower boundary (including)
        val: The value to clip
        upper: Upper boundary (including)

    Returns:
        value within bounds
    """
    return max(lower, min(val, upper))
def clean_data(df):
    """
    Clean the given data frame; currently this only removes duplicate rows.
    Other cleaning activities might be added as needed.

    Arguments:
        df -> data frame to be cleaned (mutated in place via inplace=True)
    Output:
        df -> the same, now de-duplicated, data frame object
    """
    # remove duplicates
    df.drop_duplicates(inplace=True)
    return df
def date_to_directory(date):
    """Format a ``datetime.date`` using the directory naming convention on
    our system, e.g. ``'20120825'``."""
    return '{:%Y%m%d}'.format(date)
def get_knot_dates(start_date, knot_idx, freq):
    """
    Compute knot dates as ``start_date + knot_idx * freq``.

    Parameters
    ----------
    start_date : datetime array
    knot_idx : ndarray
        1D array containing index with `int` type.
    freq : datetime delta

    Returns
    -------
    list :
        knot dates derived from the start date and indices
    """
    offsets = knot_idx * freq
    return start_date + offsets
def serialize(root):
    """
    Serialize a binary tree to a comma-separated preorder (depth-first,
    left-first) string.

    ``None`` children are written as the literal string ``'None'`` so the tree
    can be reconstructed; the original implementation only yielded non-None
    nodes, so the sentinels never appeared in the output. Node values are
    assumed to be strings that contain no commas.
    """
    def dfs_preorder(node):
        stack = [node]
        while stack:
            current = stack.pop()
            yield current
            if current is not None:
                # push right first so left is visited first (preorder)
                stack.append(current.right)
                stack.append(current.left)
    return ','.join(
        node.val if node is not None else 'None'
        for node in dfs_preorder(root)
    )
def get_cumulative_rewards(reward, done, discount=0.99):
    """Compute discounted returns ``R_t = r_t + discount * R_{t+1}``.

    The running return is reset whenever a step is terminal, so episodes do
    not leak reward into each other (a.k.a. G(s,a) in Sutton '16).

    Args:
        reward: `list`. Immediate rewards r(s,a) for the passed episodes.
        done: `list`. Terminal flags for the passed episodes.
        discount: `float`. The discount factor.

    Returns:
        list of cumulative rewards, same length as *reward*.
    """
    if discount == 0:
        return reward
    returns = [0.0] * len(reward)
    running = 0.0
    for idx in range(len(reward) - 1, -1, -1):
        if done[idx]:
            running = 0.0
        running = reward[idx] + discount * running
        returns[idx] = running
    return returns
def extra_hour_bits(value):
    """
    practice getting extra bits out of the hours bytes

    Extracts bits 7, 6 and 5 (high to low) of the byte as a list of 0/1.

    >>> extra_hour_bits(0x28)
    [0, 0, 1]
    >>> extra_hour_bits(0x8)
    [0, 0, 0]
    """
    return [(value >> shift) & 1 for shift in (7, 6, 5)]
import math
def circleSDF(x, y, cx, cy, cr):
    """Signed distance from point (x, y) to the circle centred at (cx, cy)
    with radius cr:
        negative if (x, y) is inside the circle;
        positive if (x, y) is outside the circle;
        zero if (x, y) is on the circle
    """
    dx = x - cx
    dy = y - cy
    return math.sqrt(dx * dx + dy * dy) - cr
def setup_hook(hook_type):
    """
    Decorator factory: tag the decorated function with the hook(s) it is
    interested in. *hook_type* may be a single hook or a list of hooks; they
    are appended to the function's ``hooks`` attribute (created on first use).
    """
    def wrap(function):
        """Attach hook metadata and return the function unchanged."""
        hooks = getattr(function, 'hooks', None)
        if hooks is None:
            hooks = []
            function.hooks = hooks
        if isinstance(hook_type, list):
            hooks.extend(hook_type)
        else:
            hooks.append(hook_type)
        return function
    return wrap
from pathlib import Path
def get_file_path(path_end: str) -> str:
    """
    Resolve *path_end* against this test module's directory, so the result is
    correct whether the testcase runs directly or in a global test-run.
    """
    base = Path(__file__).parent.resolve()
    return str(base / path_end)
def get_str_clean(cp_section, key, default_value):
    """Fetch *key* from a ConfigParser section, stripping any run of leading
    and trailing double- then single-quote characters; falsy values become ""."""
    value = cp_section.get(key, default_value)
    if not value:
        return ""
    for quote in ('"', "'"):
        value = value.lstrip(quote)
    for quote in ('"', "'"):
        value = value.rstrip(quote)
    return value
import re
def get_spot_analysis(dated_lines, pattern_spot):
    """Return the dates of the entries whose data matches *pattern_spot*."""
    find = re.compile(pattern_spot).search
    return [when for when, data, _original_line in dated_lines if find(data)]
import torch
def kld_std_guss(mu, log_var):
    """
    KL divergence between N(mu, sigma^2) and the standard Gaussian, summed
    over the latent dimension (dim=1).

    From Appendix B of the VAE paper:
    Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    https://arxiv.org/abs/1312.6114
    KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    """
    inner = log_var + 1. - mu ** 2 - log_var.exp()
    return -0.5 * torch.sum(inner, dim=1)
import re
def get_scale_fs(timescale):
    """Convert sdf timescale to scale factor to femtoseconds as int
    >>> get_scale_fs('1.0 fs')
    1
    >>> get_scale_fs('1ps')
    1000
    >>> get_scale_fs('10 ns')
    10000000
    >>> get_scale_fs('10.0 us')
    10000000000
    >>> get_scale_fs('100.0ms')
    100000000000000
    >>> get_scale_fs('100 s')
    100000000000000000
    >>> try:
    ...     get_scale_fs('2s')
    ... except AssertionError as e:
    ...     print(e)
    Invalid SDF timescale 2s
    """
    # base must be 1, 10 or 100; the fractional '.0' part is ignored
    match = re.match(r'(10{0,2})(\.0)? *([munpf]?s)', timescale)
    assert match is not None, "Invalid SDF timescale {}".format(timescale)
    base, _frac, unit = match.groups()
    unit_to_fs = {
        's': 1e15,
        'ms': 1e12,
        'us': 1e9,
        'ns': 1e6,
        'ps': 1e3,
        'fs': 1,
    }
    return int(base) * int(unit_to_fs[unit])
def clumping_factor_HB(z, beta=2):
    """Clumping factor as a function of redshift used by Haiman & Bryan (2006).

    Computes 1 + 9 * ((1 + z) / 7) ** (-beta).
    See Haiman & Bryan (2006ApJ...650....7H).
    """
    scaled = (1 + z) / 7
    return 1 + 9 * scaled ** (-beta)
import hashlib
def get_string_id(string):
    """Return a short identifier for *string*: the last 7 hex characters of
    its UTF-8 SHA-256 digest."""
    digest = hashlib.sha256(string.encode('utf-8')).hexdigest()
    return digest[-7:]
import random
def random_select_i(items):
    """Return a uniformly random element of *items*.

    Used to randomly select a stem to add or remove from an assembled
    structure.
    """
    selected = random.choice(items)
    return selected
def copy_file(old_location: str, new_location: str) -> str:
    """
    copy the content of a file from one location to another.
    :param old_location: path to original file.
    :param new_location: path to new file.
    :return: new path location.
    """
    with open(old_location, 'r') as source, open(new_location, 'w') as target:
        target.write(source.read())
    return new_location
import csv
def get_usernames_from_csv(filename):
    """Return the first column of every non-comment, non-empty row.

    Lines starting with ``#`` are treated as comments and skipped; blank rows
    (on which the original code crashed with IndexError) are ignored.
    """
    with open(filename, 'r') as csvfile:
        csvreader = csv.reader(row for row in csvfile
                               if not row.startswith('#'))
        return [row[0] for row in csvreader if row]
import re
def sub_drive_names(drive):
    """Perform regex substitution actions on the drive name for better query results."""
    # Replace generic companies with real companies?  The final rule collapses
    # runs of whitespace (and " - " separators) to a single space.
    substitutions = (
        (r'JLMS', 'Lite-ON'),
        (r'HL-DT-ST', 'LG Electronics'),
        (r'Matshita', 'MATSHITA'),
        (r'TSSTcorp(BD|CD|DVD)', r'TSSTcorp \1'),
        (r'(\s+-\s|\s+)', ' '),
    )
    for pattern, replacement in substitutions:
        drive = re.sub(pattern, replacement, drive)
    return drive
def multis_2_mono(table):
    """
    Converts each multiline string in a table to single line (in place).

    Parameters
    ----------
    table : list of list of str
        A list of rows containing strings

    Returns
    -------
    table : list of lists of str
        The same table object with newlines replaced by spaces.
    """
    for row in table:
        for idx, cell in enumerate(row):
            row[idx] = cell.replace('\n', ' ')
    return table
from typing import Optional
import torch
def create_meshgrid(
    height: int,
    width: int,
    normalized_coordinates: bool = True,
    device: Optional[torch.device] = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> torch.Tensor:
    """Generates a coordinate grid for an image.
    When the flag `normalized_coordinates` is set to True, the grid is
    normalized to be in the range [-1,1] to be consistent with the pytorch
    function grid_sample.
    http://pytorch.org/docs/master/nn.html#torch.nn.functional.grid_sample
    Args:
        height (int): the image height (rows).
        width (int): the image width (cols).
        normalized_coordinates (bool): whether to normalize
            coordinates in the range [-1, 1] in order to be consistent with the
            PyTorch function grid_sample.
        device (torch.device): the device on which the grid will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated gird. Default: float32.
    Return:
        torch.Tensor: returns a grid tensor with shape :math:`(1, H, W, 2)`.
    """
    # pixel-center coordinates along each axis: 0..width-1 and 0..height-1
    xs: torch.Tensor = torch.linspace(0, width - 1, width, device=device, dtype=dtype)
    ys: torch.Tensor = torch.linspace(0, height - 1, height, device=device, dtype=dtype)
    if normalized_coordinates:
        # map [0, size-1] -> [-1, 1] to match grid_sample's convention
        xs = (xs / (width - 1) - 0.5) * 2
        ys = (ys / (height - 1) - 0.5) * 2
    # generate grid by stacking coordinates
    # NOTE(review): torch.meshgrid is called without indexing=...; this relies
    # on the legacy 'ij' default behaviour -- confirm against the installed
    # torch version, which warns/changes the default in newer releases.
    base_grid: torch.Tensor = torch.stack(torch.meshgrid([xs, ys])).transpose(1, 2)  # 2xHxW
    return torch.unsqueeze(base_grid, dim=0).permute(0, 2, 3, 1)  # 1xHxWx2, last dim is (x, y)
def ionization_efficiency(Z):
    """
    Calculate the ionization efficiency for an atom from the atomic number Z.

    The code computes $$\\frac{\\eta}{\\eta_{N_{2}}}=\\frac{0.4Z}{14}+0.6$$

    NOTE(review): an earlier docstring showed a denominator of 15 while the
    code divides by 14 -- confirm which constant is intended before relying
    on absolute values.
    """
    return (((0.4*Z)/14)+0.6)
def near_extremes(x, a, b, r):
    """Return True when x lies in [a, b] and within r of either endpoint."""
    assert a <= b
    if not (a <= x <= b):
        return False
    near_edge = (x - a < r) or (b - x < r)
    return True if near_edge else False
def split_nth(seq, separator, n):
    """
    Split sequence at the n-th occurrence of separator.

    Args:
        seq(str) : sequence to split
        separator(str): separator to split on
        n(int) : split at the n-th occurrence (1-based)

    Returns:
        tuple: (part before the n-th separator, part after it)

    Raises:
        ValueError: if fewer than n occurrences of *separator* exist.
    """
    pos = -1
    for _ in range(n):
        # Start searching one past the previous hit; starting from -1 makes
        # the first search begin at index 0 (the original started at 1 and
        # missed a separator sitting at position 0).
        pos = seq.index(separator, pos + 1)
    return seq[:pos], seq[pos + 1:]
def overlay(background_image, foreground_image, x_offset, y_offset):
    """
    Overlays a png image on another image using its alpha channel.
    :param background_image: OpenCv image to be overlaid with foreground image
    :param foreground_image: OpenCv image (4 channels, alpha last) to overlay
    :param x_offset: Position of the overlay in x direction
    :param y_offset: Position of the overlay in y direction
    :return: Image with overlay

    Example:
        s_img = cv2.imread("foreground.png", -1)
        l_img = cv2.imread("background.png")
        img = overlay(l_img, s_img, 50, 50)
        cv2.imshow("Overlay", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    """
    rows = slice(y_offset, y_offset + foreground_image.shape[0])
    cols = slice(x_offset, x_offset + foreground_image.shape[1])
    alpha_fg = foreground_image[:, :, 3] / 255.0
    alpha_bg = 1.0 - alpha_fg
    for channel in range(3):
        blended = (alpha_fg * foreground_image[:, :, channel] +
                   alpha_bg * background_image[rows, cols, channel])
        background_image[rows, cols, channel] = blended
    return background_image
import csv
def load_data(filename):
    """
    Load point-pair paths from a CSV file.

    Each row must provide name/lat/lng columns for two endpoints (suffixes
    ``_a`` and ``_b``); returns a list of ``[[name, lat, lng], [name, lat, lng]]``
    pairs.
    """
    paths = []
    with open(filename) as f:
        for row in csv.DictReader(f):
            endpoint_a = [row["name_a"], row["lat_a"], row["lng_a"]]
            endpoint_b = [row["name_b"], row["lat_b"], row["lng_b"]]
            paths.append([endpoint_a, endpoint_b])
    return paths
from ssl import _create_unverified_context
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
def url_is_alive(url: str) -> object:
    """
    Checks that a given URL is reachable.

    Returns the open response object on success, or False when the request
    fails with an HTTP or URL error (certificate verification is disabled).
    """
    try:
        response = urlopen(url, context=_create_unverified_context())
    except HTTPError:
        return False
    except URLError:
        return False
    return response
def invert_dict(d):
    """Return the inverse mapping of *d*: each value maps to the list of
    keys of *d* that held it (in insertion order)."""
    inverse = {}
    for key, val in d.items():
        inverse.setdefault(val, []).append(key)
    return inverse
from typing import List
def construct_default_hostnames_for_computer(computer_name: str, domain_dns_name: str) -> List[str]:
    """ Construct the default hostnames for a computer in AD: the short
    hostname is the computer name capitalized, and the fqdn is the lowercase
    computer name dot the domain.
    """
    short_name = computer_name.upper()
    fqdn = '{}.{}'.format(computer_name, domain_dns_name).lower()
    return [short_name, fqdn]
def pythonTypeToJSONType(value):
    """Convert Python data type to JSON data type name."""
    # Order matters: bool is tested before int because bool subclasses int,
    # and dict/list/str before the numeric fallback.
    type_checks = (
        (dict, 'object'),
        ((list, tuple), 'array'),
        (str, 'string'),
        (bool, 'boolean'),
        ((int, float), 'number'),
    )
    for candidate, json_name in type_checks:
        if isinstance(value, candidate):
            return json_name
    return 'null' if value is None else 'unknown'
def all_served(state, goal):
    """Return False if any passenger marked served in *goal* is still
    unserved in *state*; True otherwise."""
    for passenger, served in state.served.items():
        if not served and goal.served.get(passenger, False):
            return False
    return True
def get_class_from_testname(clazz: str) -> str:
    """
    Get the test class from the full test method name.
    :param clazz: e.g. org.apache.commons.lang3.text.translate.NumericEntityUnescaperTest::testSupplementaryUnescaping
    :return: e.g. org.apache.commons.lang3.text.translate.NumericEntityUnescaperTest

    If no '::' separator is present, the input is returned unchanged (the
    original implementation used rfind and silently chopped off the final
    character when rfind returned -1).
    """
    return clazz.rsplit("::", 1)[0]
import torch
def load_checkpoint(cpdir, model, optimizer, device=torch.device('cpu')):
    """Load model (and optionally optimizer) parameters from a checkpoint.

    Note:
        If optimizer is None (e.g. test, deploy, etc.), the optimizer state
        is not loaded.
        The checkpoint is expected to be a dict containing the keys
        'model_state_dict', 'optimizer_state_dict', 'epoch' and 'global_step'.

    Args:
        cpdir (str): path to the checkpoint.
        model: the model to load the parameters to.
        optimizer: the optimizer to load parameters to, or None.
        device: map_location passed to torch.load.

    Returns:
        tuple: (start_global_step, start_epoch) read from the checkpoint.
    """
    state = torch.load(cpdir, map_location=device)
    model.load_state_dict(state['model_state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(state['optimizer_state_dict'])
    return state['global_step'], state['epoch']
def get_bot_scores_id_from_parts(problem_id, username, botname):
    """
    Build a bot-scores document id.
    :return: e.g. 'crizcraig#goodbot-on-deepdrive#unprotected_left'

    Forward slashes are not allowed on firestore
    (https://stackoverflow.com/a/54918283/134077), so they are replaced
    with '#'.
    """
    safe_problem = problem_id.replace('/', '#')
    return '{}#{}-on-{}'.format(username, botname, safe_problem)
def check_list(in_lst, dtype=str):
    """Ensure the input is a list whose items are of *dtype*.

    A bare value (or tuple) of acceptable type is wrapped into a one-element
    list; AssertionError is raised for wrong types.
    """
    assert isinstance(in_lst, (list, dtype, tuple))
    if not isinstance(in_lst, list):
        return [in_lst]
    for item in in_lst:
        assert isinstance(item, dtype)
    return in_lst
def findOptimalDivisor(number: int, target_quotient: int):
    """Return the quotient of *number* by the divisor whose quotient is
    closest to *target_quotient*.

    Parameters
    ----------
    number : int
        The number that will be divided by the optimal divisor.
    target_quotient : int
        The quotient that the result of the division should approximate.

    Returns
    -------
    The result of the optimal division (a float). Divisors range over
    1..number-1; on ties the larger quotient (smaller divisor) wins.
    """
    quotients = [number / d for d in range(1, number) if number % d == 0]
    return min(quotients, key=lambda q: abs(q - target_quotient))
def split_and_strip(val_str, sep=","):
    """
    Split *val_str* by *sep*, strip surrounding whitespace from each item and
    drop empty items.
    :param val_str: string
    :param sep: split separator
    """
    stripped = (piece.strip() for piece in val_str.split(sep))
    return [piece for piece in stripped if piece]
def mean(iterable):
    """Return the arithmetic mean of the given list or generator.

    Raises ZeroDivisionError when the iterable is empty.
    """
    total = 0.0
    count = 0
    for value in iterable:
        total += value
        count += 1
    return total / count
import pathlib
import json
def json_data(request, name: str):
    """Load and parse the JSON fixture *name* from the test's ``fixtures``
    directory (relative to ``request.fspath``)."""
    fixture = pathlib.Path(request.fspath, "fixtures", name)
    return json.loads(fixture.read_text())
import re
def tel_is_valid(tel: str) -> bool:
    """
    Check if the given telephone number is valid: an optional '+' followed by
    11-14 digits once all whitespace is removed.
    :param tel: Telephone number as a string.
    :return: True if telephone number is valid, False otherwise.
    """
    compact = ''.join(tel.split())
    return re.fullmatch(r"\+?\d{11,14}", compact) is not None
from typing import Dict
def save_epoch_logs(epoch_logs: Dict, loss: float, score: Dict, stage: str):
    """
    Record the loss and metric scores of one epoch into *epoch_logs*; keeps
    the Trainer's fit loop readable and free of repetition.

    Parameters
    ----------
    epoch_logs: Dict
        Accumulating log dictionary (mutated and returned).
    loss: float
        Loss value, stored under '<stage>_loss'.
    score: Dict
        Metric name -> value pairs, stored under '<stage>_<metric>'; may be
        None, in which case only the loss is recorded.
    stage: str
        one of 'train' or 'val'
    """
    epoch_logs["{}_loss".format(stage)] = loss
    if score is not None:
        for metric, value in score.items():
            epoch_logs["{}_{}".format(stage, metric)] = value
    return epoch_logs
def dms_to_deg(degrees: int, minutes: int, seconds: float) -> float:
    """Convert degrees/minutes/seconds to decimal degrees.

    Args:
        degrees (int): Number of degrees
        minutes (int): Number of minutes
        seconds (float): Number of seconds

    Returns:
        float: The degrees of latitude or longitude in floating point format
    """
    # fold seconds into minutes, then minutes into degrees
    total_minutes = float(minutes) + seconds / 60
    return degrees + total_minutes / 60
def valid_test_filter(_, __, event_dict):
    """ Test processor matching Structlog's (logger, method_name, event_dict)
    signature. When processor ingestion is correct, the corresponding log
    response will have 'is_valid=True' in the returned event dictionary.

    Args:
        event_dict (dict): Logging metadata accumulated
    Returns:
        Updated event metadata (dict)
    """
    event_dict.update(is_valid=True)
    return event_dict
def replace_ha_services(services, options):
    """Replace services with their HA counterparts.

    Given a list of services and options, return a new list where services
    enabled for HA ('clouddriver', 'echo') are expanded into their HA service
    sets; everything else is passed through unchanged.
    """
    ha_expansions = {}
    if options.ha_clouddriver_enabled:
        ha_expansions['clouddriver'] = [
            'clouddriver-caching', 'clouddriver-rw', 'clouddriver-ro', 'clouddriver-ro-deck']
    if options.ha_echo_enabled:
        ha_expansions['echo'] = ['echo-scheduler', 'echo-worker']
    result = []
    for service in services:
        result.extend(ha_expansions.get(service, [service]))
    return result
import torch
def batch_viewpoint_params_to_matrix(batch_towards, batch_angle):
    """Transform approach vectors and in-plane rotation angles to rotation
    matrices.
    Input:
        batch_towards: [torch.FloatTensor, (N,3)]
            approach vectors in batch
        batch_angle: [torch.floatTensor, (N,)]
            in-plane rotation angles in batch
    Output:
        batch_matrix: [torch.floatTensor, (N,3,3)]
            rotation matrices in batch
    """
    # Build an orthonormal frame per approach vector: x is the approach
    # direction, y is perpendicular to x in the xy-plane, z = x cross y.
    axis_x = batch_towards
    ones = torch.ones(axis_x.shape[0],
                      dtype=axis_x.dtype,
                      device=axis_x.device)
    zeros = torch.zeros(axis_x.shape[0],
                        dtype=axis_x.dtype,
                        device=axis_x.device)
    axis_y = torch.stack([-axis_x[:, 1], axis_x[:, 0], zeros], dim=-1)
    # Degenerate case: when axis_x points along z the computed y is the zero
    # vector; fall back to the unit y axis for those rows.
    mask_y = (torch.norm(axis_y, dim=-1) == 0)
    axis_y[mask_y, 1] = 1
    axis_x = axis_x / torch.norm(axis_x, dim=-1, keepdim=True)
    axis_y = axis_y / torch.norm(axis_y, dim=-1, keepdim=True)
    axis_z = torch.cross(axis_x, axis_y)
    sin = torch.sin(batch_angle)
    cos = torch.cos(batch_angle)
    # R1: in-plane rotation about the x axis by batch_angle, rows flattened
    # then reshaped to (N, 3, 3).
    R1 = torch.stack([ones, zeros, zeros, zeros, cos, -sin, zeros, sin, cos],
                     dim=-1)
    R1 = R1.reshape([-1, 3, 3])
    # R2: frame whose columns are (axis_x, axis_y, axis_z); final rotation
    # applies the in-plane spin inside that frame.
    R2 = torch.stack([axis_x, axis_y, axis_z], dim=-1)
    batch_matrix = torch.matmul(R2, R1)
    return batch_matrix
def flatten_list(original_list):
    """ Recursively flatten a list that may contain nested lists of elements
    of any type. For example: ['a', [1, 'b']] is flattened to ['a', 1, 'b'].
    Raises TypeError when the argument is not a list.
    """
    if not isinstance(original_list, list):
        raise TypeError('flatten_list() must be passed a list!')
    flat = []
    for item in original_list:
        if isinstance(item, list):
            flat += flatten_list(item)
        else:
            flat.append(item)
    return flat
def get_bigram_scores(text_words, min_bgfreq=2.):
    """
    Compute scores for identifying bigrams in a collection of texts.

    Unigram and bigram (adjacent word-pair) frequencies are counted and every
    bigram receives a score; bigrams whose score exceeds a threshold can then
    be merged into phrases in the raw texts. Once the word lists are replaced
    by the merged bigrams, the whole procedure can be repeated to find
    trigrams, etc.

    Input:
        - text_words: a list (or generator) of lists of (preprocessed) words
        - min_bgfreq: corpus frequency a bigram must STRICTLY exceed to be
          scored (if both words occur just once and only in combination, that
          is too little evidence of a true phrase)
    Returns:
        - bigram_scores: a dict with bigrams ("word1 word2") and their score
    """
    unigram_freq = {}
    bigram_freq = {}
    for wordlist in text_words:
        for i, word in enumerate(wordlist):
            # count unigrams and bigrams (dict.get avoids the try/except dance)
            unigram_freq[word] = unigram_freq.get(word, 0.) + 1.
            if i:
                bigram = "%s %s" % (wordlist[i - 1], word)
                bigram_freq[bigram] = bigram_freq.get(bigram, 0.) + 1.
    # compute bigram scores
    bigram_scores = {}
    for bigram, freq in bigram_freq.items():
        # discount to ensure a word combination occurred a sufficient amount of times
        if freq - min_bgfreq > 0.:
            parts = bigram.split()
            bigram_scores[bigram] = freq / \
                max(unigram_freq[parts[0]], unigram_freq[parts[1]])
    return bigram_scores
import re
def simplify_tags(iob):
    """
    Simplify dataset tags to follow the Wikipedia scheme (PER, LOC, ORG,
    MISC). 'PER', 'LOC' and 'ORG' keep their tags, 'GPE_LOC' is simplified to
    'LOC', 'GPE_ORG' to 'ORG' and all remaining suffixes to 'MISC'. Tags not
    shaped like 'PREFIX-SUFFIX' pass through unchanged.
    """
    remap = {'GPE_LOC': 'LOC', 'GPE_ORG': 'ORG'}
    keep = {'PER', 'LOC', 'ORG'}
    simplified = []
    for tag in iob:
        m = re.match('([A-Z_]+)-([A-Z_]+)', tag)
        if m:
            prefix, suffix = m.groups()
            suffix = remap.get(suffix, suffix if suffix in keep else 'MISC')
            tag = prefix + '-' + suffix
        simplified.append(tag)
    return simplified
def _check_arg(arg):
"""Checks if the argument is True bool or string meaning True"""
true_strings = ('True', 'true', 'T', 't', '.true.', '.True.')
if arg is None:
return 'n'
else:
if isinstance(arg, bool):
if arg:
return 'y'
else:
return 'n'
if isinstance(arg, str):
if arg in true_strings:
return 'y'
else:
return 'n'
return 'add' | adda26d9a06f8aa9eb74009d08a603f2dae460b5 | 123,975 |
def format_climatology(param, clm, sensor_range, site, node, sensor, stream):
    """
    Creates a dictionary object that can later be saved to a CSV formatted
    file for use in the Climatology lookup tables.

    :param param: parameter name of the variable for the calculated climatology
    :param clm: results of the climatology test, used to create the table
        (NOTE(review): assumed to expose .regression['variance_explained'],
        .monthly_fit and .monthly_std -- confirm against the producing class)
    :param sensor_range: list of vendor defined ranges for valid data,
        [min, max]
    :param site: Site designator, extracted from the first part of the reference
        designator
    :param node: Node designator, extracted from the second part of the reference
        designator
    :param sensor: Sensor designator, extracted from the third and fourth part of
        the reference designator
    :param stream: Stream name that contains the data of interest
    :return qc_dict: dictionary with the sensor and user gross range values
        added in the formatting expected by the QC lookup, plus the CSV-style
        climatology table string
    """
    # create the lookup dictionary
    var_explained = clm.regression['variance_explained']
    qc_dict = {
        'subsite': site,
        'node': node,
        'sensor': sensor,
        'stream': stream,
        'parameters': {'inp': param, 'tinp': 'time', 'zinp': 'None'},
        'climatologyTable': 'climatology_tables/{}-{}-{}-{}.csv'.format(site, node, sensor, param),
        'source': 'The variance explained by the climatological model is {:.1%}.'.format(var_explained[0])
    }
    # create the climatology table: one "[month, month]" header per monthly
    # fit value, with a leading "[0, 0]" placeholder column in the value row
    header_str = ''
    value_str = '[0, 0]'
    for idx, mu in enumerate(clm.monthly_fit):
        # use the index number to create the header row
        header_str += ',[{}, {}]'.format(idx+1, idx+1)
        # calculate the climatological ranges as mean +/- 3 std, clamped to
        # the vendor-defined sensor range
        cmin = mu - clm.monthly_std.values[idx] * 3
        if cmin < sensor_range[0]:
            cmin = sensor_range[0]
        cmax = mu + clm.monthly_std.values[idx] * 3
        if cmax > sensor_range[1]:
            cmax = sensor_range[1]
        # append the data to ranges
        value_str += ',[{:.2f}, {:.2f}]'.format(cmin, cmax)
    clm_table = header_str + '\n' + value_str
    return qc_dict, clm_table
def by_crlf(*parts):
    """Join *parts* with CRLF line endings - DOS style."""
    return "\r\n".join(parts)
def parse_dependency(value):
    """
    Basic support for version expression. Right now it just parses
        mypackage==1.0.0 -> ('mypackage', '1.0.0')
        mypackage -> ('mypackage', None)
    and raises for anything containing more than one '=='.
    """
    name, sep, version = value.partition('==')
    if not sep:
        # no version specified
        return (name, None)
    if '==' in version:
        raise Exception('Invalid package expression: "%s"' % value)
    return (name, version)
def make_lua_table(obj):
    """
    Print the fingerprints of *obj* as a Lua table named ``sqlifingerprints``.
    Fingerprints don't contain any special chars so they don't need to be
    escaped; the output may be sorted but it is not required. Returns 0.
    """
    print("sqlifingerprints = {")
    for fingerprint in obj[u'fingerprints']:
        print(' ["{0}"]=true,'.format(fingerprint))
    print("}")
    return 0
def consistency_index(sel1, sel2, num_features):
    """ Compute Kuncheva's consistency index between two sets of features.

    Parameters
    ----------
    sel1: set
        First set of indices of selected features
    sel2: set
        Second set of indices of selected features
    num_features: int
        Total number of features

    Returns
    -------
    cidx: float
        Consistency index between the two sets; -1. when the expected overlap
        equals the maximum possible one (the trivial "take everything" /
        "take nothing" solutions we don't want to select).

    Reference
    ---------
    Kuncheva, L.I. (2007). A Stability Index for Feature Selection.
    AIAC, pp. 390--395.
    """
    overlap = float(len(sel1 & sel2))
    expected = len(sel1) * len(sel2) / float(num_features)
    max_possible = float(min(len(sel1), len(sel2)))
    if expected == max_possible:
        return -1.
    return (overlap - expected) / (max_possible - expected)
def convertType(type):
    """
    Converts given protobuf type in Swift type; unknown types are returned
    unchanged.
    """
    mapper = { "google.protobuf.Empty": "SwiftProtobuf.Google_Protobuf_Empty",
        "string": "String", "double": "Double", "float": "Float", "int32": "Int32",
        "uint32": "UInt32", "uint64": "UInt64", "sint32": "Int32", "sint64": "Int64",
        "fixed32": "UInt32", "fixed64": "UInt64", "sfixed32": "Int32", "sfixed64": "Int64",
        "bool": "Bool", "bytes": "Data"}
    return mapper.get(type, type)
def mean_color(arr):
    """ Returns [R,G,B], mean color of the WxHx3 numpy image ``arr``;
    computed as the mean over axis 0 then axis 0 again (height then width). """
    per_row = arr.mean(axis=0)
    return per_row.mean(axis=0)
def part_day(x):
    """ Return the part of day (1-6) based on the timestamp's hour.

    Buckets: (4, 8] -> 1, (8, 12] -> 2, (12, 14] -> 3,
    (14, 18] -> 4, (18, 22] -> 5, everything else -> 6.

    :param x: object with an ``hour`` attribute (e.g. datetime)
    """
    hour = x.hour
    if 4 < hour <= 8:
        return 1
    elif 8 < hour <= 12:
        return 2
    elif 12 < hour <= 14:
        return 3
    elif 14 < hour <= 18:
        return 4
    elif 18 < hour <= 22:
        return 5
    else:
        return 6
def transform_triples(triples, relation_types, entities):
    """
    Groups a list of relation triples by their relations and returns a suitable data structure.

    Args:
        triples (list): List of relation triples as (head, relation, tail) tuples.
        relation_types (dict): Dictionary with relations as key and the amount of triples with this relation as value.
        entities (set): Set of unique entities; mutated in place with the entities seen in *triples*.

    Returns:
        tuple: Dictionary with relation as key and a list of (head, tail) entity tuples as value,
        and the augmented set of unique entities.
    """
    grouped_triples = {key: [] for key in range(len(relation_types))}
    for head, relation, tail in triples:
        entities.add(head)
        entities.add(tail)
        grouped_triples[relation].append((head, tail))
    return grouped_triples, entities
def get_user_id(slackc, user_name):
    """ Get a Slack user ID from a user name.

    :param slackc: Slack client exposing ``api_call``
    :param user_name: user name to look up
    :return: the user's ID, or None when not found or the API call failed
    """
    api_call = slackc.api_call('users.list')
    if api_call.get('ok'):
        for user in api_call.get('members'):
            # Guard on the key so users without a 'name' entry are skipped.
            if 'name' in user and user.get('name') == user_name:
                return user.get('id')
    return None
def mols_are_equal(mol1, mol2):
    """
    Computes if two molecules are equal.

    Parameters
    ----------
    mol1: rdkit.Chem.Mol
        RDKit molecule.
    mol2: rdkit.Chem.Mol
        RDKit molecule.

    Returns
    -------
    bool
        Boolean whether molecules are equal.
    """
    # Two molecules are identical iff each is a substructure of the other.
    return bool(mol1.HasSubstructMatch(mol2) and mol2.HasSubstructMatch(mol1))
import torch
def _fix_empty_tensors(boxes: torch.Tensor) -> torch.Tensor:
"""Empty tensors can cause problems in DDP mode, this methods corrects them."""
if boxes.numel() == 0 and boxes.ndim == 1:
return boxes.unsqueeze(0)
return boxes | cefa905c69795dee00cdf93a97583229cbc5b2c2 | 124,006 |
def get_num_days_between(day1: int, day2: int) -> int:
    """Return the number of days between two days given as weekday numbers.

    Wraps around the end of the week when ``day1`` comes after ``day2``.
    """
    one_week = 7
    return day2 - day1 if day1 <= day2 else day2 + one_week - day1
import re
def get_font_family_names(css_data):
    """ Return the single-quoted font family names found in the CSS text. """
    # Raw string so the regex escapes are explicit; non-greedy match keeps
    # each name within its own pair of quotes.
    return re.findall(r"font-family: '(.*?)'", css_data)
def get_fill(card):
    """Returns the card's fill

    Args:
        card (webelement): a visible card

    Returns:
        str: card's fill — "clear", "half" or "solid"
    """
    # Look the <use> element up once instead of re-querying for each attribute.
    use_el = card.find_element_by_xpath(".//div/*[name()='svg']/*[name()='use'][1]")
    if use_el.get_attribute("fill") == "transparent":
        return "clear"
    # A stripe mask means the shape is only half filled.
    if use_el.get_attribute("mask") == "url(#mask-stripe)":
        return "half"
    return "solid"
def split_comma(s, func=None):
    """Splits a string at commas and strips whitespace from each piece.

    :param s: string to split; falsy values (None, "") yield []
    :param func: optional callable applied to each stripped piece
    :return: list of stripped (and optionally converted) pieces
    """
    if not s:
        return []
    stripped = (part.strip() for part in s.split(","))
    if func is None:
        return list(stripped)
    return [func(item) for item in stripped]
import math
def angleFromString(angleStr):
    """Convert an angle string to a float angle in radians.

    Supported suffixes: ``deg``, ``grad``, ``rad`` and ``turn``.
    A bare number is interpreted as degrees.
    """
    if len(angleStr) > 3 and angleStr[-3:] == 'deg':
        return math.radians(float(angleStr[:-3]))
    elif len(angleStr) > 4 and angleStr[-4:] == 'grad':
        # 400 gradians per full turn -> pi/200 radians per gradian.
        return float(angleStr[:-4]) * math.pi / 200.0
    elif len(angleStr) > 3 and angleStr[-3:] == 'rad':
        return float(angleStr[:-3])
    elif len(angleStr) > 4 and angleStr[-4:] == 'turn':
        return float(angleStr[:-4]) * 2.0 * math.pi
    # No recognised suffix: assume degrees.
    return math.radians(float(angleStr))
def _remove_id_and_empty(dct: dict) -> dict:
"""
In serialization (convert object to dict data), a common case is we ignore
the id field and those key-valur pair having None value or empty collection
object. This helper function does that.
"""
new_dct = dict()
for k, v in dct.items():
if k == "id":
continue
if isinstance(v, (list, dict)) and len(v) == 0:
continue
if v is None:
continue
new_dct[k] = v
return new_dct | 6bd86a587977b7646eccacbf215e5a6b2dd4726f | 124,025 |
import torch
def hard_dice(input_, target, threshold=0.5, reduction='mean', epsilon=1e-8):
    """
    Hard dice score coefficient after thresholding.

    Arguments:
        input_ (torch tensor): raw probability outputs
        target (torch tensor): ground truth; must be binary (all 0/1 or all 0)
        threshold (float): threshold value, default: 0.5
        reduction (string): one of 'none', 'mean' or 'sum'
        epsilon (float): epsilon for numerical stability, default: 1e-8

    Returns:
        dice (torch tensor): hard dice score coefficient

    Raises:
        ValueError: when shapes mismatch or the target is not binary.
        NotImplementedError: for an unknown reduction mode.
    """
    if not input_.shape == target.shape:
        raise ValueError
    # if not (input_.max() <= 1.0 and input_.min() >= 0.0):
    #     raise ValueError
    # Target must be strictly binary: either {0, 1} or all zeros.
    if not ((target.max() == 1.0 and target.min() == 0.0 and (target.unique().numel() == 2))
            or (target.max() == 0.0 and target.min() == 0.0 and (target.unique().numel() == 1))):
        raise ValueError
    # Binarize a copy of the predictions at the threshold.
    input_threshed = input_.clone()
    input_threshed[input_ < threshold] = 0.0
    input_threshed[input_ >= threshold] = 1.0
    intersection = torch.sum(input_threshed * target, dim=-1)
    input_norm = torch.sum(input_threshed, dim=-1)
    target_norm = torch.sum(target, dim=-1)
    dice = torch.div(2.0 * intersection + epsilon,
                     input_norm + target_norm + epsilon)
    if reduction == 'none':
        pass
    elif reduction == 'mean':
        dice = torch.mean(dice)
    elif reduction == 'sum':
        dice = torch.sum(dice)
    else:
        raise NotImplementedError
    return dice
def lminus(L1, L2):
    """Language subtraction of two languages (sets of strings).

    Can do it as L1.difference(L2) also.
    """
    return L1 - L2
def checksum(key, data, errors, context):
    """
    Validates the `'checksum'` values of the CKAN package. A checksum is
    considered valid under the following conditions:

    - when either `hash` or `hash_algorithm` is present, the other must too be
      present
    - all given `'hash'` properties which are present are also valid

    :param Any key: Injected by CKAN core
    :param dict[Any, Any] data: Injected by CKAN core
    :param dict[Any, Any] errors: Injected by CKAN core
    :param dict[Any, Any] context: Injected by CKAN core
    :rtype: None
    """
    hash_tuple = ('resources', key[1], 'hash')
    # An empty hash counts as "not provided": drop it (and any stale errors
    # recorded for it) before checking the pairing rule below.
    if hash_tuple in data and data[hash_tuple] == '':
        data.pop(hash_tuple, None)
        errors.pop(hash_tuple, None)
    properties = [hash_tuple, ('resources', key[1], 'hash_algorithm')]
    # BUG FIX: this used to be a generator; any() exhausted it, so the
    # subsequent all() always iterated an empty iterator and returned True,
    # silently skipping the "both must be present" validation.
    properties_present = [prop in data for prop in properties]
    message_format = '{0} is required when providing a valid checksum'
    if not any(properties_present):
        return key, data, errors, context
    if not all(properties_present):
        for prop in properties:
            errors[prop].append(message_format.format(prop[2]))
    return key, data, errors, context
def make_raster_list_for_mosaic(csv_data):
    """Extract the raster paths (first column) from rows of CSV data.

    Rows whose first field is ``None`` are skipped.

    :param csv_data: iterable of row sequences; index 0 holds the raster path
    :return: list of raster paths in row order
    """
    raster_path_index = 0
    return [record[raster_path_index] for record in csv_data
            if record[raster_path_index] is not None]
import re
def format_row(row):
    """Parse one tab-separated row of a relevance file.

    Args:
        row (string): The row of the file, formatted as
            ``<relevance>\\t<query>\\t<document>``.

    Returns:
        dict: Dictionary with the following attributes, or None when the
        row does not have exactly three tab-separated fields:

            - query (string): The query.
            - document (string): The document.
            - relevance (integer): Binarized relevance label (1 if > 0 else 0).
    """
    splitted_values = re.split(r"\t+", row)
    if len(splitted_values) != 3:
        return None
    rel, query, document = splitted_values
    return {
        "query": query.strip(),
        "document": document.strip(),
        "relevance": 1 if int(rel.strip()) > 0 else 0,
    }
def _get_order(plan):
"""
For observations scheduled in plan, get the observation indices in the order they appear, and return the
array indices of schedule period boundaries observation.
Example
-------
>>> plan = [2, 2, 2, 2, 1, 1, 1, 1, 5, 5, 4, 4, 4, 4]
>>> ind_order, i_start, i_end = _get_order(plan)
>>> print(ind_order)
>>> print(i_start)
>>> print(i_end)
[2, 1, 5, 4]
[0, 4, 8, 10]
[3, 7, 9, 13]
Parameters
----------
plan : numpy integer array
Observation indices throughout night.
Returns
-------
order : list of ints
order that indices appear in plan.
i_start : list of ints
indices of time block beginnings corresponding to plan.
i_end : list of ints
indices of time block endings corresponding to plan.
"""
ind_order = [plan[0]]
i_start = [0]
i_end = []
for i in range(1, len(plan)):
prev = plan[i-1]
if plan[i] != prev:
ind_order.append(plan[i])
i_end.append(i-1)
i_start.append(i)
if i == len(plan)-1:
i_end.append(i)
return ind_order, i_start, i_end | 7e70d4d64cfd20529f3efe702214de9860a34b54 | 124,039 |
import re
import logging
def sanitize(name):
    """Clean a proposed hostname so it is LocalHostName compliant.

    Every character outside [0-9A-Za-z-] is replaced with a hyphen;
    a warning is logged when the name had to be changed.

    :param name: Name to sanitize.
    :return: Sanitized name.

    CLI Example::

        salt '*' hostname.sanitize 2L33T_4_u_
    """
    new_name = re.sub(r'[^0-9A-Z-]', '-', name, flags=re.IGNORECASE)
    if new_name != name:
        logging.warning("Hostname was sanitized from '%s' to '%s'.", name, new_name)
    return new_name
def verify_probability(probability):
    """ Return True when *probability* lies in the closed interval [0, 1]. """
    return 0 <= probability <= 1
def veckmul(vec1, k):
    """Multiply a vector (list of numbers) by the scalar *k*, returning a new list."""
    return [component * k for component in vec1]
def preprocess_line(line: str) -> str:
    """ Strip surrounding whitespace and remove any remaining newline characters. """
    return line.strip().replace("\n", "")
def zero(t):
    """Default current function: always return 0.0 regardless of *t*."""
    return 0.0
def decorator(func):
    """
    Decorator for decorator functions. Allows for syntax:
    ```
    @decorator
    def foo(func, kwarg1=None):
        pass
    ```
    which works both when a parameter list is given when using the decorator foo or not.
    ```
    @foo
    def bar():
        pass

    @foo(5)
    def bar():
        pass
    ```
    """
    def wrapper(*args, **kwargs):
        # Bare usage (@foo): the decorated function is the single argument.
        if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
            return func(*args, **kwargs)
        # Parametrized usage (@foo(...)): return a decorator that closes
        # over the given parameters.
        def parametrized(f):
            return func(f, *args, **kwargs)
        return parametrized
    return wrapper
from typing import Callable
def update_array_generic(issue: dict, update: dict, field: str, function: Callable):
    """
    Updates an array field based on a function which retrieves the full
    data type.

    :param issue: the issue which will be updated
    :param update: the historical update, with 'from' and 'to' values
    :param field: name of the field to update
    :param function: callable(project_key, value) retrieving the full data type
    :return: the updated list of items for the field
    """
    _current = issue['fields'][field]
    _project = issue['fields']['project']['key']
    _from = function(_project, update['from'])
    _to = function(_project, update['to'])
    if not _from:
        # Nothing to add back: remove the 'to' item from the current list.
        return [item for item in _current if item['id'] != _to['id']]
    # NOTE(review): appends in place and returns the same list object.
    _current.append(_from)
    return _current
from datetime import datetime
def get_latest_year() -> int:
    """
    Returns the last year if we are not in December yet, otherwise
    returns the current year.
    """
    today = datetime.now()
    if today.month < 12:
        return today.year - 1
    return today.year
def sort_parts(wf):
    """Sort jobs (by id) and each job's uses (by lfn) to make tests deterministic.

    Mutates *wf* in place and also returns it.
    """
    for job in wf["jobs"]:
        job["uses"].sort(key=lambda use: use["lfn"])
    wf["jobs"].sort(key=lambda job: job["id"])
    return wf
def signature(part):
    """ Return the signature (func, args, keywords, __dict__) of a partial object. """
    return (part.func, part.args, part.keywords, part.__dict__)
def obter_freq_alimentacao(ani):
    """ obter_freq_alimentacao: animal --> int
    Receive an animal and return the integer feeding frequency stored in it.
    """
    # The feeding frequency is stored at index 1 of the 'a' entry.
    return ani['a'][1]
def transform_wiki_url(file_name: str) -> str:
    """
    Transforms an attachment file name into the original wiki URL.

    The wiki encodes the file name as upper-case hex of its UTF-8 bytes.

    :param file_name: name of the file
    :return: file url

    >>> transform_wiki_url('1040017000.png')
    'http://gbf-wiki.com/attach2/696D67_313034303031373030302E706E67.png'
    >>> b'img'.hex().upper()
    '696D67'
    """
    url = r'http://gbf-wiki.com/attach2/696D67_{}.png'
    return url.format(file_name.encode('utf-8').hex().upper())
def s_star_index_node(i):
    """
    Given an index in the sequence, get the name of the node corresponding
    to this index in s_star.

    :param i: Index in the sequence.
    :return: Name of node corresponding to this index in s_star.

    **Example:** ::

        s_star_index_node(3) = 's_star_3'
    """
    return 's_star_{}'.format(i)
def ReadContent(infile):#{{{
    """
    Read the whole file and strip all front and back whitespace.
    Return "" if the read failed.
    """
    try:
        # Context manager ensures the handle is closed (the previous
        # version leaked the file object returned by open()).
        with open(infile, "r") as fp:
            return fp.read().strip()
    except IOError:
        return ""
def get_circle_points(xy, radius):
    """ Returns tuples of (x0, y0), (x1, y1) for a circle centered at x, y with radius

    Arguments:
        xy: tuple of x, y coordinates of the circle center
        radius: radius of circle to draw

    Returns:
        [(x0, y0), (x1, y1)] for the bounding box of the circle centered at x, y
    """
    x, y = xy
    top_left = (x - radius, y - radius)
    bottom_right = (x + radius, y + radius)
    return [top_left, bottom_right]
def get_accuracy(pred_list : list, target_list : list) -> float:
    """
    Get prediction's accuracy
    =========================

    Parameters
    ----------
    pred_list : list
        List of predictions.
    target_list : list
        List of targets.

    Returns
    -------
    float
        The rate of good predictions; 0.0 for empty input (the previous
        version raised ZeroDivisionError).
    """
    if not pred_list:
        return 0.0
    good_count = sum(1 for pred, target in zip(pred_list, target_list)
                     if pred == target)
    return good_count / len(pred_list)
def split_chrom_name(s):
    """
    Split chromosome name for comparison and sorting

    Splits a chromosome name into a list, to be
    used in comparison and sorting operations.

    For example:

    >>> split_chrom_name("chr1")
    ... [1]
    >>> split_chrom_name("chr17_gl000203_random")
    ... [17,"gl000203","random"]

    Arguments:
      s (str): chromosome name to split

    Returns:
      List: list representation of the name.
    """
    parts = s.split('_')
    # Drop the leading "chr" prefix from the first component, if present.
    if parts[0].startswith('chr'):
        parts[0] = parts[0][3:]
    # Use a numeric first component when possible so "chr2" sorts before "chr10".
    try:
        parts[0] = int(parts[0])
    except ValueError:
        pass
    return parts
def clean_antibody_name(name):
    """Get clean antibody name from FASTA identifier

    - Remove chain type suffix such as "_VH" or "_VL"
    """
    for suffix in ['_VH', '_VL', '_HC', '_LC']:
        if name.endswith(suffix):
            name = name[:-len(suffix)]
    return name
def contains(main_str: str, sub_str: str) -> bool:
    """Return True when *sub_str* occurs inside *main_str*.

    >>> "a" in "abc"
    True
    >>> "bc" in "abc"
    True
    >>> "ac" in "abc"
    False
    >>> "" in "abc"
    True
    >>> "" in ""
    True
    """
    return sub_str in main_str
def mirc_format(s):
    """
    Replaces mIRC codes (for example ^K for Ctrl+K for colors) with the
    corresponding control characters.
    """
    s = s.replace("^B", chr(0x02))  # bold
    s = s.replace("^K", chr(0x03))  # color
    s = s.replace("^R", chr(0x0f))
    return s
def env_info(env, brain):
    """Prints and returns environment information

    Params
    ======
        env : unity environment object
        brain : unity brain object

    Returns
    =======
        tuple: (state_size, action_size)
    """
    # reset the environment (train mode) and grab this brain's info;
    # use a distinct local name so the function itself is not shadowed
    brain_info = env.reset(train_mode=True)[brain.brain_name]
    # number of agents in the environment
    print('Number of agents:', len(brain_info.agents))
    # number of actions
    action_size = brain.vector_action_space_size
    print('Number of actions:', action_size)
    # examine the state space
    state = brain_info.vector_observations[0]
    print('States look like:', state)
    state_size = len(state)
    print('States have length:', state_size)
    return state_size, action_size
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.