| content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
from hashlib import md5
def gravatar_url(email, size=16):
"""Return the gravatar image for the given email address."""
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
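# A minimal usage sketch: the address is normalized (stripped, lower-cased) before
# hashing, so differently formatted spellings of the same address give the same URL.
if __name__ == "__main__":
    print(gravatar_url("  User@Example.COM ", size=32))
    # e.g. http://www.gravatar.com/avatar/<md5 hex of "user@example.com">?d=identicon&s=32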
|
d3e24e1898d41df791368e7909461135c8118f90
| 3,646,000
|
def x2bin(v):
"""
convert a value into a binary string
v: int, bytes, bytearray
bytes, bytearray must be in *big* endian.
"""
if isinstance(v, int):
bits = bin(v)
size = 8
elif isinstance(v, (bytes,bytearray)):
bits = bin(int.from_bytes(v, "big"))
size = len(v)*8
    else:
        raise TypeError("v must be an int, bytes or bytearray")
    return bits[2:].zfill(size)
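# A minimal usage sketch: integers are padded to at least 8 bits,
# byte strings to 8 bits per input byte.
if __name__ == "__main__":
    assert x2bin(5) == "00000101"
    assert x2bin(b"\x01\x02") == "0000000100000010"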
|
fcb4f1ab05b5a3878939c84074e70fc2d5ee6397
| 3,646,001
|
import re
def normalize_url(url):
"""Function to normalize the url. It will be used as document id value.
Returns:
the normalized url string.
"""
norm_url = re.sub(r'http://', '', url)
norm_url = re.sub(r'https://', '', norm_url)
norm_url = re.sub(r'/', '__', norm_url)
return norm_url
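# A minimal usage sketch: the scheme is stripped and path separators become
# double underscores, giving a string safe to use as a document id.
if __name__ == "__main__":
    assert normalize_url("https://example.com/a/b") == "example.com__a__b"
    assert normalize_url("http://example.com") == "example.com"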
|
79197b9fa1c47da601bdb9c34d626d236b649173
| 3,646,002
|
import fsspec
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
        fs (``fsspec.spec.AbstractFileSystem``): An abstract super-class for pythonic file-systems, e.g. :code:`fsspec.filesystem('file')` or :class:`datasets.filesystems.S3FileSystem`
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
|
c40f9bb4845bbd1fc1a4cf9fce2c1b366cd22354
| 3,646,003
|
def get_element_attribute_or_empty(element, attribute_name):
"""
Args:
element (element): The xib's element.
attribute_name (str): The desired attribute's name.
Returns:
The attribute's value, or an empty str if none exists.
"""
return element.attributes[attribute_name].value if element.hasAttribute(attribute_name) else ""
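# A minimal usage sketch with xml.dom.minidom, whose elements expose the same
# hasAttribute()/attributes API this helper expects.
if __name__ == "__main__":
    from xml.dom import minidom
    elem = minidom.parseString('<view id="42"/>').documentElement
    assert get_element_attribute_or_empty(elem, "id") == "42"
    assert get_element_attribute_or_empty(elem, "missing") == ""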
|
dbc7f5c24d321c40b46f1c78950d7cf254719b5c
| 3,646,004
|
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from scipy.spatial import distance
from scipy.optimize import linear_sum_assignment
# cxcywh_to_xyxy (box-format conversion) is assumed to be provided elsewhere in the project
def Matcher(y_true, y_pred_logits, y_pred_bbox):
"""
y_true: GT list of len batch with each element is an array of
shape (n_gt_objects, 5) ; n_gt_objects are number of
objects in that image sample and 5 -> (cx,cy,w,h,class_label)
where cordinates are in [0,1] range
y_pred_logits: model output of shape (batch, num_queries, classes)
y_pred_bbox: model output of shape (batch, num_queries, 4) in [0,1] range -> cx,cy,w,h
"""
y_pred_bbox = y_pred_bbox.numpy()
out_loss = 0
batch = len(y_true)
b,num_queries,_ = y_pred_logits.shape
assert b == batch, 'Batch mismatch!!'
batch_query_indices = []
y_pred_logits = tf.math.softmax(y_pred_logits).numpy()
for i in range(batch):
out_cls_loss = -y_pred_logits[i][:,(y_true[i][:,-1]).astype(int)]
out_cdist = distance.cdist(y_pred_bbox[i], y_true[i][:,:4], 'euclidean')
out_iou = []
for j in range(len(y_true[i])):
giou = tfa.losses.giou_loss(cxcywh_to_xyxy(y_pred_bbox[i]), cxcywh_to_xyxy(y_true[i][j,:4][np.newaxis,:]))
out_iou.append(giou)
out_iou = -np.array(out_iou).transpose(1,0)
comb_loss = out_cls_loss + out_cdist + out_iou
row_ind, col_ind = linear_sum_assignment(comb_loss)
batch_query_indices.append((row_ind,col_ind))
return batch_query_indices
|
0918becd40feca73a54ce158a3cb86946cb377ff
| 3,646,005
|
import os
def set_test_variables():
"""
Sets up variables for the unit tests below.
:return: dictionary of test input variables for the unit tests.
"""
test_variables = {
"asl_valid_full": os.path.join(
SRC_ROOT, "resources/schemas/tests_jsons/asl_valid/test_asl_schema001.json"
),
"asl_valid_absent_conditional_field": os.path.join(
SRC_ROOT, "resources/schemas/tests_jsons/asl_valid/test_asl_schema002.json"
),
"asl_valid_labeling_duration_array": os.path.join(
SRC_ROOT, "resources/schemas/tests_jsons/asl_valid/test_asl_schema003.json"
),
"asl_schema": os.path.join(SRC_ROOT, "resources/schemas/asl_bids_schema.json"),
}
return test_variables
|
67dbf1cdb8f99bc05350b5ccf3c81bb5892ce47f
| 3,646,006
|
import warnings
def _addBindInput(self, name, type = DEFAULT_TYPE_STRING):
"""(Deprecated) Add a BindInput to this shader reference."""
warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2)
return self.addInput(name, type)
|
79404122d895814f64000876e8f926ecc7d54e3e
| 3,646,007
|
import mpmath
def sf(x, c, d, scale):
"""
Survival function of the Burr type XII distribution.
"""
_validate_params(c, d, scale)
with mpmath.extradps(5):
x = mpmath.mpf(x)
c = mpmath.mpf(c)
d = mpmath.mpf(d)
scale = mpmath.mpf(scale)
if x < 0:
return mpmath.mp.one
return (1 + (x/scale)**c)**(-d)
|
2e2649eeb4d32739027eb6ad5a0f3b8c50f7e341
| 3,646,008
|
def substitute_word(text):
"""
    word substitution to make it consistent
"""
words = text.split(" ")
preprocessed = []
for w in words:
substitution = ""
if w == "mister":
substitution = "mr"
elif w == "missus":
substitution = "mrs"
else:
substitution = w
preprocessed.append(substitution)
return " ".join(preprocessed)
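# A minimal usage sketch: only the whole words "mister" and "missus" are rewritten.
if __name__ == "__main__":
    assert substitute_word("mister smith met missus jones") == "mr smith met mrs jones"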
|
0709f4223cb06ddfdc5e9704048f418f275429d1
| 3,646,009
|
import re
import requests
from bs4 import BeautifulSoup
def google_wiki(keyword, langid='en', js={}):
"""Google query targets, output if English wikipedia entry is found"""
targets = []
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0',}
googlerx = re.compile('(http[s]?[^\&]*)') # /url?q=https://fr.wikipedia.org/wiki/La_Banque_postale&sa=U&ei=Zn...
infoboxrx = re.compile('infobox')
domainrx = re.compile('^[a-zA-Z\-]+\.([a-zA-Z\-]+\.)*[a-zA-Z\-]+$')
# query = 'http://www.google.com/search?q=wikipedia%20{}%20{}'.format(langid, keyword)
query = 'http://www.google.com/search?q=wikipedia%20{}'.format(keyword)
r = requests.get(query, headers=headers)
soup = BeautifulSoup(r.content)
keywords = extract_keywords(js)
# phish_tokens = set([word for li in keywords for word in li])
# print(phish_tokens)
for a in soup.find_all('a'):
search = googlerx.search(a.get('href', ''))
if not search:
continue
url = search.groups()[0]
mld, rd = registered_domain(url)
if rd == 'wikipedia.org' and '#' not in url:
# if '.wikipedia.org' in url and '#' not in url:
# if url.startswith('https://{}.wikipedia.org'.format(langid)) and '#' not in url:
wikiurl = url
r = requests.get(url)
html = str(r.content)
wikisoup = BeautifulSoup(r.content)
title = wikisoup.find(id="firstHeading")
title = title.text
if not title or keyword not in title.lower():
continue
print(wikiurl)
infobox = wikisoup.find(class_=infoboxrx)
if infobox:
for anchor in infobox.find_all('a'):
if 'href' in anchor.attrs:
targeturl = anchor['href']
# is the link internal
if targeturl.startswith('/'):
continue
reg_domain = registered_domain(targeturl)[1]
if reg_domain:
t = (title, reg_domain, wikiurl)
print(reg_domain)
targets.append(t)
            external_links = wikisoup.find_all('a', class_="external text")
            external_domains = set()
            for anchor in external_links:
                if 'href' in anchor.attrs:
                    targeturl = anchor['href']
                    # is the link internal
                    if targeturl.startswith('/'):
                        continue
                    reg_domain = registered_domain(targeturl)[1]
                    if reg_domain:
                        external_domains.add((title, reg_domain, wikiurl))
return targets, sorted(external_domains)
|
23a449b0b8825d043d1ca3722cba4c373fcc5c3c
| 3,646,010
|
from .compile_words import read_words
import os
def _get_test_words() -> WordDict:
"""
>>> _get_test_words()['?og']
['dog', 'log']
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
return read_words(os.path.join(dir_path, 'test_data.txt'))
|
d97d3125101096a3acba11705f4b9318386e8ae9
| 3,646,011
|
def escape(line, chars):
"""Escapes characters 'chars' with '\\' in 'line'."""
def esc_one_char(ch):
if ch in chars:
return "\\" + ch
else:
return ch
return u"".join([esc_one_char(ch) for ch in line])
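# A minimal usage sketch: every character listed in `chars` gets a backslash prefix.
if __name__ == "__main__":
    assert escape("1*2+3", "*+") == "1\\*2\\+3"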
|
f69409c92eacbbcab4232f7bb0ee244c77a4f219
| 3,646,012
|
def polinomsuzIntegralHesapla(veriler):
"""
    Computes the integral using the given data.
    :param veriler: Data whose integral will be computed. Must be of list type.
"""
a,b=5,len(veriler)
deltax = 1
integral = 0
n = int((b - a) / deltax)
for i in range(n-1):
integral += deltax * (veriler[a] + veriler[a+deltax]) / 2
a += deltax
return integral
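# A minimal usage sketch: a trapezoidal sum over the samples starting at the
# hard-coded index 5 with unit step; the final interval is skipped because the
# loop runs n-1 times.
if __name__ == "__main__":
    samples = [0, 0, 0, 0, 0, 1, 2, 3, 4]
    assert polinomsuzIntegralHesapla(samples) == 7.5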
|
468e02da8ff077b04456f71f0af6d77bf5a47d68
| 3,646,013
|
def _keep_extensions(files, extension):
""" Filters by file extension, this can be more than the extension!
E.g. .png is the extension, gray.png is a possible extension"""
if isinstance(extension, str):
extension = [extension]
def one_equal_extension(some_string, extension_list):
return any([some_string.endswith(one_extension) for one_extension in extension_list])
return list(filter(lambda x: one_equal_extension(x, extension), files))
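# A minimal usage sketch: matching is by suffix, so compound endings such as
# "gray.png" are also valid "extensions".
if __name__ == "__main__":
    files = ["a.png", "b_gray.png", "c.jpg"]
    assert _keep_extensions(files, ".png") == ["a.png", "b_gray.png"]
    assert _keep_extensions(files, ["gray.png", ".jpg"]) == ["b_gray.png", "c.jpg"]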
|
009233e381e2015ff4d919338225057d94d40a82
| 3,646,014
|
from typing import Dict
def make_all_rules(
schema: "BaseOpenAPISchema", bundles: Dict[str, CaseInsensitiveDict], connections: EndpointConnections
) -> Dict[str, Rule]:
"""Create rules for all endpoints, based on the provided connections."""
return {
f"rule {endpoint.verbose_name}": make_rule(
endpoint, bundles[endpoint.path][endpoint.method.upper()], connections
)
for endpoint in schema.get_all_endpoints()
}
|
3b92fdea984b5bfbe2b869640b978398106f098b
| 3,646,015
|
import librosa
import numpy as np
def audio_to_magnitude_db_and_phase(n_fft, hop_length_fft, audio):
    """This function takes an audio signal and converts it into a spectrogram;
    it returns the magnitude in dB and the phase"""
stftaudio = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length_fft)
stftaudio_magnitude, stftaudio_phase = librosa.magphase(stftaudio)
stftaudio_magnitude_db = librosa.amplitude_to_db(
stftaudio_magnitude, ref=np.max)
return stftaudio_magnitude_db, stftaudio_phase
|
b9927f11bc353610fe7edbee9b710bd78fc13899
| 3,646,016
|
import os
def check_racs_exists(base_dir: str) -> bool:
"""
Check if RACS directory exists
Args:
base_dir: Path to base directory
Returns:
True if exists, False otherwise.
"""
return os.path.isdir(os.path.join(base_dir, "EPOCH00"))
|
efad779a5310b10b2eeefdc10e90f0b78d428bf4
| 3,646,017
|
from configparser import ConfigParser, MissingSectionHeaderError, ParsingError
def has_rc_object(rc_file, name):
    """
    Check whether the qutiprc file contains a section with the given name.
    Parameters
    ----------
    rc_file : str
        String specifying file location.
    name : str
        Tag for the saved data.
    """
    config = ConfigParser()
    try:
        config.read(_full_path(rc_file))
    except (MissingSectionHeaderError, ParsingError):
        return False
    return name in config
|
e7edd4ba8545257d7d489a7ac8f6e9595b4f087d
| 3,646,018
|
import torch
from torchvision import transforms
def apply_transform_test(batch_size, image_data_dir, tensor_data_dir, limited_num = None, shuffle_seed = 123, dataset = None):
"""
"""
std = [1.0, 1.0, 1.0]
mean = [0.0, 0.0, 0.0]
# if dataset is None:
# std = [1.0, 1.0, 1.0]
# mean = [0.0, 0.0, 0.0]
# elif dataset == "cifar10":
# std = [0.247, 0.243, 0.261]
# mean = [0.4914, 0.4822, 0.4465]
# elif dataset == "cifar100":
# std = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]
# mean = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]
# elif dataset == "imagenet":
# std = [0.229, 0.224, 0.225]
# mean = [0.485, 0.456, 0.406]
# elif dataset == "facescrub":
# std = [0.5, 0.5, 0.5]
# mean = [0.5, 0.5, 0.5]
trainTransform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean, std)
])
dataset = ImageTensorFolder(img_path=image_data_dir, tensor_path=tensor_data_dir, label_path=tensor_data_dir,
img_fmt="jpg", tns_fmt="pt", lbl_fmt="label", transform=trainTransform, limited_num = limited_num)
# dataset_size = len(dataset)
# indices = list(range(dataset_size))
# np.random.seed(shuffle_seed)
# np.random.shuffle(indices)
# test_indices = indices[0:]
# test_sampler = SubsetRandomSampler(test_indices)
testloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=False, num_workers=4)
return testloader
|
664e9fadeb4897aee7ef26abeec9c128ec7cef56
| 3,646,019
|
import re
import collections
import os
def split_datasets(data_dir, word_dict, num_folds, fold_idx):
"""Split known words (including silence) and unknown words into training
and validation datasets respectively.
"""
modes = ['training', 'validation']
knowns = {m: [] for m in modes}
unknowns = {m: [] for m in modes}
word_excluded = set()
reg = re.compile('.*/([^/]+)/(.*)_nohash_(.*).wav')
# to find the most common known word
known_counter = {m: collections.Counter() for m in modes}
key_words = word_dict.key_words
divider = set_divider(data_dir, key_words, num_folds)
for wav in gfile.Glob(os.path.join(data_dir, '*', '*nohash*.wav')):
groups = reg.search(wav).groups()
word = groups[0].lower()
speaker = groups[1].lower()
mode = which_set(speaker, divider, fold_idx)
indices = word_dict.word_to_indices(word)
if indices:
if word in key_words:
knowns[mode].append([wav, indices])
known_counter[mode][word] += 1
else:
unknowns[mode].append([wav, indices])
else:
word_excluded.add(word)
print('words not in word_map.txt:', word_excluded)
# make an all-zero silence wave
silence_dir = os.path.join(data_dir, SILENCE_CLASS)
if not os.path.exists(silence_dir):
os.makedirs(silence_dir)
silence_0 = os.path.join(silence_dir, '%s_0.wav' % SILENCE_WORD)
encode_audio(np.zeros([SILENCE_LENGTH]), silence_0)
for mode in modes:
silence_indices = word_dict.word_to_indices(SILENCE_CLASS)
silence_size = known_counter[mode].most_common(1)[0][1]
knowns[mode] += [[silence_0, silence_indices]] * silence_size
return knowns, unknowns
|
1ac3e2d38bb331f8525f095740024cbaa02264d6
| 3,646,020
|
from itertools import islice
def take_t(n):
"""
Transformation for Sequence.take
:param n: number to take
:return: transformation
"""
return Transformation(
"take({0})".format(n), lambda sequence: islice(sequence, 0, n), None
)
|
1e485cc59160dbec8d2fa3f358f51055115eafdd
| 3,646,021
|
def metric_fn(loss):
"""Evaluation metric Fn which runs on CPU."""
perplexity = tf.exp(tf.reduce_mean(loss))
return {
"eval/loss": tf.metrics.mean(loss),
"eval/perplexity": tf.metrics.mean(perplexity),
}
|
3614ed0ccc3e390aeaf4036805dfeff351d4d150
| 3,646,022
|
def gensig_choi(distsmat, minlength=1, maxlength=None, rank=0):
""" The two dimensional sigma function for the c99 splitting """
if rank:
distsmat = rankify(distsmat, rank)
def sigma(a, b):
length = (b - a)
beta = distsmat[a:b, a:b].sum()
alpha = (b - a)**2
if minlength:
if (b - a) < minlength:
beta += np.inf
if maxlength:
if (b - a) > maxlength:
beta += np.inf
return (-beta, alpha)
return sigma
|
75568f854eff9c3bbc13b4a46be8b1a2b9651b9b
| 3,646,023
|
from typing import Optional
def check_ie_v3(base, add: Optional[str] = None) -> str:
"""Check country specific VAT-Id"""
s = sum((w * int(c) for w, c in zip(range(8, 1, -1), base)),
9 * (ord(add) - ord('@'))) # 'A' - 'I' -> 1 - 9
i = s % 23
return _IE_CC_MAP[i]
|
19fd495db8301ed7193881cc637e3ce1b75e368c
| 3,646,024
|
def filter_experiment_model(faultgroup, faultmodel, interestlist=None):
"""
Filter for a specific fault model. If interestlist is given only experiments
in this list will be analysed.
0 set 0
1 set 1
2 Toggle
"""
if not isinstance(faultmodel, int):
if "set0" in faultmodel:
faultmodel = 0
elif "set1" in faultmodel:
faultmodel = 1
elif "toggle" in faultmodel:
faultmodel = 2
else:
raise ValueError("Faultmodel not understood")
return generic_filter_faults(faultgroup, 'fault_model', faultmodel, None, interestlist)
|
7594f7f5a410c3bf9231996989259d1267ed250b
| 3,646,025
|
import argparse
def _str2bool(v):
"""Parser type utility function."""
if v.lower() in ('yes', 'true', 't', 'y', 'on', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', 'off', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean type expected. '
'Received {}'.format(v))
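# A minimal usage sketch with argparse: the type converter maps textual flags to booleans.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", type=_str2bool, default=False)
    assert parser.parse_args(["--verbose", "yes"]).verbose is True
    assert parser.parse_args(["--verbose", "0"]).verbose is False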
|
7fa90cdff3f2dbe161d28555ec0ef65bae8b6bf3
| 3,646,026
|
import inspect
def _default_command(cmds, argv):
"""Evaluate the default command, handling ``**kwargs`` case.
`argparse` and `argh` do not understand ``**kwargs``, i.e. pass through command.
There's a case (`pykern.pkcli.pytest`) that requires pass through so we wrap
the command and clear `argv` in the case of ``default_command(*args, **kwargs)``.
Args:
cmds (list): List of commands
argv (list): arguments (may be edited)
Returns:
function: default command or None
"""
if len(cmds) != 1 or cmds[0].__name__ != DEFAULT_COMMAND:
return None
dc = cmds[0]
    spec = inspect.getfullargspec(dc)
    if not (spec.varargs and spec.varkw):
return dc
save_argv = argv[:]
def _wrap_default_command():
return dc(*save_argv)
del argv[:]
return _wrap_default_command
|
aecaaba610ec473b41f1cd546cb5c551541d9fab
| 3,646,027
|
import os
def keypair_to_file(keypair):
"""Looks for the SSH private key for keypair under ~/.ssh/
Prints an error if the file doesn't exist.
Args:
keypair (string) : AWS keypair to locate a private key for
Returns:
        (string|None) : SSH private key file path or None if the private key doesn't exist.
"""
file = os.path.expanduser("~/.ssh/{}.pem".format(keypair))
if not os.path.exists(file):
print("Error: SSH Key '{}' does not exist".format(file))
return None
return file
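# A minimal usage sketch (hypothetical keypair name): resolves to ~/.ssh/<keypair>.pem,
# or prints an error and returns None when that file is missing.
if __name__ == "__main__":
    print(keypair_to_file("my-keypair"))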
|
0f544fd7d67530853bc4d8a91df01458d6408255
| 3,646,028
|
from functools import wraps
def nameable_op(node_factory_function): # type: (Callable) -> Callable
"""Set the name to the ngraph operator returned by the wrapped function."""
@wraps(node_factory_function)
def wrapper(*args, **kwargs): # type: (*Any, **Any) -> Node
node = node_factory_function(*args, **kwargs)
node = _set_node_name(node, **kwargs)
return node
return wrapper
|
0230c96e40b91772dc06a0b2c9cf358d1e0b08c7
| 3,646,029
|
import os
import yaml
def load_mlflow(output):
"""Load the mlflow run id.
Args:
output (str): Output directory
"""
with open(os.path.join(output, STAT_FILE_NAME), 'r') as stream:
        stats = yaml.load(stream, Loader=yaml.FullLoader)
return stats['mlflow_run_id']
|
01b2c05a9d3d8f4c83f7997cdb31d213cbce342a
| 3,646,030
|
import re
def by_pattern(finding: finding.Entry, ignore: ignore_list.Entry) -> bool:
"""Process a regex ignore list entry."""
# Short circuit if no pattern is set.
if not ignore.pattern:
return False
# If there's a match on the path, check whether the ignore is for the same module.
if re.search(ignore.pattern, finding.path):
if ignore.module != finding.source.module:
return False
# Then check whether the ignore is for the particular reference.
if ignore.references:
if finding.source.reference in ignore.references:
return True
return False
        # Or check whether the ignore is for the same offset.
if ignore.offset is not None:
if finding.location.offset == ignore.offset:
return True
return False
# In this case this is a fairly permissive ignore.
return True
return False
|
bbeb7d8ab740273bd21c120ca7bc42dc205e4a2b
| 3,646,031
|
def hex2int(s: str):
"""Convert a hex-octets (a sequence of octets) to an integer"""
return int(s, 16)
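# A minimal usage sketch: standard base-16 parsing, with or without the 0x prefix.
if __name__ == "__main__":
    assert hex2int("ff") == 255
    assert hex2int("0x10") == 16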
|
ecdb3152f8c661c944edd2811d016fce225c3d51
| 3,646,032
|
from operator import index
def into_two(lhs, ctx):
"""Element I
    (num) -> push n spaces
    (str) -> equivalent to `qp`
(lst) -> split a list into two halves
"""
ts = vy_type(lhs, simple=True)
return {
NUMBER_TYPE: lambda: " " * int(lhs),
str: lambda: quotify(lhs, ctx) + lhs,
list: lambda: [
index(lhs, [None, int(len(lhs) / 2)], ctx),
index(lhs, [int(len(lhs) / 2), None], ctx),
],
}.get(ts)()
|
6fae5eb7c5ae58a0e7faef6e46334201ccc6df10
| 3,646,033
|
def find_renter_choice(par,sol,t,i_beta,i_ht_lag,i_p,a_lag,
inv_v,inv_mu,v,mu,p,valid,do_mu=True):
""" find renter choice - used in both solution and simulation """
v_agg = np.zeros(2)
p_agg = np.zeros(2)
# a. x
iota_lag = -1
i_h_lag = -1
LTV_lag = np.nan
_m,x,_LTV = misc.mx_func(t,iota_lag,i_h_lag,i_p,LTV_lag,a_lag,par)
i_x = linear_interp.binary_search(0,par.Nx,par.grid_x,x)
wx = (x-par.grid_x[i_x])/(par.grid_x[i_x+1]-par.grid_x[i_x])
# b. choices
# 1. renter
i = 0
j = i + par.Nrt
inv_v0 = sol.rt_inv_v[t,i_beta,i_ht_lag,i_p,i_x,:].ravel()
inv_v1 = sol.rt_inv_v[t,i_beta,i_ht_lag,i_p,i_x+1,:].ravel()
inv_mu0 = sol.rt_inv_mu[t,i_beta,i_ht_lag,i_p,i_x,:]
inv_mu1 = sol.rt_inv_mu[t,i_beta,i_ht_lag,i_p,i_x+1,:]
v_agg[0] = update(par,i,j,inv_v0,inv_v1,inv_mu0,inv_mu1,inv_v,inv_mu,wx,valid,v,p,mu,do_mu)
i_rt = i
j_rt = j
# 2. buyer
i = j
j = i + par.Nbt # = par.Ncr
inv_v0 = sol.bt_inv_v[t,i_beta,i_p,i_x,:,:,:].ravel()
inv_v1 = sol.bt_inv_v[t,i_beta,i_p,i_x+1,:,:,:].ravel()
inv_mu0 = sol.bt_inv_mu[t,i_beta,i_p,i_x,:,:,:].ravel()
inv_mu1 = sol.bt_inv_mu[t,i_beta,i_p,i_x+1,:,:,:].ravel()
v_agg[1] = update(par,i,j,inv_v0,inv_v1,inv_mu0,inv_mu1,inv_v,inv_mu,wx,valid,v,p,mu,do_mu)
i_bt = i
j_bt = j
# c. aggregate
if np.any(~np.isinf(v_agg)):
_logsum = logsum_and_choice_probabilities(v_agg,par.sigma_agg,p_agg)
p[i_rt:j_rt] *= p_agg[0]
p[i_bt:j_bt] *= p_agg[1]
Ev = np.nansum(p*v)
if do_mu:
Emu = np.nansum(p*mu)
else:
Emu = np.nan
else:
p[:] = np.nan
Ev = np.nan
Emu = np.nan
return Ev,Emu
|
f6cffb4ce6ed3ddaa98edefefdb6962536fbffb8
| 3,646,034
|
def met_zhengkl_gh(p, rx, cond_source, n, r):
"""
Zheng 2000 test implemented with Gauss Hermite quadrature.
"""
X, Y = sample_xy(rx, cond_source, n, r)
rate = (cond_source.dx() + cond_source.dy()) * 4./5
# start timing
with util.ContextTimer() as t:
# the test
zheng_gh = cgof.ZhengKLTestGaussHerm(p, alpha, rate=rate)
result = zheng_gh.perform_test(X, Y)
return {
# 'test': zheng_test,
'test_result': result, 'time_secs': t.secs}
|
41bef090ccc515be895bd08eda451864c330327e
| 3,646,035
|
from typing import Optional
from typing import Sequence
def get_domains(admin_managed: Optional[bool] = None,
include_unverified: Optional[bool] = None,
only_default: Optional[bool] = None,
only_initial: Optional[bool] = None,
only_root: Optional[bool] = None,
supports_services: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainsResult:
"""
Use this data source to access information about existing Domains within Azure Active Directory.
## API Permissions
The following API permissions are required in order to use this data source.
When authenticated with a service principal, this data source requires one of the following application roles: `Domain.Read.All` or `Directory.Read.All`
When authenticated with a user principal, this data source does not require any additional roles.
## Example Usage
```python
import pulumi
import pulumi_azuread as azuread
aad_domains = azuread.get_domains()
pulumi.export("domainNames", [__item.domain_name for __item in [aad_domains.domains]])
```
:param bool admin_managed: Set to `true` to only return domains whose DNS is managed by Microsoft 365. Defaults to `false`.
:param bool include_unverified: Set to `true` if unverified Azure AD domains should be included. Defaults to `false`.
:param bool only_default: Set to `true` to only return the default domain.
:param bool only_initial: Set to `true` to only return the initial domain, which is your primary Azure Active Directory tenant domain. Defaults to `false`.
:param bool only_root: Set to `true` to only return verified root domains. Excludes subdomains and unverified domains.
:param Sequence[str] supports_services: A list of supported services that must be supported by a domain. Possible values include `Email`, `Sharepoint`, `EmailInternalRelayOnly`, `OfficeCommunicationsOnline`, `SharePointDefaultDomain`, `FullRedelegation`, `SharePointPublic`, `OrgIdAuthentication`, `Yammer` and `Intune`.
"""
__args__ = dict()
__args__['adminManaged'] = admin_managed
__args__['includeUnverified'] = include_unverified
__args__['onlyDefault'] = only_default
__args__['onlyInitial'] = only_initial
__args__['onlyRoot'] = only_root
__args__['supportsServices'] = supports_services
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azuread:index/getDomains:getDomains', __args__, opts=opts, typ=GetDomainsResult).value
return AwaitableGetDomainsResult(
admin_managed=__ret__.admin_managed,
domains=__ret__.domains,
id=__ret__.id,
include_unverified=__ret__.include_unverified,
only_default=__ret__.only_default,
only_initial=__ret__.only_initial,
only_root=__ret__.only_root,
supports_services=__ret__.supports_services)
|
e7cb3a42ec7be45153c67d860b4802669f8043e5
| 3,646,036
|
import six
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), six.string_types)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
|
0e51b930d90585905b37ddd585cdfd53de111aa9
| 3,646,037
|
def find_last_service(obj):
"""Identify last service event for instrument"""
return Service.objects.filter(equipment=obj).order_by('-date').first()
|
5c7c74b376568fc57268e8fe7c1970c03d61ad2c
| 3,646,038
|
def SectionsMenu(base_title=_("Sections"), section_items_key="all", ignore_options=True):
"""
displays the menu for all sections
:return:
"""
items = get_all_items("sections")
return dig_tree(SubFolderObjectContainer(title2=_("Sections"), no_cache=True, no_history=True), items, None,
menu_determination_callback=determine_section_display, pass_kwargs={"base_title": base_title,
"section_items_key": section_items_key,
"ignore_options": ignore_options},
fill_args={"title": "section_title"})
|
ea3766d923337e1dffc07dec5e5d042e2c85050c
| 3,646,039
|
def perform_save_or_create_role(is_professor, created_user, req_main, is_creating):
"""Performs update or create Student or Professor for user"""
response_verb = 'created' if is_creating else 'updated'
if is_professor is True:
professor_data = None
if 'professor' in req_main.keys():
professor_data = req_main['professor']
if professor_data is not None:
serialized_prof = CreateUpdateProfessorSerializer(data=professor_data)
if serialized_prof.is_valid():
save_or_create_data_in_role(professor_data,
True, is_creating, 'Professor',
created_user)
return 'success'
else:
                return Response({"message": f"Professor account could not be {response_verb}."},
status=status.HTTP_400_BAD_REQUEST)
else:
student_data = None
if 'student' in req_main.keys():
student_data = req_main['student']
if student_data is not None:
serialized_student = CreateUpdateStudentSerializer(data=student_data)
if serialized_student.is_valid():
save_or_create_data_in_role(student_data,
False,
is_creating,
'Student',
created_user)
return 'success'
else:
                return Response({"message": f"Student account could not be {response_verb}."},
status=status.HTTP_400_BAD_REQUEST)
return 'success'
|
6161aaf886b31209a8387426f812cda73b739df2
| 3,646,040
|
def ecg_rsp(ecg_rate, sampling_rate=1000, method="vangent2019"):
"""Extract ECG Derived Respiration (EDR).
This implementation is far from being complete, as the information in the related papers
prevents me from getting a full understanding of the procedure. Help is required!
Parameters
----------
ecg_rate : array
The heart rate signal as obtained via `ecg_rate()`.
sampling_rate : int
The sampling frequency of the signal that contains the R-peaks (in Hz,
i.e., samples/second). Defaults to 1000Hz.
method : str
Can be one of 'vangent2019' (default), 'soni2019', 'charlton2016' or 'sarkar2015'.
Returns
-------
array
        A Numpy array containing the ECG-derived respiratory signal.
Examples
--------
>>> import neurokit2 as nk
>>> import pandas as pd
>>>
>>> # Get heart rate
>>> data = nk.data("bio_eventrelated_100hz")
>>> rpeaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
>>> ecg_rate = nk.signal_rate(rpeaks, sampling_rate=100, desired_length=len(rpeaks))
>>>
>>>
>>> # Get ECG Derived Respiration (EDR)
>>> edr = nk.ecg_rsp(ecg_rate, sampling_rate=100)
>>> nk.standardize(pd.DataFrame({"EDR": edr, "RSP": data["RSP"]})).plot() #doctest: +ELLIPSIS
<AxesSubplot:>
>>>
>>> # Method comparison (the closer to 0 the better)
>>> nk.standardize(pd.DataFrame({"True RSP": data["RSP"],
... "vangent2019": nk.ecg_rsp(ecg_rate, sampling_rate=100, method="vangent2019"),
... "sarkar2015": nk.ecg_rsp(ecg_rate, sampling_rate=100, method="sarkar2015"),
... "charlton2016": nk.ecg_rsp(ecg_rate, sampling_rate=100, method="charlton2016"),
... "soni2019": nk.ecg_rsp(ecg_rate, sampling_rate=100,
... method="soni2019")})).plot() #doctest: +ELLIPSIS
<AxesSubplot:>
References
----------
- van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2019). HeartPy: A novel heart rate algorithm
for the analysis of noisy signals. Transportation research part F: traffic psychology and behaviour,
66, 368-378.
- Sarkar, S., Bhattacherjee, S., & Pal, S. (2015). Extraction of respiration signal from ECG for
respiratory rate estimation.
- Charlton, P. H., Bonnici, T., Tarassenko, L., Clifton, D. A., Beale, R., & Watkinson, P. J. (2016).
An assessment of algorithms to estimate respiratory rate from the electrocardiogram and photoplethysmogram.
Physiological measurement, 37(4), 610.
- Soni, R., & Muniyandi, M. (2019). Breath rate variability: a novel measure to study the meditation
effects. International Journal of Yoga, 12(1), 45.
"""
method = method.lower()
if method in [
"sarkar2015"
]: # https://www.researchgate.net/publication/304221962_Extraction_of_respiration_signal_from_ECG_for_respiratory_rate_estimation # noqa: E501
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.7, order=6)
elif method in ["charlton2016"]: # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5390977/#__ffn_sectitle
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=4 / 60, highcut=60 / 60, order=6)
elif method in ["soni2019"]: # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6329220/
rsp = signal_filter(ecg_rate, sampling_rate, highcut=0.5, order=6)
elif method in [
"vangent2019"
]: # https://github.com/paulvangentcom/heartrate_analysis_python/blob/1597e8c0b2602829428b22d8be88420cd335e939/heartpy/analysis.py#L541 # noqa: E501
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.4, order=2)
else:
raise ValueError(
"NeuroKit error: ecg_rsp(): 'method' should be "
"one of 'sarkar2015', 'charlton2016', 'soni2019' or "
"'vangent2019'."
)
return rsp
|
ec4ecdbf4489216124ef82399c548461968ca45b
| 3,646,041
|
def bootstrap(request):
"""Concatenates bootstrap.js files from all installed Hue apps."""
    # Has some None's for apps that don't have bootstraps.
all_bootstraps = [(app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_hue_permission(action="access", app=app.name)]
# Iterator over the streams.
concatenated = ["\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None]
    # HttpResponse can take an iterable as the first argument, which
# is what happens here.
return HttpResponse(concatenated, content_type='text/javascript')
|
94be21562c383ad93c7a0530810bb08f41f3eb26
| 3,646,042
|
def get_selection(selection):
"""Return a valid model selection."""
if not isinstance(selection, str) and not isinstance(selection, list):
raise TypeError('The selection setting must be a string or a list.')
if isinstance(selection, str):
if selection.lower() == 'all' or selection == '':
selection = None
elif selection.startswith('topics'):
selection = [selection]
return selection
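# A minimal usage sketch: 'all' or '' collapses to None, a bare 'topics...' string
# is wrapped in a list, and an explicit list is returned unchanged.
if __name__ == "__main__":
    assert get_selection("all") is None
    assert get_selection("topics:5") == ["topics:5"]
    assert get_selection(["model_a", "model_b"]) == ["model_a", "model_b"]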
|
996d0af844e7c1660bcc67e24b33c31861296d93
| 3,646,043
|
def getAllImageFilesInHierarchy(path):
"""
Returns a list of file paths relative to 'path' for all images under the given directory,
recursively looking in subdirectories
"""
return [f for f in scan_tree(path)]
|
821147ac2def3f04cb9ecc7050afca85d54b6543
| 3,646,044
|
def list_package(connection, args):
"""List information about package contents"""
package = sap.adt.Package(connection, args.name)
for pkg, subpackages, objects in sap.adt.package.walk(package):
basedir = '/'.join(pkg)
if basedir:
basedir += '/'
if not args.recursive:
for subpkg in subpackages:
print(f'{basedir}{subpkg}')
for obj in objects:
print(f'{basedir}{obj.name}')
if not args.recursive:
break
if not subpackages and not objects:
print(f'{basedir}')
return 0
|
7c8e0cb8a6d5e80a95ae216ad2b85309f0b4d45c
| 3,646,045
|
def calc_hebrew_bias(probs):
"""
:param probs: list of negative log likelihoods for a Hebrew corpus
:return: gender bias in corpus
"""
bias = 0
for idx in range(0, len(probs), 16):
bias -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
bias += probs[idx + 2] + probs[idx + 6] + probs[idx + 10] + probs[idx + 14]
return bias / 4
|
565be51b51d857c671ee44e090c5243e4d207942
| 3,646,046
|
import pickle
def load_wiki(size = 128, validate = True):
"""
Return malaya pretrained wikipedia ELMO size N.
Parameters
----------
size: int, (default=128)
validate: bool, (default=True)
Returns
-------
dictionary: dictionary of dictionary, reverse dictionary and vectors
"""
if not isinstance(size, int):
raise ValueError('size must be an integer')
if size not in [128, 256]:
raise ValueError('size only support [128,256]')
if validate:
check_file(PATH_ELMO[size], S3_PATH_ELMO[size])
else:
if not check_available(PATH_ELMO[size]):
raise Exception(
'elmo-wiki is not available, please `validate = True`'
)
with open(PATH_ELMO[size]['setting'], 'rb') as fopen:
setting = pickle.load(fopen)
g = load_graph(PATH_ELMO[size]['model'])
return ELMO(
g.get_tensor_by_name('import/tokens_characters:0'),
g.get_tensor_by_name('import/tokens_characters_reverse:0'),
g.get_tensor_by_name('import/softmax_score:0'),
generate_session(graph = g),
setting['dictionary'],
setting['char_maxlen'],
setting['steps'],
setting['softmax_weight'],
)
|
6daaa592000f8cb4ac54632729bda60c7325548d
| 3,646,047
|
def generate_pibindex_rois_fs(aparc_aseg):
""" given an aparc aseg in pet space:
generate wm, gm and pibindex rois
make sure they are non-overlapping
return 3 rois"""
wm = mask_from_aseg(aparc_aseg, wm_aseg())
gm = mask_from_aseg(aparc_aseg, gm_aseg())
pibi = mask_from_aseg(aparc_aseg, pibindex_aseg())
# make non-overlapping
wm[pibi==1] = 0
gm[pibi ==1] = 0
gm[wm==1] = 0
return wm, gm, pibi
|
35023b9084bb90f6b74f3f39b8fd79f03eb825d9
| 3,646,048
|
from datetime import datetime
import sys
import glob
import os
def get_log_name(GLASNOST_ROOT, start_time, client_ip, mlab_server):
"""Helper method that given a test key, finds the logfile"""
log_glob = "%s/%s.measurement-lab.org/%s_%s_*" % (start_time.strftime('%Y/%m/%d'), mlab_server, start_time.strftime('%Y-%m-%dT%H:%M:%S'), client_ip)
if start_time < datetime(2010,1,8,5,7,0):
# before this time, the days are +1 in the filenames
dy = start_time.day + 1
log_glob = log_glob[:8] + '%02d'%dy + log_glob[10:]
log_glob = log_glob[:51] + '%02d'%dy + log_glob[53:]
if not sys.platform.startswith("linux"):
log_glob = log_glob.replace(':','_')
logs = glob.glob(os.path.join(GLASNOST_ROOT, log_glob))
if not logs:
# sometimes filename seconds differs by +/-1! change to wildcard
log_glob = log_glob[:61] + '?' + log_glob[62:]
logs = glob.glob(os.path.join(GLASNOST_ROOT, log_glob))
if not logs:
log_glob = log_glob[:60] + '?' + log_glob[61:]
logs = glob.glob(os.path.join(GLASNOST_ROOT, log_glob))
#endif
if len(logs)!=1:
raise Exception('!! log file not found (=%d): %s' % (len(logs),log_glob))
return logs[0]
|
2492ad96563a7a93a9b00d9e5e20742b7f645ec3
| 3,646,049
|
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import matplotlib.pyplot as plt
def generate_mpc_imitate(dataset, data_params, nn_params, train_params):
"""
Will be used for imitative control of the model predictive controller.
Could try adding noise to the sampled acitons...
"""
class ImitativePolicy(nn.Module):
def __init__(self, nn_params):
super(ImitativePolicy, self).__init__()
# Store the parameters:
self.hidden_w = nn_params['hid_width']
self.depth = nn_params['hid_depth']
self.n_in_input = nn_params['dx']
self.n_out = nn_params['du']
self.activation = nn_params['activation']
self.d = nn_params['dropout']
self.loss_fnc = nn.MSELoss()
# super(ImitativePolicy, self).__init__()
# Takes objects from the training parameters
layers = []
layers.append(nn.Linear(self.n_in_input, self.hidden_w)
) # input layer
layers.append(self.activation)
layers.append(nn.Dropout(p=self.d))
for d in range(self.depth):
# add modules
# input layer
layers.append(nn.Linear(self.hidden_w, self.hidden_w))
layers.append(self.activation)
layers.append(nn.Dropout(p=self.d))
# output layer
layers.append(nn.Linear(self.hidden_w, self.n_out))
self.features = nn.Sequential(*layers)
# Need to scale the state variables again etc
# inputs state, output an action (PWMs)
self.scalarX = StandardScaler() # MinMaxScaler(feature_range=(-1, 1))
self.scalarU = MinMaxScaler(feature_range=(-1, 1))
def forward(self, x):
# Max pooling over a (2, 2) window
x = self.features(x)
return x
def preprocess(self, dataset): # X, U):
"""
Preprocess X and U for passing into the neural network. For simplicity, takes in X and U as they are output from generate data, but only passed the dimensions we want to prepare for real testing. This removes a lot of potential questions that were bugging me in the general implementation. Will do the cosine and sin conversions externally.
"""
# Already done is the transformation from
# [yaw, pitch, roll, x_ddot, y_ddot, z_ddot] to
# [sin(yaw), sin(pitch), sin(roll), cos(pitch), cos(yaw), cos(roll), x_ddot, y_ddot, z_ddot]
# dX = np.array([utils_data.states2delta(val) for val in X])
if len(dataset) == 2:
X = dataset[0]
U = dataset[1]
else:
raise ValueError("Improper data shape for training")
self.scalarX.fit(X)
self.scalarU.fit(U)
#Normalizing to zero mean and unit variance
normX = self.scalarX.transform(X)
normU = self.scalarU.transform(U)
inputs = torch.Tensor(normX)
outputs = torch.Tensor(normU)
return list(zip(inputs, outputs))
def postprocess(self, U):
"""
Given the raw output from the neural network, post process it by rescaling by the mean and variance of the dataset
"""
# de-normalize so to say
            U = self.scalarU.inverse_transform(U.reshape(1, -1))
U = U.ravel()
return np.array(U)
def train_cust(self, dataset, train_params, gradoff=False):
"""
Train the neural network.
if preprocess = False
dataset is a list of tuples to train on, where the first value in the tuple is the training data (should be implemented as a torch tensor), and the second value in the tuple
is the label/action taken
if preprocess = True
dataset is simply the raw output of generate data (X, U)
Epochs is number of times to train on given training data,
batch_size is hyperparameter dicating how large of a batch to use for training,
optim is the optimizer to use (options are "Adam", "SGD")
split is train/test split ratio
"""
epochs = train_params['epochs']
batch_size = train_params['batch_size']
optim = train_params['optim']
split = train_params['split']
lr = train_params['lr']
lr_step_eps = train_params['lr_schedule'][0]
lr_step_ratio = train_params['lr_schedule'][1]
preprocess = train_params['preprocess']
if preprocess:
dataset = self.preprocess(dataset) # [0], dataset[1])
trainLoader = DataLoader(
dataset[:int(split*len(dataset))], batch_size=batch_size, shuffle=True)
testLoader = DataLoader(
dataset[int(split*len(dataset)):], batch_size=batch_size)
# Papers seem to say ADAM works better
if(optim == "Adam"):
optimizer = torch.optim.Adam(
super(ImitativePolicy, self).parameters(), lr=lr)
elif(optim == "SGD"):
optimizer = torch.optim.SGD(
super(ImitativePolicy, self).parameters(), lr=lr)
else:
raise ValueError(optim + " is not a valid optimizer type")
# most results at .6 gamma, tried .33 when got NaN
if lr_step_eps != []:
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=lr_step_eps, gamma=lr_step_ratio)
testloss, trainloss = self._optimize(
self.loss_fnc, optimizer, split, scheduler, epochs, batch_size, dataset) # trainLoader, testLoader)
return testloss, trainloss
def predict(self, X):
"""
Given a state X, predict the desired action U. This function is used when simulating, so it does all pre and post processing for the neural net
"""
#normalizing and converting to single sample
normX = self.scalarX.transform(X.reshape(1, -1))
input = torch.Tensor(normX)
NNout = self.forward(input).data[0]
return NNout
# trainLoader, testLoader):
def _optimize(self, loss_fn, optim, split, scheduler, epochs, batch_size, dataset, gradoff=False):
errors = []
error_train = []
split = split
testLoader = DataLoader(
dataset[int(split*len(dataset)):], batch_size=batch_size)
trainLoader = DataLoader(
dataset[:int(split*len(dataset))], batch_size=batch_size, shuffle=True)
for epoch in range(epochs):
scheduler.step()
avg_loss = torch.zeros(1)
num_batches = len(trainLoader)/batch_size
for i, (input, target) in enumerate(trainLoader):
# Add noise to the batch
if False:
if self.prob:
n_out = int(self.n_out/2)
else:
n_out = self.n_out
noise_in = torch.tensor(np.random.normal(
0, .01, (input.size())), dtype=torch.float)
noise_targ = torch.tensor(np.random.normal(
0, .01, (target.size())), dtype=torch.float)
input.add_(noise_in)
target.add_(noise_targ)
optim.zero_grad() # zero the gradient buffers
# compute the output
output = self.forward(input)
loss = loss_fn(output, target)
# add small loss term on the max and min logvariance if probablistic network
# note, adding this term will backprob the values properly
if loss.data.numpy() == loss.data.numpy():
# print(self.max_logvar, self.min_logvar)
if not gradoff:
# backpropagate from the loss to fill the gradient buffers
loss.backward()
optim.step() # do a gradient descent step
# print('tain: ', loss.item())
# if not loss.data.numpy() == loss.data.numpy(): # Some errors make the loss NaN. this is a problem.
else:
# This is helpful: it'll catch that when it happens,
print("loss is NaN")
# print("Output: ", output, "\nInput: ", input, "\nLoss: ", loss)
errors.append(np.nan)
error_train.append(np.nan)
# and give the output and input that made the loss NaN
return errors, error_train
# update the overall average loss with this batch's loss
avg_loss += loss.item()/(len(trainLoader)*batch_size)
# self.features.eval()
test_error = torch.zeros(1)
for i, (input, target) in enumerate(testLoader):
output = self.forward(input)
loss = loss_fn(output, target)
test_error += loss.item()/(len(testLoader)*batch_size)
test_error = test_error
#print("Epoch:", '%04d' % (epoch + 1), "loss=", "{:.9f}".format(avg_loss.data[0]),
# "test_error={:.9f}".format(test_error))
if (epoch % 1 == 0):
print("Epoch:", '%04d' % (epoch + 1), "train loss=", "{:.6f}".format(
avg_loss.data[0]), "test loss=", "{:.6f}".format(test_error.data[0]))
# if (epoch % 50 == 0) & self.prob: print(self.max_logvar, self.min_logvar)
error_train.append(avg_loss.data[0].numpy())
errors.append(test_error.data[0].numpy())
#loss_fn.print_mmlogvars()
return errors, error_train
# create policy object
policy = ImitativePolicy(nn_params)
# train policy
# X, U, _ = df_to_training(df, data_params)
X = dataset[0]
U = dataset[1]
acctest, acctrain = policy.train_cust((X, U), train_params)
if True:
ax1 = plt.subplot(211)
# ax1.set_yscale('log')
ax1.plot(acctest, label='Test Loss')
plt.title('Test Loss')
ax2 = plt.subplot(212)
# ax2.set_yscale('log')
ax2.plot(acctrain, label='Train Loss')
plt.title('Training Loss')
ax1.legend()
plt.show()
# return policy!
return policy
|
2aefbb10256a008e58f329cb84f0351255deb304
| 3,646,050
|
import logging
from scipy import ndimage
def rescale(img, mask, factor):
"""Rescale image and mask."""
logging.info('Scaling: %s', array_info(img))
info = img.info
img = ndimage.interpolation.zoom(img, factor + (1,), order=0)
info['spacing'] = [s/f for s, f in zip(info['spacing'], factor)]
mask = rescale_mask(mask, factor)
assert img[..., 0].shape == mask.shape, (img.shape, mask.shape)
img = dwi.image.Image(img, info=info)
return img, mask
|
9f8676a34e58eec258227d8ba41891f4bab7e895
| 3,646,051
|
def get_data_all(path):
"""
Get all data of Nest and reorder them.
:param path: the path of the Nest folder
:return:
"""
nb = count_number_of_label(path+ 'labels.csv')
data_pop = {}
for i in range(nb):
label, type = get_label_and_type(path + 'labels.csv', i)
field, data = get_data(label, path)
if type == 'spikes':
data_pop[label]=reorder_data_spike_detector(data)
else:
data_pop[label]=reorder_data_multimeter(data)
return data_pop
|
94409d671b287ce213088817a4ad41be86126508
| 3,646,052
|
def from_tfrecord_parse(
record,
pre_process_func=None,
jpeg_encoded=False):
"""
This function is made to work with the prepare_data.TFRecordWriter class.
It parses a single tf.Example records.
Arguments:
record : the tf.Example record with the features of
prepare_data.TFRecordWriter
pre_process_func: if not None, must be a pre-processing function that will be applied on the data.
jpeg_encoded : is the data encoded in jpeg format?
Returns:
image: a properly shaped and encoded 2D image.
label: its corresponding label.
"""
features = tf.io.parse_single_example(record, features={
'shape': tf.io.FixedLenFeature([3], tf.int64),
'image': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([1], tf.int64)})
data = tf.io.decode_jpeg(features['image']) if jpeg_encoded else tf.io.decode_raw(features['image'], tf.uint8)
data = tf.reshape(data, features['shape'])
labels = features['label']
# data pre_processing
if pre_process_func:
data, labels = pre_process_func(data, labels)
return data, labels
|
2e32accfe7485058fa4aff7c683265b939184c94
| 3,646,053
|
def load_shapes_coords(annotation_path):
"""
> TODO: Ensure and correct the clockwise order of the coords of a QUAD
"""
quads_coords = pd.read_csv(annotation_path, header=None)
quads_coords = quads_coords.iloc[:,:-1].values # [n_box, 8]
quads_coords = quads_coords.reshape(-1, 4, 2)
if geometry == "QUAD":
shapes_coords = quads_coords
elif geometry == "RBOX":
        shapes_coords = quads_to_rboxes(quads_coords)
else:
raise ValueError("Invalid Geometry")
return shapes_coords
|
8761ccfa1dac718c4a7df7cb6ced12df3e656a7a
| 3,646,054
|
import sys
import os
def caller_path(steps=1, names=None):
"""Return the path to the file of the current frames' caller."""
frame = sys._getframe(steps + 1)
try:
path = os.path.dirname(frame.f_code.co_filename)
finally:
del frame
if not path:
path = os.getcwd()
if names is not None:
path = os.path.join(path, *names)
return os.path.realpath(path)
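# A minimal usage sketch: with steps=0 the inspected frame is the direct caller,
# i.e. this module itself.
if __name__ == "__main__":
    print(caller_path(steps=0))                       # directory containing this file
    print(caller_path(steps=0, names=("data", "x")))  # that directory joined with data/x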
|
74d617d970df49854e98c7147d7b830e2dc230cf
| 3,646,055
|
import json
import re
import numpy as np
def reader_json_totals(list_filenames):
"""
This reads the json files with totals and returns them as a list of dicts.
It will verify that the name of the file starts with totals.json to read it.
This way, we can just send to the function all the files in the directory and it will take care
of selecting the appropriate.
Returns
----------
list_totals_dict: list dicts
list of dictionaries with the totals
"""
list_totals_dict = []
for file in list_filenames:
# if it is a json results file, we process it.
if "totals.json" in file:
with open(file, 'r') as fp:
data = json.load(fp)
try:
data['1st_react_temp'] = float(re.findall(r"(\d+)C", file)[0])
except IndexError:
data['1st_react_temp'] = np.nan
try:
data['2nd_react_temp'] = float(re.findall(r"(\d+)C", file)[1])
except IndexError:
data['2nd_react_temp'] = np.nan
try:
data['mass ug'] = float(re.findall(r"(\d+) ug", file)[0])
except IndexError:
data['mass ug'] = np.nan
list_totals_dict.append(data)
return list_totals_dict
|
57f27dbb3eabee014a23449b7975295b088b5e72
| 3,646,056
|
def is_hitachi(dicom_input):
"""
Use this function to detect if a dicom series is a hitachi dataset
:param dicom_input: directory with dicom files for 1 scan of a dicom_header
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is hitachi
if 'HITACHI' not in header.Manufacturer.upper():
return False
return True
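# A minimal usage sketch, assuming pydicom is available to build a header object
# with the attributes the check reads.
if __name__ == "__main__":
    from pydicom.dataset import Dataset
    header = Dataset()
    header.Modality = "MR"
    header.Manufacturer = "Hitachi Medical Corporation"
    assert is_hitachi([header]) is True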
|
c039c0535823edda2f66c3e445a5800a9890f155
| 3,646,057
|
import numpy as np
def index_of_first_signal(evt_index, d, qsets, MAXT3):
    """ Check the evt_index of the first signal triplet (MC truth).
Args:
Returns:
"""
first_index = -1
k = 0
for tset in qsets:
for ind in tset: # Pick first of alternatives and break
#[HERE ADD THE OPTION TO CHOOSE e.g. THE BEST RECONSTRUCTION QUALITY !!]
y = np.asarray(d['_BToKEE_is_signal'][evt_index])[ind]
break
if y == 1:
first_index = k
break
k += 1
return first_index
|
cd156faceaf3cf3261b2ba217cda5a6c0e3ce4b8
| 3,646,058
|
import numpy
def readcrd(filename, REAL):
"""
It reads the crd file, file that contains the charges information.
Arguments
----------
filename : name of the file that contains the surface information.
REAL : data type.
Returns
-------
pos : (Nqx3) array, positions of the charges.
q : (Nqx1) array, value of the charges.
Nq : int, number of charges.
"""
pos = []
q = []
start = 0
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.split()
if len(line) > 8 and line[0] != '*': # and start==2:
x = line[4]
y = line[5]
z = line[6]
q.append(REAL(line[9]))
pos.append([REAL(x), REAL(y), REAL(z)])
pos = numpy.array(pos)
q = numpy.array(q)
return pos, q
|
efafc2e53eebeacbe6a1a5b1e346d0e121fa7a62
| 3,646,059
|
import tensorflow as tf
import tensorflow_hub as hub
def load_and_initialize_hub_module(module_path, signature='default'):
"""Loads graph of a TF-Hub module and initializes it into a session.
Args:
module_path: string Path to TF-Hub module.
signature: string Signature to use when creating the apply graph.
Return:
graph: tf.Graph Graph of the module.
session: tf.Session Session with initialized variables and tables.
inputs: dict Dictionary of input tensors.
outputs: dict Dictionary of output tensors.
Raises:
ValueError: If signature contains a SparseTensor on input or output.
"""
graph = tf.Graph()
with graph.as_default():
tf.compat.v1.logging.info('Importing %s', module_path)
module = hub.Module(module_path)
signature_inputs = module.get_input_info_dict(signature)
signature_outputs = module.get_output_info_dict(signature)
# First check there are no SparseTensors in input or output.
for key, info in list(signature_inputs.items()) + list(
signature_outputs.items()):
if info.is_sparse:
raise ValueError(
'Signature "%s" has a SparseTensor on input/output "%s".'
' SparseTensors are not supported.' % (signature, key))
# Create placeholders to represent the input of the provided signature.
inputs = {}
for input_key, input_info in signature_inputs.items():
inputs[input_key] = tf.compat.v1.placeholder(
shape=input_info.get_shape(), dtype=input_info.dtype, name=input_key)
outputs = module(inputs=inputs, signature=signature, as_dict=True)
session = tf.compat.v1.Session(graph=graph)
session.run(tf.compat.v1.global_variables_initializer())
session.run(tf.compat.v1.tables_initializer())
return graph, session, inputs, outputs
|
b04b5f77c7e0207d314ebb5910ec1c5e61f4755c
| 3,646,060
|
def get_mention_token_dist(m1, m2):
""" Returns distance in tokens between two mentions """
succ = m1.tokens[0].doc_index < m2.tokens[0].doc_index
first = m1 if succ else m2
second = m2 if succ else m1
return max(0, second.tokens[0].doc_index - first.tokens[-1].doc_index)
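# A minimal usage sketch with hypothetical stand-ins for the mention/token objects:
# all that is assumed is a .tokens list whose items carry a .doc_index attribute.
if __name__ == "__main__":
    from types import SimpleNamespace as NS
    m1 = NS(tokens=[NS(doc_index=3), NS(doc_index=4)])
    m2 = NS(tokens=[NS(doc_index=7), NS(doc_index=8)])
    assert get_mention_token_dist(m1, m2) == 3
    assert get_mention_token_dist(m2, m1) == 3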
|
84052f805193b1d653bf8cc22f5d37b6f8de66f4
| 3,646,061
|
import numpy as np
def shlcar3x3(x,y,z, ps):
"""
This subroutine returns the shielding field for the earth's dipole, represented by
2x3x3=18 "cartesian" harmonics, tilted with respect to the z=0 plane (nb#4, p.74)
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
    :param ps: geo-dipole tilt angle in radians.
:return: bx,by,bz. Field components in GSM system, in nT.
"""
# The 36 coefficients enter in pairs in the amplitudes of the "cartesian" harmonics (A(1)-A(36).
# The 14 nonlinear parameters (A(37)-A(50) are the scales Pi,Ri,Qi,and Si entering the arguments of exponents, sines, and cosines in each of the
# 18 "cartesian" harmonics plus two tilt angles for the cartesian harmonics (one for the psi=0 mode and another for the psi=90 mode)
a = np.array([
-901.2327248,895.8011176,817.6208321,-845.5880889,-83.73539535,
86.58542841,336.8781402,-329.3619944,-311.2947120,308.6011161,
31.94469304,-31.30824526,125.8739681,-372.3384278,-235.4720434,
286.7594095,21.86305585,-27.42344605,-150.4874688,2.669338538,
1.395023949,-.5540427503,-56.85224007,3.681827033,-43.48705106,
5.103131905,1.073551279,-.6673083508,12.21404266,4.177465543,
5.799964188,-.3977802319,-1.044652977,.5703560010,3.536082962,
-3.222069852,9.620648151,6.082014949,27.75216226,12.44199571,
5.122226936,6.982039615,20.12149582,6.150973118,4.663639687,
15.73319647,2.303504968,5.840511214,.8385953499E-01,.3477844929])
p1,p2,p3, r1,r2,r3, q1,q2,q3, s1,s2,s3 = a[36:48]
t1,t2 = a[48:50]
cps=np.cos(ps)
sps=np.sin(ps)
s2ps=2*cps # modified here (sin(2*ps) instead of sin(3*ps))
st1=np.sin(ps*t1)
ct1=np.cos(ps*t1)
st2=np.sin(ps*t2)
ct2=np.cos(ps*t2)
x1=x*ct1-z*st1
z1=x*st1+z*ct1
x2=x*ct2-z*st2
z2=x*st2+z*ct2
# make the terms in the 1st sum ("perpendicular" symmetry):
# i=1:
sqpr= np.sqrt(1/p1**2+1/r1**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx1 =-sqpr*expr*cyp*szr
hy1 = expr/p1*syp*szr
fz1 =-expr*cyp/r1*czr
hx1 = fx1*ct1+fz1*st1
hz1 =-fx1*st1+fz1*ct1
sqpr= np.sqrt(1/p1**2+1/r2**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx2 =-sqpr*expr*cyp*szr
hy2 = expr/p1*syp*szr
fz2 =-expr*cyp/r2*czr
hx2 = fx2*ct1+fz2*st1
hz2 =-fx2*st1+fz2*ct1
sqpr= np.sqrt(1/p1**2+1/r3**2)
cyp = np.cos(y/p1)
syp = np.sin(y/p1)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx3 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy3 = expr/p1*syp*(z1*czr+x1/r3*szr/sqpr)
fz3 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx3 = fx3*ct1+fz3*st1
hz3 =-fx3*st1+fz3*ct1
# i=2:
sqpr= np.sqrt(1/p2**2+1/r1**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx4 =-sqpr*expr*cyp*szr
hy4 = expr/p2*syp*szr
fz4 =-expr*cyp/r1*czr
hx4 = fx4*ct1+fz4*st1
hz4 =-fx4*st1+fz4*ct1
sqpr= np.sqrt(1/p2**2+1/r2**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx5 =-sqpr*expr*cyp*szr
hy5 = expr/p2*syp*szr
fz5 =-expr*cyp/r2*czr
hx5 = fx5*ct1+fz5*st1
hz5 =-fx5*st1+fz5*ct1
sqpr= np.sqrt(1/p2**2+1/r3**2)
cyp = np.cos(y/p2)
syp = np.sin(y/p2)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx6 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy6 = expr/p2*syp*(z1*czr+x1/r3*szr/sqpr)
fz6 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx6 = fx6*ct1+fz6*st1
hz6 =-fx6*st1+fz6*ct1
# i=3:
sqpr= np.sqrt(1/p3**2+1/r1**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r1)
szr = np.sin(z1/r1)
expr= np.exp(sqpr*x1)
fx7 =-sqpr*expr*cyp*szr
hy7 = expr/p3*syp*szr
fz7 =-expr*cyp/r1*czr
hx7 = fx7*ct1+fz7*st1
hz7 =-fx7*st1+fz7*ct1
sqpr= np.sqrt(1/p3**2+1/r2**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r2)
szr = np.sin(z1/r2)
expr= np.exp(sqpr*x1)
fx8 =-sqpr*expr*cyp*szr
hy8 = expr/p3*syp*szr
fz8 =-expr*cyp/r2*czr
hx8 = fx8*ct1+fz8*st1
hz8 =-fx8*st1+fz8*ct1
sqpr= np.sqrt(1/p3**2+1/r3**2)
cyp = np.cos(y/p3)
syp = np.sin(y/p3)
czr = np.cos(z1/r3)
szr = np.sin(z1/r3)
expr= np.exp(sqpr*x1)
fx9 =-expr*cyp*(sqpr*z1*czr+szr/r3*(x1+1/sqpr))
hy9 = expr/p3*syp*(z1*czr+x1/r3*szr/sqpr)
fz9 =-expr*cyp*(czr*(1+x1/r3**2/sqpr)-z1/r3*szr)
hx9 = fx9*ct1+fz9*st1
hz9 =-fx9*st1+fz9*ct1
a1=a[0]+a[1]*cps
a2=a[2]+a[3]*cps
a3=a[4]+a[5]*cps
a4=a[6]+a[7]*cps
a5=a[8]+a[9]*cps
a6=a[10]+a[11]*cps
a7=a[12]+a[13]*cps
a8=a[14]+a[15]*cps
a9=a[16]+a[17]*cps
bx=a1*hx1+a2*hx2+a3*hx3+a4*hx4+a5*hx5+a6*hx6+a7*hx7+a8*hx8+a9*hx9
by=a1*hy1+a2*hy2+a3*hy3+a4*hy4+a5*hy5+a6*hy6+a7*hy7+a8*hy8+a9*hy9
bz=a1*hz1+a2*hz2+a3*hz3+a4*hz4+a5*hz5+a6*hz6+a7*hz7+a8*hz8+a9*hz9
# make the terms in the 2nd sum ("parallel" symmetry):
# i=1
sqqs= np.sqrt(1/q1**2+1/s1**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx1 =-sqqs*exqs*cyq*czs *sps
hy1 = exqs/q1*syq*czs *sps
fz1 = exqs*cyq/s1*szs *sps
hx1 = fx1*ct2+fz1*st2
hz1 =-fx1*st2+fz1*ct2
sqqs= np.sqrt(1/q1**2+1/s2**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx2 =-sqqs*exqs*cyq*czs *sps
hy2 = exqs/q1*syq*czs *sps
fz2 = exqs*cyq/s2*szs *sps
hx2 = fx2*ct2+fz2*st2
hz2 =-fx2*st2+fz2*ct2
sqqs= np.sqrt(1/q1**2+1/s3**2)
cyq = np.cos(y/q1)
syq = np.sin(y/q1)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx3 =-sqqs*exqs*cyq*czs *sps
hy3 = exqs/q1*syq*czs *sps
fz3 = exqs*cyq/s3*szs *sps
hx3 = fx3*ct2+fz3*st2
hz3 =-fx3*st2+fz3*ct2
# i=2:
sqqs= np.sqrt(1/q2**2+1/s1**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx4 =-sqqs*exqs*cyq*czs *sps
hy4 = exqs/q2*syq*czs *sps
fz4 = exqs*cyq/s1*szs *sps
hx4 = fx4*ct2+fz4*st2
hz4 =-fx4*st2+fz4*ct2
sqqs= np.sqrt(1/q2**2+1/s2**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx5 =-sqqs*exqs*cyq*czs *sps
hy5 = exqs/q2*syq*czs *sps
fz5 = exqs*cyq/s2*szs *sps
hx5 = fx5*ct2+fz5*st2
hz5 =-fx5*st2+fz5*ct2
sqqs= np.sqrt(1/q2**2+1/s3**2)
cyq = np.cos(y/q2)
syq = np.sin(y/q2)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx6 =-sqqs*exqs*cyq*czs *sps
hy6 = exqs/q2*syq*czs *sps
fz6 = exqs*cyq/s3*szs *sps
hx6 = fx6*ct2+fz6*st2
hz6 =-fx6*st2+fz6*ct2
# i=3:
sqqs= np.sqrt(1/q3**2+1/s1**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s1)
szs = np.sin(z2/s1)
exqs= np.exp(sqqs*x2)
fx7 =-sqqs*exqs*cyq*czs *sps
hy7 = exqs/q3*syq*czs *sps
fz7 = exqs*cyq/s1*szs *sps
hx7 = fx7*ct2+fz7*st2
hz7 =-fx7*st2+fz7*ct2
sqqs= np.sqrt(1/q3**2+1/s2**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s2)
szs = np.sin(z2/s2)
exqs= np.exp(sqqs*x2)
fx8 =-sqqs*exqs*cyq*czs *sps
hy8 = exqs/q3*syq*czs *sps
fz8 = exqs*cyq/s2*szs *sps
hx8 = fx8*ct2+fz8*st2
hz8 =-fx8*st2+fz8*ct2
sqqs= np.sqrt(1/q3**2+1/s3**2)
cyq = np.cos(y/q3)
syq = np.sin(y/q3)
czs = np.cos(z2/s3)
szs = np.sin(z2/s3)
exqs= np.exp(sqqs*x2)
fx9 =-sqqs*exqs*cyq*czs *sps
hy9 = exqs/q3*syq*czs *sps
fz9 = exqs*cyq/s3*szs *sps
hx9 = fx9*ct2+fz9*st2
hz9 =-fx9*st2+fz9*ct2
a1=a[18]+a[19]*s2ps
a2=a[20]+a[21]*s2ps
a3=a[22]+a[23]*s2ps
a4=a[24]+a[25]*s2ps
a5=a[26]+a[27]*s2ps
a6=a[28]+a[29]*s2ps
a7=a[30]+a[31]*s2ps
a8=a[32]+a[33]*s2ps
a9=a[34]+a[35]*s2ps
bx=bx+a1*hx1+a2*hx2+a3*hx3+a4*hx4+a5*hx5+a6*hx6+a7*hx7+a8*hx8+a9*hx9
by=by+a1*hy1+a2*hy2+a3*hy3+a4*hy4+a5*hy5+a6*hy6+a7*hy7+a8*hy8+a9*hy9
bz=bz+a1*hz1+a2*hz2+a3*hz3+a4*hz4+a5*hz5+a6*hz6+a7*hz7+a8*hz8+a9*hz9
return bx, by, bz
|
5729e0999ddefaf2ee39ad9588009cc58f983130
| 3,646,062
|
import numpy as np
def raan2ltan(date, raan, type="mean"):
    """Conversion of RAAN to mean or true Local Time at Ascending Node (LTAN)
Args:
date (Date) : Date of the conversion
raan (float) : RAAN in radians, in EME2000
type (str) : either "mean" or "true"
Return:
float : LTAN in hours
"""
if type == "mean":
mean_solar_angle = raan - _mean_sun_raan(date)
ltan = (12 + mean_solar_angle * 12 / np.pi) % 24
elif type == "true":
theta_sun = (
get_body("Sun")
.propagate(date)
.copy(frame="EME2000", form="spherical")
.theta
)
ltan = ((24 * (raan - theta_sun) / (2 * np.pi)) + 12) % 24
else: # pragma: no cover
raise ValueError("Unknwon Local Time type : {}".format(type))
return ltan
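# A minimal sketch of the "mean" branch arithmetic only; the mean-Sun RAAN
# below is a made-up value (in real use it comes from _mean_sun_raan(date)).
example_raan = np.pi / 2                     # spacecraft RAAN [rad]
example_mean_sun_raan = 0.0                  # assumed mean-Sun RAAN [rad]
example_ltan = (12 + (example_raan - example_mean_sun_raan) * 12 / np.pi) % 24
print(example_ltan)                          # 18.0 -> 18:00 local time at the ascending node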
|
90956203d7b5787f5d49941f89b7871d021e5e74
| 3,646,063
|
def _extract_bbox_annotation(prediction, b, obj_i):
"""Constructs COCO format bounding box annotation."""
height = prediction['eval_height'][b]
width = prediction['eval_width'][b]
bbox = _denormalize_to_coco_bbox(
prediction['groundtruth_boxes'][b][obj_i, :], height, width)
if 'groundtruth_area' in prediction:
area = float(prediction['groundtruth_area'][b][obj_i])
else:
# Using the box area to replace the polygon area. This value will not affect
# real evaluation but may fail the unit test.
area = bbox[2] * bbox[3]
annotation = {
'id': b * 1000 + obj_i, # place holder of annotation id.
'image_id': int(prediction['source_id'][b]), # source_id,
'category_id': int(prediction['groundtruth_classes'][b][obj_i]),
'bbox': bbox,
'iscrowd': int(prediction['groundtruth_is_crowd'][b][obj_i]),
'area': area,
'segmentation': [],
}
return annotation
|
c79a066b719e33704d50128f4d01420af0be27ce
| 3,646,064
|
import numpy as np
import pandas as pd
def value_and_entropy(emax, F, bw, grid_size=1000):
"""
Compute the value function and entropy levels for a θ path
increasing until it reaches the specified target entropy value.
Parameters
==========
emax: scalar
The target entropy value
F: array_like
The policy function to be evaluated
bw: str
A string specifying whether the implied shock path follows best
or worst assumptions. The only acceptable values are 'best' and
        'worst'.
    grid_size: int, optional
        The number of θ grid points used when tracing out the path
        (default is 1000)
Returns
=======
df: pd.DataFrame
A pandas DataFrame containing the value function and entropy
values up to the emax parameter. The columns are 'value' and
'entropy'.
"""
if bw == 'worst':
θs = 1 / np.linspace(1e-8, 1000, grid_size)
else:
θs = -1 / np.linspace(1e-8, 1000, grid_size)
df = pd.DataFrame(index=θs, columns=('value', 'entropy'))
for θ in θs:
df.loc[θ] = evaluate_policy(θ, F)
if df.loc[θ, 'entropy'] >= emax:
break
df = df.dropna(how='any')
return df
|
c9b215d91c6a0affbb4ad8f344614a1f2b6b9a13
| 3,646,065
|
import os
import glob
from flask import jsonify, request  # assumed: this endpoint runs inside a Flask app
def browse_directory():
"""
Browse the local file system starting at the given path and provide the following information:
- project_name_unique: If the given project name is not yet registered in the projects list
- project_path_prefix: The given path with a final separator, e.g. /data/
- project_dir: Name of the project directory generated from the project name
- project_dir_exists: If the project directory already exists in the given path
- path_exists: If the given path exists
- path_unique: If the given path is not yet registered for another project
- subdirs: The list of sub-directories at the given path
"""
data = request.json
path = data['path']
project = data['project']
subdirs = [d for d in glob.glob(f'{path}*') if os.path.isdir(d)] if os.path.isabs(path) else []
project_dir = project_utils.get_folder_name_for_project(project)
full_path = os.path.join(path, project_dir)
video_files = [f for f in glob.glob(f'{path}*{VIDEO_EXT}')]
projects = project_utils.load_project_overview_config()
return jsonify(
project_name_unique=project not in projects,
project_path_prefix=os.path.join(path, ''), # Append a separator
project_dir=project_dir,
project_dir_exists=os.path.exists(full_path),
path_exists=os.path.exists(path),
path_unique=path not in [p['path'] for p in projects.values()],
subdirs=subdirs,
video_files=video_files,
)
|
e8daa6cb0c28f50f536223bf9aac0ae1008d9001
| 3,646,066
|
import numpy as np
def _biorthogonal_window_loopy(analysis_window, shift):
"""
This version of the synthesis calculation is as close as possible to the
Matlab implementation in terms of variable names.
The results are equal.
The implementation follows equation A.92 in
Krueger, A. Modellbasierte Merkmalsverbesserung zur robusten automatischen
Spracherkennung in Gegenwart von Nachhall und Hintergrundstoerungen
Paderborn, Universitaet Paderborn, Diss., 2011, 2011
"""
fft_size = len(analysis_window)
assert np.mod(fft_size, shift) == 0
number_of_shifts = len(analysis_window) // shift
sum_of_squares = np.zeros(shift)
for synthesis_index in range(0, shift):
for sample_index in range(0, number_of_shifts+1):
analysis_index = synthesis_index + sample_index * shift
if analysis_index + 1 < fft_size:
sum_of_squares[synthesis_index] \
+= analysis_window[analysis_index] ** 2
sum_of_squares = np.kron(np.ones(number_of_shifts), sum_of_squares)
synthesis_window = analysis_window / sum_of_squares / fft_size
# Why? Line created by Hai, Lukas does not know, why it exists.
synthesis_window *= fft_size
return synthesis_window
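# A quick sanity check with an assumed Hann analysis window and shift; the
# resulting synthesis window has the same length as the analysis window.
example_analysis_window = np.hanning(512)
example_synthesis_window = _biorthogonal_window_loopy(example_analysis_window, shift=128)
print(example_synthesis_window.shape)   # (512,)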
|
5fc5dd23cb0b01af93a02812210d3b44b2fe84ab
| 3,646,067
|
def depthwise(data, N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW, block_size, use_bias=False):
"""
Depthwise 5-D convolutions,every channel has its filter-kernel
Args:
data (list):a list,the size is 3 if use_bias else the size is 2;
data[0] tvm.tensor.Tensor of type float16 ,shape 5D(N, CI//C0, C0, H, W)
data[1] tvm.tensor.Tensor of type float16 ,shape 6D(CI//(CI//C0)//C0, KH, KW, k_ch*CI//C0, C0, C0)
data[2] tvm.tensor.Tensor of type float16 ,shape 5D(N, CI*k_ch//C0, OH, OW, C0)
N (int): batchsize
H (int): height of featureMap
W (int): width of featureMap
CI (int): channel of featureMap
k_ch (int): channel of Filter
KH (int): height of Filter
KW (int): width of Filter
PAD_H (int): padding pixels in vertical direction
PAD_W (int): padding pixels in horizontal direction
SH (int): stride in vertical direction
SW (int): stride in horizontal direction
        block_size (int): an int, also called "C0"
        use_bias (bool): If True, add bias; otherwise the bias is treated as zero.
Returns:
akg.tvm.Tensor of same type as data, shape is 5D(N, CI*k_ch//C0, OH, OW, C0)
"""
check_list = ["float16"]
dtype = data[0].dtype
if not (dtype in check_list):
raise RuntimeError("depthwise only support %s while dtype is %s" % (",".join(check_list), dtype))
for i in range(len(data)):
shape = data[i].shape
utils.check_shape(shape)
conv_dtype = 'float16'
group = CI // block_size
CO = CI * k_ch
assert k_ch == 1
assert CO % group == 0 and CI % group == 0
assert CO % block_size == 0 and (CI // group) % block_size == 0
clear = False # if clear, use auto tiling
# (N, CI, H, W) -> (N, C0, H, W, C1)
A = data[0]
# (CO, CI // group, KH, KW) -> (CI // group // block * KH * KW, CO // block, block, block)
B = data[1]
if use_bias:
bias = data[2]
bias_name = bias.op.name
else:
bias = None
bias_name = "bias_name"
key = [N, H, W, CI, k_ch, KH, KW, PAD_H, PAD_W, SH, SW]
hash_key = str((tuple(key)))
if hash_key in depthwise_set_dim_map:
cutH, cutCo, cutM, cutK, cutN = depthwise_set_dim_map[hash_key]
else:
# raise RuntimeError("other can not find cutH, cutCo, cutM, cutK, cutN")
cutH = (KH - 1) * KH + 1
cutCo = 16
cutM = 16
cutK = 16 * KH * KW
cutN = 16
clear = True # use auto tiling
OH = (H + 2 * PAD_H - KH) // SH + 1
OW = (W + 2 * PAD_W - KW) // SW + 1
kc1 = akg.tvm.reduce_axis((0, CI // block_size // group), name="kc1")
kh = akg.tvm.reduce_axis((0, KH), name="kh")
kw = akg.tvm.reduce_axis((0, KW), name="kw")
kc0 = akg.tvm.reduce_axis((0, block_size), name="kc0")
p_top, p_bottom, p_left, p_right = PAD_H, PAD_H, PAD_W, PAD_W
output_name = "output"
output_bias_name = "output_bias"
attr = {
"pragma_conv_kernel_n": CO,
"pragma_conv_kernel_h": KH,
"pragma_conv_kernel_w": KW,
"pragma_conv_padding_top": p_top,
"pragma_conv_padding_bottom": p_bottom,
"pragma_conv_padding_left": p_left,
"pragma_conv_padding_right": p_right,
"pragma_conv_bypass_l1": 1,
"pragma_conv_stride_h": SH,
"pragma_conv_stride_w": SW,
"pragma_conv_fm_n": N,
"pragma_conv_fm_c": CI,
"pragma_conv_fm_h": H,
"pragma_conv_fm_w": W,
"pragma_conv_dilation_h": 1,
"pragma_conv_dilation_w": 1,
"feature": A.op.name,
"filter": B.op.name,
"bias": bias_name,
"res": output_name,
"res_bias": output_bias_name
}
if not clear:
attr["pragma_conv_h_cut"] = cutH
attr["pragma_conv_w_cut"] = W + 2 * PAD_W
attr["pragma_conv_co_cut"] = cutCo
attr["pragma_conv_m_cut"] = cutM
attr["pragma_conv_k_cut"] = cutK
attr["pragma_conv_n_cut"] = cutN
C = akg.tvm.compute((N, CO // block_size, OH, OW, block_size),
lambda n, c1, h, w, c0: akg.lang.ascend.mmad(
akg.tvm.if_then_else(akg.tvm.any((h * SH + kh) < p_top, (h * SH + kh) > (H + p_top - 1),
(w * SW + kw) < p_left, (w * SW + kw) > (W + p_left - 1)),
akg.tvm.const(0.0, conv_dtype),
A[n, c1 // ((CO // block_size) // group) * (
(CI // block_size) // group) + kc1, (
h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
# A[n, kc1, (h * SH + kh - p_top), (w * SW + kw - p_left), kc0])
* B[(kc1 * KH + kh) * KW + kw, c1, c0, kc0], axis=[kc1, kh, kw, kc0]),
attrs=attr, name=output_name)
if use_bias:
out = akg.tvm.compute(C.shape, lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias[0, c1, 0, 0, c0],
name=output_bias_name)
else:
out = C
return out
|
c47ef1cc929ecc9b26550f1eabe1e52073b82028
| 3,646,068
|
import json
async def get_last_recipe_json():
""" Doc Str """
with open(DEBUG_DIR.joinpath("last_recipe.json"), "r") as f:
return json.loads(f.read())
|
71122ff4b4580f30f9d1cec33891289ecb7c574a
| 3,646,069
|
def FindDescendantComponents(config, component_def):
"""Return a list of all nested components under the given component."""
path_plus_delim = component_def.path.lower() + '>'
return [cd for cd in config.component_defs
if cd.path.lower().startswith(path_plus_delim)]
|
f9734442bbe3a01460970b3521827dda4846f448
| 3,646,070
|
def _get_source(loader, fullname):
"""
This method is here as a replacement for SourceLoader.get_source. That
method returns unicode, but we prefer bytes.
"""
path = loader.get_filename(fullname)
try:
return loader.get_data(path)
except OSError:
raise ImportError('source not available through get_data()',
name=fullname)
|
af43b79fa1d90abbbdb66d7d1e3ead480e27cdd1
| 3,646,071
|
from pathlib import Path
def get_source_files(sf: Path) -> list:
"""
Search for files ending in .FLAC/.flac and add them to a list.
Args:
        sf (pathlib.Path): Folder location to search for files.
    Returns:
        list: List of file locations found to match .FLAC/.flac.
"""
return re_file_search.get_list(sf, r".+\.[fF][lL][aA][cC]$")
|
3828d81528b144367c3d5a74ee212caf2a01b111
| 3,646,072
|
import io
def extract_features(clip):
"""
Feature extraction from an audio clip
Args:
        clip (bytes): WAV-encoded audio data.
Returns: A list of feature vectors
"""
sr, clip_array = wav_read(io.BytesIO(clip))
if clip_array.ndim > 1:
clip_array = clip_array[:, 0]
segments = frame_breaker.get_frames(clip_array, sample_rate=sr)
segments_encoded = [np2base64(s, sr) for s in segments]
segment_features = [
[f.feature_value for f in extract_feats_for_segment(s).features]
for s in segments_encoded
]
# extracted_feats = speech_feat_client.extract_speech_features(
# clip,
# opensmile_config=emorec_pytorch_config.ModelBaseline.opensmile_config,
# response_format='list'
# )
# feats = np.array([f.feature_value for f in extracted_feats])
return segment_features
|
13c6c18be92067847eaabada17952a0dab142a3f
| 3,646,073
|
def comparison_func(target: TwoQubitWeylDecomposition,
basis: TwoQubitBasisDecomposer,
base_fid: float,
comp_method: str):
"""
Decompose traces for arbitrary angle rotations.
This assumes that the tq angles go from highest to lowest.
"""
dep_param = (4 * base_fid - 1)/3
if comp_method == 'fid':
traces = fixed_traces(target, basis)
values = [((abs(tr)**2 - 1) * dep_param**i + 1)/ 16
for i, tr in enumerate(traces)]
elif comp_method == 'arb_fid':
traces = arb_traces(target)
values = [((abs(tr)**2 - 1) * dep_param**i + 1)/ 16
for i, tr in enumerate(traces)]
elif comp_method == 'arb_total':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**(a/np.pi) + 1)/ 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'arb_total_quad':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**((a/np.pi)**2) + 1) / 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'arb_total_sqrt':
traces = arb_traces(target)
total_angles = [
0,
abs(target.a),
abs(target.a) + abs(target.b),
abs(target.a) + abs(target.b) + abs(target.c)
]
values = [((abs(tr)**2 - 1) * dep_param**(np.sqrt(a/np.pi)) + 1) / 16
for a, tr in zip(total_angles, traces)]
elif comp_method == 'total_angle':
traces = arb_traces(target)
# negate to find smallest total angle (uses max later)
values = [-10, -10, -10, -abs(target.a) - abs(target.b) - abs(target.c)]
    else:
        raise ValueError("Unknown comp_method: {}".format(comp_method))
    return values
|
a222138a8c3a01aaf6c3657c6bbbaca284332b76
| 3,646,074
|
from bs4 import BeautifulSoup
def create_bs4_obj(connection):
"""Creates a beautiful Soup object"""
soup = BeautifulSoup(connection, 'html.parser')
return soup
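# Tiny usage sketch with an inline HTML string:
example_soup = create_bs4_obj("<html><body><p>hello</p></body></html>")
print(example_soup.p.text)   # hello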
|
b3956b13756e29cd57a0e12457a2d665959fb03d
| 3,646,075
|
import pandas as pd
def __create_dataframe_from_cassandra(query, con):
"""
Function to query into Cassandra and Create Pandas DataFrame
Parameter
---------
query : String - Cassandra Query
con : cassandra connection object
Return
------
df : pd.DataFrame - DataFrame created using the cassandra query output
"""
all_records = list(con.execute(query))
df = pd.DataFrame(all_records)
return df
|
af839ce372af100b4a496350ed6e21ae12b82444
| 3,646,076
|
import os
import pickle
from collections import defaultdict
import tensorflow as tf
from tqdm import tqdm
def get_onehot_attributes(attr_dict, attr2idx, split):
"""get the labels in onehot format
Args:
attr_dict (dict: the dictory contains image_id and its top 5 attributes
attr2idx (dict): the dictory contains corresponding index of attributes
split (str): the split of the dataset (train, val, test)
Returns:
dict: the dictory contains every image and its top 5 attributes
"""
# print("Getting the onehot labels of images...")
attr_label_file_name = os.path.join(
WORKING_PATH, "finetune", split + "_onehot_attribute.pickle"
)
if os.path.exists(attr_label_file_name):
# print(
# "The {} has already existed...".format(split + "_onehot_attribute.pickle")
# )
attr_label_file = open(attr_label_file_name, "rb")
attr_label = pickle.load(attr_label_file)
return attr_label
attr_label = defaultdict()
def generate_onehot(attr):
onehot = [0] * 1000
for idx in attr:
onehot[idx] = 1
return tf.stack(onehot)
for img_id in tqdm(attr_dict.keys()):
attr_index = [attr2idx[word] for word in attr_dict[img_id]]
attr_label[img_id] = generate_onehot(attr_index)
attr_label_file = open(attr_label_file_name, "wb")
pickle.dump(attr_label, attr_label_file)
return attr_label
|
51352fa5ccd24104830d4a4fc4100382055949a4
| 3,646,077
|
from typing import List
from typing import Tuple
from typing import Optional
import ssl
from typing import Iterable
from typing import Union
async def post(
url: str,
content: bytes,
*,
headers: List[Tuple[bytes, bytes]] = None,
loop: Optional[AbstractEventLoop] = None,
cafile: Optional[str] = None,
capath: Optional[str] = None,
cadata: Optional[str] = None,
ssl_context: Optional[ssl.SSLContext] = None,
protocols: Iterable[str] = DEFAULT_PROTOCOLS,
ciphers: Iterable[str] = DEFAULT_CIPHERS,
options: Iterable[int] = DEFAULT_OPTIONS,
chunk_size: int = -1,
connect_timeout: Optional[Union[int, float]] = None,
middleware: Optional[List[HttpClientMiddlewareCallback]] = None
) -> Optional[bytes]:
"""Issues a POST request
Args:
url (str): The url
content (bytes): The body content
headers (List[Tuple[bytes, bytes]], optional): Any extra headers required. Defaults to
None.
loop (Optional[AbstractEventLoop], optional): The optional asyncio event
loop.. Defaults to None.
cafile (Optional[str], optional): The path to a file of concatenated CA
certificates in PEM format. Defaults to None.
capath (Optional[str], optional): The path to a directory containing
several CA certificates in PEM format. Defaults to None.
cadata (Optional[str], optional): Either an ASCII string of one or more
PEM-encoded certificates or a bytes-like object of DER-encoded
certificates. Defaults to None.
ssl_context (Optional[SSLContext], optional): An ssl context to be
used instead of generating one from the certificates.
protocols (Iterable[str], optional): The supported protocols. Defaults
to DEFAULT_PROTOCOLS.
ciphers (Iterable[str], optional): The supported ciphers. Defaults
to DEFAULT_CIPHERS.
options (Iterable[int], optional): The ssl.SSLContext.options. Defaults
to DEFAULT_OPTIONS.
chunk_size (int, optional): The size of each chunk to send or -1 to send
as a single chunk.. Defaults to -1.
connect_timeout (Optional[Union[int, float]], optional): The number
of seconds to wait for the connection. Defaults to None.
middleware (Optional[List[HttpClientMiddlewareCallback]], optional):
Optional middleware. Defaults to None.
Raises:
HTTPError: Is the status code is not ok.
asyncio.TimeoutError: If the connect times out.
Returns:
Optional[bytes]: The response body
"""
data = bytes_writer(content, chunk_size) if content else None
async with HttpClient(
url,
method='POST',
headers=headers,
body=data,
loop=loop,
cafile=cafile,
capath=capath,
cadata=cadata,
ssl_context=ssl_context,
protocols=protocols,
ciphers=ciphers,
options=options,
connect_timeout=connect_timeout,
middleware=middleware
) as response:
await response.raise_for_status()
return await response.raw()
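# A hedged usage sketch (the URL and payload are made up); run the coroutine
# with asyncio.run(example_main()) in an application context.
async def example_main():
    body = await post(
        'https://example.com/api/items',
        b'{"name": "test"}',
        headers=[(b'content-type', b'application/json')],
    )
    print(body)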
|
8aa95bb43f3937ea09ffa0a55b107bf367ffa5bc
| 3,646,078
|
import toml
def parse_config_file(path):
"""Parse TOML config file and return dictionary"""
try:
with open(path, 'r') as f:
return toml.loads(f.read())
    except (FileNotFoundError, toml.TomlDecodeError):
        # Create an empty config file if it was missing or unreadable
        open(path, 'a').close()
        return {}
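# Usage sketch (the path is hypothetical); note that a missing file is created
# empty as a side effect and an empty dict is returned:
example_config = parse_config_file('settings.toml')
print(example_config.get('server', {}))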
|
599164f023c0db5bffa0b6c4de07654daae1b995
| 3,646,079
|
def wordnet_pos(tag):
"""
Transforms nltk part-of-speech tag strings to wordnet part-of-speech tag string.
:param tag: nltk part-of-speech tag string
:type: str
:return: the corresponding wordnet tag
:type: wordnet part-of-speech tag string
"""
    return nltk_wordnet_pos_dict.get(tag[0], nltk_wordnet_pos_dict["N"])
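# Self-contained sketch: the real nltk_wordnet_pos_dict is defined elsewhere in
# the module; the mapping below is a plausible stand-in using WordNet's own
# single-letter POS tags ('n', 'v', 'a', 'r').
nltk_wordnet_pos_dict = {"N": "n", "V": "v", "J": "a", "R": "r"}
print(wordnet_pos("VBD"))   # 'v'
print(wordnet_pos("XYZ"))   # 'n' (unknown tags fall back to the noun tag)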
|
66b915cb63036553d765af5474d690ea4e0f3859
| 3,646,080
|
import requests
def call_telegram_api(function: str, data: dict):
"""Make a raw call to Telegram API."""
return requests.post(
f'https://api.telegram.org/bot{TELEGRAM_TOKEN}/{function}', data=data)
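# Hedged usage sketch; TELEGRAM_TOKEN must already be defined in this module,
# and the chat id below is made up:
example_response = call_telegram_api('sendMessage', {'chat_id': 123456, 'text': 'hello'})
print(example_response.status_code)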
|
547626545942b290dc64cd4f1d75277205751eaf
| 3,646,081
|
def test_POMDP(POMDP, policy, test_data, status):
"""simulation"""
# Basic settings
p = POMDP
ind_iter = 0
horizon = len(test_data)
state = status
action = p.actions[0]
belief = p.init_belief
reward = 0
state_set = [state]
action_set = []
observation_set = ["null"]
alpha_length = len(p.states)
while True:
# make an action
ind_key = np.argmax([
np.dot(
policy[key][:alpha_length],
belief
)
for key in policy.keys()
])
action = policy[list(policy.keys())[ind_key]][alpha_length]
action_set.append(action)
# get a reward
reward = reward + p.reward_func(state=state, action=action)
# check stop condition
ind_iter = ind_iter + 1
if ind_iter >= horizon:
break
# state doesn't change
state = state
state_set.append(state)
# make an observation
observation = test_data.iloc[ind_iter]
observation_set.append(observation)
# update belief
belief = [
p.observ_func(observation, s_new, action) *
np.sum([
p.trans_func(s_new, s_old, action) *
belief[p.states.index(s_old)]
for s_old in p.states
])
for s_new in p.states
]
normalize_const = 1 / sum(belief)
belief = np.multiply(belief, normalize_const)
return action_set
|
0f2dfe7c18d254ca0b2953b11aaac2386d4fe920
| 3,646,082
|
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
def compute_bkr_collection(myCollection,percentile=10,make_images=False,image_name=''):
""" Computes a synthetic background value for a given collection, based on
the lowest values at each point for each image. it treats row (long axis of laser line)
and wavenumber seperately.
Notes:
-Does NOT consider bleach! If you want to include the 'bleach' in this analysis, you should run the 'use_bleach' code
inputs:
myCollection: This is your collection data. Should be Collection class
percentile: this is the lower percentile you wish to treat as the 'background'.
default:10%
make_images: this tells this code to dump images of the BKR.
default: False
image_name = this is prepended onto the image filenames if make_image
is set to true.
default: ''
outputs:
bkr_values: ths is a nxm matrix where n is the number of pixels along the
laser long axis and m is the number of bins in the wavenumber dimension.
This value correspoinds to the "synthetic" background values.
"""
#GEt a dircube. Currently only use the 'replicates'
#
# There was an issue here but it is fixed now.
#
dc00,dx,dz,t,fl,ft,fm = collection_process(myCollection,method='avg')
num_rows,num_wns,num_images = dc00.shape
pctl = np.nanpercentile(dc00,percentile,axis=2)
nudc_pctl = deepcopy(dc00)
for j in range(0,num_images):
#Make all values here equal to NaN
myv = nudc_pctl[:,:,j]
myv[np.where(myv > pctl)] = np.NaN
bkr_values = np.nanmean(nudc_pctl,axis=2)
#Should we output figures?
if make_images==False:
return bkr_values
else:
for myn in range(0,num_rows):
#Name to use to save the data
savename_fign = image_name + '_' + str(myn)
mydcv = dc00[myn,:,:]
#####
##
## THis shows a list of sorted values at various locations
##
#####
#THis could be used to test different values
plt.subplot(2,1,2)
plt.plot(np.diff(np.sort(mydcv[111,:]))) #420...this is* in the realm of montmorillonite.
plt.plot(np.diff(np.sort(mydcv[272,:])))#INorganicSpike
plt.plot(np.diff(np.sort(mydcv[367,:])))#D/G
plt.plot(np.diff(np.sort(mydcv[445,:])))#D/G
plt.plot(np.diff(np.sort(mydcv[909,:])))#CH
plt.plot(np.diff(np.sort(mydcv[600,:])))
plt.plot(np.diff(np.sort(mydcv[700,:])))
plt.plot(np.diff(np.sort(mydcv[1000,:])))
plt.legend(('420','1000','D','G','CH','Test1','Test2','end'),loc=9,ncol=4,prop={'size':12})
plt.ylim([0,0.002])
plt.subplot(2,1,1)
plt.plot(np.sort(mydcv[111,:])) #420...this is* in the realm of montmorillonite.
plt.plot(np.sort(mydcv[272,:]))#INorganicSpike
plt.plot(np.sort(mydcv[367,:]))#D/G
plt.plot(np.sort(mydcv[445,:]))#D/G
plt.plot(np.sort(mydcv[909,:]))#CH
plt.plot(np.sort(mydcv[600,:]))
plt.plot(np.sort(mydcv[700,:]))
plt.plot(np.sort(mydcv[1000,:]))
#plt.legend(('420','1000','D','G','CH','Test1','Test2','end'),loc=9,ncol=2)
plt.savefig(savename_fign + '_sorted.png',transparent=True)
plt.close()
#Plot raw values (gray)
for j in range(0,num_images):
plt.plot(dc00[myn,:,j],color=[0.6,0.6,0.6])
#Plot 10th percentile data (blue)
plt.plot(pctl[myn,:],color='magenta')
plt.plot(bkr_values[myn,:],color='cyan')
savename_fign = image_name + '_' + str(j)
savedata = savename_fign + 'allcomp.jpg'
plt.savefig(savedata)
plt.close()
return bkr_values
|
61d4e73bca55e24c934d7a34888316232cd6e7ff
| 3,646,083
|
def courses_to_take(input):
"""
Time complexity: O(n) (we process each course only once)
Space complexity: O(n) (array to store the result)
"""
# Normalize the dependencies, using a set to track the
# dependencies more efficiently
course_with_deps = {}
to_take = []
for course, deps in input.items():
if not deps:
# Course with no dependencies:
# candidate to start the search
to_take.append(course)
else:
course_with_deps[course] = set(deps)
result = []
while to_take:
course = to_take.pop()
# Add course to journey
result.append(course)
        # Iterate over a snapshot of the remaining courses and remove this course
        # from their dependencies (a copy is needed because entries are deleted below)
        for prereq_course, prereq_deps in list(course_with_deps.items()):
if course in prereq_deps:
prereq_deps.remove(course)
if not prereq_deps:
# Course has all the dependencies solved:
# add to the "to_take" queue
to_take.append(prereq_course)
del course_with_deps[prereq_course]
return result if len(result) == len(input) else None
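# Small illustrative run (course names are invented); the function returns the
# courses in a dependency-respecting order, or None if the prerequisites are cyclic.
example_prereqs = {
    "CSC100": [],
    "CSC200": ["CSC100"],
    "CSC300": ["CSC100", "CSC200"],
}
print(courses_to_take(example_prereqs))   # ['CSC100', 'CSC200', 'CSC300']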
|
eb0fe7271497fb8c5429360d37741d20f691ff3c
| 3,646,084
|
def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
"""Takes font and list of glyph lists (must be sorted by glyph id), and returns
two things:
- Combined glyph list,
- If values_lst is None, return input glyph lists, but padded with None when a glyph
was missing in a list. Otherwise, return values_lst list-of-list, padded with None
to match combined glyph lists.
"""
if values_lst is None:
dict_sets = [set(l) for l in lst]
else:
dict_sets = [{g:v for g,v in zip(l,vs)} for l,vs in zip(lst,values_lst)]
combined = set()
combined.update(*dict_sets)
sortKey = font.getReverseGlyphMap().__getitem__
order = sorted(combined, key=sortKey)
# Make sure all input glyphsets were in proper order
assert all(sorted(vs, key=sortKey) == vs for vs in lst)
del combined
paddedValues = None
if values_lst is None:
padded = [[glyph if glyph in dict_set else default
for glyph in order]
for dict_set in dict_sets]
else:
assert len(lst) == len(values_lst)
padded = [[dict_set[glyph] if glyph in dict_set else default
for glyph in order]
for dict_set in dict_sets]
return order, padded
|
cc671625bcaa2016cb8562c5727d7afa624699a9
| 3,646,085
|
def sig_for_ops(opname):
"""sig_for_ops(opname : str) -> List[str]
Returns signatures for operator special functions (__add__ etc.)"""
# we have to do this by hand, because they are hand-bound in Python
assert opname.endswith('__') and opname.startswith('__'), "Unexpected op {}".format(opname)
name = opname[2:-2]
if name in binary_ops:
return ['def {}(self, other: Any) -> Tensor: ...'.format(opname)]
elif name in comparison_ops:
# unsafe override https://github.com/python/mypy/issues/5704
return ['def {}(self, other: Any) -> Tensor: ... # type: ignore'.format(opname)]
elif name in unary_ops:
return ['def {}(self) -> Tensor: ...'.format(opname)]
elif name in to_py_type_ops:
if name in {'bool', 'float', 'complex'}:
tname = name
elif name == 'nonzero':
tname = 'bool'
else:
tname = 'int'
if tname in {'float', 'int', 'bool', 'complex'}:
tname = 'builtins.' + tname
return ['def {}(self) -> {}: ...'.format(opname, tname)]
else:
raise Exception("unknown op", opname)
|
7f5850c5719ed631d4aabc22b757969d1161eee2
| 3,646,086
|
def add_qos(tenant_id, qos_name, qos_desc):
"""Adds a qos to tenant association."""
LOG.debug(_("add_qos() called"))
session = db.get_session()
try:
qos = (session.query(network_models_v2.QoS).
filter_by(tenant_id=tenant_id).
filter_by(qos_name=qos_name).one())
raise c_exc.QosNameAlreadyExists(qos_name=qos_name,
tenant_id=tenant_id)
except exc.NoResultFound:
qos = network_models_v2.QoS(qos_id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
qos_name=qos_name,
qos_desc=qos_desc)
session.add(qos)
session.flush()
return qos
|
a9186776eada52c35a38bc02667284575bed33d6
| 3,646,087
|
from tensorflow.keras.models import Sequential  # assumed: the tf.keras API is in use
from tensorflow.keras.layers import LSTM, Dense
def lstm2(hidden_nodes, steps_in=5, steps_out=1, features=1):
"""
A custom LSTM model.
:param hidden_nodes: number of hidden nodes
:param steps_in: number of (look back) time steps for each sample input
:param steps_out: number of (look front) time steps for each sample output
:param features: number of features for each sample input (e.g. 1 for univariate or 2+ for multivariate time series)
:return: simple LSTM model
"""
model = Sequential()
model.add(LSTM(hidden_nodes, input_shape=(steps_in, features), return_sequences=True)) # default activation: tanh
model.add(LSTM(hidden_nodes)) # default activation: tanh
model.add(Dense(steps_out)) # default activation: None
model.compile(optimizer='adam', loss='mse')
return model
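# Minimal usage sketch: a univariate model mapping 5 past steps to 1 future step.
example_model = lstm2(hidden_nodes=32, steps_in=5, steps_out=1, features=1)
example_model.summary()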
|
c84c1d4a3cb31ca74fd3d135a9c48c72f5f6c715
| 3,646,088
|
def build_carousel_scroller(items):
"""
Usage:
item_layout = widgets.Layout(height='120px', min_width='40px')
items = [pn.Row(a_widget, layout=item_layout, margin=0, background='black') for a_widget in single_pf_output_panels]
# items = [widgets.Button(layout=item_layout, description=str(i), button_style='success') for i in range(40)]
# build_carousel_scroller(items)
build_carousel_scroller(single_pf_output_panels)
"""
box_layout = pn.widgets.Layout(overflow_x='scroll', border='3px solid black',
width='1024px',
height='',
flex_flow='row',
display='flex')
carousel = pn.widgets.Box(children=items, layout=box_layout)
return pn.widgets.VBox([pn.widgets.Label('Scroll horizontally:'), carousel])
|
2c48ef11de7647320833c74e5dc155365c0ae847
| 3,646,089
|
import sys
def smolsolve(x, xBound, f0, t, K_A, source, Nt):
""" solve Smoluchowski equations
Input: x, initial condition, time, kernel, # timestep
Output: solution f(t,x)
"""
dx = xBound[1] - xBound[0]
Nx = x.size
dt = t / Nt
g = x * f0
    for _ in range(Nt):  # time-stepping loop (avoid shadowing the argument t)
JL = 0*x
fsrc = 0*x
# source term for f
if source == 'none':
fsrc = 0*x
elif source == 'myGaussian':
fsrc = 0.05*np.exp(-((x-8.)**2))
else:
sys.exit("source incorrectly specified")
# Flux term
for i in range(1,Nx):
for p in range(0,i):
# K_A = 1
# this is analytic expression for int_{x_j}^{x_j+1} K_A(x_mid(i),y)/y \, dy
if K_A == '1':
kernBndry = np.log(xBound[i-p]/x[i-p-1])
kern = np.log(xBound[i-p+1:-1]/xBound[i-p:-2])
# K_A = x*y
elif K_A == 'x*y':
xA = x[i-p-1]
xB = xBound[i-p]
kernBndry = (xB - xA) * x[p]
xA = xBound[i-p:-2]
xB = xBound[i-p+1:-1]
kern = (xB - xA) * x[p]
elif K_A == '2+(x/y)^2+(y/x)^2':
xA = x[i-p-1]
xB = xBound[i-p]
kernBndry = (-xA**2 + xB**2 + x[p]**4 * (1./xA**2-1./xB**2)) / (2.*x[p]**2) + 2.*np.log(xB/xA)
xA = xBound[i-p:-2]
xB = xBound[i-p+1:-1]
kern = (-xA**2 + xB**2 + x[p]**4 * (1./xA**2-1./xB**2)) / (2.*x[p]**2) + 2.*np.log(xB/xA)
elif K_A == '(x*y)^(15/14)*(x+y)^(9/14)': # https://arxiv.org/pdf/astro-ph/0201102.pdf
normConst = 0.001 # make physically meaningful!
xA = x[i-p-1]
xB = xBound[i-p]
kernBndry = normConst*-(7./120.) * x[p]**(15./14.) * (x[p] * (9. * x[p] * (xA/(xA+x[p])**5.)**(1./14.)+19. * (xA**15./(xA+x[p])**5.)**(1./14.)-9. * x[p] * (xB/(xB+x[p])**5.)**(1./14.)-19.*(xB**15./(xB+x[p])**5.)**(1./14.))+10.*((xA**29./(xA+x[p])**5.)**(1./14.)-(xB**29./(xB+x[p])**5.)**(1./14.))-9.*(xA * x[p]**23.)**(1./14.) * sps.hyp2f1(1./14.,5./14.,15./14.,-(xA/x[p]))+9.*(xB * x[p]**23.)**(1./14.)*sps.hyp2f1(1./14.,5./14.,15./14.,-(xB/x[p])))
xA = xBound[i-p:-2]
xB = xBound[i-p+1:-1]
kern = normConst*-(7./120.) * x[p]**(15./14.) * (x[p] * (9. * x[p] * (xA/(xA+x[p])**5.)**(1./14.)+19. * (xA**15./(xA+x[p])**5.)**(1./14.)-9. * x[p] * (xB/(xB+x[p])**5.)**(1./14.)-19.*(xB**15./(xB+x[p])**5.)**(1./14.))+10.*((xA**29./(xA+x[p])**5.)**(1./14.)-(xB**29./(xB+x[p])**5.)**(1./14.))-9.*(xA * x[p]**23.)**(1./14.) * sps.hyp2f1(1./14.,5./14.,15./14.,-(xA/x[p]))+9.*(xB * x[p]**23.)**(1./14.)*sps.hyp2f1(1./14.,5./14.,15./14.,-(xB/x[p])))
else:
sys.exit("kernel incorrectly specified")
JL[i] = JL[i] + dx*g[p] * (kernBndry*g[i-p-1] + np.sum(kern*g[i-p:-1]))
JR = np.roll(JL,-1)
JR[-1]= 0
g = g - dt / dx * ( JR - JL ) + dt*fsrc*x
f = g / x
return f
|
8503f13ae0031ab80fd6f83a1ba36dc512cc701f
| 3,646,090
|
def base_round(x, base):
"""
This function takes in a value 'x' and rounds it to the nearest multiple
of the value 'base'.
Parameters
----------
x : int
Value to be rounded
base : int
        Base for x to be rounded to
Returns
-------
int
The rounded value
"""
return base*round(x/base)
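# Quick examples; note that exact ties follow Python's banker's rounding inside round().
print(base_round(17, 5))   # 15
print(base_round(18, 5))   # 20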
|
e5b1a1b81c7baf990b7921fe27a20075c0305935
| 3,646,091
|
def _update_schema_1_to_2(table_metadata, table_path):
"""
Given a `table_metadata` of version 1, update it to version 2.
:param table_metadata: Table Metadata
:param table_path: [String, ...]
:return: Table Metadata
"""
table_metadata['path'] = tuple(table_path)
table_metadata['schema_version'] = 2
table_metadata.pop('table_mappings', None)
return table_metadata
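# Behaviour sketch on a made-up version-1 record:
example_meta_v1 = {'schema_version': 1, 'table_mappings': {}, 'name': 'users'}
print(_update_schema_1_to_2(example_meta_v1, ['prod_db', 'users']))
# {'schema_version': 2, 'name': 'users', 'path': ('prod_db', 'users')}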
|
6b0c8bc72100cceeb1b9da5552e53bc3c9bad3fa
| 3,646,092
|
def train_cnn_7layer(data, file_name, params, num_epochs=10, batch_size=256, train_temp=1, init=None, lr=0.01, decay=1e-5, momentum=0.9, activation="relu", optimizer_name="sgd"):
"""
Train a 7-layer cnn network for MNIST and CIFAR (same as the cnn model in Clever)
mnist: 32 32 64 64 200 200
cifar: 64 64 128 128 256 256
"""
# create a Keras sequential model
model = Sequential()
print("training data shape = {}".format(data.train_data.shape))
params = [int(p) for p in params]
# define model structure
model.add(Conv2D(params[0], (3, 3),
input_shape=data.train_data.shape[1:]))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Conv2D(params[1], (3, 3)))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(params[2], (3, 3)))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Conv2D(params[3], (3, 3)))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(params[4]))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Dropout(0.5))
model.add(Dense(params[5]))
model.add(Lambda(tf.atan) if activation == "arctan" else Activation(activation))
model.add(Dense(200))
# load initial weights when given
    if init is not None:
model.load_weights(init)
# define the loss function which is the cross entropy between prediction and true label
def fn(correct, predicted):
return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
logits=predicted/train_temp)
if optimizer_name == "sgd":
# initiate the SGD optimizer with given hyper parameters
optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
elif optimizer_name == "adam":
optimizer = Adam(lr=lr, beta_1 = 0.9, beta_2 = 0.999, epsilon = None, decay=decay, amsgrad=False)
# compile the Keras model, given the specified loss and optimizer
model.compile(loss=fn,
optimizer=optimizer,
metrics=['accuracy'])
model.summary()
print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
# run training with given dataset, and print progress
history = model.fit(data.train_data, data.train_labels,
batch_size=batch_size,
validation_data=(data.validation_data, data.validation_labels),
epochs=num_epochs,
shuffle=True)
# save model to a file
    if file_name is not None:
model.save(file_name)
print('model saved to ', file_name)
return {'model':model, 'history':history}
|
c2d020b4390aca6bb18de6f7e83c801475ac1a03
| 3,646,093
|
def validate():
"""
    Goes over all seasons, makes sure they are all present, and checks that each has length 52 (except possibly the last one)
"""
input_file = processedDatafileName
clusters_file = 'data/SeasonClustersFinal'
seasonDic = {}
allSeasons = {}
for line in open(clusters_file):
arr = line.strip().split()
year = int(arr[0])
season = int(arr[1])
seasonDic[year] = season
allSeasons[season] = True
# indexed by region
all_data = {}
in_f = open(input_file)
in_f.readline()
in_f.readline()
for line in in_f:
raw = line.strip().split(',')
region = raw[1].strip()
year = int(raw[2].strip())
week = int(raw[3].strip())
        ## up to the 20th week belongs to last year's cycle
if(week <= 20):
year -= 1
infection = raw[4].strip()
inf = 0
if is_number(infection):
inf = float(infection)
if region not in all_data:
all_data[region]={}
if year not in all_data[region]:
all_data[region][year] = []
all_data[region][year].append(inf)
isValid = True
region_order = []
for region, raw in all_data.items():
region_order.append(region)
keylist = list(raw.keys())
keylist.sort()
for year in keylist:
if year>=1998 and year<=2018 and len(raw[year]) != 52:
print(region, year)
isValid = False
return isValid
|
883d21b656730c3e26a94ba8581107f359b337b4
| 3,646,094
|
def get_registry(): # noqa: E501
"""Get registry information
Get information about the registry # noqa: E501
:rtype: Registry
"""
try:
res = Registry(
name="Challenge Registry",
description="A great challenge registry",
user_count=DbUser.objects.count(),
org_count=DbOrg.objects.count(),
challenge_count=DbChallenge.objects.count(),
)
status = 200
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
|
7798da55bee2ad1d6edce37f7c00e4597412491d
| 3,646,095
|
def get_fraction_vaccinated(model, trajectories, area=None, include_recovered=True):
"""Get fraction of individuals that are vaccinated or immune (by area) by
state.
Parameters
----------
model : amici.model
Amici model which should be evaluated.
trajectories : pd.DataFrame
Trajectories of the model simulation.
    area : str, optional
        Name of the area to evaluate; if None, all areas are used.
include_recovered : bool
If True, recovered individuals are counted as well.
Returns
-------
percentage_vaccinated: pd.Series
Trajectories of the fraction that is vaccinated or immune.
"""
vaccinated = get_vaccinated_model(model, area=area)
sus_inf = get_alive_model(model, area=area)
df_vaccinated = trajectories[vaccinated]
df_sus_inf = trajectories[sus_inf]
total_vaccinated = df_vaccinated.sum(axis=1)
    sus_inf = df_sus_inf.sum(axis=1)
percentage_vaccinated = total_vaccinated / sus_inf
return percentage_vaccinated
|
f11cbeb737c8592528441293b9fd25fed4bee37f
| 3,646,096
|
def test_function(client: Client) -> str:
"""
Performs test connectivity by valid http response
:param client: client object which is used to get response from api
:return: raise ValueError if any error occurred during connection
"""
client.http_request(method='GET', url_suffix=URL_SUFFIX['TEST_MODULE'])
return 'ok'
|
8c773fd9a87a45270157f682cb3229b83ba4a9e0
| 3,646,097
|
import pkgutil
import doctest
def load_tests(loader, tests, ignore):
"""Create tests from all docstrings by walking the package hierarchy."""
modules = pkgutil.walk_packages(rowan.__path__, rowan.__name__ + ".")
for _, module_name, _ in modules:
tests.addTests(doctest.DocTestSuite(module_name, globs={"rowan": rowan}))
return tests
|
d8495b32c6cb95a94857f611700f07b9183a9b63
| 3,646,098
|
import numpy as np
def k_hot_array_from_string_list(context,
typename,
entity_names):
"""Create a numpy array encoding a k-hot set.
Args:
context: a NeuralExpressionContext
typename: type of entity_names
entity_names: list of names of type typename
Returns:
A k-hot-array representation of the set of entity_names. For frozen
dictionaries, unknown entity names are mapped to the unknown_id of their
type or discarded if the unknown_value of the type is None. Unknown entity
names will throw an nql.EntityNameException for non-frozen dictionaries.
It is possible for this method to return an all-zeros array.
"""
# Empty string is not a valid entity_name.
ids = [context.get_id(e, typename) for e in entity_names if e]
# None is not a valid id.
valid_ids = [x for x in ids if x is not None]
max_id = context.get_max_id(typename)
result = np.zeros((max_id,), dtype='float32')
if valid_ids:
result[valid_ids] = 1.
return result
|
66c987f7c5d1e3af2b419d0db301ad811a8df5b7
| 3,646,099
|