Dataset schema (⌀ marks nullable columns):
hexsha: string (length 40) | size: int64 (3 to 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars / max_issues / max_forks groups, each with: repo_path: string (3 to 972) | repo_name: string (6 to 130) | repo_head_hexsha: string (40 to 78) | repo_licenses: list (1 to 10) | count: int64 ⌀ (stars 1 to 191k, issues 1 to 116k, forks 1 to 105k) | event_min_datetime: string (24) ⌀ | event_max_datetime: string (24) ⌀
content: string (3 to 1.03M) | avg_line_length: float64 (1.13 to 941k) | max_line_length: int64 (2 to 941k) | alphanum_fraction: float64 (0 to 1)
hexsha: fd7ddf414c153d91f9749982752c427d082f2d2f | size: 6,313 | ext: py | lang: Python
max_stars: path=src/models/optimizer_bank.py, repo=ChihHsuLin/cellular_image_classification, head=5ea81b4a0f42d17ecb95c41ff4349ef610841394, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: path=src/models/optimizer_bank.py, repo=ChihHsuLin/cellular_image_classification, head=5ea81b4a0f42d17ecb95c41ff4349ef610841394, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_forks: path=src/models/optimizer_bank.py, repo=ChihHsuLin/cellular_image_classification, head=5ea81b4a0f42d17ecb95c41ff4349ef610841394, licenses=["MIT"], count=1, min_datetime=2021-09-24T12:22:28.000Z, max_datetime=2021-09-24T12:22:28.000Z
content:
import math
import torch
import torchcontrib
from torch.optim.optimizer import Optimizer
import itertools as it
class StochasticWeightAverage(torchcontrib.optim.SWA):
def __init__(self, optimizer, swa_start=None, swa_freq=None, swa_lr=None):
super(StochasticWeightAverage, self).__init__(optimizer, swa_start, swa_freq, swa_lr)
def has_swa(self):
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
if 'swa_buffer' not in param_state:
return False
return True
def set_swa_param(self, swa_start=None, swa_freq=None, swa_lr=None):
if swa_start is not None:
self.swa_start = swa_start
if swa_freq is not None:
self.swa_freq = swa_freq
if swa_lr is not None:
self.swa_lr = swa_lr
# from torch.optim import Optimizer
# credit - Lookahead implementation from LonePatient - https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py
# credit2 - RAdam code by https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam.py
class Ranger(Optimizer):
def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95, 0.999), eps=1e-8,
weight_decay=0):
# parameter checks
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
if not lr > 0:
raise ValueError(f'Invalid Learning Rate: {lr}')
if not eps > 0:
raise ValueError(f'Invalid eps: {eps}')
# parameter comments:
# beta1 (momentum) of .95 seems to work better than .90...
# N_sma_threshold of 5 seems better in testing than 4.
# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
# prep defaults and init torch.optim base
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(Ranger, self).__init__(params, defaults)
# adjustable threshold
self.N_sma_threshhold = N_sma_threshhold
# now we can get to work...
for group in self.param_groups:
group["step_counter"] = 0
# print("group step counter init")
# look ahead params
self.alpha = alpha
self.k = k
# radam buffer for state
self.radam_buffer = [[None, None, None] for ind in range(10)]
# lookahead weights
self.slow_weights = [[p.clone().detach() for p in group['params']]
for group in self.param_groups]
# don't use grad for lookahead weights
for w in it.chain(*self.slow_weights):
w.requires_grad = False
def __setstate__(self, state):
print("set state called")
super(Ranger, self).__setstate__(state)
def step(self, closure=None):
loss = None
# note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
# Uncomment if you need to use the actual closure...
# if closure is not None:
# loss = closure()
# ------------ radam
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.radam_buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma > self.N_sma_threshhold:
step_size = group['lr'] * math.sqrt(
(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
if N_sma > self.N_sma_threshhold:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
# ---------------- end radam step
# look ahead tracking and updating if latest batch = k
for group, slow_weights in zip(self.param_groups, self.slow_weights):
group['step_counter'] += 1
if group['step_counter'] % self.k != 0:
continue
for p, q in zip(group['params'], slow_weights):
if p.grad is None:
continue
q.data.add_(self.alpha, p.data - q.data)
p.data.copy_(q.data)
return loss
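# Illustrative usage sketch, not part of the original repository file: it
# exercises the two classes defined above on toy data. Hyperparameter values
# are arbitrary examples; swap_swa_sgd() is inherited from torchcontrib.optim.SWA.
if __name__ == "__main__":
    torch.manual_seed(0)
    x, y = torch.randn(32, 4), torch.randn(32, 1)

    # Ranger (RAdam + Lookahead) on a small linear model
    model_a = torch.nn.Linear(4, 1)
    ranger = Ranger(model_a.parameters(), lr=1e-3, alpha=0.5, k=6)
    for _ in range(20):
        model_a.zero_grad()
        torch.nn.functional.mse_loss(model_a(x), y).backward()
        ranger.step()

    # Stochastic Weight Averaging wrapped around plain SGD
    model_b = torch.nn.Linear(4, 1)
    swa = StochasticWeightAverage(torch.optim.SGD(model_b.parameters(), lr=0.01),
                                  swa_start=5, swa_freq=2, swa_lr=0.005)
    for _ in range(20):
        model_b.zero_grad()
        torch.nn.functional.mse_loss(model_b(x), y).backward()
        swa.step()
    if swa.has_swa():
        swa.swap_swa_sgd()  # copy the averaged weights back into model_b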
avg_line_length: 38.03012 | max_line_length: 132 | alphanum_fraction: 0.545699
hexsha: 0d7687ea57c2206103bf0f5dd9a10c2ea2f50369 | size: 6,472 | ext: py | lang: Python
max_stars: path=tests/mechanic/launcher_test.py, repo=paulcoghlan/rally, head=76f82265814836565ccf53edda3d5426e4c2db8a, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
max_issues: path=tests/mechanic/launcher_test.py, repo=paulcoghlan/rally, head=76f82265814836565ccf53edda3d5426e4c2db8a, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
max_forks: path=tests/mechanic/launcher_test.py, repo=paulcoghlan/rally, head=76f82265814836565ccf53edda3d5426e4c2db8a, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
content:
from unittest import TestCase, mock
from esrally import config, exceptions
from esrally.utils import opts
from esrally.mechanic import launcher
class MockMetricsStore:
def add_meta_info(self, scope, scope_key, key, value):
pass
class MockClientFactory:
def __init__(self, hosts, client_options):
self.client_options = client_options
def create(self):
return MockClient(self.client_options)
class MockClient:
def __init__(self, client_options):
self.client_options = client_options
self.cluster = SubClient({
"cluster_name": "rally-benchmark-cluster",
"nodes": {
"FCFjozkeTiOpN-SI88YEcg": {
"name": "Nefarius",
"host": "127.0.0.1"
}
}
})
self.nodes = SubClient({
"nodes": {
"FCFjozkeTiOpN-SI88YEcg": {
"name": "Nefarius",
"host": "127.0.0.1",
"os": {
"name": "Mac OS X",
"version": "10.11.4",
"available_processors": 8
},
"jvm": {
"version": "1.8.0_74",
"vm_vendor": "Oracle Corporation"
}
}
}
})
self._info = {
"version":
{
"number": "5.0.0",
"build_hash": "abc123"
}
}
def info(self):
if self.client_options.get("raise-error-on-info", False):
import elasticsearch
raise elasticsearch.ConnectionError("Unittest error")
return self._info
def search(self, *args, **kwargs):
return {}
class SubClient:
def __init__(self, info):
self._info = info
def stats(self, *args, **kwargs):
return self._info
def info(self, *args, **kwargs):
return self._info
class ExternalLauncherTests(TestCase):
test_host = opts.TargetHosts("127.0.0.1:9200,10.17.0.5:19200")
client_options = opts.ClientOptions("timeout:60")
def test_setup_external_cluster_single_node(self):
cfg = config.Config()
cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
cfg.add(config.Scope.application, "client", "hosts", self.test_host)
cfg.add(config.Scope.application, "client", "options",self.client_options)
m = launcher.ExternalLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
m.start()
# automatically determined by launcher on attach
self.assertEqual(cfg.opts("mechanic", "distribution.version"), "5.0.0")
def test_setup_external_cluster_multiple_nodes(self):
cfg = config.Config()
cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
cfg.add(config.Scope.application, "client", "hosts", self.test_host)
cfg.add(config.Scope.application, "client", "options", self.client_options)
cfg.add(config.Scope.application, "mechanic", "distribution.version", "2.3.3")
m = launcher.ExternalLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
m.start()
# did not change user defined value
self.assertEqual(cfg.opts("mechanic", "distribution.version"), "2.3.3")
class ClusterLauncherTests(TestCase):
test_host = opts.TargetHosts("10.0.0.10:9200,10.0.0.11:9200")
client_options = opts.ClientOptions('timeout:60')
def test_launches_cluster_with_post_launch_handler(self):
on_post_launch = mock.Mock()
cfg = config.Config()
cfg.add(config.Scope.application, "client", "hosts", self.test_host)
cfg.add(config.Scope.application, "client", "options", self.client_options)
cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
cfg.add(config.Scope.application, "mechanic", "telemetry.params", {})
cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(),
on_post_launch=on_post_launch, client_factory_class=MockClientFactory)
cluster = cluster_launcher.start()
self.assertEqual([{"host": "10.0.0.10", "port":9200}, {"host": "10.0.0.11", "port":9200}], cluster.hosts)
self.assertIsNotNone(cluster.telemetry)
# this requires at least Python 3.6
# on_post_launch.assert_called_once()
self.assertEqual(1, on_post_launch.call_count)
def test_launches_cluster_without_post_launch_handler(self):
cfg = config.Config()
cfg.add(config.Scope.application, "client", "hosts", self.test_host)
cfg.add(config.Scope.application, "client", "options", self.client_options)
cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
cfg.add(config.Scope.application, "mechanic", "telemetry.params", {})
cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
cluster = cluster_launcher.start()
self.assertEqual([{"host": "10.0.0.10", "port":9200}, {"host": "10.0.0.11", "port":9200}], cluster.hosts)
self.assertIsNotNone(cluster.telemetry)
@mock.patch("time.sleep")
def test_error_on_cluster_launch(self, sleep):
on_post_launch = mock.Mock()
cfg = config.Config()
cfg.add(config.Scope.application, "client", "hosts", self.test_host)
# Simulate that the client will raise an error upon startup
cfg.add(config.Scope.application, "client", "options", opts.ClientOptions("raise-error-on-info:true"))
#cfg.add(config.Scope.application, "client", "options", {"raise-error-on-info": True})
cfg.add(config.Scope.application, "mechanic", "telemetry.devices", [])
cfg.add(config.Scope.application, "mechanic", "telemetry.params", {})
cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(),
on_post_launch=on_post_launch, client_factory_class=MockClientFactory)
with self.assertRaisesRegex(exceptions.LaunchError,
"Elasticsearch REST API layer is not available. Forcefully terminated cluster."):
cluster_launcher.start()
self.assertEqual(0, on_post_launch.call_count)
avg_line_length: 40.198758 | max_line_length: 122 | alphanum_fraction: 0.612021
hexsha: a21a9d6c77a9a4b3b388e91b2ff2c17fc6c3c815 | size: 9,065 | ext: py | lang: Python
max_stars: path=scripts/comb_model.py, repo=fpl-analytics/gr_crypto, head=2b0ab451c9c205a9f572c4bca23fffbb68ca188f, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: path=scripts/comb_model.py, repo=fpl-analytics/gr_crypto, head=2b0ab451c9c205a9f572c4bca23fffbb68ca188f, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_forks: path=scripts/comb_model.py, repo=fpl-analytics/gr_crypto, head=2b0ab451c9c205a9f572c4bca23fffbb68ca188f, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
content:
# G-Research Crypto Kaggle Project
# Running tf model based on vwap.
# I want to try this a couple of different ways:
# - Normalised (whole column)
# - Normalised (by each "row" in the 3d array)
# I think row based will be better, as Target is Growth not absolute
# Setup
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import tensorflow as tf
import multiprocessing
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
cores = multiprocessing.cpu_count()
tf.config.threading.set_inter_op_parallelism_threads(cores-1)
tf.keras.backend.set_floatx('float64')
root_folder = os.getcwd() + "/data"
# Reading
wide_vwap = pd.read_csv(root_folder + "/working/wide_vwap.csv")
wide_high = pd.read_csv(root_folder + "/working/wide_high.csv")
wide_low = pd.read_csv(root_folder + "/working/wide_low.csv")
wide_target = pd.read_csv(root_folder + "/working/wide_target.csv")
asset_details = pd.read_csv(root_folder + "/asset_details.csv")
# assets = list(asset_details["Asset_Name"])
assets = [str(i) for i in asset_details["Asset_ID"]]
# Preprocess DataFrame
## Getting high_rel and low_rel
high_rel = pd.DataFrame(columns=[s + "_high" for s in assets], index=range(len(wide_low)))
low_rel = pd.DataFrame(columns=[s + "_low" for s in assets], index=range(len(wide_low)))
for a in assets:
high_rel[a + "_high"] = np.log(wide_high[a]) - np.log(wide_vwap[a])
low_rel[a + "_low"] = np.log(wide_low[a]) - np.log(wide_vwap[a])
# Adding time back to df
high_rel["time"] = wide_high["time"]
low_rel["time"] = wide_low["time"]
## Get vwap diff
# define function to compute log returns
def log_return(series, periods=1):
return np.log(series).diff(periods=periods)
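# Worked example (illustrative): for a price series [100, 110], log_return gives
# [NaN, ln(110) - ln(100)] ≈ [NaN, 0.0953], i.e. roughly a 9.5% continuously compounded return.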
# Get minute by minute vwap returns for each asset
df = wide_vwap[assets].apply(log_return)
df["time"] = wide_vwap["time"]
## Converting Target df to just bitcoin, for merging on
btc_num = asset_details[asset_details["Asset_Name"] == "Bitcoin"]["Asset_ID"] \
.to_string(index = False).strip()
btc = wide_target[["time", btc_num]]
btc = btc.rename(columns = {btc_num: "Target"})
df = df.merge(btc, how = "left", on = "time") \
.merge(high_rel, how = "left", on = "time") \
.merge(low_rel, how = "left", on = "time")
## Convert Inf to NA. Makes dropna() work and generally easier to work with
df.replace([np.inf, -np.inf], np.nan, inplace=True)
# Changes of "Target" above or below 0.025 I think are causing problems.
# I'm going to manually set to NA for now, a more elegant solution may follow
outliers = np.abs(df["Target"]) > 0.025
print("Number of outliers is ", np.sum(outliers))
df["Target"] = np.where(np.abs(df["Target"]) > 0.025, np.nan, df["Target"])
## Checking that we don't have missing minutes
print(df.shape)
# print(sum(df["time"] == (df["time"].shift(1) + pd.Timedelta(minutes = 1))))
# Filtering
"""
Before creating 3d Array, I'm going to filter for rows after all coins exist.
This just makes the dataset smaller.
NB - dogecoin only exists after ~mid 2019, so removing na rows removes all data before
this point. If you want more data you could impute Dogecoin, although this of course
comes with its own problems.
"""
# NB - This doesn't seem to be working properly. I think there's some noise
# in Dogecoin, where a random row is = 0
first = df[assets].apply(pd.Series.first_valid_index)
df_filt = df[df.index >= max(first)]
df_filt = df_filt.reset_index(drop = True)
## Drop time (not part of modelling)
time = df_filt["time"]
df_filt = df_filt.drop("time", axis = 1)
## Normalise Data
means = df_filt.mean()
stds = df_filt.std()
df_filt_norm = (df_filt - df_filt.mean()) / df_filt.std()
df_filt_norm["time"] = time
# long = df_filt_norm.dropna().melt(var_name = "column", value_name = "value")
# ax = sns.boxplot(x='column', y='value', data = long)
# _ = ax.set_xticklabels(long.keys(), rotation=90)
# Highs and Lows have long heads / tails respectively. Not sure how
# much of a problem this will be at this point...
# PCA
# Running PCA on non bitcoin columns
# TODO: Tidy up this whole section
cols = [btc_num, btc_num + "_low", btc_num + "_high", "Target"]
temp = df_filt_norm.dropna()
time = pd.to_datetime(temp["time"])
no_na = temp.drop("time", axis = 1)
no_na = no_na.reset_index(drop = True)
bitcoin = no_na[cols]
other = no_na.drop(cols, axis = 1)
pca = PCA()
pca.fit(other)
## Scree plot
PC_values = np.arange(pca.n_components_) + 1
plt.plot(PC_values, pca.explained_variance_ratio_, 'ro-', linewidth=2)
plt.title('Scree Plot')
plt.xlabel('Principal Component')
plt.ylabel('Proportion of Variance Explained')
plt.show()
out_sum = np.cumsum(pca.explained_variance_ratio_)
print ("Cumulative Prop. Variance Explained: ", out_sum)
# I'm going to take 10 for now, although as always with PCA this could be up for debate
# I'd also like to look at other techniques, e.g. Kernel PCA.
other_pca_np = pca.transform(other)
other_pca = pd.DataFrame(other_pca_np,
columns=["pca_" + str(i) for i in list(range(1, other_pca_np.shape[1]+1))])
everything = pd.concat([other_pca[["pca_" + str(i) for i in list(range(1, 11))]],
bitcoin], axis = 1).set_index(time)
everything = everything.reindex(pd.date_range(everything.index.min(),
everything.index.max(),
name = "time", freq = "1 min"))
x = np.array(everything.drop("Target", axis = 1))
y = np.array(everything["Target"]).reshape(-1, 1)
# Clear some space
del(outliers)
del(other_pca_np)
del(other)
del(other_pca)
del(no_na)
del(df)
del(df_filt)
# del(long)
del(everything)
del([wide_high, wide_low, wide_target, wide_vwap])
# Create a 3D input
def create_dataset(X, y, time_steps=1):
Xs, ys = [], []
for i in range(len(X)-time_steps):
v = X[i:(i+time_steps), :]
Xs.append(v)
ys.append(y[i+time_steps])
return np.array(Xs), np.array(ys)
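# Minimal shape check for create_dataset (illustrative addition, not from the
# original script): 5 rows of 2 features with time_steps=3 yields 2 overlapping
# windows of shape (3, 2) and the 2 targets that follow each window.
_demo_x = np.arange(10, dtype=float).reshape(5, 2)
_demo_y = np.arange(5, dtype=float).reshape(-1, 1)
_demo_Xs, _demo_ys = create_dataset(_demo_x, _demo_y, time_steps=3)
print("create_dataset demo shapes:", _demo_Xs.shape, _demo_ys.shape)  # (2, 3, 2) (2, 1)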
all_x, all_y = create_dataset(x, y, 60)
# Remove NAs
## Checking for X (e.g. 3D)
### Boolean for NA rows
missing_x = ~np.isnan(all_x).any(axis=1)
missing_row_x = ~np.logical_not(missing_x).any(axis=1)
## Checking for Y
missing_y = ~np.isnan(all_y).any(axis=1)
## Combining
both = np.logical_and(missing_row_x, missing_y)
## Filtering arrays
filt_x = all_x[both, :, :]
filt_y = all_y[both, :]
# Train / Test Splits
# TODO: Split into folds for CV
n = len(filt_y)
train_x = filt_x[0:int(n*0.8),:,:]
train_y = filt_y[0:int(n*0.8),:]
test_x = filt_x[int(n*0.8):, :, :]
test_y = filt_y[int(n*0.8):,:]
print("train_x is ", train_x.shape)
print("train_y is ", train_y.shape)
print("test_x is ", test_x.shape)
print("test_y is ", test_y.shape)
lstm_model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(52, return_sequences = True, dropout = 0.4),
tf.keras.layers.LSTM(104, dropout = 0.2),
tf.keras.layers.Dense(52, activation='relu'),
tf.keras.layers.Dense(26, activation='relu'),
# Shape => [batch, time, features]
tf.keras.layers.Dense(units=1)
])
mult_dense_mod = tf.keras.Sequential([
# Shape: (time, features) => (time*features)
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=1000, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(units=500, activation='relu'),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(units=100, activation='relu'),
tf.keras.layers.Dense(units=1),
# Add back the time dimension.
# Shape: (outputs) => (1, outputs)
tf.keras.layers.Reshape([1, -1]),
])
def compile_and_fit(model, x, y, patience=5, epochs = 10):
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
patience=patience,
mode='min')
model.compile(loss=tf.losses.MeanSquaredError(),
optimizer=tf.optimizers.Adam(),
metrics=[tf.metrics.MeanAbsoluteError()])
history = model.fit(x = x, y = y, epochs=epochs,
validation_split=0.2, callbacks=[early_stopping])
return history
lstm_history = compile_and_fit(lstm_model, train_x, train_y, patience = 2, epochs = 10)
# dense_history = compile_and_fit(mult_dense_mod, train_x, train_y, patience = 2, epochs = 10)
preds = lstm_model.predict(test_x)
plt.hist(preds)
preds_actual = (preds * stds["Target"]) + means["Target"]
y_actual = (test_y * stds["Target"]) + means["Target"]
plt.hist(y_actual, bins = 50)
plt.hist(preds_actual, bins = 50)
def evaluate_regression(pred, actual):
errors = pred - actual
mse = np.square(errors).mean()
rmse = np.sqrt(mse)
mae = np.abs(errors).mean()
print('Mean Absolute Error: {:.4f}'.format(mae))
print('Root Mean Square Error: {:.4f}'.format(rmse))
def evaluate_up_down(pred, actual):
pred_up = pred > 0
actual_up = actual > 0
print(confusion_matrix(actual_up, pred_up))
evaluate_regression(preds_actual, y_actual)
evaluate_up_down(preds_actual, y_actual)
avg_line_length: 31.919014 | max_line_length: 94 | alphanum_fraction: 0.677882
hexsha: 444a69435675d1abac498373dfdc6f793bda43f7 | size: 3,462 | ext: py | lang: Python
max_stars: path=api/admin/controller/cdn_services.py, repo=aseefahmed/circulation, head=17cbc9186ab3cde9606912559f92b393ac18ecaa, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
max_issues: path=api/admin/controller/cdn_services.py, repo=aseefahmed/circulation, head=17cbc9186ab3cde9606912559f92b393ac18ecaa, licenses=["Apache-2.0"], count=44, min_datetime=2022-01-20T01:31:32.000Z, max_datetime=2022-03-31T01:50:41.000Z
max_forks: path=api/admin/controller/cdn_services.py, repo=jonathangreen/circulation, head=118866f8257e2a97431a28ea5ba8e34e5bd393eb, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
content:
import flask
from flask import Response
from flask_babel import lazy_gettext as _
from api.admin.problem_details import *
from core.model import Configuration, ExternalIntegration
from core.util.problem_detail import ProblemDetail
from . import SettingsController
class CDNServicesController(SettingsController):
def __init__(self, manager):
super(CDNServicesController, self).__init__(manager)
self.protocols = [
{
"name": ExternalIntegration.CDN,
"sitewide": True,
"settings": [
{
"key": ExternalIntegration.URL,
"label": _("CDN URL"),
"required": True,
"format": "url",
},
{
"key": Configuration.CDN_MIRRORED_DOMAIN_KEY,
"label": _("Mirrored domain"),
"required": True,
},
],
}
]
self.goal = ExternalIntegration.CDN_GOAL
def process_cdn_services(self):
self.require_system_admin()
if flask.request.method == "GET":
return self.process_get()
else:
return self.process_post()
def process_get(self):
services = self._get_integration_info(self.goal, self.protocols)
return dict(
cdn_services=services,
protocols=self.protocols,
)
def process_post(self):
name = flask.request.form.get("name")
protocol = flask.request.form.get("protocol")
fields = {"name": name, "protocol": protocol}
form_field_error = self.validate_form_fields(**fields)
if form_field_error:
return form_field_error
is_new = False
id = flask.request.form.get("id")
if id:
# Find an existing service in order to edit it
service = self.look_up_service_by_id(id, protocol)
else:
service, is_new = self._create_integration(
self.protocols, protocol, self.goal
)
if isinstance(service, ProblemDetail):
self._db.rollback()
return service
name_error = self.check_name_unique(service, name)
if name_error:
self._db.rollback()
return name_error
protocol_error = self.set_protocols(service, protocol)
if protocol_error:
self._db.rollback()
return protocol_error
service.name = name
if is_new:
return Response(str(service.id), 201)
else:
return Response(str(service.id), 200)
def validate_form_fields(self, **fields):
"""The 'name' and 'protocol' fields cannot be blank, and the protocol must
be selected from the list of recognized protocols. The URL must be valid."""
name = fields.get("name")
protocol = fields.get("protocol")
if not name:
return INCOMPLETE_CONFIGURATION
if protocol:
error = self.validate_protocol()
if error:
return error
else:
wrong_format = self.validate_formats()
if wrong_format:
return wrong_format
def process_delete(self, service_id):
return self._delete_integration(service_id, self.goal)
avg_line_length: 31.472727 | max_line_length: 85 | alphanum_fraction: 0.561236
hexsha: c5512f0e5d970c7647efe67ebc0959b8f8a9a047 | size: 102,258 | ext: py | lang: Python
max_stars: path=montepython/analyze.py, repo=syasini/montepython_public, head=d33537664b9719c172dab72273939f4301f2f3ba, licenses=["MIT"], count=2, min_datetime=2021-01-11T13:15:09.000Z, max_datetime=2022-03-04T00:44:59.000Z
max_issues: path=montepython/analyze.py, repo=syasini/montepython_public, head=d33537664b9719c172dab72273939f4301f2f3ba, licenses=["MIT"], count=1, min_datetime=2019-08-10T00:54:28.000Z, max_datetime=2019-08-10T00:54:28.000Z
max_forks: path=montepython/analyze.py, repo=syasini/montepython_public, head=d33537664b9719c172dab72273939f4301f2f3ba, licenses=["MIT"], count=1, min_datetime=2019-06-26T10:38:21.000Z, max_datetime=2019-06-26T10:38:21.000Z
content:
"""
.. module:: analyze
:synopsis: Extract data from chains and produce plots
.. moduleauthor:: Karim Benabed <benabed@iap.fr>
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
Collection of functions needed to analyze the Markov chains.
This module also defines a class :class:`Information`, which stores useful
quantities, and shortens the argument passing between the functions.
.. note::
Some of the methods used in this module are directly adapted from the
`CosmoPmc <http://www.cosmopmc.info>`_ code from Kilbinger et al.
"""
import os
import math
import numpy as np
from itertools import count
# The root plotting module, to change options like font sizes, etc...
import matplotlib
# The following line suppresses the need for an X server
matplotlib.use("Agg")
# Module for handling display
import matplotlib.pyplot as plt
# Module to handle warnings from matplotlib
import warnings
import importlib
import io_mp
from itertools import ifilterfalse
from itertools import ifilter
import scipy.ndimage
import scipy.special
import numpy.linalg as la
# Defined to remove the burnin for all the points that were produced before the
# first time where -log-likelihood <= min-minus-log-likelihood+LOG_LKL_CUTOFF
LOG_LKL_CUTOFF = 3
NUM_COLORS = 6
def analyze(command_line):
"""
Main function, does the entire analysis.
It calls in turn all the other routines from this module. To limit the
arguments of each function to a reasonable size, a :class:`Information`
instance is used. This instance is initialized in this function, then
appended by the other routines.
"""
# Check if the scipy module has the interpolate method correctly
# installed (should be the case on every linux distribution with
# standard numpy)
try:
from scipy.interpolate import interp1d
Information.has_interpolate_module = True
except ImportError:
Information.has_interpolate_module = False
warnings.warn(
'No cubic interpolation done (no interpolate method found ' +
'in scipy), only linear')
# Determine how many different folders are asked through the 'info'
# command, and create as many Information instances
files = separate_files(command_line.files)
# Create an instance of the Information class for each subgroup found in
# the previous function. They will each hold all relevant information, and
# be used as a compact way of exchanging information between functions
information_instances = []
for item in files:
info = Information(command_line)
information_instances.append(info)
# Prepare the files, according to the case, load the log.param, and
# prepare the output (plots folder, .covmat, .info and .log files).
# After this step, info.files will contain all chains.
status = prepare(item, info)
# If the preparation step generated new files (for instance,
# translating from NS or CH to Markov Chains) this routine should stop
# now.
if not status:
return
# Compute the mean, maximum of likelihood, 1-sigma variance for this
# main folder. This will create the info.chain object, which contains
# all the points computed stacked in one big array.
convergence(info)
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
command_line.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
command_line.update = 0
# check if analyze() is called directly by the user, or by the mcmc loop at the start of an adaptive run
try:
# command_line.adaptive is defined when called by the mcmc loop
command_line.adaptive
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
command_line.adaptive = 0
# compute covariance matrix, except when we are in update mode and convergence is too bad or good enough
# or if we are in adaptive mode and only want a first guess for the covmat
if command_line.update and (np.amax(info.R) > 3. or np.amax(info.R) < 0.4 or np.isnan(np.sum(info.R))) and not command_line.adaptive:
print '--> Not computing covariance matrix'
else:
try:
if command_line.want_covmat:
print '--> Computing covariance matrix'
info.covar = compute_covariance_matrix(info)
# Writing it out in name_of_folder.covmat
io_mp.write_covariance_matrix(
info.covar, info.backup_names, info.cov_path)
except:
print '--> Computing covariance matrix failed'
pass
# Store an array, sorted_indices, containing the list of indices
# corresponding to the line with the highest likelihood as the first
# element, and then as decreasing likelihood
info.sorted_indices = info.chain[:, 1].argsort(0)
# Writing the best-fit model in name_of_folder.bestfit
bestfit_line = [elem*info.scales[i, i] for i, elem in
enumerate(info.chain[info.sorted_indices[0], 2:])]
io_mp.write_bestfit_file(bestfit_line, info.backup_names,
info.best_fit_path)
# Overwrite center of Fisher matrix from log.param with the bestfit
# from the last set of chains provided
# DEBUG: This doesn't plot the first parameter (omega_b), possibly
# because it's re-scaled (since it's the only one that is rescaled
# and the rest are plotted)?
if command_line.center_fisher:
for index, elem in enumerate(info.ref_names):
info.centers[index] = bestfit_line[index]/info.scales[index, index]
if not command_line.minimal:
# Computing 1,2 and 3-sigma errors, and plot. This will create the
# triangle and 1d plot by default.
compute_posterior(information_instances)
print '--> Writing .info and .tex files'
for info in information_instances:
info.write_information_files()
# when called by MCMC in update mode, return R values so that they can be written for information in the chains
if command_line.update:
return info.R
def prepare(files, info):
"""
Scan the whole input folder, and include all chains in it.
Since you can decide to analyze some file(s), or a complete folder, this
function first needs to separate between the two cases.
.. warning::
If someday you change the way the chains are named, remember to change
here too, because this routine assumes the chains have a double
underscore in their names.
.. note::
Only files ending with .txt will be selected, to keep compatibility
with CosmoMC format
.. note::
New in version 2.0.0: if you ask to analyze a MultiNest
sub-folder (i.e. something that ends in `NS` with capital letters), the
analyze module will translate the output from MultiNest to
standard chains for Monte Python, and stop. You can then run the
`-- info` flag on the whole folder. **This procedure is not necessary
if the run was complete, but only if the MultiNest run was killed
before completion**.
Parameters
----------
files : list
list of potentially only one element, containing the files to analyze.
This can be only one file, or the encompassing folder, files
info : Information instance
Used to store the result
"""
# First test if the folder is a MultiNest, PolyChord or CosmoHammer folder.
# If so, call the module's own routine through the clean conversion
# function, which will translate the output of this other sampling into
# MCMC chains that can then be analyzed.
modules = ['MultiNest', 'PolyChord', 'cosmo_hammer']
tags = ['NS', 'PC', 'CH']
for module_name, tag in zip(modules, tags):
action_done = clean_conversion(module_name, tag, files[0])
if action_done:
return False
# If the input command was an entire folder, then grab everything in it.
# Too small files (below 600 octets) and subfolders are automatically
# removed.
folder, files, basename = recover_folder_and_files(files)
info.files = files
info.folder = folder
info.basename = basename
# Check if the log.param file exists
parameter_file_path = os.path.join(folder, 'log.param')
if os.path.isfile(parameter_file_path):
if os.path.getsize(parameter_file_path) == 0:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"seems empty")
else:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"is missing in the analyzed folder?")
# If the folder has no subdirectory, then go for a simple infoname,
# otherwise, call it with the last name
basename = (os.path.basename(folder) if os.path.basename(folder) != '.'
else os.path.basename(os.path.abspath(
os.path.join(folder, '..'))))
info.v_info_path = os.path.join(folder, basename+'.v_info')
info.h_info_path = os.path.join(folder, basename+'.h_info')
info.tex_path = os.path.join(folder, basename+'.tex')
info.cov_path = os.path.join(folder, basename+'.covmat')
info.log_path = os.path.join(folder, basename+'.log')
info.best_fit_path = os.path.join(folder, basename+'.bestfit')
info.param_path = parameter_file_path
return True
def convergence(info):
"""
Compute convergence for the desired chains, using Gelman-Rubin diagnostic
Chains have been stored in the info instance of :class:`Information`. Note
that the G-R diagnostic can be computed for a single chain, although it will
most probably give absurd results. To do so, it separates the chain into
three subchains.
"""
# Recovering parameter names and scales, creating tex names,
extract_parameter_names(info)
# Now that the number of parameters is known, the array containing bounds
# can be initialised
info.bounds = np.zeros((len(info.ref_names), len(info.levels), 2))
# Circle through all files to find the global maximum of likelihood
#print '--> Finding global maximum of likelihood'
find_maximum_of_likelihood(info)
# Restarting the circling through files, this time removing the burnin,
# given the maximum of likelihood previously found and the global variable
# LOG_LKL_CUTOFF. spam now contains all the accepted points that were
# explored once the chain moved within min_minus_lkl - LOG_LKL_CUTOFF.
# If the user asks for a keep_fraction <1, this is also the place where
# a fraction (1-keep_fraction) is removed at the beginning of each chain.
#print '--> Removing burn-in'
spam = remove_bad_points(info)
info.remap_parameters(spam)
# Now that the list spam contains all the different chains removed of
# their respective burn-in, proceed to the convergence computation
# 2D arrays for mean and var, one column will contain the total (over
# all chains) mean (resp. variance), and each other column the
# respective chain mean (resp. chain variance). R only contains the
# values for each parameter. Therefore, mean and var will have len(spam)+1
# as a first dimension
mean = np.zeros((len(spam)+1, info.number_parameters))
var = np.zeros((len(spam)+1, info.number_parameters))
R = np.zeros(info.number_parameters)
# Store the total number of points, and the total in each chain
total = np.zeros(len(spam)+1)
for j in xrange(len(spam)):
total[j+1] = spam[j][:, 0].sum()
total[0] = total[1:].sum()
# Compute mean and variance for each chain
print '--> Computing mean values'
compute_mean(mean, spam, total)
print '--> Computing variance'
compute_variance(var, mean, spam, total)
print '--> Computing convergence criterion (Gelman-Rubin)'
# Gelman Rubin Diagnostic:
# Computes a quantity linked to the ratio of the mean of the variances of
# the different chains (within), and the variance of the means (between)
# Note: This is not strictly speaking the Gelman Rubin test, defined for
# same-length MC chains. Our quantity is defined without the square root,
# which should not change much the result: a small sqrt(R) will still be a
# small R. The same convention is used in CosmoMC, except for the weighted
# average: we decided to do the average taking into account that longer
# chains should count more
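# In formulas (matching the loops below): with n_j weighted points in chain j
# and N = sum_j n_j,
#     W_i = (1/N)     * sum_j n_j * var_{j,i}
#     B_i = (1/(N-1)) * sum_j n_j * (mean_{j,i} - mean_{i})^2
# and the quantity stored in R[i] (reported as "R-1") is B_i / W_i.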
within = 0
between = 0
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
within += total[j+1]*var[j+1, i]
between += total[j+1]*(mean[j+1, i]-mean[0, i])**2
within /= total[0]
between /= (total[0]-1)
R[i] = between/within
if i == 0:
print ' -> R-1 is %.6f' % R[i], '\tfor ', info.ref_names[i]
else:
print ' %.6f' % R[i], '\tfor ', info.ref_names[i]
# Log finally the total number of steps, and absolute loglikelihood
with open(info.log_path, 'a') as log:
log.write("--> Total number of steps: %d\n" % (
info.steps))
log.write("--> Total number of accepted steps: %d\n" % (
info.accepted_steps))
log.write("--> Minimum of -logLike : %.2f" % (
info.min_minus_lkl))
# Store the remaining members in the info instance, for further writing to
# files, storing only the mean and total of all the chains taken together
info.mean = mean[0]
info.R = R
info.total = total[0]
# Create the main chain, which consists in all elements of spam
# put together. This will serve for the plotting.
info.chain = np.vstack(spam)
def compute_posterior(information_instances):
"""
computes the marginalized posterior distributions, and optionally plots
them
Parameters
----------
information_instances : list
list of information objects, initialised on the given folders, or list
of file, in input. For each of these instance, plot the 1d and 2d
posterior distribution, depending on the flags stored in the instances,
comming from command line arguments or read from a file.
"""
# For convenience, store as `conf` the first element of the list
# information_instances, since it will be called often to check for
# configuration parameters
conf = information_instances[0]
# Pre configuration of the output, note that changes to the font size
# will occur later on as well, to obtain a nice scaling.
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=11)
matplotlib.rc('xtick', labelsize='8')
matplotlib.rc('ytick', labelsize='8')
# Recover max and min values for each instance, defining the a priori place
# of ticks (in case of a comparison, this should change)
for info in information_instances:
info.define_ticks()
# If plots/ folder in output folder does not exist, create it
if os.path.isdir(os.path.join(info.folder, 'plots')) is False:
os.mkdir(os.path.join(info.folder, 'plots'))
# Determine the total number of parameters to plot, based on the list
# without duplicates of the plotted parameters of all information instances
plotted_parameters = []
# For printing not in latex
ref_names = []
for info in information_instances:
for index, name in enumerate(info.plotted_parameters):
if name not in plotted_parameters:
plotted_parameters.append(name)
ref_names.append(info.ref_names[index])
if len(plotted_parameters) == 0:
raise io_mp.AnalyzeError(
"You provided no parameters to analyze, probably by selecting"
" wrong parameters names in the '--extra' file.")
# Find the appropriate number of columns and lines for the 1d posterior
# plot
if conf.num_columns_1d == None:
num_columns = int(round(math.sqrt(len(plotted_parameters))))
else:
num_columns = conf.num_columns_1d
num_lines = int(math.ceil(len(plotted_parameters)*1.0/num_columns))
# For special needs, you can impose here a different number of columns and lines in the 1d plot
# Here is a commented example:
# if (len(plotted_parameters) == 10):
# num_columns = 5
# num_lines = 2
# Create the figures
# which will be 3*3 inches per subplot, quickly growing!
if conf.plot:
fig1d = plt.figure(num=1, figsize=(
3*num_columns,
3*num_lines), dpi=80)
if conf.plot_2d:
fig2d = plt.figure(num=2, figsize=(
3*len(plotted_parameters),
3*len(plotted_parameters)), dpi=80)
# Create the name of the files, concatenating the basenames with
# underscores.
file_name = "_".join(
[info.basename for info in information_instances])
# JL: READ HERE INVERSE FISHER
if info.plot_fisher:
try:
# read inv_fisher file
file_name = os.path.join(info.folder, 'inv_fisher.mat')
n=0
with open(file_name, 'r') as f:
inv_fisher = np.zeros((len(info.ref_names), len(info.ref_names)), 'float64')
for line in f:
if line.find('#') != -1:
fisher_num_param = len(line.split())-1
fisher_indices = np.zeros(fisher_num_param, 'int')
for i in range(fisher_num_param):
fisher_name = line.split()[i+1].replace(',', '')
try:
fisher_indices[i] = info.ref_names.index(fisher_name)
print 'Read fisher matrix entry for parameter ',fisher_name
except:
print 'Input fisher matrix contained unknown parameter ',fisher_name
fisher_indices[i] = -1
else:
if fisher_indices[n] >= 0:
for m in range(fisher_num_param):
if fisher_indices[m] >= 0:
inv_fisher[fisher_indices[n],fisher_indices[m]]=line.split()[m]
n += 1
#print 'Read Fisher matrix:'
#print 'param center scale (Fii)^1/2 (Fii)^-1/2'
#for i in range(len(info.ref_names)):
# if fisher[i,i] != 0.:
# print info.ref_names[i],info.centers[i],info.scales[i,i],math.sqrt(fisher[i,i]),1./math.sqrt(fisher[i,i])
# else:
# print info.ref_names[i],info.centers[i],' ---'
except Warning:
warnings.warn("Did not find inv_fisher file %s" % file_name)
# Loop over all the plotted parameters
# There will be two indices at all time, the one running over the plotted
# parameters, `index`, and the one corresponding to the actual column in
# the actual file, `native_index`. For instance, if you try to plot only
# two columns of a several columns file, index will vary from 0 to 1, but
# the corresponding native indices might be anything.
# Obviously, since plotted parameters contain potentially names not
# contained in some files (in case of a comparison), native index might be
# undefined.
# Defined the legends object, which will store the plot style, to display
# at the level of the figure
legends = [None for _ in range(len(information_instances))]
if not conf.legendnames:
legend_names = [info.basename.replace('_', ' ')
for info in information_instances]
else:
legend_names = conf.legendnames
print '-----------------------------------------------'
for index, name in enumerate(plotted_parameters):
# Adding the subplots to the respective figures, this will correspond
# to the diagonal on the triangle plot.
if conf.plot_2d:
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
index*(len(plotted_parameters)+1)+1,
yticks=[])
if conf.plot:
ax1d = fig1d.add_subplot(
num_lines, num_columns, index+1, yticks=[])
# check for each instance if the name is part of the list of plotted
# parameters, and if yes, store the native_index. If not, store a flag
# to ignore any further plotting or computing issues concerning this
# particular instance.
for info in information_instances:
try:
info.native_index = info.ref_names.index(name)
info.ignore_param = False
standard_name = info.backup_names[info.native_index]
except ValueError:
info.ignore_param = True
# The limits might have been enforced by the user
if name in conf.force_limits.iterkeys():
x_span = conf.force_limits[name][1]-conf.force_limits[name][0]
tick_min = conf.force_limits[name][0] +0.1*x_span
tick_max = conf.force_limits[name][1] -0.1*x_span
ticks = np.linspace(tick_min,
tick_max,
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = conf.force_limits[name]
info.ticks[info.native_index] = ticks
# otherwise, find them automatically
else:
adjust_ticks(name, information_instances)
print ' -> Computing histograms for ', name
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (first step)
#
# simply the histogram from the chains, with few bins
#
info.hist, info.bin_edges = np.histogram(
info.chain[:, info.native_index+2], bins=info.bins,
weights=info.chain[:, 0], normed=False, density=False)
info.hist = info.hist/info.hist.max()
# Correct for temperature
info.hist = info.hist**conf.temperature
info.bincenters = 0.5*(info.bin_edges[1:]+info.bin_edges[:-1])
# 1D posterior normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
info.interp_hist, info.interp_grid = cubic_interpolation(
info, info.hist, info.bincenters)
# minimum credible interval (method by Jan Haman). Fails for
# multimodal histograms
bounds = minimum_credible_intervals(info)
info.bounds[info.native_index] = bounds
# plotting
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing (obsolete - we don't do it anymore since the option --posterior-smoothing
# was defined, so we commented out this part)
#
# factor by which the grid has been made thinner (10 means 10 times more bins)
interpolation_factor = float(len(info.interp_grid))/float(len(info.bincenters))
# factor for gaussian smoothing
sigma = interpolation_factor*info.gaussian_smoothing
# smooth
#smoothed_interp_hist = scipy.ndimage.filters.gaussian_filter(info.interp_hist,sigma)
# re-normalised
#smoothed_interp_hist = smoothed_interp_hist/smoothed_interp_hist.max()
if conf.plot_2d:
##################################################
# plot 1D posterior in diagonal of triangle plot #
##################################################
plot = ax2d.plot(
info.interp_grid,
# version without gaussian smoothing:
info.interp_hist,
# version with gaussian smoothing (commented)
#smoothed_interp_hist,
linewidth=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
legends[info.id] = plot[0]
ax2d.set_xticks(info.ticks[info.native_index])
if conf.legend_style == 'top':
ax2d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
elif conf.legend_style == 'sides':
# Except for the last 1d plot (bottom line), don't
# print ticks
if index == len(plotted_parameters)-1:
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax2d.tick_params('x',direction='inout')
ax2d.set_xlabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
else:
ax2d.set_xticklabels([])
ax2d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
if conf.plot:
if conf.short_title_1d:
ax1d.set_title(
'%s'.format(info.decimal) % (
info.tex_names[info.native_index]),
fontsize=info.fontsize)
else:
# Note the use of double curly brackets {{ }} to produce
# the desired LaTeX output. This is necessary because the
# format function would otherwise understand single
# brackets as fields.
ax1d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax1d.set_xticks(info.ticks[info.native_index])
ax1d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax1d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
##################################################
# plot 1D posterior in 1D plot #
##################################################
ax1d.plot(
info.interp_grid,
# 1d posterior without gaussian filter:
info.interp_hist,
# gaussian filtered 1d posterior (commented):
#smoothed_interp_hist,
# raw 1d posterior:
#info.interp_hist,
lw=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
# uncomment if you want to see the raw points from the histogram
# (to check whether the interpolation and smoothing generated artefacts)
#ax1d.plot(
# info.bincenters,
# info.hist,
# 'ro')
if conf.mean_likelihood:
for info in information_instances:
if not info.ignore_param:
try:
# 1D mean likelihood normalised to P_max=1 (first step)
#
# simply the histogram from the chains, weighted by multiplicity*likelihood
#
lkl_mean, _ = np.histogram(
info.chain[:, info.native_index+2],
bins=info.bin_edges,
normed=False,
weights=np.exp(
conf.min_minus_lkl-info.chain[:, 1])*info.chain[:, 0])
lkl_mean /= lkl_mean.max()
# 1D mean likelihood normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
interp_lkl_mean, interp_grid = cubic_interpolation(
info, lkl_mean, info.bincenters)
# 1D mean likelihood normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing (obsolete - we don't do it anymore since the option --posterior-smoothing
# was defined, so we commented out this part)
#
# smooth
#smoothed_interp_lkl_mean = scipy.ndimage.filters.gaussian_filter(interp_lkl_mean,sigma)
# re-normalised
#smoothed_interp_lkl_mean = smoothed_interp_lkl_mean/smoothed_interp_lkl_mean.max()
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
########################################################
# plot 1D mean likelihood in diagonal of triangle plot #
########################################################
if conf.plot_2d:
# raw mean likelihoods:
#ax2d.plot(info.bincenter, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax2d.plot(interp_grid,
# version without gaussian smoothing:
interp_lkl_mean,
# version with gaussian smoothing (commented)
#smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
########################################################
# plot 1D mean likelihood in 1D plot #
########################################################
if conf.plot:
# raw mean likelihoods:
#ax1d.plot(info.bincenters, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax1d.plot(interp_grid,
# version without gaussian smoothing
interp_lkl_mean,
# version with gaussian smoothing (commented)
#smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
except:
print 'could not find likelihood contour for ',
print info.ref_parameters[info.native_index]
if conf.subplot is True:
if conf.plot_2d:
extent2d = ax2d.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
fig2d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent2d.expanded(1.1, 1.4))
if conf.plot:
extent1d = ax1d.get_window_extent().transformed(
fig1d.dpi_scale_trans.inverted())
fig1d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent1d.expanded(1.1, 1.4))
# Store the function in a file
for info in information_instances:
if not info.ignore_param:
hist_file_name = os.path.join(
info.folder, 'plots',
info.basename+'_%s.hist' % (
standard_name))
write_histogram(hist_file_name,
info.interp_grid, info.interp_hist)
# Now do the rest of the triangle plot
if conf.plot_2d:
for second_index in xrange(index):
second_name = plotted_parameters[second_index]
for info in information_instances:
if not info.ignore_param:
try:
info.native_second_index = info.ref_names.index(
plotted_parameters[second_index])
info.has_second_param = True
second_standard_name = info.backup_names[
info.native_second_index]
except ValueError:
info.has_second_param = False
else:
info.has_second_param = False
ax2dsub = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
(index)*len(plotted_parameters)+second_index+1)
for info in information_instances:
if info.has_second_param:
ax2dsub.axis([info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]])
# 2D likelihood (first step)
#
# simply the histogram from the chains, with few bins only
#
info.n, info.xedges, info.yedges = np.histogram2d(
info.chain[:, info.native_index+2],
info.chain[:, info.native_second_index+2],
weights=info.chain[:, 0],
bins=(info.bins, info.bins),
normed=False)
# Correct for temperature:
info.n = info.n**conf.temperature
info.extent = [
info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]]
info.x_centers = 0.5*(info.xedges[1:]+info.xedges[:-1])
info.y_centers = 0.5*(info.yedges[1:]+info.yedges[:-1])
# 2D likelihood (second step)
#
# like for 1D, interpolate to get a finer grid
# TODO: we should not only interpolate between bin centers, but also extrapolate between side bin centers and bin edges
#
interp_y_centers = scipy.ndimage.zoom(info.y_centers,info.interpolation_smoothing, mode='reflect')
interp_x_centers = scipy.ndimage.zoom(info.x_centers,info.interpolation_smoothing, mode='reflect')
interp_likelihood = scipy.ndimage.zoom(info.n,info.interpolation_smoothing, mode='reflect')
# 2D likelihood (third step)
#
# gaussian smoothing
#
sigma = info.interpolation_smoothing*info.gaussian_smoothing
interp_smoothed_likelihood = scipy.ndimage.filters.gaussian_filter(interp_likelihood,[sigma,sigma], mode='reflect')
# Execute some customisation scripts for the 2d contour plots
if (info.custom2d != []):
for elem in info.custom2d:
execfile('plot_files/'+elem)
# plotting contours, using the ctr_level method (from Karim
# Benabed). Note that only the 1 and 2 sigma contours are
# displayed (due to the line with info.levels[:2])
try:
###########################
# plot 2D filled contours #
###########################
if not info.contours_only:
contours = ax2dsub.contourf(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha=info.alphas[info.id])
# now add a thin darker line
# around the 95% contour
ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[1:2]),
zorder=4,
colors = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id],
linewidths=1)
###########################
# plot 2D contours #
###########################
if info.contours_only:
contours = ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent, levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha = info.alphas[info.id],
linewidths=info.line_width)
except Warning:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
# ADDING FISHER CONTOURS
if info.plot_fisher:
sub_inv_fisher = np.zeros((2,2), 'float64')
sub_inv_fisher[0,0] = inv_fisher[info.native_index,info.native_index]/info.scales[info.native_index,info.native_index]/info.scales[info.native_index,info.native_index]
sub_inv_fisher[1,1] = inv_fisher[info.native_second_index,info.native_second_index]/info.scales[info.native_second_index,info.native_second_index]/info.scales[info.native_second_index,info.native_second_index]
sub_inv_fisher[0,1] = inv_fisher[info.native_index,info.native_second_index]/info.scales[info.native_index,info.native_index]/info.scales[info.native_second_index,info.native_second_index]
sub_inv_fisher[1,0] = sub_inv_fisher[0,1]
if sub_inv_fisher[0,0]*sub_inv_fisher[1,1] != 0.:
inv_sub_inv_fisher = np.linalg.inv(sub_inv_fisher)
x = scipy.ndimage.zoom(info.x_centers,info.interpolation_smoothing, mode='reflect')
y = scipy.ndimage.zoom(info.y_centers,info.interpolation_smoothing, mode='reflect')
z = np.zeros((len(x),len(y)), 'float64')
#print info.ref_names[info.native_index]
#print info.scales
#print info.boundaries[info.native_index]
#print info.centers[info.native_index]
for ix in range(len(x)):
dx = (x[ix] - info.centers[info.native_index])
for iy in range(len(y)):
dy = (y[iy] - info.centers[info.native_second_index])
z[ix,iy] = dx*inv_sub_inv_fisher[0,0]*dx + dy*inv_sub_inv_fisher[1,1]*dy + 2.*dx*inv_sub_inv_fisher[0,1]*dy
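            # z is the chi^2 of the Gaussian (Fisher) approximation; the levels 2.30 and
            # 6.18 are the Delta chi^2 values enclosing 68.3% and 95.4% of a 2D Gaussian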
ax2dsub.contour(y,x,z,
extent=info.extent,
levels=[2.3,6.18],
#levels=[9.30,15.79],
zorder=4, colors='k')
ax2dsub.set_xticks(info.ticks[info.native_second_index])
ax2dsub.set_yticks(info.ticks[info.native_index])
ax2dsub.tick_params('both',direction='inout',top=True,bottom=True,left=True,right=True)
if index == len(plotted_parameters)-1:
ax2dsub.set_xticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_second_index]],
fontsize=info.ticksize)
if conf.legend_style == 'sides':
ax2dsub.set_xlabel(
info.tex_names[info.native_second_index],
fontsize=info.fontsize)
else:
ax2dsub.set_xticklabels([''])
ax2dsub.set_yticks(info.ticks[info.native_index])
if second_index == 0:
ax2dsub.set_yticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_index]],
fontsize=info.ticksize)
else:
ax2dsub.set_yticklabels([''])
if conf.legend_style == 'sides':
if second_index == 0:
ax2dsub.set_ylabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
if conf.subplot is True:
# Store the individual 2d plots.
if conf.plot_2d:
area = ax2dsub.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
                    # Pad the saved area by 40% in both the x- and y-direction
fig2d.savefig(os.path.join(
conf.folder, 'plots',
file_name+'_2d_%s-%s.%s' % (
standard_name, second_standard_name,
conf.extension)),
bbox_inches=area.expanded(1.4, 1.4))
# store the coordinates of the points for further
# plotting.
store_contour_coordinates(
conf, standard_name, second_standard_name, contours)
for info in information_instances:
if not info.ignore_param and info.has_second_param:
info.hist_file_name = os.path.join(
info.folder, 'plots',
'{0}_2d_{1}-{2}.hist'.format(
info.basename,
standard_name,
second_standard_name))
write_histogram_2d(
info.hist_file_name, info.x_centers, info.y_centers,
info.extent, info.n)
print '-----------------------------------------------'
if conf.plot:
print '--> Saving figures to .{0} files'.format(info.extension)
plot_name = '-vs-'.join([os.path.split(elem.folder)[-1]
for elem in information_instances])
if conf.plot_2d:
# Legend of triangle plot
if ((conf.plot_legend_2d == None) and (len(legends) > 1)) or (conf.plot_legend_2d == True):
# Create a virtual subplot in the top right corner,
# just to be able to anchor the legend nicely
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
len(plotted_parameters),
)
ax2d.axis('off')
try:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
fontsize=info.legendsize)
except TypeError:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
prop={'fontsize': info.legendsize})
fig2d.subplots_adjust(wspace=0, hspace=0)
fig2d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_triangle.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
# Legend of 1D plot
if conf.plot:
if ((conf.plot_legend_1d == None) and (len(legends) > 1)) or (conf.plot_legend_1d == True):
                # no space left: add the legend to the right
if len(plotted_parameters)<num_columns*num_lines:
fig1d.legend(legends, legend_names,
loc= ((num_columns-0.9)/num_columns,0.1/num_columns),
fontsize=info.legendsize)
# space left in lower right part: add legend there
else:
fig1d.legend(legends, legend_names,
loc= 'center right',
bbox_to_anchor = (1.2,0.5),
fontsize=info.legendsize)
fig1d.tight_layout()
fig1d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_1d.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
def ctr_level(histogram2d, lvl, infinite=False):
"""
Extract the contours for the 2d plots (Karim Benabed)
"""
hist = histogram2d.flatten()*1.
hist.sort()
cum_hist = np.cumsum(hist[::-1])
cum_hist /= cum_hist[-1]
alvl = np.searchsorted(cum_hist, lvl)[::-1]
clist = [0]+[hist[-i] for i in alvl]+[hist.max()]
if not infinite:
return clist[1:]
return clist
def minimum_credible_intervals(info):
"""
Extract minimum credible intervals (method from Jan Haman) FIXME
"""
histogram = info.hist
bincenters = info.bincenters
levels = info.levels
bounds = np.zeros((len(levels), 2))
j = 0
delta = bincenters[1]-bincenters[0]
left_edge = max(histogram[0] - 0.5*(histogram[1]-histogram[0]), 0.)
right_edge = max(histogram[-1] + 0.5*(histogram[-1]-histogram[-2]), 0.)
failed = False
for level in levels:
norm = float(
(np.sum(histogram)-0.5*(histogram[0]+histogram[-1]))*delta)
norm += 0.25*(left_edge+histogram[0])*delta
norm += 0.25*(right_edge+histogram[-1])*delta
water_level_up = np.max(histogram)*1.0
water_level_down = np.min(histogram)*1.0
top = 0.
iterations = 0
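        # Bisection on a horizontal 'water level': raise or lower it until the
        # histogram area above the water level matches the requested confidence
        # level; the interval bounds are where the histogram crosses that level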
while (abs((top/norm)-level) > 0.0001) and not failed:
top = 0.
water_level = (water_level_up + water_level_down)/2.
#ontop = [elem for elem in histogram if elem > water_level]
indices = [i for i in range(len(histogram))
if histogram[i] > water_level]
# check for multimodal posteriors
if ((indices[-1]-indices[0]+1) != len(indices)):
warnings.warn(
"could not derive minimum credible intervals " +
"for this multimodal posterior")
warnings.warn(
"please try running longer chains or reducing " +
"the number of bins with --bins BINS (default: 20)")
failed = True
break
top = (np.sum(histogram[indices]) -
0.5*(histogram[indices[0]]+histogram[indices[-1]]))*(delta)
# left
if indices[0] > 0:
top += (0.5*(water_level+histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-histogram[indices[0]-1]))
else:
if (left_edge > water_level):
top += 0.25*(left_edge+histogram[indices[0]])*delta
else:
top += (0.25*(water_level + histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-left_edge))
# right
if indices[-1] < (len(histogram)-1):
top += (0.5*(water_level + histogram[indices[-1]]) *
delta*(histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-histogram[indices[-1]+1]))
else:
if (right_edge > water_level):
top += 0.25*(right_edge+histogram[indices[-1]])*delta
else:
top += (0.25*(water_level + histogram[indices[-1]]) *
delta * (histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-right_edge))
if top/norm >= level:
water_level_down = water_level
else:
water_level_up = water_level
# safeguard, just in case
iterations += 1
if (iterations > 1000):
warnings.warn(
"the loop to check for sigma deviations was " +
"taking too long to converge")
failed = True
break
# min
if failed:
bounds[j][0] = np.nan
elif indices[0] > 0:
bounds[j][0] = bincenters[indices[0]] - delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-histogram[indices[0]-1])
else:
if (left_edge > water_level):
bounds[j][0] = bincenters[0]-0.5*delta
else:
bounds[j][0] = bincenters[indices[0]] - 0.5*delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-left_edge)
# max
if failed:
bounds[j][1] = np.nan
elif indices[-1] < (len(histogram)-1):
bounds[j][1] = bincenters[indices[-1]] + delta*(histogram[indices[-1]]-water_level)/(histogram[indices[-1]]-histogram[indices[-1]+1])
else:
if (right_edge > water_level):
bounds[j][1] = bincenters[-1]+0.5*delta
else:
bounds[j][1] = bincenters[indices[-1]] + \
0.5*delta*(histogram[indices[-1]]-water_level) / \
(histogram[indices[-1]]-right_edge)
j += 1
for elem in bounds:
for j in (0, 1):
elem[j] -= info.mean[info.native_index]
return bounds
def write_h(info_file, indices, name, string, quantity, modifiers=None):
"""
Write one horizontal line of output
"""
info_file.write('\n '+name+'\t: ')
for i in indices:
info_file.write(string % quantity[i]+'\t')
def cubic_interpolation(info, hist, bincenters):
"""
    Small routine to accommodate the absence of the interpolate module
"""
    # we start from a try because if anything goes wrong, we want to return the raw histogram rather than nothing
try:
# test that all elements are strictly positive, otherwise we could not take the log, and we must switch to the robust method
for i,elem in enumerate(hist):
if elem == 0.:
hist[i] = 1.e-99
            elif elem < 0:
                print hist[i]
                raise Exception('negative value found in the histogram')
# One of our methods (using polyfit) does assume that the input histogram has a maximum value of 1.
        # If in a future version this is not guaranteed anymore, we should renormalise it here.
# This is important for computing weights and thresholds.
# The threshold below which the likelihood will be
        # approximated as zero is hard-coded here (it could become an
        # input parameter, but that would not clearly be useful):
threshold = 1.e-3
# prepare the interpolation on log(Like):
ln_hist = np.log(hist)
# define a finer grid on a wider range (assuming that the following method is fine both for inter- and extra-polation)
left = bincenters[0]-2.5*(bincenters[1]-bincenters[0])
if (info.boundaries[info.native_index][0] != None):
if (info.boundaries[info.native_index][0] > left):
left = info.boundaries[info.native_index][0]
right = bincenters[-1]+2.5*(bincenters[-1]-bincenters[-2])
if (info.boundaries[info.native_index][1] != None):
if (info.boundaries[info.native_index][1] < right):
right = info.boundaries[info.native_index][1]
interp_grid = np.linspace(left, right, (len(bincenters)+4)*10+1)
######################################
# polynomial fit method (default): #
        ######################################
if info.posterior_smoothing >= 2:
            # the points in the histogram with a very low likelihood (i.e. hist[i] << 1, since hist is normalised to a maximum of one)
# have a lot of Poisson noise and are unreliable. However, if we do nothing, they may dominate the outcome of the fitted polynomial.
# Hence we can:
# 1) give them less weight (weight = sqrt(hist) seems to work well)
# 2) cut them at some threshold value and base the fit only on higher points
# 3) both
            # the one working best seems to be 2). We also wrote 1) below, but commented it out.
# method 1):
#f = np.poly1d(np.polyfit(bincenters,ln_hist,info.posterior_smoothing,w=np.sqrt(hist)))
#interp_hist = f(interp_grid)
# method 2):
            # find index values such that hist is negligible everywhere except between hist[sub_indices[0]] and hist[sub_indices[-1]]
sub_indices = [i for i,elem in enumerate(hist) if elem > threshold]
            # The interpolation is done precisely in this range: bincenters[sub_indices[0]] < x < bincenters[sub_indices[-1]]
g = np.poly1d(np.polyfit(bincenters[sub_indices],ln_hist[sub_indices],info.posterior_smoothing)) #,w=np.sqrt(hist[sub_indices])))
            # The extrapolation is done in a range including one more bin on each side, except when the boundary is hit
if (info.boundaries[info.native_index][0] == None):
extrapolation_range_left = [bincenters[sub_indices[0]] if sub_indices[0] == 0 else bincenters[sub_indices[0]-1]]
else:
extrapolation_range_left = [info.boundaries[info.native_index][0] if sub_indices[0] == 0 else bincenters[sub_indices[0]-1]]
if (info.boundaries[info.native_index][1] == None):
extrapolation_range_right = [bincenters[sub_indices[-1]] if sub_indices[-1] == len(hist)-1 else bincenters[sub_indices[-1]+1]]
else:
extrapolation_range_right = [info.boundaries[info.native_index][1] if sub_indices[-1] == len(hist)-1 else bincenters[sub_indices[-1]+1]]
            # outside of this range, log(L) is brutally set to a negligible value, log(1.e-10)
interp_hist = [g(elem) if (elem > extrapolation_range_left and elem < extrapolation_range_right) else np.log(1.e-10) for elem in interp_grid]
elif info.posterior_smoothing<0:
raise io_mp.AnalyzeError(
"You passed --posterior-smoothing %d, this value is not understood"%info.posterior_smoothing)
############################################################
# other methods: #
# - linear inter/extra-polation if posterior_smoothing = 0 #
        # - cubic inter/extra-polation if posterior_smoothing = 1  #
############################################################
else:
# try first inter/extra-polation
try:
# prepare to interpolate and extrapolate:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear', fill_value='extrapolate')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic', fill_value='extrapolate')
interp_hist = f(interp_grid)
            # failure probably caused by old scipy not having the fill_value='extrapolate' argument. Then, only interpolate.
except:
# define a finer grid but not a wider one
left = bincenters[0]
if (info.boundaries[info.native_index][0] != None):
if (info.boundaries[info.native_index][0] > left):
left = info.boundaries[info.native_index][0]
right = bincenters[-1]
if (info.boundaries[info.native_index][1] != None):
if (info.boundaries[info.native_index][1] < right):
right = info.boundaries[info.native_index][1]
interp_grid = np.linspace(left, right, len(bincenters)*10+1)
# prepare to interpolate only:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic')
interp_hist = f(interp_grid)
        # final steps used by all methods
# go back from ln_Like to Like
interp_hist = np.exp(interp_hist)
# re-normalise the interpolated curve
interp_hist = interp_hist / interp_hist.max()
return interp_hist, interp_grid
except:
# we will end up here if anything went wrong before
# do nothing (raw histogram)
warnings.warn(
"The 1D posterior could not be processed normally, probably" +
"due to incomplete or obsolete numpy and/or scipy versions." +
"So the raw histograms will be plotted.")
return hist, bincenters
def write_histogram(hist_file_name, x_centers, hist):
"""
Store the posterior distribution to a file
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# 1d posterior distribution\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# Histogram\n")
hist_file.write(", ".join(
[str(elem) for elem in hist])+"\n")
print 'wrote ', hist_file_name
def read_histogram(histogram_path):
"""
Recover a stored 1d posterior
"""
with open(histogram_path, 'r') as hist_file:
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = [float(elem) for elem in
hist_file.next().split(",")]
x_centers = np.array(x_centers)
hist = np.array(hist)
return x_centers, hist
def write_histogram_2d(hist_file_name, x_centers, y_centers, extent, hist):
"""
Store the histogram information to a file, to plot it later
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# Interpolated histogram\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# y_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in y_centers])+"\n")
hist_file.write("\n# Extent\n")
hist_file.write(", ".join(
[str(elem) for elem in extent])+"\n")
hist_file.write("\n# Histogram\n")
for line in hist:
hist_file.write(", ".join(
[str(elem) for elem in line])+"\n")
def read_histogram_2d(histogram_path):
"""
Read the histogram information that was stored in a file.
To use it, call something like this:
.. code::
        x_centers, y_centers, extent, hist = read_histogram_2d(path)
fig, ax = plt.subplots()
ax.contourf(
y_centers, x_centers, hist, extent=extent,
levels=ctr_level(hist, [0.68, 0.95]),
            zorder=5, cmap=plt.cm.autumn_r)
plt.show()
"""
with open(histogram_path, 'r') as hist_file:
length = 0
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
length = len(x_centers)
elif line.find("# y_centers") != -1:
y_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Extent") != -1:
extent = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = []
for index in range(length):
hist.append([float(elem) for elem in
hist_file.next().split(",")])
x_centers = np.array(x_centers)
y_centers = np.array(y_centers)
extent = np.array(extent)
hist = np.array(hist)
return x_centers, y_centers, extent, hist
def clean_conversion(module_name, tag, folder):
"""
    Execute the "convert" method of the different sampling algorithms
    Returns True if a conversion was performed, False otherwise
"""
has_module = False
subfolder_name = tag+"_subfolder"
try:
# Don't try to import the wrong (unrequested) module, in case it's not installed
if not folder.lower().endswith(tag.lower()):
raise ImportError
module = importlib.import_module(module_name)
subfolder = getattr(module, subfolder_name)
has_module = True
except ImportError:
# The module is not installed, the conversion can not take place
pass
if has_module and os.path.isdir(folder):
# Remove any potential trailing slash
folder = os.path.join(
*[elem for elem in folder.split(os.path.sep) if elem])
if folder.split(os.path.sep)[-1] == subfolder:
try:
getattr(module, 'from_%s_output_to_chains' % tag)(folder)
except IOError:
raise io_mp.AnalyzeError(
"You asked to analyze a %s folder which " % tag +
"seems to come from an unfinished run, or to be empty " +
"or corrupt. Please make sure the run went smoothly " +
"enough.")
warnings.warn(
"The content of the %s subfolder has been " % tag +
"translated for Monte Python. Please run an "
"analysis of the entire folder now.")
return True
else:
return False
def separate_files(files):
"""
    Separate the input files by folder
    Given all the input files passed on the command line, separate them into
    a list of lists, grouping them by folder. The number of identified folders
will determine the number of information instances to create
"""
final_list = []
temp = [files[0]]
folder = (os.path.dirname(files[0]) if os.path.isfile(files[0])
else files[0])
if len(files) > 1:
for elem in files[1:]:
new_folder = (os.path.dirname(elem) if os.path.isfile(elem)
else elem)
if new_folder == folder:
temp.append(elem)
else:
folder = new_folder
final_list.append(temp)
temp = [elem]
final_list.append(temp)
return final_list
def recover_folder_and_files(files):
"""
Distinguish the cases when analyze is called with files or folder
Note that this takes place chronologically after the function
`separate_files`"""
# The following list defines the substring that a chain should contain for
# the code to recognise it as a proper chain.
substrings = ['.txt', '__']
limit = 10
# If the first element is a folder, grab all chain files inside
if os.path.isdir(files[0]):
folder = os.path.normpath(files[0])
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and all([x in elem for x in substrings])]
# Otherwise, extract the folder from the chain file-name.
else:
# If the name is completely wrong, say it
if not os.path.exists(files[0]):
raise io_mp.AnalyzeError(
"You provided a non-existant folder/file to analyze")
folder = os.path.relpath(
os.path.dirname(os.path.realpath(files[0])), os.path.curdir)
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if os.path.join(folder, elem) in np.copy(files)
and not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and all([x in elem for x in substrings])]
basename = os.path.basename(folder)
return folder, files, basename
def extract_array(line):
"""
Return the array on the RHS of the line
>>> extract_array("toto = ['one', 'two']\n")
['one', 'two']
>>> extract_array('toto = ["one", 0.2]\n')
['one', 0.2]
"""
# Recover RHS of the equal sign, and remove surrounding spaces
rhs = line.split('=')[-1].strip()
# Remove array signs
rhs = rhs.strip(']').lstrip('[')
# Recover each element of the list
sequence = [e.strip().strip('"').strip("'") for e in rhs.split(',')]
for index, elem in enumerate(sequence):
try:
sequence[index] = int(elem)
except ValueError:
try:
sequence[index] = float(elem)
except ValueError:
pass
return sequence
def extract_dict(line):
"""
Return the key and value of the dictionary element contained in line
>>> extract_dict("something['toto'] = [0, 1, 2, -2, 'cosmo']")
'toto', [0, 1, 2, -2, 'cosmo']
"""
# recovering the array
sequence = extract_array(line)
# Recovering only the LHS
lhs = line.split('=')[0].strip()
# Recovering the name from the LHS
name = lhs.split('[')[-1].strip(']')
name = name.strip('"').strip("'")
return name, sequence
def extract_parameter_names(info):
"""
    Read the log.param and store the parameter names in the Information instance
"""
backup_names = []
plotted_parameters = []
boundaries = []
ref_names = []
tex_names = []
scales = []
rescales = []
centers = []
with open(info.param_path, 'r') as param:
for line in param:
if line.find('#') == -1:
if line.find('data.experiments') != -1:
info.experiments = extract_array(line)
if line.find('data.parameters') != -1:
name, array = extract_dict(line)
original = name
                    # Rename the parameter according to the .extra file (optional)
if name in info.to_change.iterkeys():
name = info.to_change[name]
# If the name corresponds to a varying parameter (fourth
# entry in the initial array being non-zero, or a derived
                    # parameter (which could be designated as fixed, it does not make
# any difference)), then continue the process of analyzing.
if array[3] != 0 or array[5] == 'derived':
                        # The real name is always kept, so that the class
                        # names still appear in the covmat
backup_names.append(original)
# With the list "to_plot", we can potentially restrict
# the variables plotted. If it is empty, though, simply
# all parameters will be plotted.
if info.to_plot == []:
plotted_parameters.append(name)
else:
if name in info.to_plot:
plotted_parameters.append(name)
# Append to the boundaries array
boundaries.append([
None if elem == 'None' or (isinstance(elem, int)
and elem == -1)
else elem for elem in array[1:3]])
ref_names.append(name)
# Take care of the scales
scale = array[4]
rescale = 1.
if name in info.new_scales.iterkeys():
rescale = info.new_scales[name]/array[4]
scales.append(scale)
rescales.append(rescale)
# Given the scale, decide for the pretty tex name
number = 1./(scale*rescale)
tex_names.append(
io_mp.get_tex_name(name, number=number))
# Read starting values (useful for plotting Fisher)
centers.append(array[0])
scales = np.diag(scales)
rescales = np.diag(rescales)
info.ref_names = ref_names
info.tex_names = tex_names
info.boundaries = boundaries
info.backup_names = backup_names
info.scales = scales
info.rescales = rescales
# Beware, the following two numbers are different. The first is the total
# number of parameters stored in the chain, whereas the second is for
    # plotting purposes only.
info.number_parameters = len(ref_names)
info.plotted_parameters = plotted_parameters
info.centers = centers
def find_maximum_of_likelihood(info):
"""
Finding the global maximum of likelihood
min_minus_lkl will be appended with all the maximum likelihoods of files,
then will be replaced by its own maximum. This way, the global
maximum likelihood will be used as a reference, and not each chain's
maximum.
"""
min_minus_lkl = []
for chain_file in info.files:
# cheese will brutally contain everything (- log likelihood) in the
# file chain_file being scanned.
# This could potentially be faster with pandas, but is already quite
# fast
#
# This would read the chains including comment lines:
#cheese = (np.array([float(line.split()[1].strip())
# for line in open(chain_file, 'r')]))
#
# This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([float(line.split()[1].strip())
for line in ifilterfalse(iscomment,f)]))
try:
min_minus_lkl.append(cheese[:].min())
except ValueError:
pass
# beware, it is the min because we are talking about
# '- log likelihood'
# Selecting only the true maximum.
try:
min_minus_lkl = min(min_minus_lkl)
except ValueError:
raise io_mp.AnalyzeError(
"No decently sized chain was found in the desired folder. " +
"Please wait to have more accepted point before trying " +
"to analyze it.")
info.min_minus_lkl = min_minus_lkl
def remove_bad_points(info):
"""
    Create an array with all the points from the chains, after removing the non-Markovian points, the burn-in and a fixed fraction
"""
# spam will brutally contain all the chains with sufficient number of
# points, after the burn-in was removed.
spam = list()
# Recover the longest file name, for pleasing display
max_name_length = max([len(e) for e in info.files])
# Total number of steps done:
steps = 0
accepted_steps = 0
# Open the log file
log = open(info.log_path, 'w')
for index, chain_file in enumerate(info.files):
# To improve presentation, and print only once the full path of the
# analyzed folder, we recover the length of the path name, and
# create an empty complementary string of this length
total_length = 18+max_name_length
empty_length = 18+len(os.path.dirname(chain_file))+1
basename = os.path.basename(chain_file)
if index == 0:
exec "print '--> Scanning file %-{0}s' % chain_file,".format(
max_name_length)
else:
exec "print '%{0}s%-{1}s' % ('', basename),".format(
empty_length, total_length-empty_length)
# cheese will brutally contain everything in the chain chain_file being
# scanned
#
# This would read the chains including comment lines:
#cheese = (np.array([[float(elem) for elem in line.split()]
# for line in open(chain_file, 'r')]))
#
        # This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([[float(elem) for elem in line.split()]
for line in ifilterfalse(iscomment,f)]))
# If the file contains a broken line with a different number of
# elements, the previous array generation might fail, and will not have
# the correct shape. Hence the following command will fail. To avoid
# that, the error is caught.
try:
local_min_minus_lkl = cheese[:, 1].min()
except IndexError:
raise io_mp.AnalyzeError(
"Error while scanning %s." % chain_file +
" This file most probably contains "
"an incomplete line, rendering the analysis impossible. "
"I think that the following line(s) is(are) wrong:\n %s" % (
'\n '.join(
['-> %s' % line for line in
open(chain_file, 'r') if
len(line.split()) != len(info.backup_names)+2])))
line_count = float(sum(1 for line in open(chain_file, 'r')))
# Logging the information obtained until now.
number_of_steps = cheese[:, 0].sum()
log.write("%s\t " % os.path.basename(chain_file))
log.write(" Number of steps:%d\t" % number_of_steps)
log.write(" Steps accepted:%d\t" % line_count)
log.write(" acc = %.2g\t" % (float(line_count)/number_of_steps))
log.write("min(-loglike) = %.2f\n" % local_min_minus_lkl)
steps += number_of_steps
accepted_steps += line_count
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
info.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
info.update = 0
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.adaptive is defined when called by the mcmc loop
info.adaptive
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
info.adaptive = 0
        # Removing the non-Markovian part, the burn-in, and a fraction = (1 - keep_fraction) of the remaining points
start = 0
markovian=0
try:
# Read all comments in chains about times when proposal was updated
# The last of these comments gives the number of lines to be skipped in the files
if info.markovian and not info.update:
with open(chain_file, 'r') as f:
for line in ifilter(iscomment,f):
if info.only_markovian or ('update proposal' in line):
start = int(line.split()[2])
else:
pass
markovian = start
            # Remove burn-in, defined as all points until the likelihood reaches min_minus_lkl+LOG_LKL_CUTOFF
# except when it is run in adaptive mode
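            # (i.e. discard the initial points whose -log(likelihood) exceeds the
            # global minimum by more than LOG_LKL_CUTOFF)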
if not info.adaptive:
while cheese[start, 1] > info.min_minus_lkl+LOG_LKL_CUTOFF:
start += 1
burnin = start-markovian
# Remove fixed fraction as requested by user (usually not useful if non-markovian is also removed)
if info.keep_fraction < 1:
start = start + int((1.-info.keep_fraction)*(line_count - start))
print ": Removed",
if info.markovian:
print "%d non-markovian points," % markovian,
print "%d points of burn-in," % burnin,
if info.keep_fraction < 1:
print "and first %.0f percent," % (100.*(1-info.keep_fraction)),
print "keep %d steps" % (line_count-start)
except IndexError:
print ': Removed everything: chain not converged'
# ham contains cheese without the burn-in, if there are any points
# left (more than 5)
if np.shape(cheese)[0] > start+5:
ham = np.copy(cheese[int(start)::])
# Deal with single file case
if len(info.files) == 1:
warnings.warn("Convergence computed for a single file")
bacon = np.copy(cheese[::3, :])
egg = np.copy(cheese[1::3, :])
sausage = np.copy(cheese[2::3, :])
spam.append(bacon)
spam.append(egg)
spam.append(sausage)
continue
# Adding resulting table to spam
spam.append(ham)
# Test the length of the list
if len(spam) == 0:
raise io_mp.AnalyzeError(
"No decently sized chain was found. " +
"Please wait a bit to analyze this folder")
# Applying now new rules for scales, if the name is contained in the
# referenced names
for name in info.new_scales.iterkeys():
try:
index = info.ref_names.index(name)
for i in xrange(len(spam)):
spam[i][:, index+2] *= 1./info.rescales[index, index]
except ValueError:
# there is nothing to do if the name is not contained in ref_names
pass
info.steps = steps
info.accepted_steps = accepted_steps
return spam
def compute_mean(mean, spam, total):
"""
"""
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
submean = np.sum(spam[j][:, 0]*spam[j][:, i+2])
mean[j+1, i] = submean / total[j+1]
mean[0, i] += submean
mean[0, i] /= total[0]
def compute_variance(var, mean, spam, total):
"""
"""
for i in xrange(np.shape(var)[1]):
for j in xrange(len(spam)):
var[0, i] += np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[0, i])**2)
var[j+1, i] = np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[j+1, i])**2) / \
(total[j+1]-1)
var[0, i] /= (total[0]-1)
def compute_covariance_matrix(info):
"""
"""
covar = np.zeros((len(info.ref_names), len(info.ref_names)))
for i in xrange(len(info.ref_names)):
for j in xrange(i, len(info.ref_names)):
covar[i, j] = (
info.chain[:, 0]*(
(info.chain[:, i+2]-info.mean[i]) *
(info.chain[:, j+2]-info.mean[j]))).sum()
if i != j:
covar[j, i] = covar[i, j]
covar /= info.total
# Removing scale factors in order to store true parameter covariance
covar = np.dot(info.scales.T, np.dot(covar, info.scales))
return covar
def adjust_ticks(param, information_instances):
"""
"""
if len(information_instances) == 1:
return
# Recovering all x_range and ticks entries from the concerned information
# instances
x_ranges = []
ticks = []
for info in information_instances:
if not info.ignore_param:
x_ranges.append(info.x_range[info.native_index])
ticks.append(info.ticks[info.native_index])
# The new x_range and tick should min/max all the existing ones
new_x_range = np.array(
[min([e[0] for e in x_ranges]), max([e[1] for e in x_ranges])])
temp_ticks = np.array(
[min([e[0] for e in ticks]), max([e[-1] for e in ticks])])
new_ticks = np.linspace(temp_ticks[0],
temp_ticks[1],
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = new_x_range
info.ticks[info.native_index] = new_ticks
def store_contour_coordinates(info, name1, name2, contours):
"""docstring"""
file_name = os.path.join(
info.folder, 'plots', '{0}_2d_{1}-{2}.dat'.format(
info.basename, name1, name2))
with open(file_name, 'w') as plot_file:
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[1]))
for elem in contours.collections[0].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
# stop to not include the inner contours
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[0]))
for elem in contours.collections[1].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
def iscomment(s):
"""
Define what we call a comment in MontePython chain files
"""
return s.startswith('#')
class Information(object):
"""
Hold all information for analyzing runs
"""
# Counting the number of instances, to choose the color map
_ids = count(0)
# Flag checking the absence or presence of the interp1d function
has_interpolate_module = False
# Actual pairs of colors used by MP.
# For each pair, the first color is for the 95% contour,
# and the second for the 68% contour + the 1d probability.
# Note that, as with the other customisation options, you can specify new
# values for this in the extra plot_file.
MP_color = {
'Red':['#E37C80','#CE121F'],
'Blue':['#7A98F6','#1157EF'],
'Green':['#88B27A','#297C09'],
'Orange':['#F3BE82','#ED920F'],
'Grey':['#ABABAB','#737373'],
'Purple':['#B87294','#88004C']
}
# order used when several directories are analysed
MP_color_cycle = [
MP_color['Red'],
MP_color['Blue'],
MP_color['Green'],
MP_color['Orange'],
MP_color['Grey'],
MP_color['Purple']
]
# in the same order, list of transparency levels
alphas = [0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
def __init__(self, command_line, other=None):
"""
The following initialization creates the three tables that can be
customized in an extra plot_file (see :mod:`parser_mp`).
Parameters
----------
command_line : Namespace
it contains the initialised command line arguments
"""
self.to_change = {}
"""
Dictionary whose keys are the old parameter names, and values are the
new ones. For instance :code:`{'beta_plus_lambda':'beta+lambda'}`
"""
self.to_plot = []
"""
Array of names of parameters to plot. If left empty, all will be
plotted.
.. warning::
If you changed a parameter name with :attr:`to_change`, you need to
give the new name to this array
"""
self.new_scales = {}
"""
Dictionary that redefines some scales. The keys will be the parameter
name, and the value its scale.
"""
# Assign a unique id to this instance
self.id = self._ids.next()
# Defining the sigma contours (1, 2 and 3-sigma)
self.levels = np.array([68.26, 95.4, 99.7])/100.
# Follows a bunch of initialisation to provide default members
self.ref_names, self.backup_names = [], []
self.scales, self.plotted_parameters = [], []
self.spam = []
# Store directly all information from the command_line object into this
# instance, except the protected members (begin and end with __)
for elem in dir(command_line):
if elem.find('__') == -1:
setattr(self, elem, getattr(command_line, elem))
# initialise the legend flags
self.plot_legend_1d = None
self.plot_legend_2d = None
# initialize the legend size to be the same as fontsize, but can be
# altered in the extra file
self.legendsize = self.fontsize
self.legendnames = []
# initialize the customisation script flags
self.custom1d = []
self.custom2d = []
        # initialise the dictionary enforcing limits
self.force_limits = {}
# Read a potential file describing changes to be done for the parameter
        # names, and the number of parameters plotted (can be left empty, all will
# then be plotted), but also the style of the plot. Note that this
# overrides the command line options
if command_line.optional_plot_file:
plot_file_vars = {'info': self,'plt': plt}
execfile(command_line.optional_plot_file, plot_file_vars)
# check and store keep_fraction
if command_line.keep_fraction<=0 or command_line.keep_fraction>1:
raise io_mp.AnalyzeError("after --keep-fraction you should pass a float >0 and <=1")
self.keep_fraction = command_line.keep_fraction
def remap_parameters(self, spam):
"""
Perform substitutions of parameters for analyzing
.. note::
for arbitrary combinations of parameters, the prior will not
necessarily be flat.
"""
if hasattr(self, 'redefine'):
for key, value in self.redefine.iteritems():
# Check that the key was an original name
if key in self.backup_names:
print ' /|\ Transforming', key, 'into', value
# We recover the indices of the key
index_to_change = self.backup_names.index(key)+2
print('/_o_\ The new variable will be called ' +
self.ref_names[self.backup_names.index(key)])
# Recover all indices of all variables present in the
# remapping
variable_names = [elem for elem in self.backup_names if
value.find(elem) != -1]
indices = [self.backup_names.index(name)+2 for name in
variable_names]
# Now loop over all files in spam
for i in xrange(len(spam)):
# Assign variables to their values
for index, name in zip(indices, variable_names):
exec("%s = spam[i][:, %i]" % (name, index))
# Assign to the desired index the combination
exec("spam[i][:, %i] = %s" % (index_to_change, value))
def define_ticks(self):
"""
"""
self.max_values = self.chain[:, 2:].max(axis=0)
self.min_values = self.chain[:, 2:].min(axis=0)
self.span = (self.max_values-self.min_values)
# Define the place of ticks, given the number of ticks desired, stored
# in conf.ticknumber
self.ticks = np.array(
[np.linspace(self.min_values[i]+self.span[i]*0.1,
self.max_values[i]-self.span[i]*0.1,
self.ticknumber) for i in range(len(self.span))])
# Define the x range (ticks start not exactly at the range boundary to
# avoid display issues)
self.x_range = np.array((self.min_values, self.max_values)).T
# In case the exploration hit a boundary (as defined in the parameter
# file), at the level of precision defined by the number of bins, the
# ticks and x_range should be altered in order to display this
# meaningful number instead.
for i in range(np.shape(self.ticks)[0]):
x_range = self.x_range[i]
bounds = self.boundaries[i]
# Left boundary
if bounds[0] is not None:
if abs(x_range[0]-bounds[0]) < self.span[i]/self.bins:
self.ticks[i][0] = bounds[0]
self.x_range[i][0] = bounds[0]
# Right boundary
if bounds[-1] is not None:
if abs(x_range[-1]-bounds[-1]) < self.span[i]/self.bins:
self.ticks[i][-1] = bounds[-1]
self.x_range[i][-1] = bounds[-1]
def write_information_files(self):
# Store in info_names only the tex_names that were plotted, for this
# instance, and in indices the corresponding list of indices. It also
# removes the $ signs, for clarity
self.info_names = [
name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.indices = [self.tex_names.index(name) for name in self.info_names]
self.tex_names = [name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.info_names = [name.replace('$', '') for name in self.info_names]
# Define the bestfit array
self.bestfit = np.zeros(len(self.ref_names))
for i in xrange(len(self.ref_names)):
self.bestfit[i] = self.chain[self.sorted_indices[0], :][2+i]
# Write down to the .h_info file all necessary information
self.write_h_info()
self.write_v_info()
self.write_tex()
def write_h_info(self):
with open(self.h_info_path, 'w') as h_info:
h_info.write(' param names\t: ')
for name in self.info_names:
h_info.write("%-14s" % name)
write_h(h_info, self.indices, 'R-1 values', '% .6f', self.R)
write_h(h_info, self.indices, 'Best Fit ', '% .6e', self.bestfit)
write_h(h_info, self.indices, 'mean ', '% .6e', self.mean)
write_h(h_info, self.indices, 'sigma ', '% .6e',
(self.bounds[:, 0, 1]-self.bounds[:, 0, 0])/2.)
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma - ', '% .6e',
self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma + ', '% .6e',
self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma - ', '% .6e',
self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma + ', '% .6e',
self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma - ', '% .6e',
self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma + ', '% .6e',
self.bounds[:, 2, 1])
# bounds
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma > ', '% .6e',
self.mean+self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma < ', '% .6e',
self.mean+self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma > ', '% .6e',
self.mean+self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma < ', '% .6e',
self.mean+self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma > ', '% .6e',
self.mean+self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma < ', '% .6e',
self.mean+self.bounds[:, 2, 1])
def write_v_info(self):
"""Write vertical info file"""
with open(self.v_info_path, 'w') as v_info:
v_info.write('%-15s\t: %-11s' % ('param names', 'R-1'))
v_info.write(' '.join(['%-11s' % elem for elem in [
'Best fit', 'mean', 'sigma', '1-sigma -', '1-sigma +',
'2-sigma -', '2-sigma +', '1-sigma >', '1-sigma <',
'2-sigma >', '2-sigma <']]))
for index, name in zip(self.indices, self.info_names):
v_info.write('\n%-15s\t: % .4e' % (name, self.R[index]))
v_info.write(' '.join(['% .4e' % elem for elem in [
self.bestfit[index], self.mean[index],
(self.bounds[index, 0, 1]-self.bounds[index, 0, 0])/2.,
self.bounds[index, 0, 0], self.bounds[index, 0, 1],
self.bounds[index, 1, 0], self.bounds[index, 1, 1],
self.mean[index]+self.bounds[index, 0, 0],
self.mean[index]+self.bounds[index, 0, 1],
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]]]))
def write_tex(self):
"""Write a tex table containing the main results """
with open(self.tex_path, 'w') as tex:
tex.write("\\begin{tabular}{|l|c|c|c|c|} \n \\hline \n")
tex.write("Param & best-fit & mean$\pm\sigma$ ")
tex.write("& 95\% lower & 95\% upper \\\\ \\hline \n")
for index, name in zip(self.indices, self.tex_names):
tex.write("%s &" % name)
tex.write("$%.4g$ & $%.4g_{%.2g}^{+%.2g}$ " % (
self.bestfit[index], self.mean[index],
self.bounds[index, 0, 0], self.bounds[index, 0, 1]))
tex.write("& $%.4g$ & $%.4g$ \\\\ \n" % (
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]))
tex.write("\\hline \n \\end{tabular} \\\\ \n")
tex.write("$-\ln{\cal L}_\mathrm{min} =%.6g$, " % (
self.min_minus_lkl))
tex.write("minimum $\chi^2=%.4g$ \\\\ \n" % (
self.min_minus_lkl*2.))
| 44.928822
| 237
| 0.536056
|
7f20a9b827484444a291861e56b197b9a601ca91
| 466
|
py
|
Python
|
backend/group/migrations/0003_auto_20200829_1930.py
|
cjc7373/hackergame
|
86971b4cf8a2761044d417b4c8bd934c3309d6fd
|
[
"MIT"
] | 2
|
2020-07-12T13:11:43.000Z
|
2020-07-14T08:12:17.000Z
|
backend/group/migrations/0003_auto_20200829_1930.py
|
cjc7373/hackergame
|
86971b4cf8a2761044d417b4c8bd934c3309d6fd
|
[
"MIT"
] | 1
|
2020-08-13T13:56:18.000Z
|
2020-09-29T12:39:08.000Z
|
backend/group/migrations/0003_auto_20200829_1930.py
|
cjc7373/hackergame
|
86971b4cf8a2761044d417b4c8bd934c3309d6fd
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2020-08-29 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('group', '0002_auto_20200812_2011'),
]
operations = [
migrations.AlterField(
model_name='application',
name='status',
field=models.CharField(choices=[('accepted', 'accepted'), ('pending', 'pending')], default='pending', max_length=10),
),
]
| 24.526316
| 129
| 0.609442
|
9ee4359f215d9507c158cdc207f2be19e67ed873
| 261
|
py
|
Python
|
Plotting/Standard/Pandas/Scatter plot.py
|
fraunhofer-iais/IAIS-Python-Snippets
|
a3ee610d6270cda2c891688851696c34831ffa2b
|
[
"MIT"
] | null | null | null |
Plotting/Standard/Pandas/Scatter plot.py
|
fraunhofer-iais/IAIS-Python-Snippets
|
a3ee610d6270cda2c891688851696c34831ffa2b
|
[
"MIT"
] | null | null | null |
Plotting/Standard/Pandas/Scatter plot.py
|
fraunhofer-iais/IAIS-Python-Snippets
|
a3ee610d6270cda2c891688851696c34831ffa2b
|
[
"MIT"
] | null | null | null |
# Directly plot a scatter-plot from a pandas DataFrame
# ('data' is assumed to be an existing pandas DataFrame)
import matplotlib.pyplot as plt

# Names of the two DataFrame columns to plot against each other
attribute1 = ''
attribute2 = ''
data.plot.scatter(x=attribute1, y=attribute2, figsize=(10,6), alpha=0.5, s=50)
ax = plt.gca()
ax.set_xlabel(attribute1, fontsize=14)
ax.set_ylabel(attribute2, fontsize=14);
| 29
| 78
| 0.739464
|
fc3f0bae1fdb96441d5be8afd33686f1016d308a
| 805
|
py
|
Python
|
Reading/manage.py
|
SnowmanZhang/CharacterTest
|
c4936174557594fca93a747b3a9893446e9afd67
|
[
"MIT"
] | null | null | null |
Reading/manage.py
|
SnowmanZhang/CharacterTest
|
c4936174557594fca93a747b3a9893446e9afd67
|
[
"MIT"
] | null | null | null |
Reading/manage.py
|
SnowmanZhang/CharacterTest
|
c4936174557594fca93a747b3a9893446e9afd67
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Reading.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35
| 77
| 0.642236
|
44e8971d46e5d5896fea6c265508a5bd004cb952
| 16,883
|
py
|
Python
|
qa/rpc-tests/pruning.py
|
realzzt/BitCoin2013
|
a8e6863dd39570db449318296c0809fb8476174d
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/pruning.py
|
realzzt/BitCoin2013
|
a8e6863dd39570db449318296c0809fb8476174d
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/pruning.py
|
realzzt/BitCoin2013
|
a8e6863dd39570db449318296c0809fb8476174d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def calc_usage(blockdir):
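    # Sum the sizes of all files directly inside blockdir and convert bytes to MiB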
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
self.utxo = []
self.address = ["",""]
self.txouts = gen_return_txouts()
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
self.address[0] = self.nodes[0].getnewaddress()
self.address[1] = self.nodes[1].getnewaddress()
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
self.mine_full_block(self.nodes[0], self.address[0])
sync_blocks(self.nodes[0:3])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
print("Success")
print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
print("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
self.mine_full_block(self.nodes[0],self.address[0])
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
print("Success")
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
self.utxo = self.nodes[1].listunspent()
for i in range(24):
if j == 0:
self.mine_full_block(self.nodes[1],self.address[1])
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
self.utxo = self.nodes[0].listunspent()
for i in range(25):
self.mine_full_block(self.nodes[0],self.address[0])
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
stop_node(self.nodes[1],1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
print("Current block height:", height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
print("Invalidating block at height:",invalidheight,badhash)
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
print("New best height", self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
stop_node(self.nodes[1],1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
print("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
print("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
print("Verify height on node 2:",self.nodes[2].getblockcount())
print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
try:
self.nodes[2].getblock(self.forkhash)
raise AssertionError("Old block wasn't pruned so can't test redownload")
except JSONRPCException as e:
print("Will need to redownload block",self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
for j in range(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - 100*self.relayfee # Fee must be above min relay rate for 66kb tx
outputs[address]=remchange
            # Create a basic transaction that will send change back to ourself after accounting for a fee,
            # and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the number
            # of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
print("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
print("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
print("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
print("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
print("Done")
if __name__ == '__main__':
PruneTest().main()
| 49.365497
| 168
| 0.573239
|
f27e97ed8084150b94c02d541e89ddc9fbc8ce1a
| 5,770
|
py
|
Python
|
tests/integrational/native_threads/test_channel_groups.py
|
17media/pubnub-python
|
ee372eec82f16d3a80a4cd027bca8976755b817f
|
[
"MIT"
] | null | null | null |
tests/integrational/native_threads/test_channel_groups.py
|
17media/pubnub-python
|
ee372eec82f16d3a80a4cd027bca8976755b817f
|
[
"MIT"
] | null | null | null |
tests/integrational/native_threads/test_channel_groups.py
|
17media/pubnub-python
|
ee372eec82f16d3a80a4cd027bca8976755b817f
|
[
"MIT"
] | null | null | null |
import logging
import threading
import time
import unittest
import pubnub
from pubnub.models.consumer.channel_group import PNChannelGroupsAddChannelResult, PNChannelGroupsListResult, \
PNChannelGroupsRemoveChannelResult, PNChannelGroupsRemoveGroupResult
from pubnub.pubnub import PubNub
from tests.helper import pnconf_copy
from tests.integrational.vcr_helper import use_cassette_and_stub_time_sleep_native
pubnub.set_stream_logger('pubnub', logging.DEBUG)
class TestPubNubChannelGroups(unittest.TestCase):
def setUp(self):
self.event = threading.Event()
def callback(self, response, status):
self.response = response
self.status = status
self.event.set()
@use_cassette_and_stub_time_sleep_native(
'tests/integrational/fixtures/native_threads/channel_groups/single_channel.yaml',
filter_query_parameters=['uuid'])
def test_single_channel(self):
ch = "channel-groups-unit-ch"
gr = "channel-groups-unit-cg"
pubnub = PubNub(pnconf_copy())
# add
pubnub.add_channel_to_channel_group() \
.channels(ch) \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNChannelGroupsAddChannelResult)
self.event.clear()
time.sleep(1)
# list
pubnub.list_channels_in_channel_group() \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsListResult)
assert len(self.response.channels) == 1
assert self.response.channels[0] == ch
self.event.clear()
# remove
pubnub.remove_channel_from_channel_group() \
.channels(ch) \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsRemoveChannelResult)
self.event.clear()
time.sleep(1)
# list
pubnub.list_channels_in_channel_group() \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsListResult)
assert len(self.response.channels) == 0
self.event.clear()
@use_cassette_and_stub_time_sleep_native(
'tests/integrational/fixtures/native_threads/channel_groups/add_remove_multiple_channels.yaml',
filter_query_parameters=['uuid'])
def test_add_remove_multiple_channels(self):
ch1 = "channel-groups-unit-ch1"
ch2 = "channel-groups-unit-ch2"
gr = "channel-groups-unit-cg"
pubnub = PubNub(pnconf_copy())
# add
pubnub.add_channel_to_channel_group() \
.channels([ch1, ch2]) \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNChannelGroupsAddChannelResult)
self.event.clear()
time.sleep(1)
# list
pubnub.list_channels_in_channel_group() \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsListResult)
assert len(self.response.channels) == 2
assert ch1 in self.response.channels
assert ch2 in self.response.channels
self.event.clear()
# remove
pubnub.remove_channel_from_channel_group() \
.channels([ch1, ch2]) \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsRemoveChannelResult)
self.event.clear()
time.sleep(1)
# list
pubnub.list_channels_in_channel_group() \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsListResult)
assert len(self.response.channels) == 0
self.event.clear()
@use_cassette_and_stub_time_sleep_native(
'tests/integrational/fixtures/native_threads/channel_groups/add_channel_remove_group.yaml',
filter_query_parameters=['uuid'])
def test_add_channel_remove_group(self):
ch = "channel-groups-unit-ch"
gr = "channel-groups-unit-cg"
pubnub = PubNub(pnconf_copy())
# add
pubnub.add_channel_to_channel_group() \
.channels(ch) \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNChannelGroupsAddChannelResult)
self.event.clear()
time.sleep(1)
# list
pubnub.list_channels_in_channel_group() \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsListResult)
assert len(self.response.channels) == 1
assert self.response.channels[0] == ch
self.event.clear()
# remove
pubnub.remove_channel_group() \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsRemoveGroupResult)
self.event.clear()
time.sleep(1)
# list
pubnub.list_channels_in_channel_group() \
.channel_group(gr) \
.async(self.callback)
self.event.wait()
assert isinstance(self.response, PNChannelGroupsListResult)
assert len(self.response.channels) == 0
self.event.clear()
| 30.855615
| 110
| 0.638128
|
b9b511a07527d05beb6d79489f4bd928241160d7
| 966
|
py
|
Python
|
awwards_users/urls.py
|
andyjohn23/awwards-clone
|
6f4297f37dbc15e95e845d837149d7f4d97eecad
|
[
"MIT"
] | null | null | null |
awwards_users/urls.py
|
andyjohn23/awwards-clone
|
6f4297f37dbc15e95e845d837149d7f4d97eecad
|
[
"MIT"
] | null | null | null |
awwards_users/urls.py
|
andyjohn23/awwards-clone
|
6f4297f37dbc15e95e845d837149d7f4d97eecad
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import CategoryListView, PostDeleteView, PostListView, PostCreateView, PersonalPostListView, UserPostListView, PostUpdateView, PostDetailView
from . import views
urlpatterns = [
path('', PostListView.as_view(), name='index'),
path('rates/', views.rating_project, name='rating'),
path('site/submission/', PostCreateView.as_view(), name="post-create"),
path('user/<str:username>', UserPostListView.as_view(), name="user-details"),
path('user/details/', PersonalPostListView.as_view(), name="user-detail"),
path('project/<int:pk>/', PostDetailView.as_view(), name="post-detail"),
path('project/<int:pk>/update/', PostUpdateView.as_view(), name="post-update"),
path('project/<int:pk>/delete/', PostDeleteView.as_view(), name="post-delete"),
path('category/<category>/', views.CategoryListView.as_view(), name="category"),
path('search/', views.project_search, name='project-search'),
]
| 50.842105
| 153
| 0.716356
|
abbf1e9fceca32a098bb5178c2d6d4421a679168
| 1,560
|
py
|
Python
|
trinity/plugins/registry.py
|
stringray55/trinity
|
d14ad79bfb679d7a8430f5a73605c2c57e542fb0
|
[
"MIT"
] | null | null | null |
trinity/plugins/registry.py
|
stringray55/trinity
|
d14ad79bfb679d7a8430f5a73605c2c57e542fb0
|
[
"MIT"
] | null | null | null |
trinity/plugins/registry.py
|
stringray55/trinity
|
d14ad79bfb679d7a8430f5a73605c2c57e542fb0
|
[
"MIT"
] | null | null | null |
import pkg_resources
from typing import (
Tuple,
)
from trinity.extensibility import (
BasePlugin,
)
from trinity.plugins.builtin.attach.plugin import (
AttachPlugin
)
from trinity.plugins.builtin.ethstats.plugin import (
EthstatsPlugin,
)
from trinity.plugins.builtin.fix_unclean_shutdown.plugin import (
FixUncleanShutdownPlugin
)
from trinity.plugins.builtin.json_rpc.plugin import (
JsonRpcServerPlugin,
)
from trinity.plugins.builtin.peer_discovery.plugin import (
PeerDiscoveryPlugin,
)
from trinity.plugins.builtin.tx_pool.plugin import (
TxPlugin,
)
from trinity.plugins.builtin.light_peer_chain_bridge.plugin import (
LightPeerChainBridgePlugin
)
def is_ipython_available() -> bool:
try:
pkg_resources.get_distribution('IPython')
except pkg_resources.DistributionNotFound:
return False
else:
return True
BASE_PLUGINS: Tuple[BasePlugin, ...] = (
AttachPlugin(use_ipython=is_ipython_available()),
FixUncleanShutdownPlugin(),
JsonRpcServerPlugin(),
PeerDiscoveryPlugin(),
)
ETH1_NODE_PLUGINS: Tuple[BasePlugin, ...] = (
EthstatsPlugin(),
LightPeerChainBridgePlugin(),
TxPlugin(),
)
def discover_plugins() -> Tuple[BasePlugin, ...]:
# Plugins need to define entrypoints at 'trinity.plugins' to automatically get loaded
# https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata
return tuple(
entry_point.load()() for entry_point in pkg_resources.iter_entry_points('trinity.plugins')
)
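# A hypothetical third-party plugin package would register itself for discovery
# roughly like this in its own setup.py (names below are illustrative only):
#
#     setup(
#         name='trinity-example-plugin',
#         entry_points={
#             'trinity.plugins': [
#                 'example = example_plugin.plugin:ExamplePlugin',
#             ],
#         },
#     )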
| 24.761905
| 98
| 0.741667
|
aacc47078076a0cbfd37b382390a1557d587faa1
| 32,013
|
py
|
Python
|
game.py
|
Xe-Xo/Catan
|
f34b82b88bc58b1e698b576bb7e771bf36b4390c
|
[
"MIT"
] | 1
|
2022-01-13T20:07:46.000Z
|
2022-01-13T20:07:46.000Z
|
game.py
|
Xe-Xo/Catan
|
f34b82b88bc58b1e698b576bb7e771bf36b4390c
|
[
"MIT"
] | null | null | null |
game.py
|
Xe-Xo/Catan
|
f34b82b88bc58b1e698b576bb7e771bf36b4390c
|
[
"MIT"
] | null | null | null |
import pygame
import random
import math
from grid import *
from players import *
## TO DO -- BETTER WAY TO TREAT GLOBALS THAT CAN BE ACCESSED BY MAIN AND GAME
class GAME_GLOBALS():
WHITE = (255,255,255)
LIGHT_GRAY = (200,200,200)
GRAY = (150,150,150)
DARK_GRAY = (100,100,100)
BLACK = (50,50,50)
RED = (230,30,30)
BLUE = (30,30,230)
GREEN = (30,230,30)
DARK_GREEN = (0,160,0)
KHAKI = (240,230,140)
OLIVE = (128,128,0)
MOCCASIN = (255,228,181)
LIGHT_YELLOW = (255,255,224)
#RGB codes sourced from https://www.pinterest.com.au/pin/238339005262876109/
BEAUTIFUL_BLUE = (0,104,132)
BEAUTIFUL_AQUA = (0,144,158)
BEAUTIFUL_LIGHT_BLUE = (137,219,236)
BEAUTIFUL_RED = (237,0,38)
BEAUTIFUL_ORANGE = (250, 157, 0)
BEAUTIFUL_SAND = (255,208,141)
BEAUTIFUL_ROSE = (176,0,81)
BEAUTIFUL_PEACH = (246,131,112)
BEAUTIFUL_PINK = (254,171,185)
BEAUTIFUL_PURPLE = (110,0,108)
BEAUTIFUL_LIGHT_PURPLE = (145,39,143)
BEAUTIFUL_GREEN = (149,212,122)
BEAUTIFUL_YELLOW = (254,226,62)
PLAYER_COLORS = [BEAUTIFUL_BLUE,BEAUTIFUL_GREEN,BEAUTIFUL_RED,BEAUTIFUL_YELLOW]
ORE_COLOR = (145,134,126)
WHEAT_COLOR = (201,194,127)
SHEEP_COLOR = (178,200,145)
BRICK_COLOR = (228,153,105)
WOOD_COLOR = (116,161,142)
ROBBER_COLOR = (185,156,107)
SEA_COLOR = (21,52,80)
SAND_COLOR = LIGHT_YELLOW
POS_X = (1,0,0)
NEG_X = (-1,0,0)
POS_Y = (0,1,0)
NEG_Y = (0,-1,0)
POS_Z = (0,0,1)
NEG_Z = (0,0,-1)
RESOURCE_TO_COLOR = [WOOD_COLOR,BRICK_COLOR,ORE_COLOR,WHEAT_COLOR,SHEEP_COLOR,ROBBER_COLOR]
SCREEN_WIDTH = 1200
SCREEN_HEIGHT = 800
SCREEN_CENTER = (SCREEN_WIDTH/2,SCREEN_HEIGHT/2)
GRID_SIZE = 2
NUMBER_HEXES_WIDTH = GRID_SIZE*2+1
HEX_DIAMETER = min(SCREEN_WIDTH/3*2,SCREEN_HEIGHT)/NUMBER_HEXES_WIDTH
HEX_GAP = HEX_DIAMETER/4
G_HEX_DIAMETER = HEX_DIAMETER - HEX_GAP
ALPHA = HEX_DIAMETER/4
BETA = math.sqrt(3) * ALPHA
G_ALPHA = (G_HEX_DIAMETER)/4
G_BETA = math.sqrt(3) * G_ALPHA
ROAD_LENGTH = HEX_DIAMETER/4
ROAD_THICKNESS = ROAD_LENGTH/10
ROLL_RANK = {
2:1,
3:2,
4:3,
5:4,
6:5,
7:0,
8:5,
9:4,
10:3,
11:2,
12:1
}
def ColorRange(color1,color2,additional_steps=0):
colorlist = []
c1r, c1g, c1b = color1
c2r, c2g, c2b = color2
sr, sg, sb = c2r - c1r, c2g - c1g, c2b - c1b
colorlist.append(color1)
for step in range(0,additional_steps):
newcolor = int(c1r + sr/additional_steps*step+1), int(c1g + sg/additional_steps*step+1), int(c1b + sb/additional_steps*step+1)
colorlist.append(newcolor)
colorlist.append(color2)
return colorlist
class Scene():
"""This class represents an instance of the gamestate"""
"""subclass to be used for State Machine between Main Menu, Game and GameOver screens"""
def __init__(self):
pass
def move_state(self, state_index):
if state_index == 0 and type(self) != MainMenu:
return MainMenu()
elif state_index == 1 and type(self) != Game:
return Game()
else:
return self
def process_events(self):
"""Process all the events. Return a True if we need to close the window"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True, self
return False, self
def run_logic(self):
pass
def display_frame(self,screen):
pygame.display.update()
def text_to_screen(self,screen,text,xy_tuple,size=16,color = GAME_GLOBALS.BLACK, font_type = None,offset=(0,0),background_color=None):
try:
text = str(text)
font = pygame.font.Font(font_type,size)
text = font.render(text,True,color)
x_offset, y_offset = offset
x_offset -= text.get_rect().width / 2
y_offset -= text.get_rect().height / 2
x, y = xy_tuple
x,y = x + x_offset, y + y_offset
if background_color is not None and type(background_color) is tuple:
background_color = self.fix_color(background_color)
pygame.draw.rect(screen,background_color,(x,y,text.get_rect().width, text.get_rect().height))
screen.blit(text,(x,y))
return x,y,text.get_rect().width, text.get_rect().height
except Exception as e:
print('Font Error')
raise e
def point_to_grid_coord(self,center_xy,xy_point,set_sign=None):
mouse_x, mouse_y = xy_point #Location of mouse in window
center_x, center_y = center_xy #Location of the centerpoint of the grid
pixel_x, pixel_y = mouse_x-center_x,mouse_y-center_y #x,y to be turned into grid coords
#coord to point uses matrix multiplication for calculation of x,y
#after rearranging formula into a square matrix (subsistute x = 0 - y - z) we can calculate the inverse matrix that can be multiplied by pixel_x, pixel_y
#see https://www.wolframalpha.com/input/?i=24*%7B%7B-sqrt%283%29%2C1%7D%2C%7B0%2C2%7D%7D*%7Bx%2Cy%7D+
#See Readme notes for detailed breakdown
hex_y = -2*pixel_x/(math.sqrt(3)*GAME_GLOBALS.HEX_DIAMETER) + (2*pixel_y)/(3*GAME_GLOBALS.HEX_DIAMETER)
hex_z = 0*pixel_x + -4*pixel_y/(3*GAME_GLOBALS.HEX_DIAMETER)
hex_x = 0 - hex_y - hex_z
print(Coords(hex_x,hex_y,hex_z),Coords(hex_x,hex_y,hex_z).round(set_total=set_sign),set_sign)
return Coords(hex_x,hex_y,hex_z).round(set_total=set_sign)
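    # Worked example of the inverse mapping above (ignoring the final rounding step):
    # a point sqrt(3)/2 * HEX_DIAMETER to the right of center_xy gives
    # hex_z = 0, hex_y = -1 and hex_x = 0 - (-1) - 0 = 1, i.e. cube coords (1, -1, 0).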
def point_to_corner_coord(self,center_xy,xy_point):
pointx, pointy = xy_point
nx, ny, nz = self.point_to_grid_coord(center_xy,xy_point,set_sign=-1).tuple()
px, py, pz = self.point_to_grid_coord(center_xy,xy_point,set_sign=1).tuple()
negposx, negposy = self.coord_to_point(center_xy,nx,ny,nz,gap=False)
posposx, posposy = self.coord_to_point(center_xy,px,py,pz,gap=False)
negdistance = math.pow(pointx - negposx,2) + math.pow(pointy - negposy,2)
        posdistance = math.pow(pointx - posposx,2) + math.pow(pointy - posposy,2)
        if negdistance < posdistance:
return self.point_to_grid_coord(center_xy,xy_point,set_sign=-1)
else:
return self.point_to_grid_coord(center_xy,xy_point,set_sign=1)
def coord_to_point(self,center_xy,x,y,z,gap=True):
#starting center point generally the xy of 0,0,0 hexagon
#however this changes if finding the points of a hexagon not in the center
#returns a tuple (x,y)
#gap (optional): renders points at approx 2/3rds back from usual render points. Useful for seeing gaps between hexes.
if gap == True:
pixel_x = (GAME_GLOBALS.G_BETA * x) + (-GAME_GLOBALS.G_BETA * y) + 0 * z
pixel_y = (GAME_GLOBALS.G_ALPHA * x) + (GAME_GLOBALS.G_ALPHA * y) + (-2 * GAME_GLOBALS.G_ALPHA * z)
else:
pixel_x = (GAME_GLOBALS.BETA * x) + (-GAME_GLOBALS.BETA * y) + 0 * z
pixel_y = (GAME_GLOBALS.ALPHA * x) + (GAME_GLOBALS.ALPHA * y) + (-2 * GAME_GLOBALS.ALPHA * z)
return (int(pixel_x + center_xy[0]),int(pixel_y + center_xy[1]))
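    # For example, with gap=False the cube coords (1, -1, 0) map to a pixel offset of
    # (2 * GAME_GLOBALS.BETA, 0) from center_xy -- the forward counterpart of the
    # point_to_grid_coord example above.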
def hex_corners(self,center_xy,x,y,z,gap=True):
#finds the corner x,y points given the hex coordinates.
#Calculates the center location of the hex then calculates the points surrounding it.
#AXIS movement heading clockwise starting at north
points = [(0,0,1),(0,-1,0),(1,0,0),(0,0,-1),(0,1,0),(-1,0,0)]
start_center = center_xy
hex_center = self.coord_to_point(start_center,x,y,z,gap=False)
corner_points = []
for point_xyz in points:
px,py,pz = point_xyz
corner_points.append(self.coord_to_point(hex_center,px,py,pz,gap=gap))
return corner_points
def fix_color(self,color_tuple):
new_color_list = []
for color_int in color_tuple:
if color_int > 255:
new_color = 255
elif color_int < 0:
new_color = 0
else:
new_color = int(color_int)
new_color_list.append(new_color)
return tuple(new_color_list)
def road_xy(self,screen,road_tuple):
x1, y1, x2, y2 = road_tuple
z1 = -1 - x1 - y1
z2 = 1 - x2 - y2
cx1,cy1 = self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,x1,y1,z1,gap=False)
cx2,cy2 = self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,x2,y2,z2,gap=False)
nx1,nx2 = cx1 + (cx2 - cx1)/5 , cx2 - (cx2 - cx1)/5
ny1,ny2 = cy1 + (cy2 - cy1)/5 , cy2 - (cy2 - cy1)/5
return ((nx1,ny1),(nx2,ny2))
def road_rect(self,screen,road_tuple):
        #return the four corner points of a rectangle of length ROAD_LENGTH and half-width ROAD_THICKNESS, centred on the segment between the two corner points
x1, y1, x2, y2 = road_tuple
z1 = -1 - x1 - y1
z2 = 1 - x2 - y2
cx1,cy1 = self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,x1,y1,z1,gap=False)
cx2,cy2 = self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,x2,y2,z2,gap=False)
x_delta = cx2 - cx1
y_delta = cy2 - cy1
hypot = math.sqrt(math.pow(x_delta,2) + math.pow(y_delta,2))
n = (hypot-GAME_GLOBALS.ROAD_LENGTH)/2
iX = n * x_delta/hypot
iY = n * y_delta/hypot
try:
angle = math.atan(y_delta/x_delta)
xmove = math.sin(angle) * GAME_GLOBALS.ROAD_THICKNESS
ymove = math.cos(angle) * GAME_GLOBALS.ROAD_THICKNESS
except ZeroDivisionError:
xmove = GAME_GLOBALS.ROAD_THICKNESS
ymove = 0
p1 = cx1 + iX - xmove, cy1 + iY + ymove
p2 = cx1 + iX + xmove, cy1 + iY - ymove
p3 = cx2 - iX + xmove, cy2 - iY - ymove
p4 = cx2 - iX - xmove, cy2 - iY + ymove
return [p1,p2,p3,p4]
class Game(Scene):
"""This class represents an instance of the game"""
def __init__(self):
super().__init__()
self.debug = False
self.road_hover_tuple = None
self.grid_hover_tuple = None
self.corner_hover_tuple = None
"""Constructor. create all our attributes and initialise the game"""
self.game_over = False
"""Create the grid"""
self.grid = Grid(GAME_GLOBALS.GRID_SIZE)
"""Setup grid numbers and resources"""
self.setup_grid_numbers() #Randomly apply numbers to the board
self.setup_grid_resources() #Randomly apply resources to the board
self.setup_corner_ranks() #Calculate the strength of each corner
"""Setup players"""
self.setup_players()
self.setup_settlements()
self.setup_ports()
self.setup_roads()
self.settlements_placed = [] #round, player_id, settlement_tuple
self.roads_placed = [] #round, player_id, road_tuple
self.round = 0
def process_events(self):
"""Process all the events. Return a True if we need to close the Window"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True, self
elif self.current_game_state == "place_settlement":
if event.type == pygame.MOUSEBUTTONDOWN:
coord_pointing = self.point_to_corner_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos())
if self.valid_settlement(self.turn,coord_pointing):
self.place_settlement(self.turn,coord_pointing,self.round)
self.next_game_state()
return False,self
else:
self.corner_hover_tuple = self.point_to_corner_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos())
elif self.current_game_state == "place_road":
if event.type == pygame.MOUSEBUTTONDOWN:
coord_pointing_neg = self.point_to_grid_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos(),set_sign=-1)
coord_pointing_pos = self.point_to_grid_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos(),set_sign=1)
road_pos = (coord_pointing_neg.x, coord_pointing_neg.y, coord_pointing_pos.x, coord_pointing_pos.y)
if self.valid_road(self.turn,road_pos):
self.place_road(self.turn, road_pos,self.round)
self.next_game_state()
return False,self
else:
coord_pointing_neg = self.point_to_grid_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos(),set_sign=-1)
coord_pointing_pos = self.point_to_grid_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos(),set_sign=1)
self.road_hover_tuple = (coord_pointing_neg.x, coord_pointing_neg.y, coord_pointing_pos.x, coord_pointing_pos.y)
elif self.current_game_state == "debug":
coord_pointing_neg = self.point_to_grid_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos(),set_sign=-1)
coord_pointing_pos = self.point_to_grid_coord(GAME_GLOBALS.SCREEN_CENTER,pygame.mouse.get_pos(),set_sign=1)
road_pos = (coord_pointing_neg.x, coord_pointing_neg.y, coord_pointing_pos.x, coord_pointing_pos.y)
self.debug_road_tuple = road_pos
return self.game_over, self
def run_logic(self):
if self.current_game_state == "allocate_initial_resources":
for round, turn, settlement_tuple in self.settlements_placed:
if round == 1:
hex_coords = self.grid.corners[settlement_tuple].connected_hexes_coords()
for hex in hex_coords:
try:
resource_index = self.grid_resources[hex.tuple()]
self.players[turn].add_resouces(ResourceBunch.from_index(resource_index))
except:
pass
self.next_game_state()
def display_frame(self,screen):
if self.debug == True:
self.display_debug(screen)
elif self.current_game_state == "place_settlement":
self.display_place_settlement(screen,self.turn)
elif self.current_game_state == "place_road":
self.display_place_road(screen,self.turn)
elif self.current_game_state == "player_turn":
self.display_player_turn(screen,self.turn)
pygame.display.update()
"""Display Methods"""
def display_debug(self,screen):
self.draw_background(screen)
self.draw_hexes(screen)
self.draw_roads(screen)
self.draw_potential_road(screen,self.debug_road_tuple)
def display_place_settlement(self,screen,playerindex):
self.draw_background(screen)
self.draw_hexes(screen)
self.draw_hexes_scarcity(screen)
self.draw_game_state(screen)
self.draw_valid_settlements(screen,playerindex)
self.draw_corners_ranks(screen)
self.draw_settlements(screen)
self.draw_roads(screen)
def display_place_road(self,screen,playerindex):
self.draw_background(screen)
self.draw_hexes(screen)
self.draw_hexes_scarcity(screen)
self.draw_game_state(screen)
self.draw_settlements(screen)
self.draw_valid_roads(screen,playerindex)
self.draw_roads(screen)
def display_player_turn(self,screen,player_index):
self.draw_background(screen)
self.draw_hexes(screen)
self.draw_hexes_scarcity(screen)
self.draw_game_state(screen)
self.draw_settlements(screen)
self.draw_roads(screen)
"""Player Methods"""
def setup_players(self):
"""Setup players"""
self.players = []
for playerindex in range(0,4):
self.players.append(Human(playerindex))
self.game_state_queue = [((0,0),"place_settlement"),((0,0),"place_road"),((0,1),"place_settlement"),((0,1),"place_road"),
((0,2),"place_settlement"),((0,2),"place_road"),((0,3),"place_settlement"),((0,3),"place_road"),
((1,3),"place_settlement"),((1,3),"place_road"),((1,2),"place_settlement"),((1,2),"place_road"),
((1,1),"place_settlement"),((1,1),"place_road"),((1,0),"place_settlement"),((1,0),"place_road"),
((2,0),"allocate_initial_resources"),((2,0),"player_turn")
]
self.next_game_state()
def next_player_index(self,current_player_index):
next_player_index = current_player_index + 1
if next_player_index >= 4:
            return self.round + 1, 0
        return self.round, next_player_index
def next_player(self,player):
        return self.players[self.next_player_index(player.playerindex)[1]]
def place_settlement(self,player_index,coords,round):
print(f"set settlement {coords.tuple()} --> {player_index}")
self.settlements_placed.append((round,player_index,coords.tuple()))
self.settlements[coords.tuple()] = player_index
def place_road(self,player_index,road_tuple,round):
print(f"set road {road_tuple} --> {player_index}")
self.roads_placed.append((round,player_index,road_tuple))
self.roads[road_tuple] = player_index
"""Game State Methods"""
def next_game_state(self):
try:
(self.round, self.turn), self.current_game_state = self.game_state_queue.pop(0)
except:
(self.round, self.turn), self.current_game_state = self.next_player_index(self.turn), "player_turn"
def valid_settlement(self,player_id,coords,limit_by_road=False):
        #TO DO - this is complicated when it really shouldn't be.
#clean up this trainwreck
try:
corner_selected = self.grid.corners[coords.tuple()]
if self.settlements[coords.tuple()] != -1:
return False
for other_corners_coords in corner_selected.connected_corner_coords():
try:
if self.settlements[other_corners_coords.tuple()] != -1:
return False
except:
pass
if limit_by_road:
for road_coords in corner_selected.connected_road_coords():
if self.roads[road_coords] == player_id:
return True
return False
else:
return True
except KeyError:
print("KeyError")
return False
def valid_road(self,player_id,roadcoords):
return True
"""Setup Grid Methods"""
def setup_grid_numbers(self):
"""Load self.grid_numbers and self.number_to_hex with the values"""
self.grid_numbers = {}
self.number_to_hex = {}
possible_numbers = [2,3,3,4,4,5,5,6,6,7,8,8,9,9,10,10,11,11,12]
for num in possible_numbers:
self.number_to_hex[num] = []
random.shuffle(possible_numbers)
for hex in self.grid.hexes.values():
number_picked = possible_numbers.pop(0)
self.number_to_hex[number_picked].append(hex)
self.grid_numbers[hex.coords.tuple()] = number_picked
print("Grid Numbers applied!")
def setup_grid_resources(self):
"""Load self.grid_resources with values"""
resource_index = [0,1,2,3,4]
random.shuffle(resource_index)
resources = [resource_index.pop(0)] * 4
resources += [resource_index.pop(0)] * 4
resources += [resource_index.pop(0)] * 4
resources += [resource_index.pop(0)] * 3
resources += [resource_index.pop(0)] * 3
random.shuffle(resources)
self.grid_resources = {}
for hex_tuple in self.grid.hexes.keys():
if self.grid_numbers[hex_tuple] != 7:
try:
self.grid_resources[hex_tuple] = resources.pop(0)
except IndexError as e:
print(e)
else:
self.grid_resources[hex_tuple] = 5
print("Grid Resources applied!")
def calculate_resource_scarcity(self):
        # Approximates, over 36 evenly distributed rolls, how often each resource is handed out.
        # Resource scarcity = SUM(ways the hex's number can be rolled) over that resource's hexes,
        # divided by 58 (the same sum taken over all 19 number tokens on the board).
        # e.g. 2 can be rolled 1 way and has one token on the board,
        # 3 can be rolled 2 ways and has two tokens on the board, and so on;
        # adding up the roll ranks of every token gives 58.
resource_scarcity = []
for resource_index in [0,1,2,3,4]:
totalpoints = 0
for hex_tuple in self.grid_resources.keys():
if self.grid_resources[hex_tuple] == resource_index:
totalpoints += GAME_GLOBALS.ROLL_RANK[self.grid_numbers[hex_tuple]]/58
resource_scarcity.append(totalpoints)
return resource_scarcity
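    # Hypothetical example of the calculation above: if a resource sits on hexes
    # numbered 6, 8, 9 and 11, its scarcity score is (5 + 5 + 4 + 2) / 58 ~= 0.28.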
def setup_corner_ranks(self):
#Calculates the strength of a corner given the overall resource scarcity
#
self.corner_ranks = {}
for corner in self.grid.corners.values():
rank_value = 0
for hex_coords in corner.connected_hexes_coords():
try:
resource_index = self.grid_resources[hex_coords.tuple()]
try:
                        resource_scarcity = self.calculate_resource_scarcity()[resource_index]
except:
resource_scarcity = 1
roll_rank_of_hex = GAME_GLOBALS.ROLL_RANK[self.grid_numbers[hex_coords.tuple()]]
rank_value += roll_rank_of_hex/resource_scarcity
except KeyError as e:
rank_value += 0
self.corner_ranks[corner.coords.tuple()] = int(rank_value)
def setup_settlements(self):
self.settlements = {}
for corner_tuple in self.grid.corners.keys():
self.settlements[corner_tuple] = -1
def setup_roads(self):
self.roads = {}
for road_tuple in self.grid.roads.keys():
self.roads[road_tuple] = -1
def setup_ports(self):
self.ports = {}
possible_ports = [
((),("sheep",2)),
((),("wood",2)),
((),("brick",2)),
((),("ore",2)),
((),("wheat",2)),
((),("any",3)),
((),("any",3)),
((),("any",3))
]
"""Render Methods"""
def draw_background(self,screen):
screen.fill(GAME_GLOBALS.SEA_COLOR)
def draw_hexes(self,screen):
for hex in self.grid.hexes.values():
hx, hy, hz = hex.coords.tuple()
pygame.draw.polygon(screen,GAME_GLOBALS.BEAUTIFUL_LIGHT_BLUE, self.hex_corners(GAME_GLOBALS.SCREEN_CENTER,hx,hy,hz,gap=False),20)
for hex in self.grid.hexes.values():
hx, hy, hz = hex.coords.tuple()
resource_color = GAME_GLOBALS.RESOURCE_TO_COLOR[self.grid_resources[hex.coords.tuple()]]
pygame.draw.polygon(screen,GAME_GLOBALS.SAND_COLOR, self.hex_corners(GAME_GLOBALS.SCREEN_CENTER,hx,hy,hz,gap=False))
pygame.draw.polygon(screen,GAME_GLOBALS.BLACK, self.hex_corners(GAME_GLOBALS.SCREEN_CENTER,hx,hy,hz,gap=False),2)
pygame.draw.polygon(screen,resource_color, self.hex_corners(GAME_GLOBALS.SCREEN_CENTER,hx,hy,hz))
number = self.grid_numbers[hex.coords.tuple()]
self.text_to_screen(screen,number,self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,hx,hy,hz,gap=False),size=20,color=GAME_GLOBALS.WHITE)
def draw_hexes_scarcity(self,screen):
for hex in self.grid.hexes.values():
hx, hy, hz = hex.coords.tuple()
resource_index = self.grid_resources[hx,hy,hz]
try:
                resource_scarcity = self.calculate_resource_scarcity()[resource_index]
except:
resource_scarcity = 1
roll_rank_of_hex = GAME_GLOBALS.ROLL_RANK[self.grid_numbers[(hx,hy,hz)]]
number = round(roll_rank_of_hex/resource_scarcity,2)
string = "".join(int(number) * ['*'])
self.text_to_screen(screen,string,self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,hx,hy,hz,gap=False),size=20,offset=(0,10),color=GAME_GLOBALS.WHITE)
def draw_settlements(self,screen):
for corner in self.grid.corners.values():
try:
settlementplayer = self.settlements[corner.coords.tuple()]
if settlementplayer != -1:
pygame.draw.circle(screen, GAME_GLOBALS.PLAYER_COLORS[settlementplayer],self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,corner.coords.x,corner.coords.y,corner.coords.z,gap=False),int(GAME_GLOBALS.ALPHA/4))
except KeyError as e:
                print(corner.coords.tuple())
def draw_roads(self,screen):
for road in self.grid.roads.values():
roadplayer = self.roads[road.coords_tuple()]
if roadplayer != -1:
pointslist = self.road_rect(screen,road.coords_tuple())
pygame.draw.polygon(screen, GAME_GLOBALS.PLAYER_COLORS[roadplayer], pointslist)
def draw_game_state(self,screen):
self.text_to_screen(screen,self.current_game_state,(100,100),size=32,color=GAME_GLOBALS.RED)
self.text_to_screen(screen,self.turn,(150,150),size=32,color=GAME_GLOBALS.RED)
def draw_corners_ranks(self,screen):
for corner in self.grid.corners.values():
cx,cy,cz = corner.coords.tuple()
if self.valid_settlement(1,corner.coords):
color = self.fix_color((int(255/14*(self.corner_ranks[corner.coords.tuple()]-1)),150,150))
try:
pygame.draw.circle(screen, color, self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER, cx, cy, cz, gap=False),int(GAME_GLOBALS.ALPHA/4),2)
except:
print(color)
self.text_to_screen(screen,self.corner_ranks[corner.coords.tuple()],self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,cx,cy,cz,gap=False),size=16)
def draw_valid_settlements(self,screen,player_index):
for corner in self.grid.corners.values():
if self.valid_settlement(player_index,corner.coords,limit_by_road=False):
pygame.draw.circle(screen, GAME_GLOBALS.LIGHT_GRAY,self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,corner.coords.x,corner.coords.y,corner.coords.z,gap=False),int(GAME_GLOBALS.ALPHA/4))
def draw_valid_roads(self,screen,player_index):
for road in self.grid.roads.values():
if self.valid_road(player_index,road.coords_tuple()):
pointslist = self.road_rect(screen,road.coords_tuple())
pygame.draw.polygon(screen, GAME_GLOBALS.LIGHT_GRAY, pointslist)
def draw_potential_road(self,screen,road_tuple):
if road_tuple in self.roads.keys():
pointslist = self.road_rect(screen,road_tuple)
pygame.draw.polygon(screen, GAME_GLOBALS.BEAUTIFUL_PINK, pointslist)
def draw_potential_settlement(self,screen,settlement_tuple):
if settlement_tuple in self.settlements.keys():
sx, sy, sz = settlement_tuple
drawpoint = self.coord_to_point(GAME_GLOBALS.SCREEN_CENTER,sx,sy,sz,gap=False)
pygame.draw.circle(screen, GAME_GLOBALS.BEAUTIFUL_PINK,drawpoint,int(GAME_GLOBALS.ALPHA/4))
class MainMenu(Scene):
def __init__(self):
super().__init__()
self.buttons = {}
self.grid = Grid(2)
self.setup_background()
def process_events(self):
"""Process all the events. Return a True if we need to close the window, return GameState should be using after events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True, self
elif event.type == pygame.MOUSEBUTTONDOWN:
for button_coords in self.buttons.keys():
x,y,w,h = button_coords
if self.button_check(pygame.mouse.get_pos(),x,y,w,h):
for function in self.buttons[button_coords]:
if function == "game_start":
return False, self.move_state(1)
elif function == "game_quit":
return True, self
return False, self
def run_logic(self):
x, y = self.background_center
self.background_center = x + self.move, y + self.move
if self.background_center[0] > GAME_GLOBALS.SCREEN_WIDTH or self.background_center[0] < 0 or self.background_center[1] > GAME_GLOBALS.SCREEN_HEIGHT or self.background_center[1] < 0:
self.move = self.move * -1
def display_frame(self,screen):
self.draw_background(screen)
self.draw_menu_background(screen)
self.draw_title(screen)
self.draw_start_button(screen)
pygame.display.update()
def setup_background(self):
#creating the background grid
self.background_center = (0,0)
self.hex_colors = {}
self.move = 1
for hex in self.grid.hexes.values():
self.hex_colors[hex.coords.tuple()] = random.choice(GAME_GLOBALS.RESOURCE_TO_COLOR)
def draw_background(self,screen):
screen.fill(GAME_GLOBALS.SEA_COLOR)
for hex in self.grid.hexes.values():
hx, hy, hz = hex.coords.tuple()
resource_color = self.hex_colors[hx,hy,hz]
pygame.draw.polygon(screen,GAME_GLOBALS.SAND_COLOR, self.hex_corners(self.background_center,hx,hy,hz,gap=False))
pygame.draw.polygon(screen,resource_color, self.hex_corners(self.background_center,hx,hy,hz))
def draw_title(self,screen):
self.text_to_screen(screen,"Settlers of Catan", (GAME_GLOBALS.SCREEN_CENTER[0], 60), size=100, color=GAME_GLOBALS.WHITE)
def draw_menu_background(self,screen):
w, h = GAME_GLOBALS.SCREEN_WIDTH/3 , GAME_GLOBALS.SCREEN_HEIGHT/3
x, y = GAME_GLOBALS.SCREEN_CENTER
x = x - w/2
y = y - h/2
pygame.draw.rect(screen,GAME_GLOBALS.BLACK,(x,y,w,h))
def draw_start_button(self,screen):
center_x, center_y = GAME_GLOBALS.SCREEN_CENTER
button_posx, button_posy = center_x, center_y - GAME_GLOBALS.SCREEN_HEIGHT/10
self.make_button(screen,GAME_GLOBALS.BLACK,GAME_GLOBALS.WHITE,button_posx,button_posy,"START",["game_start"])
def draw_quit_button(self,screen):
center_x, center_y = GAME_GLOBALS.SCREEN_CENTER
button_posx, button_posy = center_x, center_y - GAME_GLOBALS.SCREEN_HEIGHT/4
self.make_button(screen,GAME_GLOBALS.BLACK,GAME_GLOBALS.WHITE,button_posx,button_posy,"QUIT",["game_quit"])
def make_button(self,screen,color,text_color,x,y,text,functionalitylist):
x,y,w,h = self.text_to_screen(screen,text,(x,y),size=32,color=text_color,background_color=color)
self.buttons[x,y,w,h] = functionalitylist
def button_check(self,mouse_pos,x,y,w,h):
#mouse pos from event.pos
#x,y where you render the button
#x1,y1 is the width/height
#returns true if button is clicked
mouse_x, mouse_y = mouse_pos
return mouse_x >= x and mouse_x < x + w and mouse_y >= y and mouse_y < y + h
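    # For example, a button rendered at x=100, y=200 with width 80 and height 30 is hit
    # for mouse positions where 100 <= mouse_x < 180 and 200 <= mouse_y < 230.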
| 39.86675
| 224
| 0.613001
|
e850dc18a522715904d99e972558fa9712fb49a5
| 12,823
|
py
|
Python
|
src/whop/whopclient/model/create_checkout_log_request.py
|
whopio/whop-python-sdk
|
9b4da585bf81065a9a435cf6651d9a0cd206088c
|
[
"MIT"
] | null | null | null |
src/whop/whopclient/model/create_checkout_log_request.py
|
whopio/whop-python-sdk
|
9b4da585bf81065a9a435cf6651d9a0cd206088c
|
[
"MIT"
] | null | null | null |
src/whop/whopclient/model/create_checkout_log_request.py
|
whopio/whop-python-sdk
|
9b4da585bf81065a9a435cf6651d9a0cd206088c
|
[
"MIT"
] | null | null | null |
"""
Whop API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.10
Contact: support@whop.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from whop.whopclient.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from whop.whopclient.exceptions import ApiAttributeError
class CreateCheckoutLogRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('status',): {
'SUCCESS': "success",
'DENIED': "denied",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'key': (str,), # noqa: E501
'product_name': (str,), # noqa: E501
'website': (str,), # noqa: E501
'size': (int,), # noqa: E501
'image_url': (str,), # noqa: E501
'price': (float,), # noqa: E501
'status': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'key': 'key', # noqa: E501
'product_name': 'product_name', # noqa: E501
'website': 'website', # noqa: E501
'size': 'size', # noqa: E501
'image_url': 'image_url', # noqa: E501
'price': 'price', # noqa: E501
'status': 'status', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""CreateCheckoutLogRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
key (str): The license key.. [optional] # noqa: E501
product_name (str): The name of the product.. [optional] # noqa: E501
website (str): The website associated with the product.. [optional] # noqa: E501
size (int): The size of the product.. [optional] # noqa: E501
image_url (str): An image URL of the product. [optional] # noqa: E501
price (float): The price of the product.. [optional] # noqa: E501
status (str): The status of the checkout log. Can be: success or denied.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""CreateCheckoutLogRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
key (str): The license key.. [optional] # noqa: E501
product_name (str): The name of the product.. [optional] # noqa: E501
website (str): The website associated with the product.. [optional] # noqa: E501
size (int): The size of the product.. [optional] # noqa: E501
image_url (str): An image URL of the product. [optional] # noqa: E501
price (float): The price of the product.. [optional] # noqa: E501
status (str): The status of the checkout log. Can be: success or denied.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.992982
| 124
| 0.567028
|
9907edefe8393736c52bbad19f029e86a1b29115
| 1,896
|
py
|
Python
|
login/views.py
|
nitinankad/diet-tracker
|
e21afc578e0c2575a7b6168a488e123e9a319c19
|
[
"MIT"
] | 4
|
2020-10-13T18:42:48.000Z
|
2021-08-10T13:57:00.000Z
|
login/views.py
|
nitinankad/diet-tracker
|
e21afc578e0c2575a7b6168a488e123e9a319c19
|
[
"MIT"
] | null | null | null |
login/views.py
|
nitinankad/diet-tracker
|
e21afc578e0c2575a7b6168a488e123e9a319c19
|
[
"MIT"
] | 3
|
2019-12-13T23:32:10.000Z
|
2020-06-02T22:42:28.000Z
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from login.forms import LoginForm
def register(request):
if request.method == "POST":
login_form = LoginForm(data=request.POST)
if login_form.is_valid():
user = login_form.save()
print(user.password)
user.set_password(user.password)
user.save()
messages.success(request, "Successfully registered")
return HttpResponseRedirect("/login")
messages.error(request, "Error while registering")
return HttpResponseRedirect("/login")
@login_required
def user_logout(request):
logout(request)
messages.success(request, "Logged out")
return HttpResponseRedirect("/login")
def login_attempt(request):
success = False
if "demo" in request.POST:
username = password = "demo"
else:
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
success = True
return success
def login_index(request):
if request.method == "POST":
result = login_attempt(request)
if result:
messages.success(request, "Successfully logged in")
else:
messages.error(request, "Error logging in")
return HttpResponseRedirect("/")
form_responses = messages.get_messages(request)
response_message = None
for response in form_responses:
response_message = response
break
form_responses.used = True
return render(request, "login/login.html", {"response_message": response_message})
| 27.882353
| 86
| 0.672996
|
8585d3d969eb2df6f59daea7aa65925c3a46a8d5
| 2,371
|
py
|
Python
|
core.py
|
kmlmhnn/ren
|
fcf83bd16193fc15f27a8b3234ac0e150bd27ce3
|
[
"MIT"
] | null | null | null |
core.py
|
kmlmhnn/ren
|
fcf83bd16193fc15f27a8b3234ac0e150bd27ce3
|
[
"MIT"
] | null | null | null |
core.py
|
kmlmhnn/ren
|
fcf83bd16193fc15f27a8b3234ac0e150bd27ce3
|
[
"MIT"
] | null | null | null |
import os
def prefixfn(string, _, new):
return new + string
def suffixfn(string, _, new):
return string + new
def insertfn(string, substring, new):
if not new or not substring or substring not in string:
return string
before, sep, after = string.partition(substring)
return ''.join([before, new, sep, after])
def appendfn(string, substring, new):
if not new or not substring or substring not in string:
return string
before, sep, after = string.partition(substring)
return ''.join([before, sep, new, after])
def changefn(string, substring, replacement):
if not substring or substring not in string:
return string
before, sep, after = string.partition(substring)
return ''.join([before, replacement, after]) or string
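# Illustrative examples of the partition-based helpers above (filenames are made up):
# insertfn('holiday_001.jpg', '001', 'v2_') -> 'holiday_v2_001.jpg'
# appendfn('holiday_001.jpg', '001', '_v2') -> 'holiday_001_v2.jpg'
# changefn('holiday_001.jpg', '001', '042') -> 'holiday_042.jpg'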
def listdir(path):
os.chdir(path)
return [name for name in os.listdir() if not os.path.isdir(name)]
def rename(path, lst):
os.chdir(path)
count = 0
for (src, dest) in lst:
if src != dest:
os.replace(src, dest)
count += 1
return count
class FilenameCollisionError(Exception):
pass
class Selection:
def __init__(self, filenames):
self.entries = [[name] for name in filenames]
self.stack = [list(range(len(self.entries)))]
def clear(self):
self.stack = [list(range(len(self.entries)))]
def active(self):
return self.stack[-1]
def tighten(self, pattern):
current = self.active()
new = [i for i in current if pattern in self.entries[i][0]]
if current != new:
self.stack.append(new)
def loosen(self):
if len(self.stack) > 1:
self.stack.pop()
def peek(self):
result = []
for i in self.active():
entry = self.entries[i]
result.append((entry[0], entry[-1]))
return result
def transform(self, fn):
for i in self.active():
entry = self.entries[i]
entry.append(fn(entry[-1]))
names = [e[-1] for e in self.entries]
if len(names) != len(set(names)):
self.rollback()
raise FilenameCollisionError('Filename collisions detected.')
def rollback(self):
for i in self.active():
entry = self.entries[i]
if len(entry) > 1:
entry.pop()
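# Rough usage sketch of the pieces above (path and patterns are made up):
#
#     sel = Selection(listdir('/tmp/photos'))
#     sel.tighten('IMG_')                                  # narrow the active selection
#     sel.transform(lambda s: prefixfn(s, None, 'trip_'))  # stage new names
#     rename('/tmp/photos', sel.peek())                    # peek() yields (old, new) pairs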
| 25.223404
| 73
| 0.592155
|
dee87e2cd732e0965365e9cdbfcf155f0d78d6f0
| 22,397
|
py
|
Python
|
tools/converter.py
|
htwangtw/pydra-fsl
|
84b18e32eb181f61780bff75240be7fa05efa637
|
[
"Apache-2.0"
] | 1
|
2021-06-17T09:58:06.000Z
|
2021-06-17T09:58:06.000Z
|
tools/converter.py
|
htwangtw/pydra-fsl
|
84b18e32eb181f61780bff75240be7fa05efa637
|
[
"Apache-2.0"
] | 16
|
2020-11-03T13:56:12.000Z
|
2022-01-31T17:07:13.000Z
|
tools/converter.py
|
htwangtw/pydra-fsl
|
84b18e32eb181f61780bff75240be7fa05efa637
|
[
"Apache-2.0"
] | 4
|
2020-06-16T17:40:37.000Z
|
2021-02-18T09:42:48.000Z
|
from attr import has
from nipype.interfaces import fsl
from nipype.interfaces.base import traits_extension
from pydra.engine import specs
from pydra.engine.helpers import ensure_list
import os, sys, yaml, black, imp
import traits
from pathlib import Path
import typing as ty
import inspect
import click
import warnings
import functools
sys.path.append(str(Path(__file__).resolve().parent.parent / 'specs'))
import callables
class FSLConverter:
INPUT_KEYS = [
"allowed_values",
"argstr",
"container_path",
"copyfile",
"desc",
"mandatory",
"position",
"requires",
"sep",
"xor",
]
OUTPUT_KEYS = ["desc"]
NAME_MAPPING = {"desc": "help_string"}
TRAITS_IRREL = [
'output_type',
'args',
'environ',
'environ_items',
'__all__',
'trait_added',
'trait_modified',
]
TYPE_REPLACE = [
("\'File\'", "specs.File"),
("\'bool\'", "bool"),
("\'str\'", "str"),
("\'Any\'", "ty.Any"),
("\'int\'", "int"),
("\'float\'", "float"),
("\'list\'", "list"),
("\'dict\'", "dict"),
("\'MultiInputObj\'", "specs.MultiInputObj"),
("\'MultiOutputObj\'", "specs.MultiOutputObj"),
("\'MultiInputFile\'", "specs.MultiInputFile"),
("\'MultiOutputFile\'", "specs.MultiOutputFile"),
]
def __init__(self, interface_name, interface_spec_file):
self.interface_name = interface_name
with interface_spec_file.open() as f:
self.interface_spec = yaml.safe_load(f)[self.interface_name]
if self.interface_spec.get("output_requirements") is None:
self.interface_spec["output_requirements"] = []
if self.interface_spec.get("inputs_metadata") is None:
self.interface_spec["inputs_metadata"] = {}
if self.interface_spec.get("inputs_drop") is None:
self.interface_spec["inputs_drop"] = []
if self.interface_spec.get("output_templates") is None:
self.interface_spec["output_templates"] = {}
if self.interface_spec.get("output_callables") is None:
self.interface_spec["output_callables"] = {}
if (
not self.interface_spec["output_callables"]
.keys()
.isdisjoint(self.interface_spec["output_templates"].keys())
):
raise Exception("output_callables and output_templates have the same keys")
if self.interface_spec.get("doctest") is None:
self.interface_spec["doctest"] = {}
# getting input/output spec from nipype
nipype_interface = getattr(fsl, self.interface_name)
self.cmd = nipype_interface._cmd
self.nipype_input_spec = nipype_interface.input_spec()
self.nipype_output_spec = nipype_interface.output_spec()
def pydra_specs(self, write=False, dirname=None):
"""creating pydra input/output spec from nipype specs
if write is True, a pydra Task class will be written to the file together with tests
"""
input_fields_pdr, inp_templates = self.convert_input_fields()
output_fields_pdr = self.convert_output_spec(fields_from_template=inp_templates)
input_spec_pydra = specs.SpecInfo(
name="Input", fields=input_fields_pdr, bases=(specs.ShellSpec,)
)
output_spec_pydra = specs.SpecInfo(
name="Output", fields=output_fields_pdr, bases=(specs.ShellOutSpec,)
)
if write:
if dirname is None:
raise Exception("dirname has to be provided if write is True")
self.write_pydra_files(
dirname=dirname,
pydra_input_spec=input_fields_pdr,
pydra_output_spec=output_fields_pdr,
)
return input_spec_pydra, output_spec_pydra
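    # Hedged usage sketch (interface name and paths below are illustrative only):
    #
    #     converter = FSLConverter('BET', Path('specs/fsl_conversion_specs.yaml'))
    #     input_spec, output_spec = converter.pydra_specs(write=True, dirname=Path('pydra/tasks/fsl'))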
def write_pydra_files(self, dirname, pydra_input_spec, pydra_output_spec):
"""writing pydra task and tests to the files"""
testdir = dirname / "tests"
testdir.mkdir(parents=True, exist_ok=True)
Path.touch(dirname / "__init__.py")
Path.touch(testdir / "__init__.py")
filename = dirname / f"{self.interface_name.lower()}.py"
filename_test = testdir / f"test_spec_{filename.name}"
filename_test_run = testdir / f"test_run_{filename.name}"
print("\n FILENAME", filename)
self.write_task(filename, pydra_input_spec, pydra_output_spec)
self.write_test(filename_test=filename_test)
self.write_test(filename_test=filename_test_run, run=True)
def write_task(self, filename, input_fields, output_fields):
"""writing pydra task to the dile based on the input and output spec"""
def types_to_names(spec_fields):
spec_fields_str = []
for el in spec_fields:
el = list(el)
try:
el[1] = el[1].__name__
except (AttributeError):
el[1] = el[1]._name
spec_fields_str.append(tuple(el))
return spec_fields_str
input_fields_str = types_to_names(spec_fields=input_fields)
output_fields_str = types_to_names(spec_fields=output_fields)
functions_str = self.function_callables()
spec_str = "from pydra.engine import specs \nfrom pydra import ShellCommandTask \n"
spec_str += f"import typing as ty\n"
spec_str += functions_str
spec_str += f"input_fields = {input_fields_str}\n"
spec_str += f"{self.interface_name}_input_spec = specs.SpecInfo(name='Input', fields=input_fields, bases=(specs.ShellSpec,))\n\n"
spec_str += f"output_fields = {output_fields_str}\n"
spec_str += f"{self.interface_name}_output_spec = specs.SpecInfo(name='Output', fields=output_fields, bases=(specs.ShellOutSpec,))\n\n"
spec_str += f"class {self.interface_name}(ShellCommandTask):\n"
if self.interface_spec["doctest"]:
spec_str += self.create_doctest()
spec_str += f" input_spec = {self.interface_name}_input_spec\n"
spec_str += f" output_spec = {self.interface_name}_output_spec\n"
spec_str += f" executable='{self.cmd}'\n"
for tp_repl in self.TYPE_REPLACE:
spec_str = spec_str.replace(*tp_repl)
spec_str_black = black.format_file_contents(spec_str, fast=False, mode=black.FileMode())
with open(filename, "w") as f:
f.write(spec_str_black)
def write_test(self, filename_test, run=False):
"""writing tests for the specific interface based on the test spec (from interface_spec)
        if run is True the test also runs the task,
        if run is False only the spec is checked by the test
"""
tests_inputs = self.interface_spec["tests_inputs"]
tests_outputs = self.interface_spec["tests_outputs"]
if len(tests_inputs) != len(tests_outputs):
raise Exception("tests and tests_outputs should have the same length")
tests_inp_outp = []
tests_inp_error = []
for i, out in enumerate(tests_outputs):
if isinstance(out, list):
tests_inp_outp.append((tests_inputs[i], out))
elif out is None:
tests_inp_outp.append((tests_inputs[i], []))
            # outputs can also name an expected exception for incomplete or incorrect inputs
elif out not in ["AttributeError", "Exception"]:
tests_inp_outp.append((tests_inputs[i], [out]))
else:
tests_inp_error.append((tests_inputs[i], out))
spec_str = f"import os, pytest \nfrom pathlib import Path\n"
spec_str += f"from ..{self.interface_name.lower()} import {self.interface_name} \n\n"
if run:
spec_str += (
"@pytest.mark.xfail('FSLDIR' not in os.environ, reason='no FSL found', "
"raises=FileNotFoundError)\n"
)
spec_str += f"@pytest.mark.parametrize('inputs, outputs', {tests_inp_outp})\n"
spec_str += f"def test_{self.interface_name}(test_data, inputs, outputs):\n"
spec_str += f" in_file = Path(test_data) / 'test.nii.gz'\n"
spec_str += f" if inputs is None: inputs = {{}}\n"
spec_str += f" for key, val in inputs.items():\n"
spec_str += f" try: inputs[key] = eval(val)\n"
spec_str += f" except: pass\n"
spec_str += f" task = {self.interface_name}(in_file=in_file, **inputs)\n"
spec_str += (
f" assert set(task.generated_output_names) == "
f"set(['return_code', 'stdout', 'stderr'] + outputs)\n"
)
if run:
spec_str += f" res = task()\n"
spec_str += f" print('RESULT: ', res)\n"
spec_str += f" for out_nm in outputs: assert getattr(res.output, out_nm).exists()\n"
        # if tests_inp_error is not empty, then an additional test function will be created
if tests_inp_error:
spec_str += self.write_test_error(input_error=tests_inp_error)
spec_str_black = black.format_file_contents(spec_str, fast=False, mode=black.FileMode())
with open(filename_test, "w") as f:
f.write(spec_str_black)
def write_test_error(self, input_error):
"""creating a tests for incorrect or incomplete inputs
checking if the exceptions are raised
"""
spec_str = "\n\n"
spec_str += f"@pytest.mark.parametrize('inputs, error', {input_error})\n"
spec_str += f"def test_{self.interface_name}_exception(test_data, inputs, error):\n"
spec_str += f" in_file = Path(test_data) / 'test.nii.gz'\n"
spec_str += f" if inputs is None: inputs = {{}}\n"
spec_str += f" for key, val in inputs.items():\n"
spec_str += f" try: inputs[key] = eval(val)\n"
spec_str += f" except: pass\n"
spec_str += f" task = {self.interface_name}(in_file=in_file, **inputs)\n"
spec_str += f" with pytest.raises(eval(error)):\n"
spec_str += f" task.generated_output_names\n"
return spec_str
def create_doctest(self):
"""adding doctests to the interfaces"""
cmdline = self.interface_spec["doctest"].pop("cmdline")
doctest = ' """\n Example\n -------\n'
doctest += f' >>> task = {self.interface_name}()\n'
for key, val in self.interface_spec["doctest"].items():
if type(val) is str:
doctest += f' >>> task.inputs.{key} = "{val}"\n'
else:
doctest += f' >>> task.inputs.{key} = {val}\n'
doctest += ' >>> task.cmdline\n'
doctest += f" '{cmdline}'"
doctest += '\n """\n'
return doctest
def convert_input_fields(self):
"""creating fields list for pydra input spec"""
fields_pdr_dict = {}
position_dict = {}
has_template = []
for name, fld in self.nipype_input_spec.traits().items():
if name in self.TRAITS_IRREL:
continue
if name in self.interface_spec["inputs_drop"]:
continue
fld_pdr, pos = self.pydra_fld_input(fld, name)
meta_pdr = fld_pdr[-1]
if "output_file_template" in meta_pdr:
has_template.append(name)
fields_pdr_dict[name] = (name,) + fld_pdr
if pos is not None:
position_dict[name] = pos
fields_pdr_l = list(fields_pdr_dict.values())
return fields_pdr_l, has_template
def pydra_fld_input(self, field, nm):
"""converting a single nipype field to one element of fields for pydra input_spec"""
tp_pdr = self.pydra_type_converter(field, spec_type="input", name=nm)
if nm in self.interface_spec["inputs_metadata"]:
metadata_extra_spec = self.interface_spec["inputs_metadata"][nm]
else:
metadata_extra_spec = {}
if "default" in metadata_extra_spec:
default_pdr = metadata_extra_spec.pop("default")
elif getattr(field, "usedefault") and field.default is not traits.ctrait.Undefined:
default_pdr = field.default
else:
default_pdr = None
metadata_pdr = {"help_string": ""}
for key in self.INPUT_KEYS:
key_nm_pdr = self.NAME_MAPPING.get(key, key)
val = getattr(field, key)
if val is not None:
if key == "argstr" and "%" in val:
val = self.string_formats(argstr=val, name=nm)
metadata_pdr[key_nm_pdr] = val
if getattr(field, "name_template"):
template = getattr(field, "name_template")
name_source = ensure_list(getattr(field, "name_source"))
metadata_pdr["output_file_template"] = self.string_formats(
argstr=template, name=name_source[0]
)
if tp_pdr in [specs.File, specs.Directory]:
tp_pdr = str
elif getattr(field, "genfile"):
if nm in self.interface_spec["output_templates"]:
metadata_pdr["output_file_template"] = self.interface_spec["output_templates"][nm]
if tp_pdr in [
specs.File,
specs.Directory,
]: # since this is a template, the file doesn't exist
tp_pdr = str
elif nm not in self.interface_spec["output_callables"]:
raise Exception(
f"the filed {nm} has genfile=True, but no output template or callables provided"
)
metadata_pdr.update(metadata_extra_spec)
pos = metadata_pdr.get("position", None)
if default_pdr is not None and not metadata_pdr.get("mandatory", None):
return (tp_pdr, default_pdr, metadata_pdr), pos
else:
return (tp_pdr, metadata_pdr), pos
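    # Illustrative note (sketch, not part of the original converter): the tuple
    # returned above is prefixed with the field name in convert_input_fields, so a
    # converted input ends up either as (name, type, metadata) or, when a
    # non-mandatory default exists, as (name, type, default, metadata).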
def convert_output_spec(self, fields_from_template):
"""creating fields list for pydra input spec"""
fields_pdr_l = []
for name, fld in self.nipype_output_spec.traits().items():
if (
name in self.interface_spec["output_requirements"]
and name not in fields_from_template
):
fld_pdr = self.pydra_fld_output(fld, name)
fields_pdr_l.append((name,) + fld_pdr)
return fields_pdr_l
def pydra_fld_output(self, field, name):
"""converting a single nipype field to one element of fields for pydra output_spec"""
tp_pdr = self.pydra_type_converter(field, spec_type="output", name=name)
metadata_pdr = {}
for key in self.OUTPUT_KEYS:
key_nm_pdr = self.NAME_MAPPING.get(key, key)
val = getattr(field, key)
if val:
metadata_pdr[key_nm_pdr] = val
if self.interface_spec["output_requirements"][name]:
if all(
[isinstance(el, list) for el in self.interface_spec["output_requirements"][name]]
):
requires_l = self.interface_spec["output_requirements"][name]
nested_flag = True
elif all(
[
isinstance(el, (str, dict))
for el in self.interface_spec["output_requirements"][name]
]
):
requires_l = [self.interface_spec["output_requirements"][name]]
nested_flag = False
else:
Exception("has to be either list of list or list of str/dict")
metadata_pdr["requires"] = []
for requires in requires_l:
requires_mod = []
for el in requires:
if isinstance(el, str):
requires_mod.append(el)
elif isinstance(el, dict):
requires_mod += list(el.items())
metadata_pdr["requires"].append(requires_mod)
if nested_flag is False:
metadata_pdr["requires"] = metadata_pdr["requires"][0]
if name in self.interface_spec["output_templates"]:
metadata_pdr["output_file_template"] = self.interface_spec["output_templates"][name]
elif name in self.interface_spec["output_callables"]:
metadata_pdr["callable"] = self.interface_spec["output_callables"][name]
return (tp_pdr, metadata_pdr)
def function_callables(self):
if not self.interface_spec["output_callables"]:
return ""
python_functions_spec = Path(os.path.dirname(__file__)) / "../specs/callables.py"
if not python_functions_spec.exists():
raise Exception(
"specs/callables.py file is needed if output_callables in the spec files"
)
fun_str = ""
fun_names = list(set(self.interface_spec["output_callables"].values()))
fun_names.sort()
for fun_nm in fun_names:
fun = getattr(callables, fun_nm)
fun_str += inspect.getsource(fun) + "\n"
return fun_str
def pydra_type_converter(self, field, spec_type, name):
"""converting types to types used in pydra"""
if spec_type not in ["input", "output"]:
raise Exception(f"spec_type has to be input or output, but {spec_type} provided")
tp = field.trait_type
if isinstance(tp, traits.trait_types.Int):
tp_pdr = int
elif isinstance(tp, traits.trait_types.Float):
tp_pdr = float
elif isinstance(tp, traits.trait_types.Str):
tp_pdr = str
elif isinstance(tp, traits.trait_types.Bool):
tp_pdr = bool
elif isinstance(tp, traits.trait_types.Dict):
tp_pdr = dict
elif isinstance(tp, traits_extension.InputMultiObject):
if isinstance(field.inner_traits[0].trait_type, traits_extension.File):
tp_pdr = specs.MultiInputFile
else:
tp_pdr = specs.MultiInputObj
elif isinstance(tp, traits_extension.OutputMultiObject):
if isinstance(field.inner_traits[0].trait_type, traits_extension.File):
tp_pdr = specs.MultiOutputFile
else:
tp_pdr = specs.MultiOutputObj
elif isinstance(tp, traits.trait_types.List):
if isinstance(field.inner_traits[0].trait_type, traits_extension.File):
if spec_type == "input":
tp_pdr = specs.MultiInputFile
else:
tp_pdr = specs.MultiOutputFile
else:
tp_pdr = list
elif isinstance(tp, traits_extension.File):
if (
spec_type == "output" or tp.exists is True
): # TODO check the hash_file metadata in nipype
tp_pdr = specs.File
else:
tp_pdr = str
else:
tp_pdr = ty.Any
return tp_pdr
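    # For reference (summary of the branches above): traits Int -> int,
    # Float -> float, Bool -> bool, Dict -> dict; a File used as output (or an
    # input File with exists=True) -> specs.File, any other File -> str;
    # InputMultiObject of File -> specs.MultiInputFile; unrecognised types -> ty.Any.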
def string_formats(self, argstr, name):
import re
if "%s" in argstr:
argstr_new = argstr.replace("%s", f"{{{name}}}")
elif "%d" in argstr:
argstr_new = argstr.replace("%d", f"{{{name}}}")
elif "%f" in argstr:
argstr_new = argstr.replace("%f", f"{{{name}}}")
elif "%g" in argstr:
argstr_new = argstr.replace("%g", f"{{{name}}}")
elif len(re.findall("%[0-9.]+f", argstr)) == 1:
old_format = re.findall("%[0-9.]+f", argstr)[0]
argstr_new = argstr.replace(old_format, f"{{{name}:{old_format[1:]}}}")
else:
raise Exception(f"format from {argstr} is not supported TODO")
return argstr_new
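    # Illustrative examples of the rules above (argstr/name values are hypothetical):
    #   string_formats(argstr="-o %s", name="out_file")  -> "-o {out_file}"
    #   string_formats(argstr="-frac %.2f", name="frac") -> "-frac {frac:.2f}"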
FSL_MODULES = ['aroma', 'dti', 'epi', 'fix', 'maths', 'model', 'possum', 'preprocess', 'utils']
@click.command()
@click.option(
"-i",
"--interface_name",
required=True,
default="all",
help="name of the interface (name used in Nipype, e.g. BET) or all (default)"
"if all is used all interfaces from the spec file will be created",
)
@click.option(
"-m", "--module_name", required=True, help=f"name of the module from the list {FSL_MODULES}"
)
def create_pydra_spec(interface_name, module_name):
if module_name not in FSL_MODULES:
raise Exception(
f"module name {module_name} not available;" f"should be from the list {FSL_MODULES}"
)
spec_file = Path(os.path.dirname(__file__)) / f"../specs/fsl_{module_name}_param.yml"
if not spec_file.exists():
raise Exception(
f"the specification file doesn't exist for the module {module_name},"
f"create the specification file in {spec_file.parent}"
)
@functools.lru_cache()
def all_interfaces(module):
nipype_module = getattr(fsl, module)
all_specs = [el for el in dir(nipype_module) if "InputSpec" in el]
all_interf = [el.replace("InputSpec", "") for el in all_specs]
# interfaces in the spec file
with open(spec_file) as f:
spec_interf = yaml.safe_load(f).keys()
if set(all_interf) - set(spec_interf):
warnings.warn(
f"some interfaces are not in the spec file: "
f"{set(all_interf) - set(spec_interf)}, "
f"and pydra interfaces will not be created for them"
)
return spec_interf
if interface_name == "all":
interface_list = all_interfaces(module_name)
elif interface_name in all_interfaces(module_name):
interface_list = [interface_name]
else:
raise Exception(
f"interface_name has to be 'all' "
f"or a name from the list {all_interfaces(module_name)}"
)
dirname_interf = Path(__file__).parent.parent / f"pydra/tasks/fsl/{module_name}"
dirname_interf.mkdir(exist_ok=True)
for interface_el in interface_list:
converter = FSLConverter(
interface_name=interface_el,
interface_spec_file=Path(__file__).parent.parent
/ f"specs/fsl_{module_name}_param.yml",
)
converter.pydra_specs(write=True, dirname=dirname_interf)
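# Example invocation (a sketch; the script name depends on how this file is saved):
#   python <this_script>.py -m preprocess -i BET
# converts nipype's fsl.preprocess.BET and writes the pydra task plus its tests
# under pydra/tasks/fsl/preprocess/.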
if __name__ == '__main__':
create_pydra_spec()
| 41.170956
| 143
| 0.594901
|
6786cd7847fa50ff7e6fe51240d177a0c7515538
| 4,483
|
py
|
Python
|
devtools/mk/create_resources.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 3
|
2022-01-06T16:52:52.000Z
|
2022-03-07T11:30:47.000Z
|
devtools/mk/create_resources.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | null | null | null |
devtools/mk/create_resources.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 1
|
2021-10-24T01:47:22.000Z
|
2021-10-24T01:47:22.000Z
|
#!/usr/bin/env python3
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to create 'resource' files for Chromium OS factory build system.
This utility scans for *.rsrc, and creates the resource files based on the rules
defined in rsrc files.
See the URL for more details:
https://chromium.googlesource.com/chromiumos/platform/factory/+/HEAD/resources/README.md
"""
import argparse
import glob
import itertools
import logging
import os
import sys
import tarfile
RSRC_FILES = '*.rsrc'
class ResourceError(Exception):
"""All exceptions when creating resources."""
def AddResource(output, rule, args):
"""Adds one resource from given rule to output (tar) object.
output: A tarfile.TarFile instance for adding resource into.
rule: A string in SRC[:DEST] format.
args: the environment arguments for sysroot, board files, and resources.
"""
is_optional = rule.startswith('?')
if is_optional:
rule = rule[1:]
src, dest = rule.split(':') if ':' in rule else (rule, rule)
logging.info('%s => %s%s', src, dest, ' (optional)' if is_optional else '')
if os.path.isabs(src):
src_list = [os.path.join(args.sysroot, '.' + src)]
else:
src_list = [os.path.normpath(os.path.join(args.resources, src))]
if args.board_resources:
src_list += [os.path.normpath(os.path.join(args.board_resources, src))]
found = 0
for src_path in src_list:
if not os.path.exists(src_path):
continue
found += 1
logging.debug('Add: %s=>%s', src_path, dest)
output.add(src_path, dest)
if found < 1:
if is_optional:
      logging.info('skip non-existent optional resource: %s', src)
return
raise ResourceError('Failed to find input resource: %s' % src)
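# Illustration of the rule formats accepted above (hypothetical rule strings):
#   'toolkit/version'       -> packs resources/toolkit/version under the same name
#   '/usr/bin/foo:bin/foo'  -> absolute source resolved under --sysroot, stored as bin/foo
#   '?board_only.conf'      -> optional; skipped (with a log message) if the file is missing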
def CreateResource(resource, input_list, args):
"""Creates a resource file by descriptions in input_list.
resource: the name of the resource file to create.
input_list: a list of RSRC files for creating the resource file.
args: the environment arguments for sysroot, board files, and resources.
"""
logging.info('Creating resource [%s]...', resource)
with tarfile.open(os.path.join(args.output_dir, resource + '.tar'), 'w',
dereference=True) as t:
for input_file in input_list:
with open(input_file) as f:
for rule in f.readlines():
rule = rule.strip()
if rule.startswith('#') or not rule:
continue
AddResource(t, rule, args)
def CreateAllResources(args):
"""Scans and creates all resources from *.rsrc files."""
def GetResourceName(rc_path):
"""Returns the derived resource name from an rsrc file.
The rsrc file name should be in format <resource-name>.[<sub-name>*.]rsrc
resource-name will be used to construct the name of output file.
sub-name will be simply discarded - this is to help packing multiple files
from multiple import files (i.e., sharing definition files between multiple
output files).
"""
return os.path.basename(rc_path).partition('.')[0]
rc_files = glob.glob(os.path.join(args.resources, RSRC_FILES))
if args.board_resources:
rc_files += glob.glob(os.path.join(args.board_resources, RSRC_FILES))
rc_files.sort()
rc_groups = {name: list(paths) for name, paths in itertools.groupby(
rc_files, GetResourceName)}
logging.debug('rc_groups: %r', rc_groups)
for resource, input_list in rc_groups.items():
CreateResource(resource, input_list, args)
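# Illustration (hypothetical file names): installer.rsrc and installer.network.rsrc
# share the resource name 'installer', so the rules from both files are packed into
# a single <output_dir>/installer.tar.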
def main():
parser = argparse.ArgumentParser(
description=(__doc__))
parser.add_argument('--verbose', '-v', action='count', default=0,
help='Verbose output')
parser.add_argument('--sysroot', default='/',
help='directory to search for absolute path resource')
parser.add_argument('--resources', default='.',
help='path to "resources/" to search for relative files')
parser.add_argument('--board_resources',
help='BOARD_FILES_DIR/resources for relative resource')
parser.add_argument('--output_dir', default='.',
help='directory to put generated resources')
args = parser.parse_args()
logging.basicConfig(level=logging.WARNING - args.verbose * 10)
try:
CreateAllResources(args)
except Exception as e:
print('ERROR: %s' % e)
sys.exit(1)
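# Example invocation (a sketch; paths are hypothetical):
#   ./create_resources.py -v --resources resources \
#       --board_resources board/resources --output_dir build
# scans both directories for *.rsrc files and writes one <name>.tar per resource group.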
if __name__ == '__main__':
main()
| 32.963235
| 88
| 0.68124
|
ebfe22dad69d3fb9671bcdfd49387b22de140a59
| 9,446
|
py
|
Python
|
internal-enrichment/hygiene/src/hygiene.py
|
tiiibs/connectors
|
65349790813a1d49f4536662725d99d8bfa08481
|
[
"Apache-2.0"
] | 132
|
2019-06-28T23:23:18.000Z
|
2022-03-30T07:47:55.000Z
|
internal-enrichment/hygiene/src/hygiene.py
|
tiiibs/connectors
|
65349790813a1d49f4536662725d99d8bfa08481
|
[
"Apache-2.0"
] | 472
|
2019-06-26T12:14:54.000Z
|
2022-03-31T13:49:53.000Z
|
internal-enrichment/hygiene/src/hygiene.py
|
tiiibs/connectors
|
65349790813a1d49f4536662725d99d8bfa08481
|
[
"Apache-2.0"
] | 185
|
2019-07-01T09:32:14.000Z
|
2022-03-28T05:29:12.000Z
|
import os
import yaml
from pymispwarninglists import WarningLists
from pycti import OpenCTIConnectorHelper, get_config_variable
# At the moment it is not possible to map lists to their upstream path.
# Thus we need to have our own mapping here.
# Reference: https://github.com/MISP/misp-warninglists/issues/142
# To generate: grep '"name"' -r lists, and then reformat using vscode
LIST_MAPPING = {
"List of known gmail sending IP ranges": "lists/google-gmail-sending-ips/list.json",
"List of known domains to know external IP": "lists/whats-my-ip/list.json",
"Top 500 domains and pages from https://moz.com/top500": "lists/moz-top500/list.json",
"List of known Windows 10 connection endpoints": "lists/microsoft-win10-connection-endpoints/list.json",
"List of known security providers/vendors blog domain": "lists/security-provider-blogpost/list.json",
"List of known hashes with common false-positives (based on Florian Roth input list)": "lists/common-ioc-false-positive/list.json",
"Top 20 000 websites from Cisco Umbrella": "lists/cisco_top20k/list.json",
"Specialized list of IPv4 addresses belonging to common VPN providers and datacenters": "lists/vpn-ipv4/list.json",
"List of known Office 365 IP address ranges in China": "lists/microsoft-office365-cn/list.json",
"List of RFC 5735 CIDR blocks": "lists/rfc5735/list.json",
"List of RFC 5771 multicast CIDR blocks": "lists/multicast/list.json",
"CRL Warninglist": "lists/crl-ip-hostname/list.json",
"List of known GCP (Google Cloud Platform) IP address ranges": "lists/google-gcp/list.json",
"List of RFC 1918 CIDR blocks": "lists/rfc1918/list.json",
"Top 1000 website from Alexa": "lists/alexa/list.json",
"List of known Office 365 URLs": "lists/microsoft-office365/list.json",
"Hashes that are often included in IOC lists but are false positives.": "lists/ti-falsepositives/list.json",
"List of known bank domains": "lists/bank-website/list.json",
"List of known IPv6 public DNS resolvers": "lists/public-dns-v6/list.json",
"List of known google domains": "lists/google/list.json",
"List of known microsoft domains": "lists/microsoft/list.json",
"List of known Ovh Cluster IP": "lists/ovh-cluster/list.json",
"List of known domains used by automated malware analysis services & security vendors": "lists/automated-malware-analysis/list.json",
"List of known Cloudflare IP ranges": "lists/cloudflare/list.json",
"Top 10 000 websites from Cisco Umbrella": "lists/cisco_top10k/list.json",
"List of known hashes for empty files": "lists/empty-hashes/list.json",
"List of known Fastly IP address ranges": "lists/fastly/list.json",
"Common contact e-mail addresses": "lists/common-contact-emails/list.json",
"Fingerprint of trusted CA certificates": "lists/mozilla-CA/list.json",
"Covid-19 Cyber Threat Coalition's Whitelist": "lists/covid-19-cyber-threat-coalition-whitelist/list.json",
"List of known Akamai IP ranges": "lists/akamai/list.json",
"Specialized list of IPv6 addresses belonging to common VPN providers and datacenters": "lists/vpn-ipv6/list.json",
"List of known Microsoft Azure Datacenter IP Ranges": "lists/microsoft-azure/list.json",
"List of IPv6 link local blocks": "lists/ipv6-linklocal/list.json",
"List of known public DNS resolvers expressed as hostname": "lists/public-dns-hostname/list.json",
"Top 1000 websites from Cisco Umbrella": "lists/cisco_top1000/list.json",
"List of hashes for EICAR test virus": "lists/eicar.com/list.json",
"University domains": "lists/university_domains/list.json",
"List of known Office 365 IP address ranges": "lists/microsoft-office365-ip/list.json",
"Top 10K most-used sites from Tranco": "lists/tranco10k/list.json",
"List of known Amazon AWS IP address ranges": "lists/amazon-aws/list.json",
"List of known Googlebot IP ranges": "lists/googlebot/list.json",
"TLDs as known by IANA": "lists/tlds/list.json",
"Top 5000 websites from Cisco Umbrella": "lists/cisco_top5k/list.json",
"Unattributed phone number.": "lists/phone_numbers/list.json",
"List of RFC 3849 CIDR blocks": "lists/rfc3849/list.json",
"List of known Office 365 Attack Simulator used for phishing awareness campaigns": "lists/microsoft-attack-simulator/list.json",
"List of RFC 6761 Special-Use Domain Names": "lists/rfc6761/list.json",
"List of RFC 6598 CIDR blocks": "lists/rfc6598/list.json",
"List of known IPv4 public DNS resolvers": "lists/public-dns-v4/list.json",
"List of known dax30 webpages": "lists/dax30/list.json",
"List of disposable email domains": "lists/disposable-email/list.json",
"Top 1,000,000 most-used sites from Tranco": "lists/tranco/list.json",
"Valid covid-19 related domains": "lists/covid/list.json",
"Top 10K websites from Majestic Million": "lists/majestic_million/list.json",
"List of known URL Shorteners domains": "lists/url-shortener/list.json",
"Covid-19 Krassi's Whitelist": "lists/covid-19-krassi-whitelist/list.json",
"List of known Wikimedia address ranges": "lists/wikimedia/list.json",
"List of known sinkholes": "lists/sinkholes/list.json",
"Second level TLDs as known by Mozilla Foundation": "lists/second-level-tlds/list.json",
"Fingerprint of known intermedicate of trusted certificates": "lists/mozilla-IntermediateCA/list.json",
}
class HygieneConnector:
def __init__(self):
# Instantiate the connector helper from config
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
warninglists_slow_search = bool(
get_config_variable(
"HYGIENE_WARNINGLISTS_SLOW_SEARCH",
["hygiene", "warninglists_slow_search"],
config,
default=False,
)
)
self.helper.log_info(f"Warning lists slow search: {warninglists_slow_search}")
self.warninglists = WarningLists(slow_search=warninglists_slow_search)
# Create Hygiene Tag
self.label_hygiene = self.helper.api.label.create(
value="Hygiene", color="#fc0341"
)
def _process_observable(self, observable) -> str:
# Extract IPv4, IPv6 and Domain from entity data
observable_value = observable["observable_value"]
# Search in warninglist
result = self.warninglists.search(observable_value)
# Iterate over the hits
if result:
self.helper.log_info(
"Hit found for %s in warninglists" % (observable_value)
)
for hit in result:
self.helper.log_info(
"Type: %s | Name: %s | Version: %s | Descr: %s"
% (hit.type, hit.name, hit.version, hit.description)
)
# We set the score based on the number of warning list entries
if len(result) >= 5:
score = "5"
elif len(result) >= 3:
score = "10"
elif len(result) == 1:
score = "15"
else:
score = "20"
self.helper.log_info(
f"number of hits ({len(result)}) setting score to {score}"
)
self.helper.api.stix_cyber_observable.add_label(
id=observable["id"], label_id=self.label_hygiene["id"]
)
self.helper.api.stix_cyber_observable.update_field(
id=observable["id"],
input={"key": "x_opencti_score", "value": score},
)
for indicator_id in observable["indicatorsIds"]:
self.helper.api.stix_domain_object.add_label(
id=indicator_id, label_id=self.label_hygiene["id"]
)
self.helper.api.stix_domain_object.update_field(
id=indicator_id,
input={"key": "x_opencti_score", "value": score},
)
# Create external references
external_reference_id = self.helper.api.external_reference.create(
source_name="misp-warninglist",
url="https://github.com/MISP/misp-warninglists/tree/main/"
+ LIST_MAPPING[hit.name],
external_id=hit.name,
description=hit.description,
)
self.helper.api.stix_cyber_observable.add_external_reference(
id=observable["id"],
external_reference_id=external_reference_id["id"],
)
return "Observable value found on warninglist and tagged accordingly"
def _process_message(self, data) -> str:
entity_id = data["entity_id"]
observable = self.helper.api.stix_cyber_observable.read(id=entity_id)
return self._process_observable(observable)
# Start the main loop
def start(self):
self.helper.listen(self._process_message)
if __name__ == "__main__":
HygieneInstance = HygieneConnector()
HygieneInstance.start()
| 52.187845
| 137
| 0.653187
|
978f499978b1b72de6b0de2378200641a3a7591b
| 921
|
py
|
Python
|
sa/profiles/HP/Comware/get_chassis_id.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sa/profiles/HP/Comware/get_chassis_id.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sa/profiles/HP/Comware/get_chassis_id.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# HP.Comware.get_chassis_id
# ---------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.sa.profiles.Generic.get_chassis_id import Script as BaseScript
from noc.sa.interfaces.igetchassisid import IGetChassisID
class Script(BaseScript):
name = "HP.Comware.get_chassis_id"
cache = True
interface = IGetChassisID
rx_id = re.compile(r"^\s*MAC_ADDRESS\s+:\s+(?P<id>\S+)", re.IGNORECASE | re.MULTILINE)
def execute_cli(self, **kwargs):
match = self.re_search(self.rx_id, self.cli("display device manuinfo", cached=True))
mac = match.group("id")
return {"first_chassis_mac": mac, "last_chassis_mac": mac}
| 34.111111
| 92
| 0.54506
|
b3b1605b206912ee968bd3a65a6ab1469759411d
| 60,629
|
py
|
Python
|
tensorflow/python/tpu/tensor_tracer.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/tpu/tensor_tracer.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/tpu/tensor_tracer.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A utility to trace tensor values on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
from tensorflow.core.framework import summary_pb2
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tensor_tracer_flags
from tensorflow.python.tpu import tensor_tracer_report
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import training_util
from tensorflow.python.ops import nn_impl
_DEVICE_TYPE_TPU = 'tpu'
_DEVICE_TYPE_CPU = 'cpu'
_TRACE_MODE_PART_TENSOR_SIZE = 3
_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range'
_REASON_UNSAFE_OP = 'not-traced-unsafe-op'
_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op'
_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar'
_REASON_SKIP_SCALAR = 'not-traced-scalar'
_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op'
_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch'
_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape'
_REASON_SCALAR_GET_TRACED = 'traced-scalar'
_REASON_TENSOR_GET_TRACED = 'traced-tensor'
_REASON_USER_INCLUDED = 'traced-user-included'
_REASON_USER_EXCLUDED = 'not-traced-user-excluded'
_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path'
_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor'
_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op'
_OUTPUT_STREAM_ESCAPE = 'file://'
_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables'
_TRACE_FILE_NAME = 'trace.all'
_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.'
_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
_TENSOR_VALUES_CACHE = 'tensor_values_cache'
_REPLICA_ID_TAG = '#replica-id: '
_TT_SUMMARY_NORM = 'tensor_tracer_norm'
_TT_SUMMARY_MAX = 'tensor_tracer_max'
_TT_SUMMARY_MIN = 'tensor_tracer_min'
_TT_SUMMARY_MEAN = 'tensor_tracer_mean'
_TT_SUMMARY_VAR = 'tensor_tracer_var'
_TT_SUMMARY_SIZE = 'tensor_tracer_size'
_TT_SUMMARY_TAG = 'tensor_tracer_summary'
_TT_SUMMARY_MAX_QUEUE = 100
def tensor_tracepoint(tensor, checkpoint_name):
"""Adds a checkpoint with the given checkpoint name for the given tensor.
The tensor will be added to the list of tensors that will be traced by the
tensor tracer.
Args:
tensor: the tensor object for which the tracing is requested.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
      checkpoint identifier are compared in model comparison.
Returns:
The provided tensor.
"""
tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)
tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION,
(tensor, checkpoint_name))
return tensor
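# Illustrative usage (a sketch; `logits` stands for any tensor in the user's model):
#   logits = tensor_tracepoint(logits, 'logits')
# The tensor is returned unchanged; it is only registered in the tracer collection.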
def keras_layer_tracepoint(layer, checkpoint_name):
"""An interface for adding the tensor outputs of a keras layer.
Encapsulates tensor_tracepoint.
Args:
layer: A keras layer.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
      checkpoint identifier are compared in model comparison.
Returns:
The provided layer.
"""
try:
outputs = layer.output
if tensor_util.is_tensor(outputs):
tensor_tracepoint(outputs, '%s' % (checkpoint_name))
else:
idx = 0
for output_tensor in outputs:
        if tensor_util.is_tensor(output_tensor):
tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
idx += 1
except AttributeError:
pass
except RuntimeError:
pass
return layer
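# Illustrative usage (a sketch; `dense` stands for any tf.keras layer):
#   dense = keras_layer_tracepoint(dense, 'dense_1')
# A single output is registered as 'dense_1'; multiple outputs are registered as
# 'dense_1_0', 'dense_1_1', and so on.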
def _trace_files_need_precreated(output_dir):
"""Return True if trace files must be pre-created by users."""
if not output_dir.startswith('/'):
return False
if len(output_dir) < 5:
return False
if output_dir[2] != 'n':
return False
if output_dir[3] != 's':
return False
if output_dir[1] != 'c':
return False
if output_dir[4] != '/':
return False
return True
class TensorTracer(object):
"""A software construct for tracing tensor values in a TF graph on TPU.
This utility is disabled by default. It can be enabled by setting
the TENSOR_TRACER_FLAGS env variable as:
export TENSOR_TRACER_FLAGS="--enable=1"
If it is enabled, it will trace the output tensor values of
selected Ops in the graph. It has two outputs: (1) the traces and (2)
a report. The traces are dumped to a specified local file on the TPU
host. The report is printed to the log.info of the TPU job.
By passing options via the env variable, users can change:
(1) the trace mode (e.g., detecting NaN/Inf, printing partial or
full tensor values)
(2) which Ops to be traced (via op.name or op.type)
(3) output trace file path.
"""
# The set of graphs that are rewritten by tensor tracer.
_traced_graphs = set()
@staticmethod
def is_enabled():
"""Returns True if TensorTracer is enabled."""
return tensor_tracer_flags.TTParameters().is_enabled()
@staticmethod
def check_device_type(device_type):
"""Checks if the given device type is valid."""
if device_type not in [_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU]:
raise ValueError('Invalid device_type "%s"'%device_type)
@staticmethod
def check_trace_mode(device_type, trace_mode):
"""Checks if the given trace mode work on the given device type.
Args:
device_type: Device type, TPU, GPU, CPU.
trace_mode: Tensor tracer trace mode.
Raises:
ValueError: If the given trace mode is not supported for the device.
"""
if trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
if device_type != _DEVICE_TYPE_TPU:
raise ValueError('Device_type "%s" is not yet supported for '
'trace mode "%s"' % (device_type, trace_mode))
@staticmethod
def loop_cond_op(op):
return op.type in ('LoopCond', 'RefLoopCond')
@staticmethod
def while_loop_op(op):
"""Returns true if op is one of the special ops of in a while loop.
Args:
op: A tf.Operation.
Returns:
True if the given op is one of [Switch, Merge, Enter, Exit,
NextIteration, LoopCond], which are all building blocks for TF while
loops.
"""
return (control_flow_util.IsLoopSwitch(op) or
control_flow_util.IsLoopMerge(op) or
control_flow_util.IsLoopEnter(op) or
control_flow_util.IsLoopExit(op) or
TensorTracer.loop_cond_op(op) or
op.type in ('RefNextIteration', 'NextIteration'))
@staticmethod
def unsafe_op(op):
"""Returns True if this op is not safe to be traced."""
if control_flow_util.IsInCond(op):
return True
# Reasons for not including following op types:
# Assign: cause incorrect result with CPU tracing.
if op.type in ['Assign']:
return True
return False
@staticmethod
def device_mismatch(device_type, op):
if device_type == _DEVICE_TYPE_TPU:
# pylint: disable=protected-access
return tpu._TPU_REPLICATE_ATTR not in op.node_def.attr
# pylint: enable=protected-access
return False
@staticmethod
def unsafe_scalar_trace(op):
"""Return true if scalar output tensor from Op is not safe to be traced."""
# Tracing the following causes cycle in the graph on TPU.
if op.type in ['LoopCond', 'Enter', 'Merge', 'Const',
'Switch', 'Less', 'ReadVariableOp']:
return True
# Tracing the following will cause casting-issue
# with the norm tracing mode or other compilation issues on CPU.
if op.type in ['VarHandleOp', 'IteratorToStringHandle',
'IteratorGetNext', 'OneShotIterator',
'IteratorV2', 'MakeIterator',
'BatchDatasetV2', 'MapDataset',
'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset',
'Placeholder', 'PlaceholderWithDefault', 'StridedSlice']:
return True
return False
def _less_interesting_op(self, op):
"""Returns True if the given op is not an interesting one to be traced."""
# If flag is set to include less interesting ops, then include everything.
if self._parameters.include_less_interesting_ops:
return False
    # Following ops are highly unlikely to cause bugs.
return op.type in ['Const', 'Identity', 'Cast', 'Shape']
@staticmethod
def reason(op_idx, details):
"""Returns reason why the Op at op_idx is traced or not."""
return '%d %s'%(op_idx, details)
def __init__(self):
"""Initializes a TensorTracer.
Sets the various member fields from the flags (if given) or the defaults.
"""
self._replica_id = None
self._tt_config = tensor_tracer_report.TensorTracerConfig()
self._parameters = tensor_tracer_flags.TTParameters()
self._included_op_full_names = set()
self._host_call_fn = {}
self._cache_tensors = {}
def _create_or_get_tensor_values_cache(self, cache_name, graph=None,
num_tensors=None,
num_signatures=None):
"""Creates a variable as the cache to store intermediate tensor values.
Args:
cache_name: Name to be given to the cache (an instance of tf.variable).
graph: Tensorflow graph.
num_tensors: The number of traced tensors.
num_signatures: The number of signatures, statistics to be collected.
Returns:
A ref to newly created or existing cache with dimensions
num_tensors x num_signatures
Raises:
ValueError: If missing a parameter to create the cache.
"""
if cache_name not in self._cache_tensors:
if graph is None:
raise ValueError('Graph must be provided at cache creation.')
if num_tensors is None:
raise ValueError('num_tensors must be provided at cache creation.')
if num_signatures is None:
raise ValueError('num_signatures must be provided at cache creation.')
graph = graph or ops.get_default_graph()
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
self._cache_tensors[cache_name] = variable_scope.get_variable(
cache_name + '_' + _TENSOR_VALUES_CACHE,
shape=[num_tensors, num_signatures],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
_COMPACT_TRACE_ENTRY_INIT_VALUE),
trainable=False,
use_resource=True,
collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
return self._cache_tensors[cache_name]
def _add_replica_id_to_graph(self):
"""Adds nodes for computing the replica ID to the graph."""
if self._tt_config.num_replicas:
with ops.control_dependencies(None):
# Uses None as dependency to run outside of TPU graph rewrites.
self._replica_id = tpu_ops.tpu_replicated_input(
list(range(self._tt_config.num_replicas)),
name='tt_replica_id')
else:
self._replica_id = 'unknown'
def _inside_op_range(self, idx):
"""Return True if the given index is inside the selected range."""
if idx < self._parameters.op_range[0]:
return False
return (self._parameters.op_range[1] < 0 or
idx <= self._parameters.op_range[1])
def _is_user_included_op(self, op):
"""Checks whether the op is included in the tensor tracer flags.
Args:
op: tf Operation
Returns:
True, if the op is included.
An op is included if:
- Its op name is given in included_opnames
- Its op type is given in included_optypes
- The op is at most _trace_ops_before_included hops before an included op
- The op is at most _trace_ops_after_included hops after an included op
"""
def _is_op_or_any_neighbor_included(op, check_before=0, check_after=0):
"""Helper function to check if op is included or not."""
if op.name in self._included_op_full_names:
return True
for opname_re in self._parameters.included_opname_re_list:
if opname_re.match(op.name):
self._included_op_full_names.add(op.name)
return True
for optype_re in self._parameters.included_optype_re_list:
if optype_re.match(op.type):
self._included_op_full_names.add(op.name)
return True
if check_after > 0:
for out_tensor in op.outputs:
for consumer in out_tensor.consumers():
if _is_op_or_any_neighbor_included(consumer, check_after - 1, 0):
self._included_op_full_names.add(op.name)
return True
if check_before > 0:
for input_tensor in op.inputs:
if _is_op_or_any_neighbor_included(input_tensor.op,
0,
check_before - 1):
self._included_op_full_names.add(op.name)
return True
return False
# check_after and check_before are swapped below, as below operation
# checks the distance from an arbitrary op to included ops.
return _is_op_or_any_neighbor_included(
op, self._parameters.trace_ops_after_included,
self._parameters.trace_ops_before_included)
def _is_user_excluded_op(self, op):
for opname_re in self._parameters.excluded_opname_re_list:
if opname_re.match(op.name):
return True
for optype_re in self._parameters.excluded_optype_re_list:
if optype_re.match(op.type):
return True
return False
def _signature_types(self):
"""Returns a dictionary holding the order of signatures in the cache for the selected trace mode."""
if self._parameters.trace_mode in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS]):
return {self._parameters.trace_mode: 0}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
return {_TT_SUMMARY_NORM: 0, _TT_SUMMARY_MAX: 1, _TT_SUMMARY_MIN: 2,
_TT_SUMMARY_MEAN: 3, _TT_SUMMARY_VAR: 4, _TT_SUMMARY_SIZE: 5}
return {}
def _num_signature_dimensions(self):
return len(self._signature_types())
def _use_tensor_values_cache(self):
"""Returns True if immediate tensors should be first saved to a cache."""
if self._parameters.trace_mode not in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
]):
return False
if (self._parameters.trace_dir and
_trace_files_need_precreated(self._parameters.trace_dir)):
return True
return self._parameters.use_compact_trace
def _save_tensor_value_to_cache_op(self, cache_idx, updates):
"""Returns an op that will save the given updates to an entry in the cache.
Args:
cache_idx: The cache index of the tensor within the cache.
updates: A dictionary of the signature updates.
Returns:
Cache update operation.
"""
# state_ops.scatter_update allows updates only along the first dimension.
    # Make a compact array by concatenating different signatures, and update
# them all together.
sorted_update = []
signature_indices = self._signature_types()
for _, val in sorted(updates.items(),
key=lambda item: signature_indices[item[0]]):
sorted_update.append(val)
cache = self._create_or_get_tensor_values_cache(self._parameters.trace_mode)
indices = constant_op.constant([cache_idx])
updates = array_ops.concat(sorted_update, axis=0)
updates = array_ops.reshape(updates, [1, self._num_signature_dimensions()])
return state_ops.scatter_update(cache, indices, updates).op
def _preprocess_traced_tensor(self, tensor):
"""Computes NAN/Norm/Max on TPUs before sending to CPU.
Args:
tensor: The tensor to be traced.
Returns:
A tensor that should be input to the trace_function.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _detect_nan_inf(tensor):
"""Trace function for detecting any NaN/Inf in the tensor."""
if tensor.dtype.is_floating:
mask = math_ops.reduce_any(
gen_math_ops.logical_or(
gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
output_tensor = control_flow_ops.cond(mask,
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(0.0))
else:
output_tensor = constant_op.constant(0.0)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _compute_signature(tensor, tf_op, cast_to_f32=True):
if cast_to_f32:
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = tf_op(tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _show_size(tensor):
      # Records the size of the tensor. Not all sizes are known at compile time,
      # and different replicas sometimes get different sizes, so collect the size
      # here to be used when merging replica data.
tsize = _compute_signature(tensor, array_ops.size, cast_to_f32=False)
# Cast to float32, so that it can be placed into same cache with other
# signatures.
return math_ops.cast(tsize, dtypes.float32)
def _show_max(tensor, cast_to_f32=True):
# returns -inf for empty tensor
return _compute_signature(tensor, math_ops.reduce_max, cast_to_f32)
def _show_min(tensor, cast_to_f32=True):
# returns inf for empty tensor
return _compute_signature(tensor, math_ops.reduce_min, cast_to_f32)
def _show_norm(tensor, cast_to_f32=True):
# returns 0 for empty tensor
return _compute_signature(tensor, linalg_ops.norm, cast_to_f32)
def _show_mean_and_variance(tensor, cast_to_f32=True):
if cast_to_f32:
tensor = math_ops.cast(tensor, dtypes.float32)
# returns nan for empty tensor
mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])
# The shape has to be 1. Set it if it does not have the information.
mean = array_ops.reshape(mean, [1])
var = array_ops.reshape(var, [1])
return mean, var
def _show_max_abs(tensor):
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = math_ops.reduce_max(math_ops.abs(tensor))
zero = constant_op.constant(0, dtypes.float32)
output_tensor = gen_math_ops.maximum(zero, output_tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _detect_inf_nan_producer(tensor):
"""Checks if the tensor is the first NaN/Inf tensor in the computation path."""
if tensor.op.inputs:
inp_check = [
_detect_nan_inf(inp_tensor) for inp_tensor in tensor.op.inputs
]
is_any_input_inf_nan = math_ops.add_n(inp_check)
else:
        is_any_input_inf_nan = constant_op.constant(0, dtypes.float32)
is_current_tensor_inf_nan = _detect_nan_inf(tensor)
      # An op is a NaN/Inf producer only when all of its inputs are NaN/Inf free
      # (is_any_input_inf_nan = 0) and its own output contains NaN/Inf
      # (is_current_tensor_inf_nan = 1); the difference below is 1 only for producers.
is_nan_producer = is_current_tensor_inf_nan - is_any_input_inf_nan
is_nan_producer = math_ops.reduce_any(is_nan_producer > 0)
return is_nan_producer
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return {self._parameters.trace_mode: _detect_inf_nan_producer(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:
return {self._parameters.trace_mode: _detect_nan_inf(tensor)}
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return {self._parameters.trace_mode: tensor}
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR):
return {self._parameters.trace_mode: tensor}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NORM:
return {self._parameters.trace_mode: _show_norm(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_MAX_ABS:
return {self._parameters.trace_mode: _show_max_abs(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
tensor = math_ops.cast(tensor, dtypes.float32)
tsize = _show_size(tensor)
tnorm = _show_norm(tensor, cast_to_f32=False)
tmax = _show_max(tensor, cast_to_f32=False)
tmin = _show_min(tensor, cast_to_f32=False)
tmean, tvar = _show_mean_and_variance(tensor, cast_to_f32=False)
return {_TT_SUMMARY_NORM: tnorm, _TT_SUMMARY_MAX: tmax,
_TT_SUMMARY_MIN: tmin, _TT_SUMMARY_MEAN: tmean,
_TT_SUMMARY_VAR: tvar, _TT_SUMMARY_SIZE: tsize}
raise RuntimeError(
'Tensor trace fun for %s is not yet implemented'
% self._parameters.trace_mode)
def _make_tensor_trace_fun(self, tensor_name, tensor_trace_order):
"""Makes the tensor tracing function called by outside compilation.
Args:
tensor_name: name of the tensor being traced.
tensor_trace_order: TensorTraceOrder object holding tensorname to id map.
Returns:
A function to be passed as the first argument to outside compilation.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
"""Prints a tensor value to a file.
Args:
tensor_name: name of the tensor being traced.
num_elements: number of elements to print (-1 means print all).
tensor: the tensor needs to be returned.
output_tensor: the tensor needs to be printed.
Returns:
The same tensor passed via the "tensor" argument.
Raises:
ValueError: If tensor_name is not already in
          tensor_trace_order.tensorname_idx_map.
"""
if self._parameters.is_brief_mode():
if tensor_name not in tensor_trace_order.tensorname_idx_map:
raise ValueError(
'Tensor name %s is not in the tensorname_idx_map'%tensor_name)
        msg = '%d'%tensor_trace_order.tensorname_idx_map[tensor_name]
else:
msg = '"%s"'%tensor_name
if self._parameters.trace_dir:
output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
return logging_ops.print_v2(msg, array_ops.shape(output_tensor),
'@', self._replica_id,
'\n', output_tensor, '\n',
summarize=num_elements,
output_stream=output_stream)
def _show_part_tensor(tensor):
"""Trace function for printing part of the tensor."""
return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE,
tensor, tensor)
def _show_full_tensor(tensor):
"""Trace function for printing the entire tensor."""
return _print_tensor(tensor_name, -1, tensor, tensor)
def _show_full_tensors(tensor):
"""Prints the full tensor values for the tensors that are _trace_stack_size hops away from a given tensor."""
def _get_distance_k_tensors(k_before=0):
"""Returns the tensors that are at most k_before hops away from the tensor."""
if k_before < 0:
return []
visited_tensors = {tensor: 0}
visitor_queue = [tensor]
head = 0
while head < len(visitor_queue):
current_tensor = visitor_queue[head]
head += 1
distance = visited_tensors[current_tensor]
if distance == k_before:
break
for input_tensor in current_tensor.op.inputs:
if input_tensor in visited_tensors:
continue
visitor_queue.append(input_tensor)
visited_tensors[input_tensor] = distance + 1
return visitor_queue
tensors_to_print = _get_distance_k_tensors(
self._parameters.trace_stack_size)
print_ops = [_print_tensor(t.name, -1, t, t) for t in tensors_to_print]
with ops.control_dependencies(print_ops):
return constant_op.constant(True)
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return _show_full_tensors
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return _show_part_tensor
# The input tensor has a shape of "[1]" for TRACE_MODE_NAN_INF,
# TRACE_MODE_NORM, and TRACE_MODE_MAX_ABS, as related computations are
# performed within TPUs and only their results are transferred to CPU.
# Simply, print the full tensor for these trace modes.
if self._parameters.trace_mode in [
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
]:
return _show_full_tensor
raise RuntimeError('Tensor trace fun for %s is not yet implemented'
%self._parameters.trace_mode)
def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):
"""Returns True if we should not trace Op.
Args:
op_id: Topological index of the op.
op: tf.Operation
ops_in_exec_path: Set of operations that are in the execution path.
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
True if the op should not be traced, false otherwise.
"""
if TensorTracer.while_loop_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))
return True
if TensorTracer.unsafe_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))
return True
if TensorTracer.device_mismatch(self._tt_config.device_type, op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))
return True
if op not in ops_in_exec_path:
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))
return True
if not self._inside_op_range(op_id):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))
return True
if self._less_interesting_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))
return True
if self._is_user_included_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if self._is_user_excluded_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
return False
def _skip_tensor(self, op_id, out_tensor, report_handler):
"""Returns True if we should not trace out_tensor.
Args:
op_id: Topological index of the op producing tensor.
out_tensor: tf.Tensor
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
True if the tensor should not be traced, false otherwise.
"""
# Skips a tensor if the tensor has a non-numeric type.
# Note: we cannot use check_ops.is_numeric_tensor(out_tensor)
# because it also excludes tensors with dtypes, bool, and
# float32_ref, which we actually want to trace.
non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,
dtypes.string])
if out_tensor.dtype in non_numeric_tensor_types:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))
return True
# Skip a tensor if it feeds a special while loop op.
if [consumer for consumer in out_tensor.consumers() if
TensorTracer.while_loop_op(consumer)]:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))
return True
if self._is_user_included_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if self._is_user_excluded_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
if not out_tensor.get_shape().is_fully_defined():
# If trace mode is nan-inf, norm or max, then the tensor will be reduced
# to a scalar before the outside compilation call.
if self._parameters.trace_mode in [
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
]:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))
return True
rank = len(out_tensor.shape)
if rank < 1:
# scalar
if self._parameters.trace_scalar_ops:
if TensorTracer.unsafe_scalar_trace(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))
return True
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))
return True
else:
# tensor
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
def _filter_execution_path_operations(self, operations, fetches):
"""Returns the set of ops in the execution path to compute given fetches."""
# If no fetch provided, then return all operations.
if fetches is None:
return set(operations)
# Convert to list, if a single element is provided.
if not isinstance(fetches, (list, tuple)):
fetches = [fetches]
# If a tensor is given as fetch, convert it to op.
op_fetches = []
for fetch in fetches:
if isinstance(fetch, ops.Operation):
op_fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
op_fetches.append(fetch.op)
else:
raise RuntimeError('Given fetch:%s is neither a tensor nor an op.'
%fetch)
execution_path_operations = set(op_fetches)
traverse_stack = list(op_fetches)
while True:
if not traverse_stack:
break
head_op = traverse_stack.pop()
input_ops = [tensor_input.op for tensor_input in head_op.inputs]
input_ops.extend(head_op.control_inputs)
for input_op in input_ops:
if input_op not in execution_path_operations:
# Filter out loop condition operations, tracing them causes a cycle.
# Trace only the loop-body.
if TensorTracer.loop_cond_op(input_op):
continue
execution_path_operations.add(input_op)
traverse_stack.append(input_op)
return execution_path_operations
def _determine_and_instrument_traced_tensors(self, graph_order,
ops_in_exec_path,
tensor_trace_points,
report_handler):
"""Determines the tensors to trace and instruments the trace details.
Args:
graph_order: graph_order tuple containing graph (tf.graph), operations
(list of operations), op_to_idx (op id mapping), (tensors) list of
tensors, tensor_to_idx (tensor id mapping), contains_cycle (whether
there is a cycle in the graph), topological_order_or_cycle (list of ops
in topological order or list of ops creating a cycle).
ops_in_exec_path: Set of ops in the execution path.
      tensor_trace_points: Collection of programmatic tensor trace points.
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
List of tensors to be traced.
"""
traced_tensors = []
checkpoint_operations = set([tensor.op
for (tensor, _) in tensor_trace_points])
for op_id, op in enumerate(graph_order.operations):
if checkpoint_operations and op not in checkpoint_operations:
continue
if self._skip_op(op_id, op, ops_in_exec_path, report_handler):
continue
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
if not self._skip_tensor(op_id, out_tensor, report_handler):
traced_tensors.append(out_tensor)
return traced_tensors
def _check_trace_files(self):
"""Checks if any requirements for trace files are satisfied."""
if not self._parameters.trace_dir:
# traces will be written to stderr. No need to check trace files.
return
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
# Output files are handled by tf.summary operations, no need to precreate
# them.
return
if _trace_files_need_precreated(self._parameters.trace_dir):
for replica_id in range(0, self._tt_config.num_replicas):
trace_file_path = os.path.join(
self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX) + '%d'%replica_id
if not gfile.Exists(trace_file_path):
raise RuntimeError(
'%s must be pre-created with the '
'appropriate properties.'%trace_file_path)
else:
if not gfile.Exists(self._parameters.trace_dir):
gfile.MkDir(self._parameters.trace_dir)
if not gfile.Exists(self._parameters.trace_dir):
raise RuntimeError('Failed to create %s'%self._parameters.trace_dir)
def _determine_trace_and_create_report(self, graph, ops_in_exec_path):
"""Work needs to be done prior to TPU or CPU tracing.
Args:
graph: tf.graph
ops_in_exec_path: Set of operations in the execution path.
Returns:
An instance of tensor_tracer_report.TensorTraceOrder, containing list of
tensors to be traced with their topological order information.
"""
self._check_trace_files()
graph_order = tensor_tracer_report.sort_tensors_and_ops(graph)
tensor_trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION)
report_handler = tensor_tracer_report.TTReportHandle()
traced_tensors = self._determine_and_instrument_traced_tensors(
graph_order, ops_in_exec_path, tensor_trace_points, report_handler)
tensor_trace_order = tensor_tracer_report.TensorTraceOrder(graph_order,
traced_tensors)
num_signatures = self._num_signature_dimensions()
if num_signatures:
self._create_or_get_tensor_values_cache(self._parameters.trace_mode,
graph,
len(traced_tensors),
num_signatures)
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
report_proto = report_handler.create_report_proto(self._tt_config,
self._parameters,
tensor_trace_order,
tensor_trace_points,
self._signature_types())
report_handler.write_report_proto(report_proto, self._parameters)
else:
report_handler.create_report(self._tt_config, self._parameters,
tensor_trace_order, tensor_trace_points)
return tensor_trace_order
def _create_host_call(self):
return self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY
def _generate_flush_cache_op(self, num_replicas, on_tpu):
"""Generates an Op that will flush the cache to file.
Args:
num_replicas: total number of replicas.
on_tpu: if the graph is executed on TPU.
Returns:
The Op to flush the cache to file.
"""
def _flush_fun(cache, replica_id):
"""Flushes the cache to a file corresponding to replica_id."""
def _f(file_index):
"""Generates a func that flushes the cache to a file."""
def _print_cache():
"""Flushes the cache to a file."""
replica_str = ('%d' % file_index)
if self._parameters.trace_dir:
output_path = (os.path.join(self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX)
+ replica_str)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
new_step_line = _REPLICA_ID_TAG + replica_str
print_ops = []
for i in range(self._num_signature_dimensions()):
print_ops.append(logging_ops.print_v2(
new_step_line, '\n',
cache[:, i], '\n',
summarize=-1,
output_stream=output_stream))
with ops.control_dependencies(print_ops):
return constant_op.constant(0).op
return _print_cache
def _eq(file_index):
return math_ops.equal(replica_id, file_index)
flush_op_cases = {}
for i in range(num_replicas):
flush_op_cases[_eq(i)] = _f(i)
# Each replica needs to determine where to write their output.
# To do this, we check if replica_id is 0, then 1, ..., and then
# num_replicas - 1 statically; and return the corresponding static file
# name. We cannot simply set the file name in python, as replica_id is
# only known during tf runtime, and we cannot create dynamic filenames.
return control_flow_ops.case(flush_op_cases, exclusive=True)
cache = self._create_or_get_tensor_values_cache(
self._parameters.trace_mode)
if on_tpu:
flush_op = tpu.outside_compilation(_flush_fun,
cache.value(), self._replica_id)
else:
flush_op = _flush_fun(cache.value(), self._replica_id)
with ops.control_dependencies([flush_op]):
reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
dtype=cache.dtype,
shape=cache.shape)
assign_op = state_ops.assign(cache, reset_value).op
with ops.control_dependencies([assign_op]):
return constant_op.constant(0).op
def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu):
"""Flushes the intermediate tensor values in the graph to the cache.
Args:
tensor_fetches: list of tensor results returned by the model_fn.
op_fetches: list of ops that are returned by the model_fn, e.g., train_op.
on_tpu: if the graph is executed on TPU.
Returns:
An identical copy of tensor_fetches.
"""
# Add a dependency to op and tensor fetches to make sure that all tracing
# ops are executed before flushing trace results.
with ops.control_dependencies(op_fetches +
[tensor.op for tensor in tensor_fetches]):
flush_cache_op = self._generate_flush_cache_op(
self._tt_config.num_replicas, on_tpu)
return control_flow_ops.tuple(tensor_fetches,
control_inputs=[flush_cache_op])
def _process_tensor_fetches(self, tensor_fetches):
"""Check that tensor_fetches is not empty and have valid tensors."""
# If none or empty list.
if tensor_fetches is None:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'None.')
if not isinstance(tensor_fetches, (list, tuple)):
tensor_fetches = [tensor_fetches]
elif not tensor_fetches:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'empty list.')
fetches = []
for fetch in tensor_fetches:
if isinstance(fetch, ops.Tensor):
fetches.append(fetch)
else:
raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)
return fetches
def _process_op_fetches(self, op_fetches):
"""Check that op_fetches have valid ops."""
if op_fetches is None:
return []
if not isinstance(op_fetches, (list, tuple)):
op_fetches = [op_fetches]
fetches = []
for fetch in op_fetches:
if isinstance(fetch, ops.Operation):
fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
fetches.append(fetch.op)
else:
logging.warning('Ignoring the given op_fetch:%s, which is not an op.' %
fetch)
return fetches
def _convert_fetches_to_input_format(self, input_fetches, current_fetches):
"""Changes current_fetches' format, so that it matches input_fetches."""
if isinstance(input_fetches, ops.Tensor):
if len(current_fetches) != 1:
raise RuntimeError('Tensor tracer input/output fetches do not match.')
return current_fetches[0]
else:
      if len(current_fetches) != len(input_fetches):
raise RuntimeError('Tensor tracer input/output fetches do not match.')
elif isinstance(input_fetches, tuple):
return tuple(current_fetches)
else:
return current_fetches
def _get_op_control_flow_context(self, op):
"""Returns the control flow of the given op.
Args:
op: tf.Operation for which the control flow context is requested.
Returns:
      op_control_flow_context: the control flow context of the given op. If the
        operation type is LoopExit, returns the outer control flow context.
"""
# pylint: disable=protected-access
op_control_flow_context = op._control_flow_context
# pylint: enable=protected-access
if control_flow_util.IsLoopExit(op):
op_control_flow_context = op_control_flow_context.outer_context
return op_control_flow_context
def _prepare_host_call_fn(self, processed_t_fetches, op_fetches):
"""Creates a host call function that will write the cache as tb summary.
Args:
processed_t_fetches: List of tensor provided to session.run.
op_fetches: List of operations provided to session.run.
Raises:
ValueError if trace_dir is not set.
"""
if self._parameters.trace_dir is None:
raise ValueError('Provide a trace_dir for tensor tracer in summary mode. '
'--trace_dir=/model/dir')
def _write_cache(concatenated_cache_tensor, step):
"""Writes the cache as tensor summary."""
summary_metadata = summary_pb2.SummaryMetadata(
display_name=_TT_SUMMARY_TAG,
summary_description='',
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name='tensor_tracer'))
# TODO(deveci): Parametrize max_queue, so that flushing op can be called
# less frequently.
      # Setting max_queue to 100 appears to be safe even when the number of
      # iterations is much lower, as the destructor of the writer will flush it.
with summary.create_file_writer_v2(
self._parameters.trace_dir,
max_queue=_TT_SUMMARY_MAX_QUEUE).as_default():
return summary.write(
_TT_SUMMARY_TAG,
concatenated_cache_tensor,
metadata=summary_metadata,
step=step[0])
step = array_ops.reshape(training_util.get_or_create_global_step(), [1])
self._host_call_fn = {}
local_cache = self._create_or_get_tensor_values_cache(
self._parameters.trace_mode)
host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches]
with ops.control_dependencies(host_call_deps):
# Convert the 2D cache shape from num_tensors x num_signatures
# to 3D shape of 1 x num_tensors x num_signatures, so that the after host
# call the dimensions will be num_cores x num_tensors x num_signatures
cache = array_ops.reshape(local_cache.value(),
[1, -1, self._num_signature_dimensions()])
self._host_call_fn['tensor_tracer_host_call'] = (_write_cache,
[cache, step])
def host_call_deps_and_fn(self):
return self._host_call_fn
def _trace_execution(self, graph,
tensor_fetches,
op_fetches=None,
on_tpu=True):
"""Commong tracing function for both CPU and TPUs.
The caller function should set device_type, num_replicas,
num_replicas_per_host, num_hosts and replica_id before calling
_trace_execution.
Args:
graph: the graph of Ops executed on the TPU.
      tensor_fetches: a (list, tuple, or a single object) of tensor fetches
        returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
on_tpu: True if executing on TPU.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
def _cast_unsupported_dtypes(tensor):
"""Casts tensor to a supported type."""
if tensor.dtype.__eq__(dtypes.int64):
# outside-compilation doesn't support int64 input yet.
return math_ops.cast(tensor, dtypes.int32)
if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(
dtypes.float16):
# Since host can't handle bf16, convert tensor to f32.
return math_ops.cast(tensor, dtypes.float32)
return tensor
TensorTracer.check_device_type(self._tt_config.device_type)
TensorTracer.check_trace_mode(self._tt_config.device_type,
self._parameters.trace_mode)
# Check in_tensor_fetches, and op_fetches and convert them to lists.
processed_t_fetches = self._process_tensor_fetches(tensor_fetches)
op_fetches = self._process_op_fetches(op_fetches)
all_fetches = op_fetches + [tensor.op for tensor in processed_t_fetches]
# Filter out the operations that won't be executed.
# if fetches=None, then ops_in_exec_path = set(operations)
exec_op_set = self._filter_execution_path_operations(graph.get_operations(),
all_fetches)
# Write report file, and determine the traced tensors.
tensor_trace_order = self._determine_trace_and_create_report(
graph, exec_op_set)
tensor_fetch_set = set(processed_t_fetches)
tracing_ops = []
# pylint: disable=protected-access
current_control_flow_context = graph._get_control_flow_context()
# pylint: enable=protected-access
sorted_exec_op_list = list(exec_op_set)
sorted_exec_op_list.sort(key=lambda op: op.name)
# Trace ops only if they are in the execution path.
for op in sorted_exec_op_list:
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
tensor_name = out_tensor.name
if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:
continue
# Create the list of consumers before calling _preprocess_traced_tensor.
# Otherwise, adding control input below, will introduce a cycle in the
# graph.
consumers = out_tensor.consumers()
# Not all consumers may be in the exec path. Filter out the consumers
# to keep the graph simpler.
consumers = [cop for cop in consumers if cop in exec_op_set]
# If there is no consumer of the tensor, there is no need to trace it;
# unless the tensor itself is one of the fetches.
is_a_fetched_tensor = out_tensor in tensor_fetch_set
if (not consumers) and (not is_a_fetched_tensor):
continue
op_control_flow_context = self._get_op_control_flow_context(op)
# pylint: disable=protected-access
graph._set_control_flow_context(op_control_flow_context)
# pylint: enable=protected-access
processed_tensors = self._preprocess_traced_tensor(out_tensor)
if on_tpu:
for signature in processed_tensors.keys():
processed_tensors[signature] = _cast_unsupported_dtypes(
processed_tensors[signature])
if self._use_tensor_values_cache():
cache_idx = tensor_trace_order.tensorname_to_cache_idx[tensor_name]
trace_op = self._save_tensor_value_to_cache_op(cache_idx,
processed_tensors)
else:
def tpu_wrap_trace_fn(tensor, out_tensor_name):
"""Wraps the trace_fn with outside compilation if on TPUs."""
tensor_trace_fn = self._make_tensor_trace_fun(out_tensor_name,
tensor_trace_order)
if on_tpu:
return tpu.outside_compilation(tensor_trace_fn, tensor)
else:
return tensor_trace_fn(tensor)
def conditional_trace_fn(predicate_tensor, out_tensor, trace_fn,
out_tensor_name):
"""Creates a cond op that traces the out_tensor if predicate is satisfied."""
return control_flow_ops.cond(
predicate_tensor, lambda: trace_fn(out_tensor, out_tensor_name),
lambda: constant_op.constant(False)).op
if len(processed_tensors) != 1:
raise RuntimeError('Multiple stats are only allowed in compact '
'mode.')
          # Collecting multiple statistics is only supported in the summary
          # mode that uses the compact format (self._use_tensor_values_cache
          # = true). Non-compact mode currently allows a single stat per tensor.
          processed_out_tensor = list(processed_tensors.values())[0]
if self._parameters.is_conditional_trace:
trace_op = conditional_trace_fn(processed_out_tensor, out_tensor,
tpu_wrap_trace_fn, tensor_name)
elif self._parameters.included_cores:
should_print = constant_op.constant(False)
for core in self._parameters.included_cores:
should_print = gen_math_ops.logical_or(
should_print, gen_math_ops.equal(self._replica_id, core))
trace_op = conditional_trace_fn(should_print, processed_out_tensor,
tpu_wrap_trace_fn, tensor_name)
else:
trace_op = tpu_wrap_trace_fn(processed_out_tensor, tensor_name)
if is_a_fetched_tensor:
tracing_ops.append(trace_op)
continue
# Add it to all consumers, as some consumers may not be executed if they
# are in a control flow.
for consumer_op in consumers:
# pylint: disable=protected-access
consumer_op._add_control_input(trace_op)
# pylint: enable=protected-access
# pylint: disable=protected-access
graph._set_control_flow_context(current_control_flow_context)
# pylint: enable=protected-access
if tracing_ops:
# If we are tracing a fetched tensor, their dependency is stored in
# tracing_ops.
processed_t_fetches = control_flow_ops.tuple(processed_t_fetches,
control_inputs=tracing_ops)
if self._use_tensor_values_cache():
if self._create_host_call() and on_tpu:
self._prepare_host_call_fn(processed_t_fetches, op_fetches)
else:
processed_t_fetches = self._flush_tensor_values_cache(
processed_t_fetches, op_fetches, on_tpu=on_tpu)
# processed_t_fetches is a list at this point. Convert it to the same
# format as given in tensor_fetches.
return self._convert_fetches_to_input_format(tensor_fetches,
processed_t_fetches)
def trace_tpu(self, graph,
tensor_fetches,
op_fetches=None,
num_replicas=None,
num_replicas_per_host=None,
num_hosts=None):
"""Traces the tensors generated by TPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the TPU.
      tensor_fetches: a (list, tuple, or a single object) of tensor fetches
        returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
num_replicas: number of replicas used on the TPU.
num_replicas_per_host: number of replicas per TPU host.
num_hosts: total number of TPU hosts.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If num_replicas_per_host > 8.
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_TPU
self._tt_config.num_replicas = num_replicas
self._tt_config.num_replicas_per_host = num_replicas_per_host
self._tt_config.num_hosts = num_hosts
if self._tt_config.num_replicas is not None:
if self._tt_config.num_replicas_per_host is None:
self._tt_config.num_replicas_per_host = 8
if self._tt_config.num_hosts is None:
self._tt_config.num_hosts = (
num_replicas // self._tt_config.num_replicas_per_host +
(num_replicas % self._tt_config.num_replicas_per_host > 0))
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
self._add_replica_id_to_graph()
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=True)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
def trace_cpu(self, graph, tensor_fetches, op_fetches=None):
"""Traces the tensors generated by CPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the CPU.
      tensor_fetches: a (list, tuple, or a single object) of tensor fetches
        returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_CPU
self._tt_config.num_replicas = 1
self._tt_config.num_replicas_per_host = 1
self._tt_config.num_hosts = 1
self._replica_id = 0
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=False)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
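# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of wiring TensorTracer into a CPU graph,
# based only on the trace_cpu() signature defined above. Tracing flags and
# trace_dir are assumed to be configured elsewhere (e.g. through the tensor
# tracer flags); the names below are placeholders, not a verified recipe.
#
#   graph = ops.Graph()
#   with graph.as_default():
#     x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
#     y = math_ops.reduce_sum(2.0 * x)
#     tracer = TensorTracer()
#     # trace_cpu returns a copy of the fetches with tracing deps added.
#     traced_y = tracer.trace_cpu(graph, tensor_fetches=y)
#   # The traced fetch is then passed to session.run in place of `y`.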
| 42.103472
| 116
| 0.663478
|
7268a93b653f4f6560499c60351ef846046b26a0
| 3,236
|
py
|
Python
|
gobigger/hyper/configs/config_2f2s.py
|
jayyoung0802/GoBigger
|
f7cf14ee4208e041295035342ecee70026f268d9
|
[
"Apache-2.0"
] | 189
|
2021-10-08T07:55:10.000Z
|
2022-03-31T23:49:43.000Z
|
gobigger/hyper/configs/config_2f2s.py
|
jayyoung0802/GoBigger
|
f7cf14ee4208e041295035342ecee70026f268d9
|
[
"Apache-2.0"
] | 25
|
2021-11-01T06:59:30.000Z
|
2022-03-22T11:22:27.000Z
|
gobigger/hyper/configs/config_2f2s.py
|
jayyoung0802/GoBigger
|
f7cf14ee4208e041295035342ecee70026f268d9
|
[
"Apache-2.0"
] | 28
|
2021-10-14T12:23:14.000Z
|
2022-03-31T23:49:45.000Z
|
# Scenario: clone-ball merging and clone-ball eating (split to merge, split to eat)
server_default_config = dict(
team_num=2,
player_num_per_team=2,
map_width=300,
map_height=300,
match_time=10,
state_tick_per_second=10, # frame
action_tick_per_second=5, # frame
collision_detection_type='precision',
save_video=False,
save_quality='high', # ['high', 'low']
save_path='',
save_bin=False, # save bin to go-explore
load_bin=False,
load_bin_path='',
    load_bin_frame_num='all',
    jump_to_frame_file='',
manager_settings=dict(
# food setting
food_manager=dict(
num_init=180, # initial number
num_min=180, # Minimum number
num_max=225, # Maximum number
refresh_time=2, # Time interval (seconds) for refreshing food in the map
refresh_num=0, # The number of refreshed foods in the map each time
ball_settings=dict( # The specific parameter description can be viewed in the ball module
radius_min=2,
radius_max=2,
),
),
# thorns setting
thorns_manager=dict(
num_init=1, # initial number
num_min=1, # Minimum number
num_max=2, # Maximum number
refresh_time=6, # Time interval (seconds) for refreshing thorns in the map
refresh_num=0, # The number of refreshed thorns in the map each time
ball_settings=dict( # The specific parameter description can be viewed in the ball module
radius_min=12,
radius_max=20,
vel_max=100,
eat_spore_vel_init=10,
eat_spore_vel_zero_time=1,
)
),
# player setting
player_manager=dict(
ball_settings=dict( # The specific parameter description can be viewed in the ball module
acc_max=100,
vel_max=25,
radius_min=3,
radius_max=300,
radius_init=3,
part_num_max=16,
on_thorns_part_num=10,
on_thorns_part_radius_max=20,
split_radius_min=10,
eject_radius_min=10,
recombine_age=20,
split_vel_init=30,
split_vel_zero_time=1,
stop_zero_time=1,
size_decay_rate=0.00005,
given_acc_weight=10,
)
),
# spore setting
spore_manager=dict(
ball_settings=dict( # The specific parameter description can be viewed in the ball module
radius_min=3,
radius_max=3,
vel_init=250,
vel_zero_time=0.3,
spore_radius_init=20,
)
)
),
custom_init=dict(
food=[], # only position and radius
thorns=[[300, 300, 16]], # only position and radius
spore=[], # only position and radius
clone=[[80, 100, 16, '0', '0'], [130, 100, 10, '1', '0'],
[130, 130, 12, '2', '1'], [300, 300, 3, '3', '1']],
),
obs_settings=dict(
with_spatial=True,
with_speed=False,
with_all_vision=False,
),
)
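# A minimal sketch (not from the original config) of overriding a few top-level
# fields for a quick local run. The helper below and the derived config name are
# illustrative assumptions; GoBigger's own tooling may merge configs differently.
def _override(config, **kwargs):
    """Return a shallow copy of `config` with the given top-level keys replaced."""
    merged = dict(config)
    merged.update(kwargs)
    return merged
# Example: a shorter match on a smaller map, keeping every nested setting as-is.
quick_test_config = _override(server_default_config, match_time=5,
                              map_width=200, map_height=200)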
| 35.173913
| 102
| 0.542954
|
b486343e3e524177322949219cae4c7b4728f679
| 163
|
py
|
Python
|
analysis/test.py
|
njchiang/analogy-fmri
|
90a6870a7c15dc71d3ea193b6dfea91fc7295108
|
[
"MIT"
] | 2
|
2019-12-11T02:46:36.000Z
|
2021-07-02T08:29:16.000Z
|
analysis/test.py
|
njchiang/analogy-fmri
|
90a6870a7c15dc71d3ea193b6dfea91fc7295108
|
[
"MIT"
] | null | null | null |
analysis/test.py
|
njchiang/analogy-fmri
|
90a6870a7c15dc71d3ea193b6dfea91fc7295108
|
[
"MIT"
] | null | null | null |
import os
from fmri.analogy_rsa import *
from fmri.analogy_utils import *
def main():
assert True, "something's broken"
if __name__ == "__main__":
main()
| 18.111111
| 37
| 0.705521
|
4c7476e36b1f3d0f009a8671bdb4f20dd8780ff5
| 2,933
|
py
|
Python
|
parlai/scripts/build_candidates.py
|
omry/ParlAI
|
61703c7b76dce45bc7f7282b20a35be64c6a0880
|
[
"MIT"
] | 9
|
2020-01-17T09:34:00.000Z
|
2021-11-19T07:46:52.000Z
|
parlai/scripts/build_candidates.py
|
omry/ParlAI
|
61703c7b76dce45bc7f7282b20a35be64c6a0880
|
[
"MIT"
] | 5
|
2019-12-29T07:52:39.000Z
|
2022-03-12T00:10:03.000Z
|
parlai/scripts/build_candidates.py
|
omry/ParlAI
|
61703c7b76dce45bc7f7282b20a35be64c6a0880
|
[
"MIT"
] | 2
|
2020-01-28T01:41:52.000Z
|
2020-12-27T07:22:07.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Build the candidate responses for a retrieval model.
Examples
--------
.. code-block:: shell
python build_candidates.py -t convai2 --outfile /tmp/cands.txt
"""
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
from parlai.utils.misc import TimeLogger
import random
import tempfile
def build_cands(opt):
# create repeat label agent and assign it to the specified task
if opt['numthreads'] > 1:
# Broken in hogwild mode. Just fall back to single processing mode
opt['numthreads'] = 1
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
if opt['outfile'] is None:
outfile = tempfile.mkstemp(
prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt'
)[1]
else:
outfile = opt['outfile']
if opt.get('num_examples', -1) == -1:
num_examples = world.num_examples()
else:
num_examples = opt['num_examples']
log_timer = TimeLogger()
print('[ starting to build candidates from task.. (ex:' + str(num_examples) + ')]')
print('[ saving output to {} ]'.format(outfile))
cands = []
for _ in range(num_examples):
world.parley()
# We get the acts of the first agent, which is the teacher.
acts = world.get_acts()[0]
if isinstance(acts, dict):
# We turn into a batch of 1 example, in case batching is being used.
acts = [acts]
for a in acts:
candidate = a.get('labels', a.get('eval_labels', None))
if candidate is not None:
candidate = candidate[0]
cands.append(candidate)
if log_timer.time() > opt['log_every_n_secs']:
text, _log = log_timer.log(world.total_parleys, world.num_examples())
print(text)
if world.epoch_done():
print('EPOCH DONE')
break
    with open(outfile, 'w') as fw:
        fw.write('\n'.join(cands))
def main():
random.seed(42)
# Get command line arguments
parser = ParlaiParser()
parser.add_argument(
'-n',
'--num-examples',
default=-1,
type=int,
help='Total number of exs to convert, -1 to convert all examples',
)
parser.add_argument(
'-of',
'--outfile',
default=None,
type=str,
help='Output file where to save, by default will be created in /tmp',
)
parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
parser.set_defaults(datatype='train:evalmode')
opt = parser.parse_args()
build_cands(opt)
if __name__ == '__main__':
main()
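# Illustrative follow-up sketch (not part of ParlAI): reading the candidate file
# written by build_cands() back into a list, e.g. to feed a fixed-candidates
# retrieval evaluation. The path below is a placeholder.
#
#   with open('/tmp/cands.txt') as f:
#       candidates = [line.rstrip('\n') for line in f if line.strip()]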
| 30.552083
| 87
| 0.617457
|
39ce4fa6784d926edf82192f9146f5b179fec43f
| 24,564
|
py
|
Python
|
safe_control_gym/controllers/lqr/ilqr.py
|
catgloss/safe-control-gym
|
b3f69bbed8577f64fc36d23677bf50027e991b2d
|
[
"MIT"
] | null | null | null |
safe_control_gym/controllers/lqr/ilqr.py
|
catgloss/safe-control-gym
|
b3f69bbed8577f64fc36d23677bf50027e991b2d
|
[
"MIT"
] | null | null | null |
safe_control_gym/controllers/lqr/ilqr.py
|
catgloss/safe-control-gym
|
b3f69bbed8577f64fc36d23677bf50027e991b2d
|
[
"MIT"
] | null | null | null |
"""Linear Quadratic Regulator (LQR)
[1] https://studywolf.wordpress.com/2016/02/03/the-iterative-linear-quadratic-regulator-method/
[2] https://arxiv.org/pdf/1708.09342.pdf
Example:
run ilqr on cartpole balance:
python3 experiments/main.py --func test --tag ilqr_pendulum --algo ilqr --task cartpole
run ilqr on quadrotor stabilization:
python3 experiments/main.py --func test --tag ilqr_quad --algo ilqr --task quadrotor --q_lqr 0.1
"""
import os
import numpy as np
from termcolor import colored
from matplotlib.ticker import FormatStrFormatter
from safe_control_gym.envs.env_wrappers.record_episode_statistics import RecordEpisodeStatistics, VecRecordEpisodeStatistics
from safe_control_gym.utils.logging import ExperimentLogger
from safe_control_gym.controllers.base_controller import BaseController
from safe_control_gym.controllers.lqr.lqr_utils import *
from safe_control_gym.envs.benchmark_env import Cost, Task
class iLQR(BaseController):
"""Linear quadratic regulator.
Attributes:
env (gym.Env): environment for the task.
Q, R (np.array): cost weight matrix.
x_0, u_0 (np.array): equilibrium state & input.
gain (np.array): input gain matrix.
"""
def __init__(
self,
env_func,
# model args
q_lqr=[1],
r_lqr=[1],
discrete_dynamics=1,
# runner args
deque_size=10,
eval_batch_size=1,
# Task
task: Task = Task.STABILIZATION,
task_info=None,
episode_len_sec=10,
# iLQR args
max_iterations=15,
lamb_factor=10,
lamb_max=1000,
epsilon=0.01,
# shared/base args
output_dir="./results/temp/",
verbose=True,
random_init=True,
ctrl_freq=240,
pyb_freq=240,
save_data=False,
data_dir=None,
plot_traj=False,
plot_dir=None,
save_plot=False,
**kwargs):
"""Example of docstring on the __init__ method.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Args:
param1 (str): Description of `param1`.
param2 (:obj:`int`, optional): Description of `param2`. Multiple
lines are supported.
param3 (:obj:`list` of :obj:`str`): Description of `param3`.
"""
# All params/args (lazy hack).
for k, v in locals().items():
if k != "self" and k != "kwargs" and "__" not in k:
self.__dict__[k] = v
# Task.
self.env_func = env_func
self.ctrl_freq = ctrl_freq
self.pyb_freq = pyb_freq
self.deque_size = deque_size
self.task = Task(task)
self.task_info = task_info
self.episode_len_sec = episode_len_sec
self.discrete_dynamics = discrete_dynamics
# iLQR iterations.
self.max_iterations = max_iterations
# iLQR policy update parameters. See [1] for details.
self.lamb_factor = lamb_factor # Factor for scaling lambda
self.lamb_max = lamb_max # Maximum lambda
self.epsilon = epsilon # Tolerance for convergence
# Stop iteration (to make sure that subsequent iteration number not
# exceeding the first one)
self.stop_iteration = False
        # Plot trajectory.
        self.plot_traj = plot_traj
        # Randomize initial state.
        self.random_init = random_init
# Save plot.
self.save_plot = save_plot
# Plot output directory.
self.plot_dir = plot_dir
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
# Save data.
self.save_data = save_data
# Data output directory.
self.data_dir = data_dir
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# Logging.
self.logger = ExperimentLogger(output_dir)
# Verbose.
self.verbose = verbose
def close(self):
"""Cleans up resources.
"""
self.env.close()
self.logger.close()
def run_ilqr(self, render=False, logging=False):
"""Run iLQR to iteratively update policy for each time step k
Args:
render (bool): Flag to save frames for visualization.
logging (bool): Flag to log results.
Returns:
ilqr_eval_results (dict): Dictionary containing the results from
each iLQR iteration.
"""
# Snapshot env state
# state_dict = self.env.state_dict()
# Initialize iteration logging variables.
ite_returns, ite_lengths, ite_data, frames = [], [], {}, []
# Initialize iteration and step counter.
self.ite_counter = 0
self.k = 0
# Initialize step size
self.lamb = 1.0
# Set update unstable flag to False
self.update_unstable = False
# Loop through iLQR iterations
while self.ite_counter < self.max_iterations:
# Current goal.
if self.task == Task.STABILIZATION:
current_goal = self.x_0
elif self.task == Task.TRAJ_TRACKING:
current_goal = self.x_0[self.k]
# Compute input.
action = self.select_action(self.env.state, self.k)
# Save rollout data.
if self.k == 0:
# Initialize state and input stack.
state_stack = self.env.state
input_stack = action
goal_stack = current_goal
# Print initial state.
print(colored("initial state: " + get_arr_str(self.env.state), "green"))
if self.ite_counter == 0:
self.init_state = self.env.state
else:
# Save state and input.
state_stack = np.vstack((state_stack, self.env.state))
input_stack = np.vstack((input_stack, action))
goal_stack = np.vstack((goal_stack, current_goal))
# Step forward.
obs, reward, done, info = self.env.step(action)
# Update step counter.
self.k += 1
# print("step", k, "done", done)
# Print out.
if self.verbose and self.k % 100 == 0:
print(colored("episode: %d step: %d" % (self.ite_counter, self.k), "green"))
print("state: " + get_arr_str(self.env.state))
print("action: " + get_arr_str(self.env.state) + "\n")
# Save frame for visualization.
if render:
self.env.render()
frames.append(self.env.render("rgb_array"))
# Save data and update policy if iteration is finished.
if done:
# Push last state and input to stack.
# Last input is not really used.
state_stack = np.vstack((state_stack, self.env.state))
# input_stack = np.vstack((input_stack, action))
# goal_stack = np.vstack((goal_stack, current_goal))
# Update iteration return and length lists.
assert "episode" in info
ite_returns.append(info["episode"]["r"])
ite_lengths.append(info["episode"]["l"])
ite_data["ite%d_state" % self.ite_counter] = state_stack
ite_data["ite%d_input" % self.ite_counter] = input_stack
# Print iteration reward.
print(colored("final state: " + get_arr_str(self.env.state), "green"))
print(colored("iteration %d reward %.4f" %
(self.ite_counter, info["episode"]["r"]), "green"))
print(colored("--------------------------", "green"))
# Break if the first iteration is not successful
if self.task == Task.STABILIZATION:
if self.ite_counter == 0 and not info["goal_reached"]:
print(colored("The initial policy might be unstable. "
+ "Break from iLQR updates.", "red"))
break
# Maximum episode length.
self.num_steps = np.shape(input_stack)[0]
self.episode_len_sec = self.num_steps * self.stepsize
print(colored("Maximum episode length: %d steps!" % (self.num_steps), "blue"))
print(np.shape(input_stack), np.shape(self.gains_fb))
# import ipdb; ipdb.set_trace()
# Check if cost is increased and update lambda correspondingly
delta_reward = np.diff(ite_returns[-2:])
if self.ite_counter == 0:
# Save best iteration.
print("Save iteration gains. Best iteration %d" % self.ite_counter)
self.best_iteration = self.ite_counter
self.input_ff_best = np.copy(self.input_ff)
self.gains_fb_best = np.copy(self.gains_fb)
# Update controller gains
self.update_policy(state_stack, input_stack)
# Initialize improved flag.
self.prev_ite_improved = False
elif delta_reward < 0.0 or self.update_unstable:
# If cost is increased, increase lambda
self.lamb *= self.lamb_factor
# Reset feedforward term and controller gain to that from
# the previous iteration.
print("Cost increased by %.2f. " % -delta_reward
+ "Set feedforward term and controller gain to that "
"from the previous iteration. "
"Increased lambda to %.2f." % self.lamb)
print("Current policy is from iteration %d." % self.best_iteration)
self.input_ff = np.copy(self.input_ff_best)
self.gains_fb = np.copy(self.gains_fb_best)
# Set improved flag to False.
self.prev_ite_improved = False
# Break if maximum lambda is reached.
if self.lamb > self.lamb_max:
print(colored("Maximum lambda reached.", "red"))
self.lamb = self.lamb_max
# Reset update_unstable flag to False.
self.update_unstable = False
elif delta_reward >= 0.0:
# If cost is reduced, reduce lambda.
# Smoother convergence if not scaling down lambda.
# self.lamb /= self.lamb_factor
# Save feedforward term and gain and state and input stacks.
print("Save iteration gains. Best iteration %d" % self.ite_counter)
self.best_iteration = self.ite_counter
self.input_ff_best = np.copy(self.input_ff)
self.gains_fb_best = np.copy(self.gains_fb)
# Check consecutive reward increment (cost decrement).
if delta_reward < self.epsilon and self.prev_ite_improved:
# Cost converged.
print(colored("iLQR cost converged with a tolerance "
+ "of %.2f." % self.epsilon, "yellow"))
break
# Set improved flag to True.
self.prev_ite_improved = True
# Update controller gains
self.update_policy(state_stack, input_stack)
# Reset iteration and step counter.
self.ite_counter += 1
self.k = 0
# Reset environment.
print("Reset environment.")
self.reset_env()
# Post analysis.
if self.plot_traj or self.save_plot or self.save_data:
analysis_data = post_analysis(goal_stack, state_stack,
input_stack, self.env, 0,
self.ep_counter,
self.plot_traj,
self.save_plot,
self.save_data,
self.plot_dir, self.data_dir)
# Collect evaluation results.
ite_lengths = np.asarray(ite_lengths)
ite_returns = np.asarray(ite_returns)
if logging:
msg = "****** Evaluation ******\n"
msg += "eval_ep_length {:.2f} +/- {:.2f} | " + \
"eval_ep_return {:.3f} +/- {:.3f}\n".format(
ite_lengths.mean(), ite_lengths.std(), ite_returns.mean(),
ite_returns.std())
self.logger.info(msg + "\n")
ilqr_eval_results = {
"ite_returns": ite_returns,
"ite_lengths": ite_lengths,
"ite_data": ite_data
}
if len(frames) > 0:
ilqr_eval_results["frames"] = frames
return ilqr_eval_results
def update_policy(self, state_stack, input_stack):
"""One-line description.
Args:
state_stack (np.array): States from previous rollout.
input_stack (np.array): Inputs from previous rollout.
"""
print(colored("UPDATE POLICY", "blue"))
# Get symbolic loss function which also contains the necessary Jacobian
# and Hessian of the loss w.r.t. state and input.
loss = self.model.loss
# Initialize backward pass.
state_k = state_stack[-1]
input_k = self.env.U_GOAL
if self.task == Task.STABILIZATION:
x_goal = self.x_0
elif self.task == Task.TRAJ_TRACKING:
x_goal = self.x_0[-1]
loss_k = loss(x=state_k,
u=input_k,
Xr=x_goal,
Ur=self.env.U_GOAL,
Q=self.Q,
R=self.R)
s = loss_k["l"].toarray()
Sv = loss_k["l_x"].toarray().transpose()
Sm = loss_k["l_xx"].toarray().transpose()
# Backward pass.
for k in reversed(range(self.num_steps)):
print(k, self.num_steps, np.shape(state_stack), np.shape(input_stack), np.shape(self.gains_fb))
# Get current operating point.
state_k = state_stack[k]
input_k = input_stack[k]
# Linearized dynamics about (x_k, u_k).
df_k = self.model.df_func(state_k, input_k)
Ac_k, Bc_k = df_k[0].toarray(), df_k[1].toarray()
Ad_k, Bd_k = discretize_linear_system(Ac_k, Bc_k, self.model.dt)
# Get symbolic loss function that includes the necessary Jacobian
# and Hessian of the loss w.r.t. state and input.
if self.task == Task.STABILIZATION:
x_goal = self.x_0
elif self.task == Task.TRAJ_TRACKING:
x_goal = self.x_0[k]
loss_k = loss(x=state_k,
u=input_k,
Xr=x_goal,
Ur=self.env.U_GOAL,
Q=self.Q,
R=self.R)
# Quadratic approximation of cost.
q = loss_k["l"].toarray() # l
Qv = loss_k["l_x"].toarray().transpose() # dl/dx
Qm = loss_k["l_xx"].toarray().transpose() # ddl/dxdx
Rv = loss_k["l_u"].toarray().transpose() # dl/du
Rm = loss_k["l_uu"].toarray().transpose() # ddl/dudu
Pm = loss_k["l_xu"].toarray().transpose() # ddl/dudx
# Control dependent terms of cost function.
g = Rv + Bd_k.transpose().dot(Sv)
G = Pm + Bd_k.transpose().dot(Sm.dot(Ad_k))
H = Rm + Bd_k.transpose().dot(Sm.dot(Bd_k))
# Trick to make sure H is well-conditioned for inversion
if not (np.isinf(np.sum(H)) or np.isnan(np.sum(H))):
H = (H + H.transpose()) / 2
H_eval, H_evec = np.linalg.eig(H)
H_eval[H_eval < 0] = 0.0
H_eval += self.lamb
H_inv = np.dot(H_evec, np.dot(np.diag(1.0 / H_eval), H_evec.T))
# Update controller gains.
duff = -H_inv.dot(g)
K = -H_inv.dot(G)
# Update control input.
input_ff_k = input_k + duff[:, 0] - K.dot(state_k)
self.input_ff[:, k] = input_ff_k
self.gains_fb[k] = K
# Update s variables for time step k.
Sm = Qm + Ad_k.transpose().dot(Sm.dot(Ad_k)) + \
K.transpose().dot(H.dot(K)) + \
K.transpose().dot(G) + G.transpose().dot(K)
Sv = Qv + Ad_k.transpose().dot(Sv) + \
K.transpose().dot(H.dot(duff)) + K.transpose().dot(g) + \
G.transpose().dot(duff)
s = q + s + 0.5 * duff.transpose().dot(H.dot(duff)) + \
duff.transpose().dot(g)
else:
self.update_unstable = True
print(colored("Policy update unstable. Terminate update.", "red"))
def select_action(self, x, k):
"""Control input u = -K x.
Args:
x (np.array): Current state of the system.
k (int): Current time step.
Returns:
action (np.array): Action computed based on current policy.
"""
if self.ite_counter == 0:
# Compute gain for the first iteration.
# action = -self.gain @ (x - self.x_0) + self.u_0
if self.task == Task.STABILIZATION:
gains_fb = -self.gain
input_ff = self.gain @ self.x_0 + self.u_0
elif self.task == Task.TRAJ_TRACKING:
self.gain = compute_lqr_gain(self.model, self.x_0[k],
self.u_0, self.Q, self.R,
self.discrete_dynamics)
gains_fb = -self.gain
input_ff = self.gain @ self.x_0[k] + self.u_0
else:
print(colored("Incorrect task specified.", "red"))
# Compute action
action = gains_fb.dot(x) + input_ff
# Save gains and feedforward term
if self.k == 0:
self.gains_fb = gains_fb.reshape(1, self.model.nu, self.model.nx)
self.input_ff = input_ff.reshape(self.model.nu, 1)
else:
self.gains_fb = np.append(self.gains_fb, gains_fb.reshape(1, self.model.nu, self.model.nx), axis=0)
self.input_ff = np.append(self.input_ff, input_ff.reshape(self.model.nu, 1), axis=1)
else:
print(k, self.gains_fb[k])
action = self.gains_fb[k].dot(x) + self.input_ff[:, k]
return action
def init_env(self):
self.env = self.env_func(randomized_init=self.random_init,
cost=Cost.QUADRATIC,
randomized_inertial_prop=False,
episode_len_sec=self.episode_len_sec,
task=self.task,
task_info=self.task_info,
ctrl_freq=self.ctrl_freq,
pyb_freq=self.pyb_freq
)
self.env = RecordEpisodeStatistics(self.env, self.deque_size)
# Controller params.
self.model = self.env.symbolic
self.Q = get_cost_weight_matrix(self.q_lqr, self.model.nx)
self.R = get_cost_weight_matrix(self.r_lqr, self.model.nu)
self.env.set_cost_function_param(self.Q, self.R)
self.env.reset()
# Linearize at operating point (equilibrium for stabilization).
self.x_0, self.u_0 = self.env.X_GOAL, self.env.U_GOAL
if self.task == Task.STABILIZATION:
self.gain = compute_lqr_gain(self.model, self.x_0, self.u_0,
self.Q, self.R, self.discrete_dynamics)
# Control stepsize.
self.stepsize = self.model.dt
def reset_env(self):
'''Reset environment between iLQR iterations.'''
print(colored("Set maximum episode length to %.3f" % self.episode_len_sec, "blue"))
self.env = self.env_func(init_state=self.init_state,
randomized_init=False,
cost=Cost.QUADRATIC,
randomized_inertial_prop=False,
episode_len_sec=self.episode_len_sec,
task=self.task,
task_info=self.task_info,
ctrl_freq=self.ctrl_freq,
pyb_freq=self.pyb_freq
)
self.env = RecordEpisodeStatistics(self.env, self.deque_size)
# Controller params.
self.model = self.env.symbolic
self.Q = get_cost_weight_matrix(self.q_lqr, self.model.nx)
self.R = get_cost_weight_matrix(self.r_lqr, self.model.nu)
self.env.set_cost_function_param(self.Q, self.R)
self.env.reset()
# Linearize at operating point (equilibrium for stabilization).
self.x_0, self.u_0 = self.env.X_GOAL, self.env.U_GOAL
def run(self, n_episodes=1, render=False, logging=False, verbose=False, use_adv=False):
"""Runs evaluation with current policy.
Args:
render (bool): Flag to save frames for visualization.
logging (bool): Flag to log results.
Returns:
eval_results (dict): Dictionary containing returns and data for each
evaluation trial.
"""
# Initialize logging variables.
ep_returns, ep_lengths, ep_fulldata, frames = [], [], {}, []
# Loop through episode.
for self.ep_counter in range(self.eval_batch_size):
# Initialize new environment for the test trial.
self.init_env()
# Run iLQR for the particular initial condition.
ilqr_eval_results = self.run_ilqr(render=render, logging=logging)
# Save the results from the last iteration for evaluation.
ep_returns.append(ilqr_eval_results["ite_returns"][-1])
ep_lengths.append(ilqr_eval_results["ite_lengths"][-1])
ep_fulldata["run%d_data"
% self.ep_counter] = ilqr_eval_results["ite_data"]
if "frames" in ilqr_eval_results:
frames.append(ilqr_eval_results["frames"][-1])
# Print episode reward.
print(colored("Test Run %d reward %.4f" % (self.ep_counter, ep_returns[-1]), "yellow"))
print(colored("==========================\n", "yellow"))
# Save reward
if self.save_data:
np.savetxt(self.data_dir + "test%d_rewards.csv" % self.ep_counter, np.array([ep_returns[-1]]), delimiter=',', fmt='%.8f')
# Collect evaluation results.
ep_lengths = np.asarray(ep_lengths)
ep_returns = np.asarray(ep_returns)
# Log data.
if logging:
msg = "****** Evaluation ******\n"
msg += "eval_ep_length {:.2f} +/- {:.2f} | " + \
"eval_ep_return {:.3f} +/- {:.3f}\n".format(
ep_lengths.mean(), ep_lengths.std(), ep_returns.mean(),
ep_returns.std())
self.logger.info(msg + "\n")
# Save evaluation results.
# Note: To retrieve the state and input trajectories, use the following
# eval_results["ep_fulldata"]["run#_data"]["ite#_state"]
# eval_results["ep_fulldata"]["run#_data"]["ite#_input"]
eval_results = {
"ep_returns": ep_returns,
"ep_lengths": ep_lengths,
"ep_fulldata": ep_fulldata
}
# Save frames.
if frames is not None and len(frames) > 0:
eval_results["frames"] = frames
return eval_results
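# Standalone sketch (not part of the controller above) of the Levenberg-
# Marquardt-style conditioning used in update_policy(): symmetrize H, clip
# negative eigenvalues, add the current lambda, and invert in the eigenbasis.
# Factored out here only to make the backward-pass trick easy to test in isolation.
def _regularized_inverse(H, lamb):
    """Return an inverse of H kept well-conditioned via eigenvalue clipping plus lambda."""
    H = (H + H.transpose()) / 2
    H_eval, H_evec = np.linalg.eig(H)
    H_eval[H_eval < 0] = 0.0
    H_eval += lamb
    return np.dot(H_evec, np.dot(np.diag(1.0 / H_eval), H_evec.T))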
| 39.11465
| 137
| 0.532202
|
393dffcf9e04df00974024a1d02d23daa2a05bb8
| 2,956
|
py
|
Python
|
docs/conf.py
|
dmyersturnbull/sauronx
|
5c0cd4a0602dcd3224f799701f95f48d5ddf2fc0
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
dmyersturnbull/sauronx
|
5c0cd4a0602dcd3224f799701f95f48d5ddf2fc0
|
[
"Apache-2.0"
] | 27
|
2021-02-22T06:49:46.000Z
|
2022-03-14T09:15:55.000Z
|
docs/conf.py
|
dmyersturnbull/sauronx
|
5c0cd4a0602dcd3224f799701f95f48d5ddf2fc0
|
[
"Apache-2.0"
] | null | null | null |
"""
Sphinx config file.
Uses several extensions to get API docs and sourcecode.
https://www.sphinx-doc.org/en/master/usage/configuration.html
"""
from pathlib import Path
from typing import Optional, Type, TypeVar
import tomlkit
# This assumes that we have the full project root above, containing pyproject.toml
_root = Path(__file__).parent.parent.absolute()
_toml = tomlkit.loads((_root / "pyproject.toml").read_text(encoding="utf8"))
T = TypeVar("T")
def find(key: str, default: Optional[T] = None, as_type: Type[T] = str) -> Optional[T]:
"""
Gets a value from pyproject.toml, or a default.
Args:
key: A period-delimited TOML key; e.g. ``tools.poetry.name``
default: Default value if any node in the key is not found
as_type: Convert non-``None`` values to this type before returning
Returns:
The value converted to ``as_type``, or ``default`` if it was not found
"""
at = _toml
for k in key.split("."):
at = at.get(k)
if at is None:
return default
return as_type(at)
# Basic information, used by Sphinx
# Leave language as None unless you have multiple translations
language = None
project = find("tool.poetry.name")
version = find("tool.poetry.version")
release = version
author = ", ".join(find("tool.poetry.authors", as_type=list))
# Copyright string (for documentation)
# It's not clear whether we're supposed to, but we'll add the license
copyright = find("tool.tyrannosaurus.sources.copyright").strip("'")
_license = find("tool.tyrannosaurus.sources.doc_license")
_license_url = find("tool.tyrannosaurus.sources.doc_license_url")
# Load extensions
# These should be in docs/requirements.txt
# Napoleon is bundled in Sphinx, so we don't need to list it there
# NOTE: 'autoapi' here refers to sphinx-autoapi
# See https://sphinx-autoapi.readthedocs.io/
extensions = [
"autoapi.extension",
"sphinx.ext.napoleon",
"sphinx_copybutton",
"sphinx_rtd_theme",
]
master_doc = "index"
napoleon_include_special_with_doc = True
autoapi_type = "python"
autoapi_dirs = [str(_root / project)]
autoapi_keep_files = True
autoapi_python_class_content = "both"
autoapi_member_order = "groupwise"
autoapi_options = ["private-members", "undoc-members", "special-members"]
# The vast majority of Sphinx themes are unmaintained
# This includes the commonly used alabaster theme
# The readthedocs theme is pretty good anyway
# These can be specific to the theme, or processed by Sphinx directly
# https://www.sphinx-doc.org/en/master/usage/configuration.html
html_theme = "sphinx_rtd_theme"
html_theme_options = dict(
collapse_navigation=False,
navigation_depth=False,
style_external_links=True,
)
# doc types to build
sphinx_enable_epub_build = False
sphinx_enable_pdf_build = False
exclude_patterns = ["_build", "Thumbs.db", ".*", "~*", "*~", "*#"]
if __name__ == "__main__":
print(f"{project} v{version}\n© Copyright {copyright}")
| 32.844444
| 87
| 0.721583
|
25198d8b8d4beda066b30989fdc21735cb50c1c4
| 4,120
|
py
|
Python
|
chartify/_core/options.py
|
rechenchen/PycharmProjects
|
9e5dd9e3795bc440738d2d9df4ec55eb9f902f08
|
[
"Apache-2.0"
] | 1
|
2019-01-03T00:36:08.000Z
|
2019-01-03T00:36:08.000Z
|
chartify/_core/options.py
|
gabriel-rcpereira/chartify
|
6dbe996e847d9c37ebe1b712eb9791ab96c8387a
|
[
"Apache-2.0"
] | 4
|
2021-09-08T03:06:21.000Z
|
2022-03-12T00:55:48.000Z
|
chartify/_core/options.py
|
gabriel-rcpereira/chartify
|
6dbe996e847d9c37ebe1b712eb9791ab96c8387a
|
[
"Apache-2.0"
] | 1
|
2019-05-22T20:11:16.000Z
|
2019-05-22T20:11:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import os
from pathlib import Path
import yaml
class ChartifyOptions:
def __init__(self):
try:
options_path = os.environ['CHARTIFY_CONFIG_DIR']
except KeyError:
home_path = str(Path.home())
options_path = home_path + '/.chartify/'
self._options = OrderedDict({
'style.color_palette_categorical':
OptionValue('Category20'),
'style.color_palette_sequential':
OptionValue('Blues'),
'style.color_palette_diverging':
OptionValue('RdBu'),
'style.color_palette_accent':
OptionValue('Category20'),
'style.color_palette_accent_default_color':
OptionValue('grey'),
'chart.blank_labels':
OptionValue(False),
'config.logos_path':
OptionValue(options_path + 'logos/'),
'config.options':
OptionValue(options_path + 'options_config.yaml'),
'config.style_settings':
OptionValue(options_path + 'style_settings_config.yaml'),
'config.colors':
OptionValue(options_path + 'colors_config.yaml'),
'config.color_palettes':
OptionValue(options_path + 'color_palettes_config.yaml')
})
config_filename = self.get_option('config.options')
try:
self._from_yaml(config_filename)
except FileNotFoundError:
pass
def get_option(self, option_name):
"""Return the value of the given option"""
return self._options[option_name].value
def set_option(self, option_name, option_value):
"""Set the default value of the specified option.
Available options:
'style.color_palette_categorical': (str)
Color palette for categorical palette types.
'style.color_palette_sequential': (str)
Color palette for sequential palette types.
'style.color_palette_diverging': (str)
Color palette for diverging palette types.
'style.color_palette_accent': (str)
Color palette for assigning color to specific values.
'style.color_palette_accent_default_color': (str)
Default color of values in the 'color_column' that
are not accented.
                Default: 'grey'
'chart.blank_labels': boolean
If False, chartify.Chart objects populate the default
chart labels with helper text.
Default: False
"""
self._options[option_name].value = option_value
@staticmethod
def _get_value(option_value):
if isinstance(option_value, OptionValue):
return option_value.value
else:
return option_value
def _to_yaml(self, filename):
"""Write the options to a yaml file"""
with open(filename, 'w') as outfile:
yaml.dump(self._options, outfile, default_flow_style=False)
def _from_yaml(self, filename):
"""Load options from a yaml file.
Overwrites any options that are specified in the yaml file.
"""
        # Pass an explicit Loader (Loader-less yaml.load is deprecated) and make
        # sure the file handle is closed.
        with open(filename) as infile:
            yaml_options = yaml.load(infile, Loader=yaml.Loader)
        self._options.update(yaml_options)
class OptionValue:
def __init__(self, value):
self.value = value
def __repr__(self):
return '%s' % self.value
options = ChartifyOptions()
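# Illustrative usage (not part of the original module); the option names are the
# ones documented in ChartifyOptions.set_option above.
#
#   options.set_option('chart.blank_labels', True)
#   palette = options.get_option('style.color_palette_categorical')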
| 33.495935
| 74
| 0.625485
|
7e1fc58e7d813e9c8e11ec3b66eadc0433fd2f6a
| 1,960
|
py
|
Python
|
day2/day2.py
|
tomp/AOC-2020
|
c7b8313f883b6285f118d281d99c4b32b0497289
|
[
"MIT"
] | null | null | null |
day2/day2.py
|
tomp/AOC-2020
|
c7b8313f883b6285f118d281d99c4b32b0497289
|
[
"MIT"
] | null | null | null |
day2/day2.py
|
tomp/AOC-2020
|
c7b8313f883b6285f118d281d99c4b32b0497289
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Advent of Code 2020 - Day 2
#
from pathlib import Path
from collections import Counter
import re
LINE_RE = re.compile(r"(\d+)-(\d+) (\w): (\w+)$")
INPUTFILE = 'input.txt'
def sample_input():
return filter_blank_lines("""
1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc
""".split("\n"))
# Utility functions
def load_input(infile):
return filter_blank_lines(Path(infile).open())
def filter_blank_lines(lines):
for line in lines:
line = line.strip()
if line:
yield line
# Solution
def is_valid(line: str) -> bool:
m = LINE_RE.match(line)
if m:
lo, hi, ch, password = m.groups()
c = Counter(password)
return int(lo) <= c[ch] <= int(hi)
def is_valid2(line: str) -> bool:
m = LINE_RE.match(line)
if m:
lo, hi, ch, password = m.groups()
return (password[int(lo)-1] == ch) != (password[int(hi)-1] == ch)
def solve(lines, is_valid) -> int:
"""Solve the problem."""
count = 0
for line in lines:
if is_valid(line):
count += 1
return count
# PART 1
def example():
passwords = list(sample_input())
expected = 2
result = solve(passwords, is_valid)
print("'sample-input' -> {} (expected {})".format(result, expected))
assert result == expected
print('= ' * 32)
def part1(lines):
result = solve(lines, is_valid)
print("result is {}".format(result))
print('= ' * 32)
# PART 2
def example2():
passwords = list(sample_input())
expected = 1
result = solve(passwords, is_valid2)
print("'sample-input' -> {} (expected {})".format(result, expected))
assert result == expected
print('= ' * 32)
def part2(lines):
result = solve(lines, is_valid2)
print("result is {}".format(result))
print('= ' * 32)
if __name__ == '__main__':
example()
lines = list(load_input(INPUTFILE))
part1(lines)
example2()
part2(lines)
| 21.075269
| 73
| 0.592347
|
31c17378634a74755a77a40ade0b50f6004b3f3d
| 205
|
py
|
Python
|
python_pd/work/syntax/fizzbuzz/fizzbuzz.py
|
SerLap-ctrl/python-bp
|
1db0446bc95989ac23083e277806a2942c8d124f
|
[
"MIT"
] | 17
|
2020-12-02T08:37:10.000Z
|
2022-03-11T11:58:57.000Z
|
python_pd/work/syntax/fizzbuzz/fizzbuzz.py
|
SerLap-ctrl/python-bp
|
1db0446bc95989ac23083e277806a2942c8d124f
|
[
"MIT"
] | 6
|
2021-07-01T04:39:24.000Z
|
2021-11-03T19:04:43.000Z
|
python_pd/work/syntax/fizzbuzz/fizzbuzz.py
|
SerLap-ctrl/python-bp
|
1db0446bc95989ac23083e277806a2942c8d124f
|
[
"MIT"
] | 15
|
2021-07-01T04:41:15.000Z
|
2022-03-04T15:49:15.000Z
|
def fizzbuzz(number):
    # Place the implementation of the main program logic here.
pass
def main():
    # Place the validation of input values here.
pass
if __name__ == '__main__':
main()
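# One possible reference sketch (kept as a comment so the exercise template above
# stays blank); the classic divisible-by-3/5 rules are an assumption about what
# fizzbuzz() is expected to implement here.
#
#   def fizzbuzz(number):
#       if number % 15 == 0:
#           return 'FizzBuzz'
#       if number % 3 == 0:
#           return 'Fizz'
#       if number % 5 == 0:
#           return 'Buzz'
#       return str(number)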
| 15.769231
| 60
| 0.682927
|
7927271ed1659b7348835fc54abf7fb3cd146395
| 339
|
py
|
Python
|
day1.py
|
davide-butera/advent-of-code-2020
|
1cc67992caaa24200ee3c379f6558b46a2b5fcc7
|
[
"MIT"
] | null | null | null |
day1.py
|
davide-butera/advent-of-code-2020
|
1cc67992caaa24200ee3c379f6558b46a2b5fcc7
|
[
"MIT"
] | null | null | null |
day1.py
|
davide-butera/advent-of-code-2020
|
1cc67992caaa24200ee3c379f6558b46a2b5fcc7
|
[
"MIT"
] | null | null | null |
from load import load
data = list(map(lambda l: int(l), load()))
def part1():
for i in data:
if (2020 - i) in data:
return i * (2020 - i)
def part2():
for i in data:
for j in data:
if (2020 - i - j) in data:
return i * j * (2020 - i - j)
print(part1())
print(part2())
| 16.142857
| 45
| 0.480826
|
cb5881f2477edd3c915a34e30533fe364a5b0276
| 3,152
|
py
|
Python
|
MachineLearning/Server_Prediction3.py
|
SKravitsky/MachineLearningServer
|
170081fe1ea53be7394e35ce208bd665ae002b73
|
[
"Apache-2.0"
] | null | null | null |
MachineLearning/Server_Prediction3.py
|
SKravitsky/MachineLearningServer
|
170081fe1ea53be7394e35ce208bd665ae002b73
|
[
"Apache-2.0"
] | null | null | null |
MachineLearning/Server_Prediction3.py
|
SKravitsky/MachineLearningServer
|
170081fe1ea53be7394e35ce208bd665ae002b73
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import os
import sys
import pydot
import mysql.connector
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.externals.six import StringIO
config = {
'user': 'ECE32',
'password': 'seniordesign',
'host': 'septa-instance.ctejk6luw06s.us-west-2.rds.amazonaws.com',
'database': 'septa',
'raise_on_warnings': True,
}
def get_all_lines(user_id):
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor(dictionary=True)
sql4 = 'SELECT id_weekday, time_departure, id_station_origin, id_station_destination FROM trips WHERE id_user = "%s"' % user_id
sql = 'SELECT id_weekday, time_departure, id_station_origin, id_station_destination FROM trips'
df_mysql = pd.read_sql(sql, con=cnx)
#print df_mysql.dtypes
df_mysql.time_departure = df_mysql.time_departure.astype(int)
#print df_mysql.dtypes
#print df_mysql.head()
return df_mysql
def get_csv():
if os.path.exists("Update.csv"):
df = pd.read_csv("Update.csv")
return df
def scrub_df(data):
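    # Split the trip records into the first three feature columns (weekday, departure time,
    # origin station) and the remaining target column (destination station).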
#print("* df.head()", data.head())
features = list(data.columns[:3])
targets = list(data.columns[3:])
#print("* features:", features)
#print("* targets:", targets)
X = data[features]
Y = data[targets]
#print("Head", X.tail())
#print("Head2", Y.tail())
return X,Y,features,targets
def prediction_accuracy(F, T, FN, TN):
clf = tree.DecisionTreeClassifier()
    F_train, F_test, T_train, T_test = train_test_split(F, T, test_size=.2)
    # Fit on the training split only, so the held-out accuracy is not inflated by data leakage.
    clf.fit(F_train, T_train)
    predictions = clf.predict(F_test)
    print(accuracy_score(T_test, predictions))
#tree.export_graphviz(clf, out_file='tree.dot', feature_names=FN, filled=True, rounded=True)
#os.system('dot -Tpng tree.dot -o tree.png')
def prediction(F, T, FN, TN, data):
clf = tree.DecisionTreeClassifier()
clf.fit(F, T)
    # Wrap the single sample in a list so the frame has one row with three feature columns.
    df_api = pd.DataFrame([data], columns=['id_weekday', 'time_departure', 'id_station_origin'])
df_api.time_departure = df_api.time_departure.astype(int)
prediction = clf.predict(df_api)
return prediction
def start_function(user_id, weekday, time, station):
df = get_all_lines(user_id)
features, targets, fnames, tnames = scrub_df(df)
data = (weekday, time, station)
#print features
#prediction_accuracy(features, targets, fnames, tnames)
output_prediction = prediction(features, targets, fnames, tnames, data)
    print(output_prediction)
def lambda_handler(event, context):
user_id = event['key1']
weekday = event['key2']
time = event['key3']
station = event['key4']
start_function(user_id, weekday, time, station)
if __name__ == "__main__":
user_id = 'e2f4uovEeYU'
df = get_all_lines(user_id)
features, targets, fnames, tnames = scrub_df(df)
    print(features)
'''
df2 = get_csv()
features2, targets2, fnames2, tnames2 = scrub_df(df2)
print '----'
print features2
'''
prediction_accuracy(features, targets, fnames, tnames)
| 23.878788
| 131
| 0.678299
|
0b5a3bc86832a854920da1271aba439a1bbef913
| 7,419
|
py
|
Python
|
library/ospf_element_facts.py
|
Forcepoint/fp-NGFW-SMC-ansible
|
47cafea1bef162e75f3632b969e996d53d636374
|
[
"Apache-2.0"
] | 5
|
2019-11-19T07:23:01.000Z
|
2021-08-25T08:04:49.000Z
|
library/ospf_element_facts.py
|
Forcepoint/fp-NGFW-SMC-ansible
|
47cafea1bef162e75f3632b969e996d53d636374
|
[
"Apache-2.0"
] | 2
|
2020-03-24T20:30:32.000Z
|
2020-09-15T09:02:44.000Z
|
library/ospf_element_facts.py
|
Forcepoint/fp-NGFW-SMC-ansible
|
47cafea1bef162e75f3632b969e996d53d636374
|
[
"Apache-2.0"
] | 3
|
2020-03-24T20:32:29.000Z
|
2022-02-15T15:29:07.000Z
|
#!/usr/bin/python
# Copyright (c) 2017-2019 Forcepoint
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: ospf_element_facts
short_description: Facts about OSPF based elements in the SMC
description:
  - OSPF elements are the building blocks for building an OSPF configuration on
    a layer 3 engine. Use this module to obtain available elements and their
    values.
version_added: '2.5'
options:
element:
description:
- Type of OSPF element to retrieve
required: true
choices:
- ospfv2_area
- ospfv2_profile
type: str
extends_documentation_fragment:
- management_center
- management_center_facts
requirements:
- smc-python
author:
- Forcepoint
'''
EXAMPLES = '''
- name: Facts about OSPF elements
hosts: localhost
gather_facts: no
tasks:
- name: Find all OSPF v2 areas
ospf_element_facts:
element: ospfv2_area
- name: Find a specific OSPF area with details
ospf_element_facts:
element: ospfv2_area
filter: myarea
- name: Find an OSPF profile containing name 'Default'
ospf_element_facts:
element: ospfv2_profile
filter: Default
  - name: Get details for OSPF profile myprofile and save as yaml
register: results
ospf_element_facts:
smc_logging:
level: 10
path: ansible-smc.log
element: ospfv2_profile
filter: myprofile
exact_match: false
as_yaml: true
- name: Write the yaml using a jinja template
template: src=templates/facts_yaml.j2 dest=./ospf_element.yml
vars:
playbook: ospf_element
'''
RETURN = '''
elements:
description: List all OSPF Areas
returned: always
type: list
sample: [
{
"name": "myarea",
"type": "ospfv2_area"
},
{
"name": "myarea2",
"type": "ospfv2_area"
},
{
"name": "foo",
"type": "ospfv2_area"
}]
elements:
description: List a specific OSPF profile
returned: always
type: list
sample: [
{
"comment": "added by ansible",
"default_metric": 123,
"domain_settings_ref": "Default OSPFv2 Domain Settings",
"external_distance": 110,
"inter_distance": 130,
"intra_distance": 110,
"name": "myprofile",
"redistribution_entry": [
{
"enabled": true,
"metric_type": "external_1",
"type": "bgp"
},
{
"enabled": true,
"filter": {
"route_map": [
"myroutemap"
]
},
"metric": 2,
"metric_type": "external_1",
"type": "static"
},
{
"enabled": true,
"filter": {
"ip_access_list": [
"myacl"
]
},
"metric_type": "external_2",
"type": "connected"
},
{
"enabled": false,
"metric_type": "external_1",
"type": "kernel"
},
{
"enabled": false,
"metric_type": "external_1",
"type": "default_originate"
}]
}]
'''
from ansible.module_utils.smc_util import ForcepointModuleBase, format_element
try:
from smc.routing.route_map import RouteMap
from smc.routing.access_list import IPAccessList
from smc.base.model import lookup_class
except ImportError:
pass
ospf_elements = ('ospfv2_area', 'ospfv2_interface_settings', 'ospfv2_key_chain',
'ospfv2_profile', 'ospfv2_domain_settings')
def area_to_yaml(area):
yaml = {}
yaml.update(
name=area.name,
comment=area.comment,
area_type=area.area_type,
interface_settings_ref=area.interface_settings_ref.name)
for filt in ('inbound_filters', 'outbound_filters'):
if len(getattr(area, '%s_ref' % filt)):
for _filt in getattr(area, filt):
_filter = {_filt.typeof: [_filt.name]}
yaml.setdefault(filt, {}).update(_filter)
return yaml
def profile_to_yaml(profile):
yaml = {}
yaml.update(
name=profile.name,
comment=profile.comment,
external_distance=profile.external_distance,
inter_distance=profile.inter_distance,
intra_distance=profile.intra_distance,
domain_settings_ref=profile.domain_settings_ref.name,
default_metric=profile.data.get('default_metric', None)
)
redist_entries = []
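    # Resolve each redistribution entry's filter reference back to an element name so the
    # returned structure uses the route_map / ip_access_list keys shown in RETURN above.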
for redist in profile.data.get('redistribution_entry', []):
filter_type = redist.pop('filter_type', 'none')
if filter_type != 'none':
if filter_type == 'route_map_policy':
redist.update(
filter={'route_map': [RouteMap.from_href(
redist.pop('redistribution_rm_ref')).name]})
elif filter_type == 'access_list':
redist.update(
filter={'ip_access_list': [IPAccessList.from_href(
redist.pop('redistribution_filter_ref')).name]})
redist_entries.append(redist)
yaml.update(redistribution_entry=redist_entries)
return yaml
def convert_to_dict(element):
if element.typeof == 'ospfv2_area':
return {element.typeof: area_to_yaml(element)}
elif element.typeof == 'ospfv2_profile':
return {element.typeof: profile_to_yaml(element)}
else:
return {element.typeof: format_element(element)}
class OSPFElementFacts(ForcepointModuleBase):
def __init__(self):
self.module_args = dict(
element=dict(required=True, type='str', choices=list(ospf_elements))
)
self.element = None
self.limit = None
self.filter = None
self.as_yaml = None
self.exact_match = None
self.case_sensitive = None
        required_if = [
            ('as_yaml', True, ['filter'])]
self.results = dict(
ansible_facts=dict(
ospf_element=[]
)
)
super(OSPFElementFacts, self).__init__(self.module_args, required_if=required_if,
is_fact=True)
def exec_module(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
result = self.search_by_type(lookup_class(self.element))
if self.filter:
if self.as_yaml:
elements = [convert_to_dict(element) for element in result
if element.name == self.filter]
else:
elements = [convert_to_dict(element) for element in result]
else:
elements = [{'name': element.name, 'type': element.typeof} for element in result]
self.results['ansible_facts']['ospf_element'] = [{'elements': elements}]\
if elements else []
return self.results
def main():
OSPFElementFacts()
if __name__ == '__main__':
main()
| 27.275735
| 93
| 0.560183
|
913479ed43dbdc561dc9fe7b69728da1da432d1d
| 794
|
py
|
Python
|
egs2/libritts/tts1/src/create_data_from_alex.py
|
ishine/Unsupervised-TTS
|
6f89633228626100615bcbe21263d80aea6920c5
|
[
"Apache-2.0"
] | 2
|
2022-02-25T13:27:36.000Z
|
2022-03-18T21:11:35.000Z
|
egs2/libritts/tts1/src/create_data_from_alex.py
|
ishine/Unsupervised-TTS
|
6f89633228626100615bcbe21263d80aea6920c5
|
[
"Apache-2.0"
] | null | null | null |
egs2/libritts/tts1/src/create_data_from_alex.py
|
ishine/Unsupervised-TTS
|
6f89633228626100615bcbe21263d80aea6920c5
|
[
"Apache-2.0"
] | 1
|
2022-02-26T23:54:53.000Z
|
2022-02-26T23:54:53.000Z
|
import os
import argparse
def create_data_dir(phn_seq_file, text_file):
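    # Rewrite text_file in place so that each utterance id is followed by its phoneme
    # sequence taken from phn_seq_file (both files are expected to share the same ids).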
with open(phn_seq_file, 'r') as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
id2phn_seq = {x.split()[0]:' '.join(x.split()[1:]) for x in content}
with open(text_file, 'r') as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
id_seq = [x.split()[0] for x in content]
with open(text_file, 'w') as f:
for id in id_seq:
f.write('%s %s\n' % (id, id2phn_seq[id]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--phn_file', type=str)
parser.add_argument('--text_file', type=str)
args = parser.parse_args()
create_data_dir(args.phn_file, args.text_file)
| 29.407407
| 72
| 0.618388
|
18375165a526a780f2e602a3800e12833dbb3e67
| 14,121
|
py
|
Python
|
alphafold/data/mmcif_parsing.py
|
thenotcompany/alphafold
|
1d43aaff941c84dc56311076b58795797e49107b
|
[
"Apache-2.0"
] | 45
|
2022-01-12T04:39:36.000Z
|
2022-03-25T12:33:36.000Z
|
alphafold/data/mmcif_parsing.py
|
thenotcompany/alphafold
|
1d43aaff941c84dc56311076b58795797e49107b
|
[
"Apache-2.0"
] | 6
|
2022-01-15T16:48:39.000Z
|
2022-03-15T16:20:34.000Z
|
alphafold/data/mmcif_parsing.py
|
thenotcompany/alphafold
|
1d43aaff941c84dc56311076b58795797e49107b
|
[
"Apache-2.0"
] | 10
|
2022-01-12T11:28:03.000Z
|
2022-03-30T11:36:41.000Z
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses the mmCIF file format."""
import collections
import dataclasses
import io
from typing import Any, Mapping, Optional, Sequence, Tuple
from absl import logging
from Bio import PDB
from Bio.Data import SCOPData
# Type aliases:
ChainId = str
PdbHeader = Mapping[str, Any]
PdbStructure = PDB.Structure.Structure
SeqRes = str
MmCIFDict = Mapping[str, Sequence[str]]
@dataclasses.dataclass(frozen=True)
class Monomer:
id: str
num: int
# Note - mmCIF format provides no guarantees on the type of author-assigned
# sequence numbers. They need not be integers.
@dataclasses.dataclass(frozen=True)
class AtomSite:
residue_name: str
author_chain_id: str
mmcif_chain_id: str
author_seq_num: str
mmcif_seq_num: int
insertion_code: str
hetatm_atom: str
model_num: int
# Used to map SEQRES index to a residue in the structure.
@dataclasses.dataclass(frozen=True)
class ResiduePosition:
chain_id: str
residue_number: int
insertion_code: str
@dataclasses.dataclass(frozen=True)
class ResidueAtPosition:
position: Optional[ResiduePosition]
name: str
is_missing: bool
hetflag: str
@dataclasses.dataclass(frozen=True)
class MmcifObject:
"""Representation of a parsed mmCIF file.
Contains:
file_id: A meaningful name, e.g. a pdb_id. Should be unique amongst all
files being processed.
header: Biopython header.
structure: Biopython structure.
chain_to_seqres: Dict mapping chain_id to 1 letter amino acid sequence. E.g.
{'A': 'ABCDEFG'}
seqres_to_structure: Dict; for each chain_id contains a mapping between
SEQRES index and a ResidueAtPosition. e.g. {'A': {0: ResidueAtPosition,
1: ResidueAtPosition,
...}}
raw_string: The raw string used to construct the MmcifObject.
"""
file_id: str
header: PdbHeader
structure: PdbStructure
chain_to_seqres: Mapping[ChainId, SeqRes]
seqres_to_structure: Mapping[ChainId, Mapping[int, ResidueAtPosition]]
raw_string: Any
@dataclasses.dataclass(frozen=True)
class ParsingResult:
"""Returned by the parse function.
Contains:
mmcif_object: A MmcifObject, may be None if no chain could be successfully
parsed.
errors: A dict mapping (file_id, chain_id) to any exception generated.
"""
mmcif_object: Optional[MmcifObject]
errors: Mapping[Tuple[str, str], Any]
class ParseError(Exception):
"""An error indicating that an mmCIF file could not be parsed."""
def mmcif_loop_to_list(prefix: str,
parsed_info: MmCIFDict) -> Sequence[Mapping[str, str]]:
"""Extracts loop associated with a prefix from mmCIF data as a list.
Reference for loop_ in mmCIF:
http://mmcif.wwpdb.org/docs/tutorials/mechanics/pdbx-mmcif-syntax.html
Args:
prefix: Prefix shared by each of the data items in the loop.
e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
_entity_poly_seq.mon_id. Should include the trailing period.
parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
parser.
Returns:
Returns a list of dicts; each dict represents 1 entry from an mmCIF loop.
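    Example (hypothetical values): if parsed_info contains
    '_entity_poly_seq.num': ['1', '2'] and '_entity_poly_seq.mon_id': ['MET', 'ALA'],
    this returns [{'_entity_poly_seq.num': '1', '_entity_poly_seq.mon_id': 'MET'},
    {'_entity_poly_seq.num': '2', '_entity_poly_seq.mon_id': 'ALA'}].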
"""
cols = []
data = []
for key, value in parsed_info.items():
if key.startswith(prefix):
cols.append(key)
data.append(value)
assert all([len(xs) == len(data[0]) for xs in data]), (
'mmCIF error: Not all loops are the same length: %s' % cols)
return [dict(zip(cols, xs)) for xs in zip(*data)]
def mmcif_loop_to_dict(prefix: str,
index: str,
parsed_info: MmCIFDict,
) -> Mapping[str, Mapping[str, str]]:
"""Extracts loop associated with a prefix from mmCIF data as a dictionary.
Args:
prefix: Prefix shared by each of the data items in the loop.
e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
_entity_poly_seq.mon_id. Should include the trailing period.
index: Which item of loop data should serve as the key.
parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
parser.
Returns:
Returns a dict of dicts; each dict represents 1 entry from an mmCIF loop,
indexed by the index column.
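    Example (hypothetical values): mmcif_loop_to_dict('_chem_comp.', '_chem_comp.id',
    parsed_info) keys each loop entry by its '_chem_comp.id' value, e.g.
    {'MET': {'_chem_comp.id': 'MET', '_chem_comp.type': 'L-peptide linking'}, ...}.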
"""
entries = mmcif_loop_to_list(prefix, parsed_info)
return {entry[index]: entry for entry in entries}
def parse(*,
file_id: str,
mmcif_string: str,
catch_all_errors: bool = True) -> ParsingResult:
"""Entry point, parses an mmcif_string.
Args:
file_id: A string identifier for this file. Should be unique within the
collection of files being processed.
mmcif_string: Contents of an mmCIF file.
catch_all_errors: If True, all exceptions are caught and error messages are
returned as part of the ParsingResult. If False exceptions will be allowed
to propagate.
Returns:
A ParsingResult.
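    Example (hypothetical usage):
      result = parse(file_id='1abc', mmcif_string=mmcif_contents)
      if result.mmcif_object is not None:
        seq = result.mmcif_object.chain_to_seqres['A']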
"""
errors = {}
try:
parser = PDB.MMCIFParser(QUIET=True)
handle = io.StringIO(mmcif_string)
full_structure = parser.get_structure('', handle)
first_model_structure = _get_first_model(full_structure)
# Extract the _mmcif_dict from the parser, which contains useful fields not
# reflected in the Biopython structure.
parsed_info = parser._mmcif_dict # pylint:disable=protected-access
# Ensure all values are lists, even if singletons.
for key, value in parsed_info.items():
if not isinstance(value, list):
parsed_info[key] = [value]
header = _get_header(parsed_info)
# Determine the protein chains, and their start numbers according to the
# internal mmCIF numbering scheme (likely but not guaranteed to be 1).
valid_chains = _get_protein_chains(parsed_info=parsed_info)
if not valid_chains:
return ParsingResult(
None, {(file_id, ''): 'No protein chains found in this file.'})
seq_start_num = {chain_id: min([monomer.num for monomer in seq])
for chain_id, seq in valid_chains.items()}
# Loop over the atoms for which we have coordinates. Populate two mappings:
# -mmcif_to_author_chain_id (maps internal mmCIF chain ids to chain ids used
# the authors / Biopython).
# -seq_to_structure_mappings (maps idx into sequence to ResidueAtPosition).
mmcif_to_author_chain_id = {}
seq_to_structure_mappings = {}
for atom in _get_atom_site_list(parsed_info):
if atom.model_num != '1':
# We only process the first model at the moment.
continue
mmcif_to_author_chain_id[atom.mmcif_chain_id] = atom.author_chain_id
if atom.mmcif_chain_id in valid_chains:
hetflag = ' '
if atom.hetatm_atom == 'HETATM':
# Water atoms are assigned a special hetflag of W in Biopython. We
# need to do the same, so that this hetflag can be used to fetch
# a residue from the Biopython structure by id.
if atom.residue_name in ('HOH', 'WAT'):
hetflag = 'W'
else:
hetflag = 'H_' + atom.residue_name
insertion_code = atom.insertion_code
if not _is_set(atom.insertion_code):
insertion_code = ' '
position = ResiduePosition(chain_id=atom.author_chain_id,
residue_number=int(atom.author_seq_num),
insertion_code=insertion_code)
seq_idx = int(atom.mmcif_seq_num) - seq_start_num[atom.mmcif_chain_id]
current = seq_to_structure_mappings.get(atom.author_chain_id, {})
current[seq_idx] = ResidueAtPosition(position=position,
name=atom.residue_name,
is_missing=False,
hetflag=hetflag)
seq_to_structure_mappings[atom.author_chain_id] = current
# Add missing residue information to seq_to_structure_mappings.
for chain_id, seq_info in valid_chains.items():
author_chain = mmcif_to_author_chain_id[chain_id]
current_mapping = seq_to_structure_mappings[author_chain]
for idx, monomer in enumerate(seq_info):
if idx not in current_mapping:
current_mapping[idx] = ResidueAtPosition(position=None,
name=monomer.id,
is_missing=True,
hetflag=' ')
author_chain_to_sequence = {}
for chain_id, seq_info in valid_chains.items():
author_chain = mmcif_to_author_chain_id[chain_id]
seq = []
for monomer in seq_info:
code = SCOPData.protein_letters_3to1.get(monomer.id, 'X')
seq.append(code if len(code) == 1 else 'X')
seq = ''.join(seq)
author_chain_to_sequence[author_chain] = seq
mmcif_object = MmcifObject(
file_id=file_id,
header=header,
structure=first_model_structure,
chain_to_seqres=author_chain_to_sequence,
seqres_to_structure=seq_to_structure_mappings,
raw_string=parsed_info)
return ParsingResult(mmcif_object=mmcif_object, errors=errors)
except Exception as e: # pylint:disable=broad-except
errors[(file_id, '')] = e
if not catch_all_errors:
raise
return ParsingResult(mmcif_object=None, errors=errors)
def _get_first_model(structure: PdbStructure) -> PdbStructure:
"""Returns the first model in a Biopython structure."""
return next(structure.get_models())
_MIN_LENGTH_OF_CHAIN_TO_BE_COUNTED_AS_PEPTIDE = 21
def get_release_date(parsed_info: MmCIFDict) -> str:
"""Returns the oldest revision date."""
revision_dates = parsed_info['_pdbx_audit_revision_history.revision_date']
return min(revision_dates)
def _get_header(parsed_info: MmCIFDict) -> PdbHeader:
"""Returns a basic header containing method, release date and resolution."""
header = {}
experiments = mmcif_loop_to_list('_exptl.', parsed_info)
header['structure_method'] = ','.join([
experiment['_exptl.method'].lower() for experiment in experiments])
# Note: The release_date here corresponds to the oldest revision. We prefer to
# use this for dataset filtering over the deposition_date.
if '_pdbx_audit_revision_history.revision_date' in parsed_info:
header['release_date'] = get_release_date(parsed_info)
else:
logging.warning('Could not determine release_date: %s',
parsed_info['_entry.id'])
header['resolution'] = 0.00
for res_key in ('_refine.ls_d_res_high', '_em_3d_reconstruction.resolution',
'_reflns.d_resolution_high'):
if res_key in parsed_info:
try:
raw_resolution = parsed_info[res_key][0]
header['resolution'] = float(raw_resolution)
except ValueError:
logging.warning('Invalid resolution format: %s', parsed_info[res_key])
return header
def _get_atom_site_list(parsed_info: MmCIFDict) -> Sequence[AtomSite]:
"""Returns list of atom sites; contains data not present in the structure."""
return [AtomSite(*site) for site in zip( # pylint:disable=g-complex-comprehension
parsed_info['_atom_site.label_comp_id'],
parsed_info['_atom_site.auth_asym_id'],
parsed_info['_atom_site.label_asym_id'],
parsed_info['_atom_site.auth_seq_id'],
parsed_info['_atom_site.label_seq_id'],
parsed_info['_atom_site.pdbx_PDB_ins_code'],
parsed_info['_atom_site.group_PDB'],
parsed_info['_atom_site.pdbx_PDB_model_num'],
)]
def _get_protein_chains(
*, parsed_info: Mapping[str, Any]) -> Mapping[ChainId, Sequence[Monomer]]:
"""Extracts polymer information for protein chains only.
Args:
parsed_info: _mmcif_dict produced by the Biopython parser.
Returns:
A dict mapping mmcif chain id to a list of Monomers.
"""
# Get polymer information for each entity in the structure.
entity_poly_seqs = mmcif_loop_to_list('_entity_poly_seq.', parsed_info)
polymers = collections.defaultdict(list)
for entity_poly_seq in entity_poly_seqs:
polymers[entity_poly_seq['_entity_poly_seq.entity_id']].append(
Monomer(id=entity_poly_seq['_entity_poly_seq.mon_id'],
num=int(entity_poly_seq['_entity_poly_seq.num'])))
# Get chemical compositions. Will allow us to identify which of these polymers
# are proteins.
chem_comps = mmcif_loop_to_dict('_chem_comp.', '_chem_comp.id', parsed_info)
# Get chains information for each entity. Necessary so that we can return a
# dict keyed on chain id rather than entity.
struct_asyms = mmcif_loop_to_list('_struct_asym.', parsed_info)
entity_to_mmcif_chains = collections.defaultdict(list)
for struct_asym in struct_asyms:
chain_id = struct_asym['_struct_asym.id']
entity_id = struct_asym['_struct_asym.entity_id']
entity_to_mmcif_chains[entity_id].append(chain_id)
# Identify and return the valid protein chains.
valid_chains = {}
for entity_id, seq_info in polymers.items():
chain_ids = entity_to_mmcif_chains[entity_id]
# Reject polymers without any peptide-like components, such as DNA/RNA.
if any(['peptide' in chem_comps[monomer.id]['_chem_comp.type']
for monomer in seq_info]):
for chain_id in chain_ids:
valid_chains[chain_id] = seq_info
return valid_chains
def _is_set(data: str) -> bool:
"""Returns False if data is a special mmCIF character indicating 'unset'."""
return data not in ('.', '?')
| 36.677922
| 84
| 0.691665
|
260d3c5cecb37be3378fd1d3816fd08ad94a64a8
| 6,570
|
py
|
Python
|
modules/feedparser/util.py
|
whanderley/eden
|
08ced3be3d52352c54cbd412ed86128fbb68b1d2
|
[
"MIT"
] | 2
|
2019-11-25T20:34:52.000Z
|
2021-06-04T20:05:46.000Z
|
modules/feedparser/util.py
|
whanderley/eden
|
08ced3be3d52352c54cbd412ed86128fbb68b1d2
|
[
"MIT"
] | 1
|
2020-01-29T15:33:17.000Z
|
2020-01-29T15:33:17.000Z
|
modules/feedparser/util.py
|
whanderley/eden
|
08ced3be3d52352c54cbd412ed86128fbb68b1d2
|
[
"MIT"
] | 3
|
2019-11-30T20:25:55.000Z
|
2022-02-03T17:12:16.000Z
|
# Copyright 2010-2019 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
import warnings
class FeedParserDict(dict):
keymap = {
'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail',
}
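    # Illustration of the alias behaviour (hypothetical values): after
    #   d = FeedParserDict(); d['channel'] = {'title': 'Example'}
    # the value is stored under 'feed', so d['channel'] and d['feed'] return the same dict.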
def __getitem__(self, key):
"""
:return: A :class:`FeedParserDict`.
"""
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name != 'rel'])
return [
norel(link)
for link in dict.__getitem__(self, 'links')
if link['rel'] == 'enclosure'
]
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel'] == 'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if (
not dict.__contains__(self, 'updated')
and dict.__contains__(self, 'published')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.",
DeprecationWarning,
)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if (
not dict.__contains__(self, 'updated_parsed')
and dict.__contains__(self, 'published_parsed')
):
warnings.warn(
"To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning,
)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
"""
:return: A :class:`FeedParserDict`.
"""
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, k, default):
if k not in self:
self[k] = default
return default
return self[k]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
# This is incorrect behavior -- dictionaries shouldn't be hashable.
# Note to self: remove this behavior in the future.
return id(self)
| 39.341317
| 115
| 0.591629
|
43585adbcbc8761b49ed87aecc2093b1534422eb
| 10,580
|
py
|
Python
|
tests/connectors/pony/test_statement_filter.py
|
gvasold/papilotte
|
7683da0a56daf77450f962caaf58b7cfe3acf408
|
[
"Apache-2.0"
] | 3
|
2019-10-28T08:27:32.000Z
|
2021-03-01T02:30:26.000Z
|
tests/connectors/pony/test_statement_filter.py
|
gvasold/papilotte
|
7683da0a56daf77450f962caaf58b7cfe3acf408
|
[
"Apache-2.0"
] | null | null | null |
tests/connectors/pony/test_statement_filter.py
|
gvasold/papilotte
|
7683da0a56daf77450f962caaf58b7cfe3acf408
|
[
"Apache-2.0"
] | null | null | null |
"""Tests filtering (searching) for papilotte.connectors.pony.statement
"""
import copy
import datetime
import pytest
from pony import orm
from papilotte.connectors.pony import statement
def test_filter_by_factoid_id(db200final_cfg):
"Test the factoidId filter."
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, factoidId="F00154")) == 5
# searching for ids only matches full ids (not parts of id)
assert connector.search(size=100, page=1, factoidId="F0015") == []
def test_filter_by_from(db200final_cfg):
"Filter by from= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, from_='1803-03-30')) == 18
def test_filter_by_member_of(db200final_cfg):
"Filter by memberOf= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, memberOf='Group 00051')) == 6
# search for partial entry in label
assert len(connector.search(size=100, page=1, memberOf='oup 00051')) == 6
# searching in label is case insensitive
assert len(connector.search(size=100, page=1, memberOf='group 00051')) == 6
# searching in uri is case sensitive and only matches full matches
assert len(connector.search(size=100, page=1, memberOf='https://example.com/groups/00053')) == 6
    # Uppercase 'G' should not be found
    assert connector.search(size=100, page=1, memberOf='https://example.com/Groups/00053') == []
assert connector.search(size=100, page=1, memberOf='https://example.com/Groups/0005') == []
def test_filter_by_name(db200final_cfg):
"Filter by name= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, name="Statement 0001")) == 9
assert len(connector.search(size=100, page=1, name="statement 0001")) == 9
assert len(connector.search(size=100, page=1, name="atement 0001")) == 9
def test_filter_by_place(db200final_cfg):
    "Filter by place= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, place="Place 00053")) == 12
assert len(connector.search(size=100, page=1, place="place 00053")) == 12
assert len(connector.search(size=100, page=1, place="ace 00053")) == 12
assert len(connector.search(size=100, page=1, place="https://example.com/places/00053")) == 6
assert connector.search(size=100, page=1, place="https://example.com/PLaces/00053") == []
assert connector.search(size=100, page=1, place="https://example.com/places/0005") == []
def test_filter_by_relates_to_person(db200final_cfg):
    "Filter by relatesToPerson= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, relatesToPerson="Related person 00056")) == 6
assert len(connector.search(size=100, page=1, relatesToPerson="related person 00056")) == 6
assert len(connector.search(size=100, page=1, relatesToPerson="lated person 00056")) == 6
assert len(connector.search(size=100, page=1,
relatesToPerson="https://example.com/relatedpersons/00058")) == 3
assert connector.search(size=100, page=1,
relatesToPerson="https://example.com/RelatedPersons/00058") == []
assert connector.search(size=100, page=1,
relatesToPerson="https://example.com/relatedpersons/0005") == []
def test_filter_by_role(db200final_cfg):
    "Filter by role= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, role="Role 00051")) == 6
assert len(connector.search(size=100, page=1, role="role 00051")) == 6
assert len(connector.search(size=100, page=1, role="le 00051")) == 6
assert len(connector.search(size=100, page=1, role="https://example.com/roles/00053")) == 6
assert connector.search(size=100, page=1, role="https://example.com/ROLES/00053") == []
assert connector.search(size=100, page=1, role="https://example.com/roles/0005") == []
def test_filter_by_source_id(db200final_cfg):
"Test the sourceId filter."
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, sourceId="S00055")) == 10
# searching for ids only matches full ids (not parts of id)
    assert connector.search(size=100, page=1, sourceId="S0005") == []
def test_filter_by_source_label(db200final_cfg):
"Test the label filter."
connector = statement.StatementConnector(db200final_cfg)
# search for exact label
assert len(connector.search(size=100, page=1, label="Source 00002")) == 4
# search for part of label
assert len(connector.search(size=100, page=1, label="Source 0000")) == 40
# search for non existing label
assert connector.search(size=100, page=1, label="FooFooBar") == []
def test_filter_by_statement_content(db200final_cfg):
"Filter by statementContent= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, statementContent="Statement content 00061")) == 3
assert len(connector.search(size=100, page=1, statementContent="statement content 00061")) == 3
assert len(connector.search(size=100, page=1, statementContent="ement content 00061")) == 3
def test_filter_by_statement_type(db200final_cfg):
    "Filter by statementType= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, statementType="Statement type 00061")) == 6
assert len(connector.search(size=100, page=1, statementType="statement TYPE 00061")) == 6
assert len(connector.search(size=100, page=1, statementType="tement type 00061")) == 6
assert len(connector.search(size=100, page=1,
statementType="https://example.com/statementtypes/00021")) == 9
assert connector.search(size=100, page=1,
statementType="https://example.com/statementtypes/0002") == []
def test_filter_by_statement_id(db200final_cfg):
"Test the statementId filter."
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=200, page=1, statementId="Stmt00024")) == 1
# searching for ids only matches full ids (not parts of id)
    assert connector.search(size=100, page=1, statementId="Stmt001") == []
def test_filter_by_to(db200final_cfg):
    "Filter by to= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, to='1800-06-01')) == 15
# ------------- Compliance level 1+ get tests ----------------------------------------------
# TODO: more tests
def test_filter_by_p(db200final_cfg):
"Test the p= parameter."
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=200, page=1, p="P00058")) == 6
assert len(connector.search(size=200, page=1, p="P0005")) == 66
assert len(connector.search(size=200, page=1, p="https://example.com/persons/22a")) == 6
# Test if only full uris are matched, so only '6', but not 60
assert len(connector.search(size=200, page=1, p="https://example.com/persons/22")) == 0
# TODO: more tests
def test_filter_by_f(db200final_cfg):
"Test the f= parameter"
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=220, page=1, f="F00033")) == 4
assert len(connector.search(size=200, page=1, f="F0003")) == 30
# TODO: more tests
def test_filter_by_s(db200final_cfg):
"Test the s= parameter."
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=200, page=1, s="S00033")) == 6
assert len(connector.search(size=200, page=1, s="S0003")) == 60
assert len(connector.search(size=200, page=1, s="Source 00031")) == 2
# TODO: more tests?
def test_filter_by_st(db200final_cfg):
"Test the st= parameter."
connector = statement.StatementConnector(db200final_cfg)
assert len(connector.search(size=100, page=1, s="Source 00031")) == 2
# id
assert len(connector.search(size=100, page=1, st="Stmt00048")) == 1
# date.label
assert len(connector.search(size=100, page=1, st="Historical Date 00061")) == 2
# memberOf.label
assert len(connector.search(size=100, page=1, st="Group 00061")) == 6
# memberOf.uri
assert len(connector.search(size=100, page=1, st="https://example.com/groups/00061")) == 3
# name
assert len(connector.search(size=100, page=1, st="Statement 00048")) == 1
# role.label
assert len(connector.search(size=100, page=1, st="Role 00061")) == 6
# statementContent
assert len(connector.search(size=100, page=1, st="Statement content 00061")) == 3
# statementtype.label
assert len(connector.search(size=100, page=1, st="Statement type 00061")) == 6
# statementType.uri
assert len(connector.search(size=100, page=1, st="https://example.com/statementtypes/00021")) == 9
# places.label
assert len(connector.search(size=100, page=1, st="Place 00051")) == 12
# places.uri
assert len(connector.search(size=100, page=1, st="https://example.com/places/00053")) == 6
# relatesToPersons.label
assert len(connector.search(size=100, page=1, st="Related person 00056")) == 6
# relatesToPersons.uri
assert len(connector.search(size=100, page=1, st="https://example.com/relatedpersons/00058")) == 3
# uris
assert len(connector.search(size=100, page=1, st="https://example.com/statements/61a")) == 1
def test_filter_factoid_and_source(db200final_cfg):
"Test the sourceId filter together with factoid filter."
connector = statement.StatementConnector(db200final_cfg)
    # there are 10 statements with this source id
assert len(connector.search(size=100, page=1, sourceId="S00055")) == 10
# if we add a factoidId, there should be only one left
assert len(connector.search(size=100, page=1, sourceId="S00055", factoidId="F00054")) == 5
def test_get_factoid_refs(db200final_cfg):
connector = statement.StatementConnector(db200final_cfg)
st = connector.get('Stmt00001')
refs = st['factoid-refs']
assert len(refs) == 1
assert refs[0]['@id'] == 'F00001'
assert refs[0]['source-ref']['@id'] == 'S00002'
assert refs[0]['person-ref']['@id'] == 'P00002'
assert refs[0]['statement-refs'][0]['@id'] == 'Stmt00001'
| 48.53211
| 102
| 0.703875
|
a8bef0dbbb17486c7114c09d850536406924295c
| 20,808
|
py
|
Python
|
blender/render_bodies.py
|
ivantishchenko/RefRESH
|
08dd7a323ab62abcbf16b1cd13d48b787fbc878b
|
[
"MIT"
] | 1
|
2020-02-12T10:37:40.000Z
|
2020-02-12T10:37:40.000Z
|
blender/render_bodies.py
|
ivantishchenko/RefRESH
|
08dd7a323ab62abcbf16b1cd13d48b787fbc878b
|
[
"MIT"
] | null | null | null |
blender/render_bodies.py
|
ivantishchenko/RefRESH
|
08dd7a323ab62abcbf16b1cd13d48b787fbc878b
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2018 Zhaoyang Lv
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys, os, math, time
import numpy as np
from os import getenv, remove
from os.path import join, dirname, realpath, exists
from glob import glob
from pickle import load
# from random import choice, seed, random
import bpy
from bpy_extras.object_utils import world_to_camera_view as world2cam
sys.path.insert(0, ".")
from motion_surreal import *
from utils import world_to_blender, set_intrinsic
def create_directory(target_dir):
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
def restart_blender_scene():
for obj in bpy.data.objects.values():
obj.select=False
bpy.context.scene.objects.active = None
class SceneGeneration:
start_time = None
frame_per_clip = 200 # We will generate 200 poses per clip
def log_message(self, message):
elapsed_time = time.time() - self.start_time
print("[%.2f s] %s" % (elapsed_time, message))
def __init__(self, fg_stride, fg_number):
'''
Foreground action stride. You can make it random for each object
'''
self.fg_stride = fg_stride
self.fg_total_number = fg_number
self.start_time = time.time()
self.params = io_utils.load_file('body_config', 'SYNTH_HUMAN')
#####################################################################
self.log_message("Setup Blender")
scene = bpy.context.scene
scene = bpy.data.scenes['Scene']
scene.render.engine = 'CYCLES'
scene.cycles.shading_system = True
scene.use_nodes = True
#####################################################################
# import idx info with the format (name, split)
self.log_message("Importing idx info pickle")
seed()
idx_info = load(open("pkl/idx_info.pickle", 'rb'))
# random load foreground indices
idx_info_len = len(idx_info)
fg_indices = [int(idx_info_len*random()) for i in range(idx_info_len)]
self.fg_indices_info = []
for idx in fg_indices:
self.fg_indices_info.append(idx_info[idx])
#######################################################################
self.log_message("Loading the smpl data")
smpl_data_folder = self.params['smpl_data_folder']
smpl_data_filename = self.params['smpl_data_filename']
self.smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))
######################################################################
self.log_message('Set up background information')
self.init_camera()
########################################################################
# the closing option grey, nongrey or all
clothing_option = self.params['clothing_option']
self.log_message('Set up foreground information. clothing: %s' % clothing_option)
genders = {0: 'female', 1: 'male'}
# pick several foreground objects with random gender and clothing
self.clothing_names = []
for idx in range(self.fg_total_number):
gender = choice(genders)
with open( join(smpl_data_folder, 'textures',
'%s_%s.txt' % ( gender, self.fg_indices_info[idx]['use_split'] ) ) ) as f:
txt_paths = f.read().splitlines()
# if using only one source of clothing
if clothing_option == 'nongrey':
txt_paths = [k for k in txt_paths if 'nongrey' in k]
elif clothing_option == 'grey':
txt_paths = [k for k in txt_paths if 'nongrey' not in k]
# random clothing texture
cloth_img_name = choice(txt_paths)
cloth_img_name = join(smpl_data_folder, cloth_img_name)
self.clothing_names.append([gender, cloth_img_name])
######################################################################
self.log_message('Prepare for output directory')
self.init_directories()
# >> don't use random generator before this point <<
# initialize RNG with seeds from sequence id ToDo: not sure whether still useful or not
import hashlib
s = "synth_data:{:d}".format(fg_stride)
seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
self.log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
np.random.seed(seed_number)
output_types = self.params['output_types']
if(output_types['vblur']):
vblur_factor = np.random.normal(0.5, 0.5)
self.params['vblur_factor'] = vblur_factor
#######################################################################
# grab clothing names
self.log_message('Set up blender node')
self.res_paths = self.create_composite_nodes(scene.node_tree)
def init_directories(self):
'''
how the data will be saved
'''
folder_name = 'bodies_output'
tmp_path = self.params['tmp_path']
tmp_path = join(tmp_path, folder_name)
self.tmp_path = tmp_path
print('The blender output will be written to {:s}'.format(self.tmp_path))
if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
os.system('rm -rf %s' % tmp_path)
rgb_vid_filename = folder_name + ".mp4"
create_directory(tmp_path)
# create copy-spher.harm. directory if not exists
sh_dir = join(tmp_path, 'spher_harm')
create_directory(sh_dir)
self.sh_dst = join(sh_dir, 'sh_sphere.osl')
os.system('cp spher_harm/sh.osl {:s}'.format(self.sh_dst))
self.rgb_path = join(tmp_path, 'rgb_video.mp4')
def init_camera(self):
'''
        Currently no camera poses are loaded. You can randomly set the camera trajectory to render multiple images at one time.
Leave to Abhijit as an option TODO.
'''
# load the pickle file generated from background rendering
self.cam_poses = []
self.cam_poses.append(np.eye(4))
# set or load the camera intrinsic here
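        # K is laid out as [fx, fy, cx, cy] in pixels; it is also copied into the 3x3
        # intrinsic matrix self.K used later to back-project random 2D locations.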
K = [528.871, 528.871, 320, 240]
bpy_camera_obj = bpy.data.objects['Camera']
bpy_scene = bpy.context.scene
set_intrinsic(K, bpy_camera_obj, bpy_scene, self.params['height'], self.params['width'])
self.K = np.eye(3)
self.K[0,0] = K[0]
self.K[1,1] = K[1]
self.K[0,2] = K[2]
self.K[1,2] = K[3]
def run(self):
# time logging
scene = bpy.context.scene
output_types = self.params['output_types']
restart_blender_scene()
self.log_message("Initializing scene")
fg_humans, bpy_camera_obj = self.init_scene()
orig_cam_loc = bpy_camera_obj.location.copy()
smpl_DoF = 10 # only pick the top DoF for the creations, maximum 10
# for each clipsize'th frame in the sequence
random_zrot = 0
reset_loc = False
batch_it = 0
random_zrot = 2*np.pi*np.random.rand()
bpy_camera_obj.animation_data_clear()
# set for optical flow
for part, material in fg_humans[0].materials.items():
material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (0, 0)
# set up random light
shading_params = .7 * (2 * np.random.rand(9) - 1)
        shading_params[0] = .5 + .9 * np.random.rand()  # Ambient light (first coeff) needs a minimum value. The rest are uniformly distributed; higher means brighter.
shading_params[1] = -.7 * np.random.rand()
# spherical harmonics material needs a script to be loaded and compiled
spherical_harmonics = []
for mname, material in fg_humans[0].materials.items():
spherical_harmonics.append(material.node_tree.nodes['Script'])
spherical_harmonics[-1].filepath = self.sh_dst
spherical_harmonics[-1].update()
for ish, coeff in enumerate(shading_params):
for sc in spherical_harmonics:
sc.inputs[ish+1].default_value = coeff
''' -------------------- LOOP TO CREATE 3D ANIMATION '''
# create a keyframe animation with pose, translation, blendshapes and camera motion
for frame_idx in range(0, len(self.cam_poses)):
scene.frame_set(frame_idx)
bpy_camera_obj.matrix_world = world_to_blender(Matrix(self.cam_poses[frame_idx]))
bpy_camera_obj.keyframe_insert('location', frame=frame_idx)
bpy_camera_obj.keyframe_insert('rotation_euler', frame=frame_idx)
# apply the translation, pose and shape to the character
body_data_index = frame_idx * self.fg_stride
for idx in range(self.fg_total_number):
pose, trans = fg_humans[idx].apply_Rt_body_shape(body_data_index, frame_idx)
scene.update()
''' ---------------------- LOOP TO RENDER ------------------------- '''
# iterate over the keyframes and render
for frame_idx in range(0, len(self.cam_poses)):
scene.frame_set(frame_idx)
scene.render.use_antialiasing = False
scene.render.filepath = join(self.rgb_path, 'Image%04d.png' % frame_idx)
self.log_message("Rendering frame {:d}".format(frame_idx))
# disable render output
logfile = '/dev/null'
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
# Render
bpy.ops.render.render(write_still=True)
# disable output redirection
os.close(1)
os.dup(old)
os.close(old)
for idx in range(self.fg_total_number):
fg_humans[idx].reset_pose()
def create_composite_nodes(self, tree, img=None):
'''
Create the different passes for blender rendering.
Note: refer to blender render passes for all the relevant information:
https://docs.blender.org/manual/en/dev/render/blender_render/settings/passes.html
We use cycles engine in our renderng setting: https://docs.blender.org/manual/en/dev/render/cycles/settings/scene/render_layers/passes.html
'''
res_paths = {k:join(self.tmp_path, k) for k in self.params['output_types'] if self.params['output_types'][k]}
# clear default nodes
for n in tree.nodes:
tree.nodes.remove(n)
# create node for foreground image
layers = tree.nodes.new('CompositorNodeRLayers')
layers.location = -300, 400
if(self.params['output_types']['vblur']):
# create node for computing vector blur (approximate motion blur)
vblur = tree.nodes.new('CompositorNodeVecBlur')
            vblur.factor = self.params['vblur_factor']
vblur.location = 240, 400
# create node for saving output of vector blurred image
vblur_out = tree.nodes.new('CompositorNodeOutputFile')
vblur_out.format.file_format = 'PNG'
vblur_out.base_path = res_paths['vblur']
vblur_out.location = 460, 460
# create node for the final output
composite_out = tree.nodes.new('CompositorNodeComposite')
composite_out.location = 240, 30
# create node for saving depth
if(self.params['output_types']['depth']):
depth_out = tree.nodes.new('CompositorNodeOutputFile')
depth_out.location = 40, 700
depth_out.format.file_format = 'OPEN_EXR'
depth_out.base_path = res_paths['depth']
# create node for saving normals
if(self.params['output_types']['normal']):
normal_out = tree.nodes.new('CompositorNodeOutputFile')
normal_out.location = 40, 600
normal_out.format.file_format = 'OPEN_EXR'
normal_out.base_path = res_paths['normal']
# create node for saving foreground image
if(self.params['output_types']['fg']):
fg_out = tree.nodes.new('CompositorNodeOutputFile')
fg_out.location = 170, 600
fg_out.format.file_format = 'PNG'
fg_out.base_path = res_paths['fg']
# create node for saving ground truth flow
if(self.params['output_types']['gtflow']):
gtflow_out = tree.nodes.new('CompositorNodeOutputFile')
gtflow_out.location = 40, 500
gtflow_out.format.file_format = 'OPEN_EXR'
gtflow_out.base_path = res_paths['gtflow']
# create node for saving segmentation
if(self.params['output_types']['segm']):
segm_out = tree.nodes.new('CompositorNodeOutputFile')
segm_out.location = 40, 400
segm_out.format.file_format = 'OPEN_EXR'
segm_out.base_path = res_paths['segm']
if(self.params['output_types']['vblur']):
tree.links.new(layers.outputs['Image'], vblur.inputs[0]) # apply vector blur on the bg+fg image,
tree.links.new(layers.outputs['Depth'], vblur.inputs[1]) # using depth,
tree.links.new(layers.outputs['Vector'], vblur.inputs[2]) # and flow.
tree.links.new(vblur.outputs[0], vblur_out.inputs[0]) # save vblurred output
if(self.params['output_types']['fg']):
tree.links.new(layers.outputs['Image'], fg_out.inputs[0]) # save fg
if(self.params['output_types']['depth']):
tree.links.new(layers.outputs['Depth'], depth_out.inputs[0]) # save depth
if(self.params['output_types']['normal']):
tree.links.new(layers.outputs['Normal'], normal_out.inputs[0]) # save normal
if(self.params['output_types']['gtflow']):
tree.links.new(layers.outputs['Vector'], gtflow_out.inputs[0]) # save ground truth flow
if(self.params['output_types']['segm']):
# IndexMA: get access to alpha value per object per mask
# https://docs.blender.org/manual/en/dev/compositing/types/converter/id_mask.html
tree.links.new(layers.outputs['IndexMA'], segm_out.inputs[0]) # save segmentation
return(res_paths)
# creation of the spherical harmonics material, using an OSL script
def create_shader_material(self, tree, sh_path, texture):
# clear default nodes
for n in tree.nodes:
tree.nodes.remove(n)
uv = tree.nodes.new('ShaderNodeTexCoord')
uv.location = -800, 400
uv_xform = tree.nodes.new('ShaderNodeVectorMath')
uv_xform.location = -600, 400
uv_xform.inputs[1].default_value = (0, 0, 1)
uv_xform.operation = 'AVERAGE'
# for pair in self.clothing_names:
cloth_img = bpy.data.images.load(texture)
uv_im = tree.nodes.new('ShaderNodeTexImage')
uv_im.location = -400, 400
uv_im.image = cloth_img
rgb = tree.nodes.new('ShaderNodeRGB')
rgb.location = -400, 200
script = tree.nodes.new('ShaderNodeScript')
script.location = -230, 400
script.mode = 'EXTERNAL'
script.filepath = sh_path #'spher_harm/sh.osl' #using the same file from multiple jobs causes white texture
script.update()
# the emission node makes it independent of the scene lighting
emission = tree.nodes.new('ShaderNodeEmission')
emission.location = -60, 400
mat_out = tree.nodes.new('ShaderNodeOutputMaterial')
mat_out.location = 110, 400
tree.links.new(uv.outputs[2], uv_im.inputs[0])
tree.links.new(uv_im.outputs[0], script.inputs[0])
tree.links.new(script.outputs[0], emission.inputs[0])
tree.links.new(emission.outputs[0], mat_out.inputs[0])
def init_scene(self):
'''init_scene
Initialize the blender scene environment
'''
# TODO: add the scene loading functions
# may also need to add the camera sequence here
# assign the existing spherical harmonics material
#fg_obj.active_material = bpy.data.materials['Material']
# delete the default cube (which held the material)
bpy.ops.object.select_all(action='DESELECT')
bpy.data.objects['Cube'].select = True
bpy.ops.object.delete(use_global=False)
# set camera properties and initial position
bpy.ops.object.select_all(action='DESELECT')
bpy_camera_obj = bpy.data.objects['Camera']
bpy_scene = bpy.context.scene
bpy_scene.objects.active = bpy_camera_obj
bpy_camera_obj.matrix_world = world_to_blender(Matrix(self.cam_poses[0]))
##### set cycles and cuda
cycles_preferences = bpy.context.user_preferences.addons['cycles'].preferences
bpy_scene.render.use_overwrite = False
bpy_scene.render.use_placeholder = True
cycles_preferences.compute_device_type = "CUDA"
bpy_scene.cycles.film_transparent = True
bpy_scene.render.layers["RenderLayer"].use_pass_vector = True
bpy_scene.render.layers["RenderLayer"].use_pass_normal = True
bpy_scene.render.layers['RenderLayer'].use_pass_emit = True
bpy_scene.render.layers['RenderLayer'].use_pass_material_index = True
# set render size
bpy_scene.render.resolution_x = self.params['width']
bpy_scene.render.resolution_y = self.params['height']
bpy_scene.render.resolution_percentage = 100
bpy_scene.render.image_settings.file_format = 'PNG'
# set the render parameters
bpy_scene.render.use_raytrace = False
bpy_scene.render.tile_x = 512
bpy_scene.render.tile_y = 512
bpy_scene.cycles.max_bounces = 8
bpy_scene.cycles.samples = 64
W, H = self.params['width'], self.params['height']
fg_humans = []
for idx in range(self.fg_total_number):
# randomly set the camera pose here, leave to Abhijit to set the foreground poses
np.random.seed()
loc_2d = [np.random.uniform(0, W), np.random.uniform(0, H), 1.0]
distance = np.random.uniform(1, 20)
# Not sure how you want to normalize it.
loc_3d = np.linalg.inv(self.K).dot(loc_2d) * distance
# transform coordinate to blender
loc_3d *= np.array([1, -1, -1])
cam_pose = world_to_blender(Matrix(self.cam_poses[0]))
# set up the material for the object
material = bpy.data.materials.new(name='Material'+str(idx))
# material = bpy.data.materials['Material']
material.use_nodes = True
self.create_shader_material(material.node_tree, self.sh_dst, self.clothing_names[idx][1])
# randomly generate action number
gender = self.clothing_names[idx][0]
fg_human = SMPL_Body(self.smpl_data, gender, cam_pose, material, idx, anchor_location3d=loc_3d)
fg_human.obj.active_material = material
fg_humans.append(fg_human)
return fg_humans, bpy_camera_obj
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Generate synth dataset images')
    parser.add_argument('--fg_stride', type=int, default=0, help='The stride used when loading foreground actions')
parser.add_argument('--fg_number', type=int, default=0, help='The total number of foreground bodies')
args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:])
fg_stride, fg_number = None, None
if args.fg_stride != 0:
fg_stride = args.fg_stride
if args.fg_number != 0:
fg_number = args.fg_number
sg = SceneGeneration(fg_stride, fg_number)
sg.run()
# sys.exit()
| 40.247582
| 167
| 0.621588
|
53683e5667d608ca22d96dd2e0c1d70999f55e9b
| 294
|
py
|
Python
|
jurisdictions/united-states/downloaddate_function.py
|
DiarmuidM/charity-dissolution
|
17ddaa2177ec97661516bb46f773e7fb32178a29
|
[
"MIT"
] | null | null | null |
jurisdictions/united-states/downloaddate_function.py
|
DiarmuidM/charity-dissolution
|
17ddaa2177ec97661516bb46f773e7fb32178a29
|
[
"MIT"
] | null | null | null |
jurisdictions/united-states/downloaddate_function.py
|
DiarmuidM/charity-dissolution
|
17ddaa2177ec97661516bb46f773e7fb32178a29
|
[
"MIT"
] | 2
|
2020-03-26T12:38:59.000Z
|
2020-04-22T19:43:40.000Z
|
from time import sleep
from datetime import datetime
# Define a function that returns the date the script was run, derived from datetime. Call this function from other scripts.
def downloaddate():
ddate = datetime.today().strftime('%Y%m%d')
print(ddate)
return ddate
#downloaddate()
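# Example of calling this helper from another script (the import path is
# hypothetical and depends on where this file sits on the Python path):
#
#   from downloaddate_function import downloaddate
#   ddate = downloaddate()
#   outputfile = 'us_charity_data_' + ddate + '.csv'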
| 29.4
| 129
| 0.768707
|
8dd5c507d7da342fde880fdbf2deb8b0ab3a95ee
| 7,173
|
py
|
Python
|
homeassistant/components/tibber/sensor.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 4
|
2020-07-29T17:47:10.000Z
|
2020-09-16T13:39:13.000Z
|
homeassistant/components/tibber/sensor.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 6
|
2020-11-08T19:40:10.000Z
|
2022-03-01T11:11:07.000Z
|
homeassistant/components/tibber/sensor.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 5
|
2020-03-29T00:29:13.000Z
|
2021-09-06T20:58:40.000Z
|
"""Support for Tibber sensors."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
from homeassistant.const import POWER_WATT
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle, dt as dt_util
from .const import DOMAIN as TIBBER_DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
ICON = "mdi:currency-usd"
ICON_RT = "mdi:power-plug"
SCAN_INTERVAL = timedelta(minutes=1)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
PARALLEL_UPDATES = 0
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Tibber sensor."""
tibber_connection = hass.data.get(TIBBER_DOMAIN)
dev = []
for home in tibber_connection.get_homes(only_active=False):
try:
await home.update_info()
except asyncio.TimeoutError as err:
_LOGGER.error("Timeout connecting to Tibber home: %s ", err)
raise PlatformNotReady()
except aiohttp.ClientError as err:
_LOGGER.error("Error connecting to Tibber home: %s ", err)
raise PlatformNotReady()
if home.has_active_subscription:
dev.append(TibberSensorElPrice(home))
if home.has_real_time_consumption:
dev.append(TibberSensorRT(home))
async_add_entities(dev, True)
class TibberSensor(Entity):
"""Representation of a generic Tibber sensor."""
def __init__(self, tibber_home):
"""Initialize the sensor."""
self._tibber_home = tibber_home
self._last_updated = None
self._state = None
self._is_available = False
self._device_state_attributes = {}
self._name = tibber_home.info["viewer"]["home"]["appNickname"]
if self._name is None:
self._name = tibber_home.info["viewer"]["home"]["address"].get(
"address1", ""
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@property
def model(self):
"""Return the model of the sensor."""
return None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def device_id(self):
"""Return the ID of the physical device this sensor is part of."""
home = self._tibber_home.info["viewer"]["home"]
return home["meteringPointData"]["consumptionEan"]
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(TIBBER_DOMAIN, self.device_id)},
"name": self.name,
"manufacturer": MANUFACTURER,
}
if self.model is not None:
device_info["model"] = self.model
return device_info
class TibberSensorElPrice(TibberSensor):
"""Representation of a Tibber sensor for el price."""
async def async_update(self):
"""Get the latest data and updates the states."""
now = dt_util.now()
if (
self._tibber_home.current_price_total
and self._last_updated
and self._last_updated.hour == now.hour
and self._tibber_home.last_data_timestamp
):
return
if (
not self._tibber_home.last_data_timestamp
or (self._tibber_home.last_data_timestamp - now).total_seconds() / 3600 < 12
or not self._is_available
):
_LOGGER.debug("Asking for new data.")
await self._fetch_data()
res = self._tibber_home.current_price_data()
self._state, price_level, self._last_updated = res
self._device_state_attributes["price_level"] = price_level
attrs = self._tibber_home.current_attributes()
self._device_state_attributes.update(attrs)
self._is_available = self._state is not None
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def name(self):
"""Return the name of the sensor."""
return f"Electricity price {self._name}"
@property
def model(self):
"""Return the model of the sensor."""
return "Price Sensor"
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._tibber_home.price_unit
@property
def unique_id(self):
"""Return a unique ID."""
return self.device_id
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def _fetch_data(self):
try:
await self._tibber_home.update_info()
await self._tibber_home.update_price_info()
except (asyncio.TimeoutError, aiohttp.ClientError):
return
data = self._tibber_home.info["viewer"]["home"]
self._device_state_attributes["app_nickname"] = data["appNickname"]
self._device_state_attributes["grid_company"] = data["meteringPointData"][
"gridCompany"
]
self._device_state_attributes["estimated_annual_consumption"] = data[
"meteringPointData"
]["estimatedAnnualConsumption"]
class TibberSensorRT(TibberSensor):
"""Representation of a Tibber sensor for real time consumption."""
async def async_added_to_hass(self):
"""Start listen for real time data."""
await self._tibber_home.rt_subscribe(self.hass.loop, self._async_callback)
async def _async_callback(self, payload):
"""Handle received data."""
errors = payload.get("errors")
if errors:
_LOGGER.error(errors[0])
return
data = payload.get("data")
if data is None:
return
live_measurement = data.get("liveMeasurement")
if live_measurement is None:
return
self._state = live_measurement.pop("power", None)
for key, value in live_measurement.items():
if value is None:
continue
self._device_state_attributes[key] = value
self.async_write_ha_state()
@property
def available(self):
"""Return True if entity is available."""
return self._tibber_home.rt_subscription_running
@property
def model(self):
"""Return the model of the sensor."""
return "Tibber Pulse"
@property
def name(self):
"""Return the name of the sensor."""
return f"Real time consumption {self._name}"
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON_RT
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return POWER_WATT
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self.device_id}_rt_consumption"
| 30.523404
| 88
| 0.632511
|
bc67a947ec5d3b7bd326a55d3b9cf1e39817f1b7
| 15,411
|
py
|
Python
|
dependencies/rdflib/namespace.py
|
situx/geowebannotation
|
1c4f27226913ae45f8943ee5fb2cabed494f3273
|
[
"MIT"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
dependencies/rdflib/namespace.py
|
situx/geowebannotation
|
1c4f27226913ae45f8943ee5fb2cabed494f3273
|
[
"MIT"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
dependencies/rdflib/namespace.py
|
situx/geowebannotation
|
1c4f27226913ae45f8943ee5fb2cabed494f3273
|
[
"MIT"
] | 4
|
2021-06-10T18:54:16.000Z
|
2021-10-25T00:42:22.000Z
|
from rdflib.py3compat import format_doctest_out
__doc__ = format_doctest_out("""
===================
Namespace Utilities
===================
RDFLib provides mechanisms for managing Namespaces.
In particular, there is a :class:`~rdflib.namespace.Namespace` class
that takes as its argument the base URI of the namespace.
.. code-block:: pycon
>>> from rdflib.namespace import Namespace
>>> owl = Namespace('http://www.w3.org/2002/07/owl#')
Fully qualified URIs in the namespace can be constructed either by attribute
or by dictionary access on Namespace instances:
.. code-block:: pycon
>>> owl.seeAlso
rdflib.term.URIRef(%(u)s'http://www.w3.org/2002/07/owl#seeAlso')
>>> owl['seeAlso']
rdflib.term.URIRef(%(u)s'http://www.w3.org/2002/07/owl#seeAlso')
Automatic handling of unknown predicates
-----------------------------------------
As a programming convenience, a namespace binding is automatically
created when :class:`rdflib.term.URIRef` predicates are added to the graph.
Importable namespaces
-----------------------
The following namespaces are available by directly importing from rdflib:
* RDF
* RDFS
* OWL
* XSD
* FOAF
* SKOS
* DOAP
* DC
* DCTERMS
* VOID
.. code-block:: pycon
>>> from rdflib import OWL
>>> OWL.seeAlso
rdflib.term.URIRef(%(u)s'http://www.w3.org/2002/07/owl#seeAlso')
""")
import logging
logger = logging.getLogger(__name__)
import os
from urllib.parse import urljoin, urldefrag
from urllib.request import pathname2url
from rdflib.term import URIRef, Variable, _XSD_PFX, _is_valid_uri
__all__ = [
'is_ncname', 'split_uri', 'Namespace',
'ClosedNamespace', 'NamespaceManager',
'XMLNS', 'RDF', 'RDFS', 'XSD', 'OWL',
'SKOS', 'DOAP', 'FOAF', 'DC', 'DCTERMS', 'VOID']
class Namespace(str):
__doc__ = format_doctest_out("""
Utility class for quickly generating URIRefs with a common prefix
>>> from rdflib import Namespace
>>> n = Namespace("http://example.org/")
>>> n.Person # as attribute
rdflib.term.URIRef(%(u)s'http://example.org/Person')
>>> n['first-name'] # as item - for things that are not valid python identifiers
rdflib.term.URIRef(%(u)s'http://example.org/first-name')
""")
def __new__(cls, value):
try:
rt = str.__new__(cls, value)
except UnicodeDecodeError:
rt = str.__new__(cls, value, 'utf-8')
return rt
@property
def title(self):
return URIRef(self + 'title')
def term(self, name):
# need to handle slices explicitly because of __getitem__ override
return URIRef(self + (name if isinstance(name, str) else ''))
def __getitem__(self, key, default=None):
return self.term(key)
def __getattr__(self, name):
if name.startswith("__"): # ignore any special Python names!
raise AttributeError
else:
return self.term(name)
def __repr__(self):
return "Namespace(%s)"%str.__repr__(self)
class URIPattern(str):
__doc__ = format_doctest_out("""
Utility class for creating URIs according to some pattern
This supports either new style formatting with .format
or old-style with %% operator
>>> u=URIPattern("http://example.org/%%s/%%d/resource")
>>> u%%('books', 12345)
rdflib.term.URIRef(%(u)s'http://example.org/books/12345/resource')
""")
def __new__(cls, value):
try:
rt = str.__new__(cls, value)
except UnicodeDecodeError:
rt = str.__new__(cls, value, 'utf-8')
return rt
def __mod__(self, *args, **kwargs):
return URIRef(str(self).__mod__(*args, **kwargs))
def format(self, *args, **kwargs):
return URIRef(str.format(self, *args, **kwargs))
def __repr__(self):
return "URIPattern(%r)"%str.__repr__(self)
class ClosedNamespace(object):
"""
A namespace with a closed list of members
Trying to create terms not listed is an error
"""
def __init__(self, uri, terms):
self.uri = uri
self.__uris = {}
for t in terms:
self.__uris[t] = URIRef(self.uri + t)
def term(self, name):
uri = self.__uris.get(name)
if uri is None:
raise Exception(
"term '%s' not in namespace '%s'" % (name, self.uri))
else:
return uri
def __getitem__(self, key, default=None):
return self.term(key)
def __getattr__(self, name):
if name.startswith("__"): # ignore any special Python names!
raise AttributeError
else:
return self.term(name)
def __str__(self):
return str(self.uri)
def __repr__(self):
return """rdf.namespace.ClosedNamespace('%s')""" % str(self.uri)
class _RDFNamespace(ClosedNamespace):
"""
Closed namespace for RDF terms
"""
def __init__(self):
super(_RDFNamespace, self).__init__(
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
terms=[
# Syntax Names
"RDF", "Description", "ID", "about", "parseType",
"resource", "li", "nodeID", "datatype",
# RDF Classes
"Seq", "Bag", "Alt", "Statement", "Property",
"List", "PlainLiteral",
# RDF Properties
"subject", "predicate", "object", "type",
"value", "first", "rest",
# and _n where n is a non-negative integer
# RDF Resources
"nil",
# Added in RDF 1.1
"XMLLiteral", "HTML", "langString"]
)
def term(self, name):
try:
i = int(name)
return URIRef("%s_%s" % (self.uri, i))
except ValueError:
return super(_RDFNamespace, self).term(name)
RDF = _RDFNamespace()
RDFS = ClosedNamespace(
uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
terms=[
"Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label",
"domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container",
"ContainerMembershipProperty", "member", "Datatype"]
)
OWL = Namespace('http://www.w3.org/2002/07/owl#')
XSD = Namespace(_XSD_PFX)
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
DOAP = Namespace('http://usefulinc.com/ns/doap#')
FOAF = Namespace('http://xmlns.com/foaf/0.1/')
DC = Namespace('http://purl.org/dc/elements/1.1/')
DCTERMS = Namespace('http://purl.org/dc/terms/')
VOID = Namespace('http://rdfs.org/ns/void#')
class NamespaceManager(object):
"""
Class for managing prefix => namespace mappings
Sample usage from FuXi ...
.. code-block:: python
ruleStore = N3RuleStore(additionalBuiltins=additionalBuiltins)
nsMgr = NamespaceManager(Graph(ruleStore))
ruleGraph = Graph(ruleStore,namespace_manager=nsMgr)
and ...
.. code-block:: pycon
>>> import rdflib
>>> from rdflib import Graph
>>> from rdflib.namespace import Namespace, NamespaceManager
>>> exNs = Namespace('http://example.com/')
>>> namespace_manager = NamespaceManager(Graph())
>>> namespace_manager.bind('ex', exNs, override=False)
>>> g = Graph()
>>> g.namespace_manager = namespace_manager
>>> all_ns = [n for n in g.namespace_manager.namespaces()]
>>> assert ('ex', rdflib.term.URIRef('http://example.com/')) in all_ns
>>>
"""
def __init__(self, graph):
self.graph = graph
self.__cache = {}
self.__log = None
self.bind("xml", "http://www.w3.org/XML/1998/namespace")
self.bind("rdf", RDF)
self.bind("rdfs", RDFS)
self.bind("xsd", XSD)
def reset(self):
self.__cache = {}
def __get_store(self):
return self.graph.store
store = property(__get_store)
def qname(self, uri):
prefix, namespace, name = self.compute_qname(uri)
if prefix == "":
return name
else:
return ":".join((prefix, name))
def normalizeUri(self, rdfTerm):
"""
Takes an RDF Term and 'normalizes' it into a QName (using the
registered prefix) or (unlike compute_qname) the Notation 3
form for URIs: <...URI...>
"""
try:
namespace, name = split_uri(rdfTerm)
namespace = URIRef(str(namespace))
except:
if isinstance(rdfTerm, Variable):
return "?%s" % rdfTerm
else:
return "<%s>" % rdfTerm
prefix = self.store.prefix(namespace)
if prefix is None and isinstance(rdfTerm, Variable):
return "?%s" % rdfTerm
elif prefix is None:
return "<%s>" % rdfTerm
else:
qNameParts = self.compute_qname(rdfTerm)
return ':'.join([qNameParts[0], qNameParts[-1]])
def compute_qname(self, uri, generate=True):
if not _is_valid_uri(uri):
raise Exception('"%s" does not look like a valid URI, I cannot serialize this. Perhaps you wanted to urlencode it?'%uri)
if not uri in self.__cache:
namespace, name = split_uri(uri)
namespace = URIRef(namespace)
prefix = self.store.prefix(namespace)
if prefix is None:
if not generate:
raise Exception(
"No known prefix for %s and generate=False")
num = 1
while 1:
prefix = "ns%s" % num
if not self.store.namespace(prefix):
break
num += 1
self.bind(prefix, namespace)
self.__cache[uri] = (prefix, namespace, name)
return self.__cache[uri]
def bind(self, prefix, namespace, override=True, replace=False):
"""bind a given namespace to the prefix
if override, rebind, even if the given namespace is already
bound to another prefix.
if replace, replace any existing prefix with the new namespace
"""
namespace = URIRef(str(namespace))
# When documenting explain that override only applies in what cases
if prefix is None:
prefix = ''
bound_namespace = self.store.namespace(prefix)
# Check if the bound_namespace contains a URI
# and if so convert it into a URIRef for comparison
# This is to prevent duplicate namespaces with the
# same URI
if bound_namespace:
bound_namespace = URIRef(bound_namespace)
if bound_namespace and bound_namespace != namespace:
if replace:
self.store.bind(prefix, namespace)
return
# prefix already in use for different namespace
#
# append number to end of prefix until we find one
# that's not in use.
if not prefix:
prefix = "default"
num = 1
while 1:
new_prefix = "%s%s" % (prefix, num)
tnamespace = self.store.namespace(new_prefix)
if tnamespace and namespace == URIRef(tnamespace):
# the prefix is already bound to the correct
# namespace
return
if not self.store.namespace(new_prefix):
break
num += 1
self.store.bind(new_prefix, namespace)
else:
bound_prefix = self.store.prefix(namespace)
if bound_prefix is None:
self.store.bind(prefix, namespace)
elif bound_prefix == prefix:
pass # already bound
else:
if override or bound_prefix.startswith("_"): # or a generated
# prefix
self.store.bind(prefix, namespace)
def namespaces(self):
for prefix, namespace in self.store.namespaces():
namespace = URIRef(namespace)
yield prefix, namespace
def absolutize(self, uri, defrag=1):
base = urljoin("file:", pathname2url(os.getcwd()))
result = urljoin("%s/" % base, uri, allow_fragments=not defrag)
if defrag:
result = urldefrag(result)[0]
if not defrag:
if uri and uri[-1] == "#" and result[-1] != "#":
result = "%s#" % result
return URIRef(result)
# From: http://www.w3.org/TR/REC-xml#NT-CombiningChar
#
# * Name start characters must have one of the categories Ll, Lu, Lo,
# Lt, Nl.
#
# * Name characters other than Name-start characters must have one of
# the categories Mc, Me, Mn, Lm, or Nd.
#
# * Characters in the compatibility area (i.e. with character code
# greater than #xF900 and less than #xFFFE) are not allowed in XML
# names.
#
# * Characters which have a font or compatibility decomposition
# (i.e. those with a "compatibility formatting tag" in field 5 of the
# database -- marked by field 5 beginning with a "<") are not allowed.
#
# * The following characters are treated as name-start characters rather
# than name characters, because the property file classifies them as
# Alphabetic: [#x02BB-#x02C1], #x0559, #x06E5, #x06E6.
#
# * Characters #x20DD-#x20E0 are excluded (in accordance with Unicode
# 2.0, section 5.14).
#
# * Character #x00B7 is classified as an extender, because the property
# list so identifies it.
#
# * Character #x0387 is added as a name character, because #x00B7 is its
# canonical equivalent.
#
# * Characters ':' and '_' are allowed as name-start characters.
#
# * Characters '-' and '.' are allowed as name characters.
from unicodedata import category
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = ["\u00B7", "\u0387", "-", ".", "_"]
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# [4] NCName ::= (Letter | '_') (NCNameChar)* /* An XML Name, minus
# the ":" */
# [5] NCNameChar ::= Letter | Digit | '.' | '-' | '_' | CombiningChar
# | Extender
def is_ncname(name):
first = name[0]
if first == "_" or category(first) in NAME_START_CATEGORIES:
for i in range(1, len(name)):
c = name[i]
if not category(c) in NAME_CATEGORIES:
if c in ALLOWED_NAME_CHARS:
continue
return 0
# if in compatibility area
# if decomposition(c)!='':
# return 0
return 1
else:
return 0
XMLNS = "http://www.w3.org/XML/1998/namespace"
def split_uri(uri):
if uri.startswith(XMLNS):
return (XMLNS, uri.split(XMLNS)[1])
length = len(uri)
for i in range(0, length):
c = uri[-i - 1]
if not category(c) in NAME_CATEGORIES:
if c in ALLOWED_NAME_CHARS:
continue
for j in range(-1 - i, length):
if category(uri[j]) in NAME_START_CATEGORIES or uri[j] == "_":
ns = uri[:j]
if not ns:
break
ln = uri[j:]
return (ns, ln)
break
raise Exception("Can't split '%s'" % uri)
| 30.638171
| 132
| 0.577445
|
2ad490768db3777f6472de436030686eaa44944b
| 2,583
|
py
|
Python
|
ote_sdk/ote_sdk/usecases/exportable_code/visualization.py
|
pfinashx/training_extensions
|
6b9d085900c0f8c0742a477ebf04f55987edd241
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/usecases/exportable_code/visualization.py
|
pfinashx/training_extensions
|
6b9d085900c0f8c0742a477ebf04f55987edd241
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/usecases/exportable_code/visualization.py
|
pfinashx/training_extensions
|
6b9d085900c0f8c0742a477ebf04f55987edd241
|
[
"Apache-2.0"
] | null | null | null |
# INTEL CONFIDENTIAL
#
# Copyright (C) 2021 Intel Corporation
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were provided to
# you ("License"). Unless the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the related documents
# without Intel's prior written permission.
#
# This software and the related documents are provided as is,
# with no express or implied warranties, other than those that are expressly stated
# in the License.
from typing import Optional
import cv2
import numpy as np
from ote_sdk.entities.annotation import AnnotationSceneEntity
from ote_sdk.usecases.exportable_code.streamer.streamer import MediaType
from ote_sdk.utils.shape_drawer import ShapeDrawer
class Visualizer:
"""
Visualize the predicted output by drawing the annotations on the input image.
:example:
>>> predictions = inference_model.predict(frame)
>>> annotation = prediction_converter.convert_to_annotation(predictions)
>>> output = visualizer.draw(frame, annotation)
>>> visualizer.show(output)
"""
def __init__(
self,
media_type: Optional[MediaType] = None,
window_name: Optional[str] = None,
show_count: bool = False,
is_one_label: bool = False,
delay: Optional[int] = None,
):
self.window_name = "Window" if window_name is None else window_name
self.shape_drawer = ShapeDrawer(show_count, is_one_label)
if delay is None:
    delay = 0 if (media_type == "" or media_type == MediaType.image) else 1
self.delay = delay
def draw(self, image: np.ndarray, annotation: AnnotationSceneEntity) -> np.ndarray:
"""
Draw annotations on the image
:param image: Input image
:param annotation: Annotations to be drawn on the input image
:return: Output image with annotations.
"""
# TODO: Conversion is to be made in `show` not here.
# This requires ShapeDrawer.draw to be updated
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return self.shape_drawer.draw(image, annotation, labels=[])
def show(self, image: np.ndarray) -> None:
# TODO: RGB2BGR Conversion is to be made here.
# This requires ShapeDrawer.draw to be updated
cv2.imshow(self.window_name, image)
def is_quit(self) -> bool:
return ord("q") == cv2.waitKey(self.delay)
| 36.380282
| 88
| 0.69338
|
98ff682c19a81c98f8897b0a20c2e6e30bcf587e
| 670
|
py
|
Python
|
api/util/escape.py
|
edisonlz/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | 285
|
2019-12-23T09:50:21.000Z
|
2021-12-08T09:08:49.000Z
|
api/util/escape.py
|
jeckun/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | null | null | null |
api/util/escape.py
|
jeckun/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | 9
|
2019-12-23T12:59:25.000Z
|
2022-03-15T05:12:11.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
import simplejson as __json
except ImportError:
import json as __json
def to_str(value):
"""unicode->str"""
if isinstance(value, (str, type(None))):
return value
assert isinstance(value, unicode)
return value.encode("utf-8")
def to_unicode(value):
"""str->unicode"""
if isinstance(value, (unicode, type(None))):
return value
assert isinstance(value, str)
return value.decode("utf-8")
def load_json(value):
"""json->obj"""
return __json.loads(value)
def dump_json(value):
"""obj->json"""
return __json.dumps(value, separators=(',', ':'))
| 19.142857
| 53
| 0.622388
|
b20bd4d5269618989c3d8751b85a713941a0ccdb
| 1,415
|
py
|
Python
|
solum/tests/api/camp/test_platform_endpoints.py
|
ed-/solum
|
2d23edb7fb53e1bdeff510710824658575d166c4
|
[
"Apache-2.0"
] | null | null | null |
solum/tests/api/camp/test_platform_endpoints.py
|
ed-/solum
|
2d23edb7fb53e1bdeff510710824658575d166c4
|
[
"Apache-2.0"
] | null | null | null |
solum/tests/api/camp/test_platform_endpoints.py
|
ed-/solum
|
2d23edb7fb53e1bdeff510710824658575d166c4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.api.controllers.camp import platform_endpoints
from solum import objects
from solum.tests import base
from solum.tests import fakes
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
class TestPlatformEndpoints(base.BaseTestCase):
def setUp(self):
super(TestPlatformEndpoints, self).setUp()
objects.load()
def test_platform_endpoints_get(self, resp_mock, request_mock):
fake_platform_endpoints = fakes.FakePlatformEndpoints()
cont = platform_endpoints.Controller()
resp = cont.index()
self.assertEqual(200, resp_mock.status)
self.assertEqual(fake_platform_endpoints.name, resp['result'].name)
self.assertEqual(fake_platform_endpoints.type, resp['result'].type)
| 38.243243
| 75
| 0.75053
|
225321c0bd21e3f7235ca52e56130ee6921043ed
| 822
|
py
|
Python
|
src/z3c/celery/logging.py
|
agroszer/z3c.celery
|
b64504c78a4b8d60f90208970167c3932c0aa919
|
[
"BSD-3-Clause"
] | null | null | null |
src/z3c/celery/logging.py
|
agroszer/z3c.celery
|
b64504c78a4b8d60f90208970167c3932c0aa919
|
[
"BSD-3-Clause"
] | null | null | null |
src/z3c/celery/logging.py
|
agroszer/z3c.celery
|
b64504c78a4b8d60f90208970167c3932c0aa919
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from celery._state import get_current_task
import zope.exceptions.log
class TaskFormatter(zope.exceptions.log.Formatter):
"""Provides `task_id` and `task_name` variables for the log format.
We want to have a general formatter so we want to get rid of '???' which
are rendered by celery.app.log.TaskFormatter. Also we inherit from
zope.exceptions to support `__traceback_info__`.
"""
def format(self, record):
task = get_current_task()
if task:
record.__dict__.update(
task_id=task.request.id,
task_name=task.name)
else:
record.__dict__.setdefault('task_name', '')
record.__dict__.setdefault('task_id', '')
return super(TaskFormatter, self).format(record)
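# Illustrative wiring (not part of this module): attach the formatter so the
# extra variables can be referenced from an ordinary logging format string.
# The logger name below is just an example.
#
#   import logging
#   handler = logging.StreamHandler()
#   handler.setFormatter(TaskFormatter(
#       '%(asctime)s %(task_name)s[%(task_id)s] %(levelname)s %(message)s'))
#   logging.getLogger('celery.task').addHandler(handler)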
| 34.25
| 76
| 0.667883
|
835430777bb2dc5a56129928167fbec1d8ed13be
| 4,927
|
py
|
Python
|
clickTheBall.py
|
tommccallum/python-simple-games
|
f3a4e02c14ee200f7d20f816367668315fed42f6
|
[
"MIT"
] | null | null | null |
clickTheBall.py
|
tommccallum/python-simple-games
|
f3a4e02c14ee200f7d20f816367668315fed42f6
|
[
"MIT"
] | null | null | null |
clickTheBall.py
|
tommccallum/python-simple-games
|
f3a4e02c14ee200f7d20f816367668315fed42f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
## python script for a simple game
## where the person has to click on a round circle
## when they do their score goes up
## every n points the speed of movement increases
import pygame
import sys
import math
import random
import time
timeAllowed=10 ## seconds
level=1
width=640
height=480
initialRadius=20
radius = initialRadius
thickness = 0 ## fills if thickness is zero
color = ( 255, 0, 0 )
startx = int(round(width / 2))
starty = int(round(height / 2))
score = 0
win_points = 1
step = 5 # how many points before we change speed
speed = 1 # how fast do we move
direction_x = random.choice([-1,1])
direction_y = random.choice([-1,1])
initialSleep=50000
sleep=initialSleep
cnt=0
startTime=time.perf_counter()  # time.clock() was removed in Python 3.8
state=True
COLORS = [(139, 0, 0),
(0, 100, 0),
(0, 0, 139)]
## initalise location
x = startx
y = starty
def random_color():
return random.choice(COLORS)
def onClick( pos, x, y, radius ):
"This function detects in pos is in the circle (x,y,r)"
d = pow(radius,2)
n = math.pow(pos[0] - x,2) + math.pow(pos[1] - y,2)
if n <= 0:
return False
if n >= 0 and n <= d:
return True
return False
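# Example: onClick((110, 100), 100, 100, 20) -> True, because the squared
# distance (10**2 + 0**2 = 100) is within the squared radius (20**2 = 400).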
def draw( screen, color, x, y, radius, thickness, score, level, timeLeft ):
screen.fill((255,255,255)) ## clear screen to background color
## draw shapes
pygame.draw.circle( screen, color, (x,y), radius, thickness )
## display score text using a given font
f = pygame.font.SysFont("monospace",15)
label = f.render("Level: "+str(level)+" Score: "+str(score)+" Time Left: "+str(timeLeft), 1, (0,0,0))
screen.blit(label, (10,10))
pygame.display.update()
pygame.init()
## screen setup
## (0,0) is upper left hand corner
screen = pygame.display.set_mode( (width, height), 0, 32 )
## draw starting position
timeLeft = timeAllowed - int(round(time.perf_counter() - startTime))
draw( screen, color, x, y, radius, thickness, score, level, timeLeft )
## enter our event loop looking for the user to click the circle
while True:
if state == True:
ev = pygame.event.get()
for event in ev:
if event.type == pygame.MOUSEBUTTONUP:
pos = pygame.mouse.get_pos()
hit = onClick( pos, x, y, radius )
if hit == True:
print("HIT!")
score += win_points
color = random_color()
direction_x = random.choice([-1,1])
direction_y = random.choice([-1,1])
x = random.randint(radius,width-radius)
y = random.randint(radius,height-radius)
radius=random.randint(20,50)
if score % step == 0:
sleep = int(round(sleep * 0.8))
level = level + 1
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
sys.exit("Bye!")
if event.key == pygame.K_n:
## quickly skip to next level
sleep = int(round(sleep * 0.8))
level = level + 1
if cnt % sleep == 0:
x = x + ( speed * direction_x )
y = y + ( speed * direction_y )
if x > width-radius:
direction_x = -1 * direction_x
if x < radius:
direction_x = -1 * direction_x
if y > height-radius:
direction_y = -1 * direction_y
if y < radius:
direction_y = -1 * direction_y
timeLeft = timeAllowed - int(round(time.perf_counter() - startTime))
draw( screen, color, x, y, radius, thickness, score, level, timeLeft )
cnt=cnt+1
if time.perf_counter() - startTime > timeAllowed:
screen.fill((0,255,0)) ## clear screen to background color
f = pygame.font.SysFont("arial bold",45)
s = "Well Done! You scored "+str(score)
d = f.size(s)
label = f.render(s, 1, (255,255,0))
screen.blit(label, (width/2 - d[0]/2,height/2 - d[1]/2))
pygame.display.update()
state=False
else:
# waiting for player to restart
ev = pygame.event.get()
for event in ev:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
state=True
startTime=time.perf_counter()
score=0
cnt=0
sleep=initialSleep
direction_x = random.choice([-1,1])
direction_y = random.choice([-1,1])
level=1
radius = initialRadius
if event.key == pygame.K_q:
pygame.quit()
sys.exit("Bye!")
| 31.787097
| 105
| 0.527907
|
6c99c8379c86057607da638af82b3463e170302a
| 1,043
|
py
|
Python
|
oscarBuild/ContainerLauncher.py
|
PatrickKutch/minioncontainers
|
018a9644199b4ec00348065519674eb775cc1d92
|
[
"Apache-2.0"
] | null | null | null |
oscarBuild/ContainerLauncher.py
|
PatrickKutch/minioncontainers
|
018a9644199b4ec00348065519674eb775cc1d92
|
[
"Apache-2.0"
] | null | null | null |
oscarBuild/ContainerLauncher.py
|
PatrickKutch/minioncontainers
|
018a9644199b4ec00348065519674eb775cc1d92
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import subprocess
import os
def main():
parser = argparse.ArgumentParser(description='Minion Launcher.')
parser.add_argument("-t","--target",help='specifies the IP:PORT of target Oscar or Marvin',required=True,type=str)
parser.add_argument("-p","--port",help="the UDP port to listen on default is 3232",type=int,default=3232)
parser.add_argument("-i","--id",help="the Oscar ID, default is 'ContainerizedOscar'",type=str,default="ContainerizedOscar")
args = parser.parse_args()
parts=args.target.split(":")
marvinIP = parts[0]
marvinPort = parts[1]
int(parts[1]) # port should be an int
envVars=dict(os.environ)
envVars["MarvinIP"]=marvinIP
envVars["MarvinPort"]= marvinPort
envVars["ListenPort"] = str(args.port)
oscarProc = subprocess.Popen(["python", "Oscar.py", "-i", "OscarConfig.xml"],env=envVars)
oscarProc.wait()
if __name__ == "__main__":
try:
main()
except Exception as ex:
print("Uncaught app error: " + str(ex))
| 30.676471
| 127
| 0.667306
|
169b2eeed2adb366abf620fa777ed2660e7b3b9d
| 5,700
|
py
|
Python
|
tests/test_pipeline3.py
|
gvwilson/nitinat
|
48d95e27301af955c09d3757507f8fd66ce204d7
|
[
"MIT"
] | null | null | null |
tests/test_pipeline3.py
|
gvwilson/nitinat
|
48d95e27301af955c09d3757507f8fd66ce204d7
|
[
"MIT"
] | 14
|
2022-03-22T22:48:14.000Z
|
2022-03-31T11:18:53.000Z
|
tests/test_pipeline3.py
|
gvwilson/nitinat
|
48d95e27301af955c09d3757507f8fd66ce204d7
|
[
"MIT"
] | null | null | null |
"""Test provenance pipeline."""
from datetime import datetime
from textwrap import dedent
from unittest.mock import patch
import pandas as pd
import yaml
from pytest import fixture
from nitinat.pipeline3 import pipeline
READ_CONFIG = "nitinat.pipeline3._read_config"
NOW = "nitinat.pipeline3._now"
READER_SHORTENED_LEN = 3
def simple_df():
return pd.DataFrame(
{
"red": [0.1, 0.2, 0.3],
"green": [0.4, 0.5, 0.6],
"blue": [0.7, 0.8, 0.9],
"orange": [1.1, 1.2, 1.3],
"yellow": [1.4, 1.5, 1.6],
"purple": [1.7, 1.8, 1.9],
}
)
def times(num_stages):
return [datetime(2022, 1, 1, 1, 1, i) for i in range(2 * num_stages)]
def first(df):
"""To demonstrate pipeline operation."""
return df.iloc[[0]]
def head(df, num, debug=False):
"""To demonstrate pipeline operation."""
return df.head(1) if debug else df.head(num)
def tail(df, num, debug=False):
"""To demonstrate pipeline operation."""
return df.tail(1) if debug else df.tail(num)
def reader(debug=False):
"""To demonstrate pipeline operation."""
df = simple_df()
return df.head(READER_SHORTENED_LEN) if debug else df
def failure(df, debug=False):
"""To demonstrate pipeline operation."""
raise ValueError("failure message")
@fixture
def available():
return {f.__name__: f for f in [first, head, tail, reader, failure]}
def test_pipeline3_empty_returns_nothing(available):
with patch(READ_CONFIG, return_value=[]):
provenance, result = pipeline("test.yml", available)
assert result is None
assert provenance == []
def test_pipeline3_single_stage_no_parameters_no_overall(available):
config = [{"function": "reader"}]
with (
patch(READ_CONFIG, return_value=config),
patch(NOW, side_effect=times(1)),
):
expected = simple_df()
provenance, result = pipeline("test.yml", available)
assert result.equals(expected)
assert provenance == [
{"exc": None, "elapsed": 1.0, "function": "reader"}
]
def test_pipeline3_two_stages_with_parameters_no_overall(available):
config = [{"function": "reader"}, {"function": "head", "num": 2}]
with (
patch(READ_CONFIG, return_value=config),
patch(NOW, side_effect=times(2)),
):
provenance, result = pipeline("test.yml", available)
assert len(result) == 2
assert result.equals(simple_df().iloc[[0, 1]])
assert provenance == [
{"exc": None, "elapsed": 1.0, "function": "reader"},
{"exc": None, "elapsed": 1.0, "function": "head", "num": 2},
]
def test_pipeline3_single_stage_with_debugging(available):
config = [{"function": "reader", "debug": True}]
with (
patch(READ_CONFIG, return_value=config),
patch(NOW, side_effect=times(1)),
):
provenance, result = pipeline("test.yml", available)
assert len(result) == READER_SHORTENED_LEN
assert provenance == [
{"exc": None, "elapsed": 1.0, "function": "reader", "debug": True}
]
def test_pipeline3_single_stage_with_overall_debugging(available):
config = [{"overall": {"debug": True}}, {"function": "reader"}]
with (
patch(READ_CONFIG, return_value=config),
patch(NOW, side_effect=times(1)),
):
provenance, result = pipeline("test.yml", available)
assert len(result) == READER_SHORTENED_LEN
assert provenance == [
{"exc": None, "elapsed": 1.0, "function": "reader", "debug": True}
]
def test_pipeline3_two_stage_with_overall_debugging(available):
data_len = len(simple_df())
config = [
{"overall": {"debug": True}},
{"function": "reader"},
{"function": "head", "num": data_len},
]
with (
patch(READ_CONFIG, return_value=config),
patch(NOW, side_effect=times(2)),
):
provenance, result = pipeline("test.yml", available)
assert len(result) == 1
assert provenance == [
{"exc": None, "elapsed": 1.0, "function": "reader", "debug": True},
{
"exc": None,
"elapsed": 1.0,
"function": "head",
"num": data_len,
"debug": True,
},
]
def test_pipeline3_two_stage_with_yaml_text(available):
config = dedent(
"""\
- overall:
debug: true
- function: reader
- function: head
num: 1000
"""
)
config = yaml.safe_load(config)
with (
patch(READ_CONFIG, return_value=config),
patch(NOW, side_effect=times(2)),
):
provenance, result = pipeline("test.yml", available)
assert len(result) == 1
assert provenance == [
{"exc": None, "elapsed": 1.0, "function": "reader", "debug": True},
{
"exc": None,
"elapsed": 1.0,
"function": "head",
"num": 1000,
"debug": True,
},
]
def test_pipeline3_two_stages_with_failure(available):
config = [{"function": "reader"}, {"function": "failure"}]
with (
patch(READ_CONFIG, return_value=config),
patch(NOW, side_effect=times(2)),
):
provenance, result = pipeline("test.yml", available)
assert result is None
assert provenance == [
{"exc": None, "elapsed": 1.0, "function": "reader"},
{
"exc": "ValueError('failure message')",
"elapsed": 1.0,
"function": "failure",
},
]
| 28.787879
| 79
| 0.569123
|
de284f9dc9535c3eed51076b4eeae5f734ada46d
| 2,630
|
py
|
Python
|
src/jama/api_caller.py
|
taishengG/jama-slack-integration
|
746b7186ceaf955ca81e9e0ad4862141ce35eb8d
|
[
"MIT"
] | 1
|
2019-07-17T22:39:21.000Z
|
2019-07-17T22:39:21.000Z
|
src/jama/api_caller.py
|
taishengG/jama-slack-integration
|
746b7186ceaf955ca81e9e0ad4862141ce35eb8d
|
[
"MIT"
] | 4
|
2018-11-16T05:56:06.000Z
|
2018-11-29T05:07:52.000Z
|
src/jama/api_caller.py
|
taishengG/jama-slack-integration
|
746b7186ceaf955ca81e9e0ad4862141ce35eb8d
|
[
"MIT"
] | 6
|
2018-11-08T03:49:28.000Z
|
2019-04-29T19:53:25.000Z
|
from jama import oauth
import os
import json
import requests
def is_using_oauth():
return "DB_HOST" in os.environ
def post(team_id, user_id, url, payload):
"""
Send a POST request to the given Jama URL and return the parsed JSON response.
Args:
team_id (string): Slack team ID
user_id (string): Slack User ID
url (string): Jama API url
payload (dict): The payload dictionary to send to the url
Returns:
(dict): Data returned by Jama
(None): If a Jama OAuth token cannot be obtained
"""
if is_using_oauth():
token = oauth.get_access_token(team_id, user_id)
if token is None:
return None
header_list = {
"content-type": "application/json",
"Authorization": "Bearer " + token
}
response = requests.post(url, json=payload, headers=header_list)
else:
response = requests.post(url, json=payload, auth=(os.environ["JAMA_USER"], os.environ["JAMA_PASS"]))
return json.loads(response.text)
def get(team_id, user_id, url):
"""
Send a GET request to the given Jama URL and return the parsed JSON response.
Args:
team_id (string): Slack team ID
user_id (string): Slack User ID
url (string): Jama API url
Returns:
(dict): Data returned by Jama
(None): If a Jama OAuth token cannot be obtained
"""
if is_using_oauth():
token = oauth.get_access_token(team_id, user_id)
if token is None:
return None
header_list = {
"content-type": "application/json",
"Authorization": "Bearer " + token
}
response = requests.get(url, headers=header_list)
else:
response = requests.get(url, auth=(os.environ["JAMA_USER"], os.environ["JAMA_PASS"]))
return json.loads(response.text)
def put_file(team_id, user_id, url, file_data):
"""
Send a PUT request that uploads a file to the given Jama URL.
Args:
team_id (string): Slack team ID
user_id (string): Slack User ID
url (string): Jama API url
file_data (dict): The file name and file data to send to the url
Returns:
(int): HTTP status code of the upload request
(None): If a Jama OAuth token cannot be obtained
"""
if is_using_oauth():
token = oauth.get_access_token(team_id, user_id)
if token is None:
return None
response = requests.put(url, files=file_data, headers={"Authorization": "Bearer " + token})
else:
response = requests.put(url, files=file_data, auth=(os.environ["JAMA_USER"], os.environ["JAMA_PASS"]))
return response.status_code
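# Minimal usage sketch (illustrative: the team/user IDs, URL and payload shape
# are placeholders, not taken from the Jama REST documentation, and either
# JAMA_USER/JAMA_PASS or the OAuth database must be configured for real calls):
#
#   item = get("T1234", "U5678",
#              "https://example.jamacloud.com/rest/latest/items/42")
#   created = post("T1234", "U5678",
#                  "https://example.jamacloud.com/rest/latest/items",
#                  {"fields": {"name": "New item"}})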
| 32.073171
| 110
| 0.61635
|
fb28fd1ffef02a96e84c8adfeb706ddfa69ff423
| 9,706
|
py
|
Python
|
recommender.py
|
cmpgamer/Sprint2
|
5f9aadb4b3450cfa3db34484b52ef0614e6abc6b
|
[
"MIT"
] | null | null | null |
recommender.py
|
cmpgamer/Sprint2
|
5f9aadb4b3450cfa3db34484b52ef0614e6abc6b
|
[
"MIT"
] | null | null | null |
recommender.py
|
cmpgamer/Sprint2
|
5f9aadb4b3450cfa3db34484b52ef0614e6abc6b
|
[
"MIT"
] | null | null | null |
import codecs
from math import sqrt
class recommender:
def __init__(self, k=1, n=5):
self.k = k
self.n = n
self.data = {}
self.username2id = {}
self.userid2name = {}
self.productid2name = {}
self.cardinality = {}
self.slopeOneDeviations = {}
#Grab a bunch of info from the CSV
self.artists = {}
self.users = {}
self.normalizedData = {}
self.similarity = {}
self.frequencies = {}
self.deviations = {}
# for some reason I want to save the name of the metric
def convertProductID2name(self, id):
if id in self.productid2name:
return self.productid2name[id]
else:
return id
def userRatings(self, id, n):
"""Return n top ratings for user with id"""
print ("Ratings for " + self.userid2name[id])
ratings = self.data[id]
print(len(ratings))
ratings = list(ratings.items())[:n]
ratings = [(self.convertProductID2name(k), v) for (k, v) in ratings]
# finally sort and return
ratings.sort(key=lambda artistTuple: artistTuple[1], reverse = True)
for rating in ratings:
print("%s\t%i" % (rating[0], rating[1]))
def showUserTopItems(self, user, n):
""" show top n items for user"""
items = list(self.data[user].items())
items.sort(key=lambda itemTuple: itemTuple[1], reverse=True)
for i in range(n):
print("%s\t%i" % (self.convertProductID2name(items[i][0]),items[i][1]))
def computeDeviations(self):
# for each person in the data:
# get their ratings
for ratings in self.data.values():
# for each item & rating in that set of ratings:
for (item, rating) in ratings.items():
self.frequencies.setdefault(item, {})
self.deviations.setdefault(item, {})
# for each item2 & rating2 in that set of ratings:
for (item2, rating2) in ratings.items():
if item != item2:
# add the difference between the ratings to our
# computation
self.frequencies[item].setdefault(item2, 0)
self.deviations[item].setdefault(item2, 0.0)
self.frequencies[item][item2] += 1
self.deviations[item][item2] += rating - rating2
for (item, ratings) in self.deviations.items():
for item2 in ratings:
ratings[item2] /= self.frequencies[item][item2]
def pearson(self, rating1, rating2):
sum_xy = 0
sum_x = 0
sum_y = 0
sum_x2 = 0
sum_y2 = 0
n = 0
for key in rating1:
if key in rating2:
n += 1
x = rating1[key]
y = rating2[key]
sum_xy += x * y
sum_x += x
sum_y += y
sum_x2 += pow(x, 2)
sum_y2 += pow(y, 2)
if n == 0:
return 0
# now compute denominator
denominator = sqrt(sum_x2 - pow(sum_x, 2) / n) * sqrt(sum_y2 - pow(sum_y, 2) / n)
if denominator == 0:
return 0
else:
return (sum_xy - (sum_x * sum_y) / n) / denominator
def computeNearestNeighbor(self, username):
distances = []
for instance in self.data:
if instance != username:
distance = self.manhattan(self.data[username], self.data[instance])
distances.append((instance, distance))
# sort on the returned value; reverse=True puts the largest values first, so it
# is treated here as a similarity-style weight rather than a raw distance
distances.sort(key=lambda artistTuple: artistTuple[1], reverse=True)
return distances
def recommend(self, user):
print("we got here")
"""Give list of recommendations"""
recommendations = {}
# first get list of users ordered by nearness
nearest = self.computeNearestNeighbor(user)
#
# now get the ratings for the user
#
userRatings = self.data[user]
#
# determine the total distance
totalDistance = 0.0
for i in range(self.k):
totalDistance += nearest[i][1]
# now iterate through the k nearest neighbors
# accumulating their ratings
for i in range(self.k):
# compute slice of pie
weight = nearest[i][1] / totalDistance
# get the name of the person
name = nearest[i][0]
# get the ratings for this person
neighborRatings = self.data[name]
# get the name of the person
# now find bands neighbor rated that user didn't
for artist in neighborRatings:
if not artist in userRatings:
if artist not in recommendations:
recommendations[artist] = neighborRatings[artist] * weight
else:
recommendations[artist] = recommendations[artist] + neighborRatings[artist] * weight
# now make list from dictionary and only get the first n items
recommendations = list(recommendations.items())[:self.n]
recommendations = [(self.convertProductID2name(k), v) for (k, v) in recommendations]
# finally sort and return
recommendations.sort(key=lambda artistTuple: artistTuple[1], reverse = True)
return recommendations
def manhattan(self, rating1, rating2):
"""Computes the Manhattan distance. Both rating1 and rating2 are dictionaries
of the form {'The Strokes': 3.0, 'Slightly Stoopid': 2.5}"""
distance = 0
commonRatings = False
for key in rating1:
if key in rating2:
distance += abs(rating1[key] - rating2[key])
commonRatings = True
if commonRatings:
return distance
else:
return -1 #Indicates no ratings in common
def euclidean(self, rating1, rating2):
"""Computes the euclidean distance. Both rating1 and rating2 are dictionaries
of the form {'The Strokes': 3.0, 'Slightly Stoopid': 2.5}"""
totalDistance = 0
distance = 0
commonRatings = False
for key in rating1:
if key in rating2:
distance = abs(rating1[key] - rating2[key])
totalDistance += pow(distance, 2)
commonRatings = True
if commonRatings:
return pow(totalDistance, .5)
else:
return -1 #Indicates no ratings in common
def computeCosineSimilarity(self):
averages = {}
similarity = {}
#We need the averages for each user for the numerator
for userItem, ratings in self.data.items():
userAvg = sum(ratings.values())/len(ratings)
averages[userItem] = userAvg
for user, value in self.data.items():
#time to do the denominator
for band1 in value:
newTuple = {}
for band2 in value:
numerator = 0
denominator1 = 0
denominator2 = 0
if band1 is band2:
continue
for userItem, ratings in self.data.items():
if band1 in ratings and band2 in ratings:
userAvg = averages[userItem]
numerator += (ratings[band1] - userAvg) * (ratings[band2] - userAvg)
denominator1 += (ratings[band1] - userAvg)**2
denominator2 += (ratings[band2] - userAvg)**2
finalD = (sqrt(denominator1) * sqrt(denominator2))
try:
newTuple[band2] = numerator / finalD
except ZeroDivisionError:
newTuple[band2] = 0
similarity[band1] = newTuple
self.similarity = similarity
#print(similarity)
def normalizeData(self, minNum, maxNum):
normalized = {}
listOfUsers = self.data.items()
for userItem, ratings in listOfUsers:
newTuple = {}
for band in self.data:
if band in ratings:
normalRate = (2*(ratings[band] - minNum) - (maxNum - minNum))/(maxNum - minNum)
newTuple[band] = normalRate
normalized[userItem] = newTuple
self.normalizedData = normalized
#print(normalized)
def recommendCosine(self, minNum, maxNum):
normalized = self.normalizedData
similarity = self.similarity
finalRatings = {}
for user, userRatings in normalized.items():
finalRatings[user] = {}
for artist in self.data:
if not artist in userRatings:
numerator = 0
denominator = 0
for otherArtist in self.data:
if otherArtist in userRatings:
numerator += (userRatings[otherArtist] * similarity[artist][otherArtist])
denominator += abs(similarity[artist][otherArtist])
finalRatings[user][artist] = .5*((numerator/denominator)+ 1)*(maxNum-minNum)+minNum
print(finalRatings)
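# Quick usage sketch (illustrative): every method above expects self.data to map
# each user to an {item: rating} dictionary, e.g.
if __name__ == "__main__":
    r = recommender(k=1, n=5)
    r.data = {
        "Ann": {"The Strokes": 3.0, "Slightly Stoopid": 2.5},
        "Ben": {"The Strokes": 4.0, "Norah Jones": 5.0},
    }
    r.computeDeviations()      # Slope One deviations for every item pair
    print(r.recommend("Ann"))  # expected: [('Norah Jones', 5.0)]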
| 39.295547
| 110
| 0.524624
|
583d97c3d484a8a0f89b4bf6bf98102e90f42d66
| 5,857
|
py
|
Python
|
python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py
|
Vjrx/airship-drydock
|
315fb9864e6d55a66d5266f76c160be55d22c98b
|
[
"Apache-2.0"
] | null | null | null |
python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py
|
Vjrx/airship-drydock
|
315fb9864e6d55a66d5266f76c160be55d22c98b
|
[
"Apache-2.0"
] | 1
|
2021-06-01T23:08:49.000Z
|
2021-06-01T23:08:49.000Z
|
python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py
|
Vjrx/airship-drydock
|
315fb9864e6d55a66d5266f76c160be55d22c98b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task driver for Promenade"""
import logging
import uuid
import concurrent.futures
from oslo_config import cfg
import drydock_provisioner.error as errors
import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.config as config
from drydock_provisioner.drivers.kubernetes.driver import KubernetesDriver
from drydock_provisioner.drivers.kubernetes.promenade_driver.promenade_client \
import PromenadeClient
from .actions.k8s_node import RelabelNode
class PromenadeDriver(KubernetesDriver):
driver_name = 'promenadedriver'
driver_key = 'promenadedriver'
driver_desc = 'Promenade Kubernetes Driver'
action_class_map = {
hd_fields.OrchestratorAction.RelabelNode: RelabelNode,
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.logger = logging.getLogger(
cfg.CONF.logging.kubernetesdriver_logger_name)
def execute_task(self, task_id):
# actions that should be threaded for execution
threaded_actions = [
hd_fields.OrchestratorAction.RelabelNode,
]
action_timeouts = {
hd_fields.OrchestratorAction.RelabelNode:
config.config_mgr.conf.timeouts.relabel_node,
}
task = self.state_manager.get_task(task_id)
if task is None:
raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions:
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()
if task.action in threaded_actions:
if task.retry > 0:
msg = "Retrying task %s on previous failed entities." % str(
task.get_id())
task.add_status_msg(
msg=msg,
error=False,
ctx=str(task.get_id()),
ctx_type='task')
target_nodes = self.orchestrator.get_target_nodes(
task, failures=True)
else:
target_nodes = self.orchestrator.get_target_nodes(task)
with concurrent.futures.ThreadPoolExecutor() as e:
subtask_futures = dict()
for n in target_nodes:
prom_client = PromenadeClient()
nf = self.orchestrator.create_nodefilter_from_nodelist([n])
subtask = self.orchestrator.create_task(
design_ref=task.design_ref,
action=task.action,
node_filter=nf,
retry=task.retry)
task.register_subtask(subtask)
action = self.action_class_map.get(task.action, None)(
subtask,
self.orchestrator,
self.state_manager,
prom_client=prom_client)
subtask_futures[subtask.get_id().bytes] = e.submit(
action.start)
timeout = action_timeouts.get(
task.action, config.config_mgr.conf.timeouts.relabel_node)
finished, running = concurrent.futures.wait(
subtask_futures.values(), timeout=(timeout * 60))
for t, f in subtask_futures.items():
if not f.done():
task.add_status_msg(
"Subtask timed out before completing.",
error=True,
ctx=str(uuid.UUID(bytes=t)),
ctx_type='task')
task.failure()
else:
if f.exception():
msg = ("Subtask %s raised unexpected exception: %s" %
(str(uuid.UUID(bytes=t)), str(f.exception())))
self.logger.error(msg, exc_info=f.exception())
task.add_status_msg(
msg=msg,
error=True,
ctx=str(uuid.UUID(bytes=t)),
ctx_type='task')
task.failure()
task.bubble_results()
task.align_result()
else:
try:
prom_client = PromenadeClient()
action = self.action_class_map.get(task.action, None)(
task,
self.orchestrator,
self.state_manager,
prom_client=prom_client)
action.start()
except Exception as e:
msg = ("Subtask for action %s raised unexpected exception: %s"
% (task.action, str(e)))
self.logger.error(msg, exc_info=e)
task.add_status_msg(
msg=msg,
error=True,
ctx=str(task.get_id()),
ctx_type='task')
task.failure()
task.set_status(hd_fields.TaskStatus.Complete)
task.save()
return
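# The threaded branch above follows a submit / wait-with-timeout / inspect
# pattern. A stripped-down sketch of that pattern, detached from drydock's task
# and orchestrator objects (names below are made up):
#
#   import concurrent.futures
#
#   def _run_all(actions, timeout_minutes):
#       with concurrent.futures.ThreadPoolExecutor() as pool:
#           futures = {name: pool.submit(fn) for name, fn in actions.items()}
#           concurrent.futures.wait(futures.values(), timeout=timeout_minutes * 60)
#           for name, fut in futures.items():
#               if not fut.done():
#                   print("%s timed out" % name)
#               elif fut.exception():
#                   print("%s failed: %s" % (name, fut.exception()))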
| 37.787097
| 79
| 0.555062
|
5b66444d5bb885c6ab5920db6f1c193b3c2cf253
| 422
|
py
|
Python
|
pajbot/modules/clr_overlay/__init__.py
|
MrBean355/pajbot
|
3f27aabccfb242f5e3e8eedd20c97633b0d39950
|
[
"MIT"
] | 145
|
2019-06-08T15:38:40.000Z
|
2022-03-29T22:51:47.000Z
|
pajbot/modules/clr_overlay/__init__.py
|
MrBean355/pajbot
|
3f27aabccfb242f5e3e8eedd20c97633b0d39950
|
[
"MIT"
] | 671
|
2019-05-26T22:19:08.000Z
|
2022-03-31T06:00:49.000Z
|
pajbot/modules/clr_overlay/__init__.py
|
MrBean355/pajbot
|
3f27aabccfb242f5e3e8eedd20c97633b0d39950
|
[
"MIT"
] | 105
|
2019-05-25T18:22:13.000Z
|
2022-02-23T00:57:27.000Z
|
import logging
from pajbot.modules import BaseModule
from pajbot.modules.base import ModuleType
log = logging.getLogger(__name__)
class CLROverlayModule(BaseModule):
ID = "clroverlay-group"
NAME = "CLR Overlay"
DESCRIPTION = "A collection of overlays that can be used in the streaming software of choice"
CATEGORY = "Feature"
ENABLED_DEFAULT = True
MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED
| 26.375
| 97
| 0.760664
|
d90475c08dca53f116aa6a9949c6a75e0aec556f
| 16,971
|
py
|
Python
|
sharpy/utils/settings.py
|
ostodieck/sharpy
|
b85aa1c001a0ec851af4eb259cce7c01dfa68b9e
|
[
"BSD-3-Clause"
] | 1
|
2020-07-27T05:15:35.000Z
|
2020-07-27T05:15:35.000Z
|
sharpy/utils/settings.py
|
briandesilva/sharpy
|
aed86428ff88fd14d36cabd91cf7e04b5fc9a39a
|
[
"BSD-3-Clause"
] | null | null | null |
sharpy/utils/settings.py
|
briandesilva/sharpy
|
aed86428ff88fd14d36cabd91cf7e04b5fc9a39a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Settings Generator Utilities
"""
import configparser
import ctypes as ct
import numpy as np
import sharpy.utils.exceptions as exceptions
import sharpy.utils.cout_utils as cout
import ast
class DictConfigParser(configparser.ConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
def cast(k, v, pytype, ctype, default):
try:
# if default is None:
# raise TypeError
val = ctype(pytype(v))
except KeyError:
val = ctype(default)
cout.cout_wrap("--- The variable " + k + " has no given value, using the default " + default, 2)
except TypeError:
raise exceptions.NoDefaultValueException(k)
except ValueError:
val = ctype(v.value)
return val
def to_custom_types(dictionary, types, default, options=dict(), no_ctype=False):
for k, v in types.items():
if v == 'int':
if no_ctype:
data_type = int
else:
data_type = ct.c_int
try:
dictionary[k] = cast(k, dictionary[k], int, data_type, default[k])
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = cast(k, default[k], int, data_type, default[k])
notify_default_value(k, dictionary[k])
elif v == 'float':
if no_ctype:
data_type = float
else:
data_type = ct.c_double
try:
dictionary[k] = cast(k, dictionary[k], float, data_type, default[k])
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = cast(k, default[k], float, data_type, default[k])
notify_default_value(k, dictionary[k])
elif v == 'str':
try:
dictionary[k] = cast(k, dictionary[k], str, str, default[k])
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = cast(k, default[k], eval(v), eval(v), default[k])
notify_default_value(k, dictionary[k])
elif v == 'bool':
if no_ctype:
data_type = bool
else:
data_type = ct.c_bool
try:
dictionary[k] = cast(k, dictionary[k], str2bool, data_type, default[k])
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = cast(k, default[k], str2bool, data_type, default[k])
notify_default_value(k, dictionary[k])
elif v == 'list(str)':
try:
# if isinstance(dictionary[k], list):
# continue
# dictionary[k] = dictionary[k].split(',')
# getting rid of leading and trailing spaces
dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = default[k].copy()
notify_default_value(k, dictionary[k])
elif v == 'list(dict)':
try:
# if isinstance(dictionary[k], list):
# continue
# dictionary[k] = dictionary[k].split(',')
# getting rid of leading and trailing spaces
for i in range(len(dictionary[k])):
dictionary[k][i] = ast.literal_eval(dictionary[k][i])
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = default[k].copy()
notify_default_value(k, dictionary[k])
elif v == 'list(float)':
try:
dictionary[k]
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = default[k].copy()
notify_default_value(k, dictionary[k])
if isinstance(dictionary[k], np.ndarray):
continue
if isinstance(dictionary[k], list):
for i in range(len(dictionary[k])):
dictionary[k][i] = float(dictionary[k][i])
dictionary[k] = np.array(dictionary[k])
continue
# dictionary[k] = dictionary[k].split(',')
# # getting rid of leading and trailing spaces
# dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
if dictionary[k].find(',') < 0:
dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=' ', dtype=ct.c_double)
else:
dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=',', dtype=ct.c_double)
elif v == 'list(int)':
try:
dictionary[k]
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = default[k].copy()
notify_default_value(k, dictionary[k])
if isinstance(dictionary[k], np.ndarray):
continue
if isinstance(dictionary[k], list):
for i in range(len(dictionary[k])):
dictionary[k][i] = int(dictionary[k][i])
dictionary[k] = np.array(dictionary[k])
continue
# dictionary[k] = dictionary[k].split(',')
# # getting rid of leading and trailing spaces
# dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
if dictionary[k].find(',') < 0:
dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=' ').astype(ct.c_int)
else:
dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=',').astype(ct.c_int)
elif v == 'list(complex)':
try:
dictionary[k]
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = default[k].copy()
notify_default_value(k, dictionary[k])
if isinstance(dictionary[k], np.ndarray):
continue
if isinstance(dictionary[k], list):
for i in range(len(dictionary[k])):
dictionary[k][i] = float(dictionary[k][i])
dictionary[k] = np.array(dictionary[k])
continue
# dictionary[k] = dictionary[k].split(',')
# # getting rid of leading and trailing spaces
# dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
if dictionary[k].find(',') < 0:
dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=' ').astype(complex)
else:
dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=',').astype(complex)
elif v == 'dict':
try:
if not isinstance(dictionary[k], dict):
raise TypeError
except KeyError:
if default[k] is None:
raise exceptions.NoDefaultValueException(k)
dictionary[k] = default[k].copy()
notify_default_value(k, dictionary[k])
else:
raise TypeError('Variable %s has an unknown type (%s) that cannot be casted' % (k, v))
check_settings_in_options(dictionary, types, options)
for k in dictionary.keys():
if k not in list(types.keys()):
cout.cout_wrap('Warning - Unrecognised setting: %s. Please check input file and/or documentation.' % k, 3)
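# A minimal usage sketch (not part of the original SHARPy module). The setting
# names, types and defaults below are invented purely to illustrate how
# to_custom_types casts raw string inputs in place.
def _to_custom_types_example():
    settings = {'n_steps': '10', 'dt': '0.1', 'print_info': 'off'}
    settings_types = {'n_steps': 'int', 'dt': 'float', 'print_info': 'bool'}
    settings_default = {'n_steps': None, 'dt': 0.05, 'print_info': True}
    to_custom_types(settings, settings_types, settings_default, no_ctype=True)
    return settings  # {'n_steps': 10, 'dt': 0.1, 'print_info': False}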
def check_settings_in_options(settings, settings_types, settings_options):
"""
    Checks that settings of type ``str`` or ``int`` for which allowable options are given do indeed take one of the allowed values.
Args:
settings (dict): Dictionary of processed settings
settings_types (dict): Dictionary of settings types
settings_options (dict): Dictionary of options (may be empty)
Raises:
exception.NotValidSetting: if the setting is not allowed.
"""
for k in settings_options:
if settings_types[k] == 'int':
try:
value = settings[k].value
except AttributeError:
value = settings[k]
if value not in settings_options[k]:
raise exceptions.NotValidSetting(k, value, settings_options[k])
elif settings_types[k] == 'str':
value = settings[k]
if value not in settings_options[k] and value:
# checks that the value is within the options and that it is not an empty string.
raise exceptions.NotValidSetting(k, value, settings_options[k])
elif settings_types[k] == 'list(str)':
for item in settings[k]:
if item not in settings_options[k] and item:
raise exceptions.NotValidSetting(k, item, settings_options[k])
else:
pass # no other checks implemented / required
def load_config_file(file_name: str) -> dict:
"""This function reads the flight condition and solver input files.
Args:
file_name (str): contains the path and file name of the file to be read by the ``configparser``
reader.
Returns:
config (dict): a ``ConfigParser`` object that behaves like a dictionary
"""
# config = DictConfigParser()
# config.read(file_name)
# dict_config = config.as_dict()
import configobj
dict_config = configobj.ConfigObj(file_name)
return dict_config
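# Illustrative sketch only (not part of the original module): write a minimal
# config with configobj and read it back through load_config_file. The file
# name and its contents are made-up examples.
def _load_config_file_example(file_name='example_case.sharpy'):
    import configobj
    config = configobj.ConfigObj()
    config.filename = file_name
    config['SHARPy'] = {'case': 'example_case', 'flow': ['BeamLoader']}
    config.write()
    return load_config_file(file_name)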
def str2bool(string):
false_list = ['false', 'off', '0', 'no']
if isinstance(string, bool):
return string
if isinstance(string, ct.c_bool):
return string.value
if not string:
return False
elif string.lower() in false_list:
return False
else:
return True
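# Quick illustration (not part of the original module) of the values accepted by
# str2bool above; 'false', 'off', '0', 'no' (any case) and empty strings are False.
def _str2bool_examples():
    assert str2bool('on') is True
    assert str2bool('No') is False
    assert str2bool('') is False
    assert str2bool(ct.c_bool(True)) is True
    return True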
def notify_default_value(k, v):
cout.cout_wrap('Variable ' + k + ' has no assigned value in the settings file.')
cout.cout_wrap(' will default to the value: ' + str(v), 1)
class SettingsTable:
"""
Generates the documentation's setting table at runtime.
Sphinx is our chosen documentation manager and takes docstrings in reStructuredText format. Given that the SHARPy
solvers contain several settings, this class produces a table in reStructuredText format with the solver's settings
and adds it to the solver's docstring.
This table will then be printed alongside the remaining docstrings.
To generate the table, parse the setting's description to a solver dictionary named ``settings_description``, in a
similar fashion to what is done with ``settings_types`` and ``settings_default``. If no description is given it will
be left blank.
Then, add at the end of the solver's class declaration method an instance of the ``SettingsTable`` class and a call
to the ``SettingsTable.generate()`` method.
Examples:
The end of the solver's class declaration should contain
.. code-block:: python
# Generate documentation table
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
to generate the settings table.
"""
def __init__(self):
self.n_fields = 4
self.n_settings = 0
self.field_length = [0] * self.n_fields
self.titles = ['Name', 'Type', 'Description', 'Default']
self.settings_types = dict()
self.settings_description = dict()
self.settings_default = dict()
self.settings_options = dict()
self.settings_options_strings = dict()
self.line_format = ''
self.table_string = ''
def generate(self, settings_types, settings_default, settings_description, settings_options=dict(), header_line=None):
"""
Returns a rst-format table with the settings' names, types, description and default values
Args:
settings_types (dict): Setting types.
settings_default (dict): Settings default value.
settings_description (dict): Setting description.
header_line (str): Header line description (optional)
Returns:
str: .rst formatted string with a table containing the settings' information.
"""
self.settings_types = settings_types
self.settings_default = settings_default
self.n_settings = len(self.settings_types)
#
if header_line is None:
header_line = 'The settings that this solver accepts are given by a dictionary, ' \
'with the following key-value pairs:'
else:
assert type(header_line) == str, 'header_line not a string, verify order of arguments'
if type(settings_options) != dict:
raise TypeError('settings_options is not a dictionary')
if settings_options:
# if settings_options are provided
self.settings_options = settings_options
self.n_fields += 1
self.field_length.append(0)
self.titles.append('Options')
self.process_options()
try:
self.settings_description = settings_description
except AttributeError:
pass
self.set_field_length()
self.line_format = self.setting_line_format()
table_string = '\n ' + header_line + '\n'
table_string += '\n ' + self.print_divider_line()
table_string += ' ' + self.print_header()
table_string += ' ' + self.print_divider_line()
for setting in self.settings_types:
table_string += ' ' + self.print_setting(setting)
table_string += ' ' + self.print_divider_line()
self.table_string = table_string
return table_string
def process_options(self):
self.settings_options_strings = self.settings_options.copy()
for k, v in self.settings_options.items():
opts = ''
for option in v:
opts += ' ``%s``,' %str(option)
self.settings_options_strings[k] = opts[1:-1] # removes the initial whitespace and final comma
def set_field_length(self):
field_lengths = [[] for i in range(self.n_fields)]
for setting in self.settings_types:
stype = str(self.settings_types.get(setting, ''))
description = self.settings_description.get(setting, '')
default = str(self.settings_default.get(setting, ''))
option = str(self.settings_options_strings.get(setting, ''))
field_lengths[0].append(len(setting) + 4) # length of name
field_lengths[1].append(len(stype) + 4) # length of type + 4 for the rst ``X``
field_lengths[2].append(len(description)) # length of type
field_lengths[3].append(len(default) + 4) # length of type + 4 for the rst ``X``
if self.settings_options:
field_lengths[4].append(len(option))
for i_field in range(self.n_fields):
field_lengths[i_field].append(len(self.titles[i_field]))
self.field_length[i_field] = max(field_lengths[i_field]) + 2 # add the two spaces as column dividers
def print_divider_line(self):
divider = ''
for i_field in range(self.n_fields):
divider += '='*(self.field_length[i_field]-2) + ' '
divider += '\n'
return divider
def print_setting(self, setting):
type = '``' + str(self.settings_types.get(setting, '')) + '``'
description = self.settings_description.get(setting, '')
default = '``' + str(self.settings_default.get(setting, '')) + '``'
if self.settings_options:
option = self.settings_options_strings.get(setting, '')
line = self.line_format.format(['``' + str(setting) + '``', type, description, default, option]) + '\n'
else:
line = self.line_format.format(['``' + str(setting) + '``', type, description, default]) + '\n'
return line
def print_header(self):
header = self.line_format.format(self.titles) + '\n'
return header
def setting_line_format(self):
string = ''
for i_field in range(self.n_fields):
string += '{0[' + str(i_field) + ']:<' + str(self.field_length[i_field]) + '}'
return string
| 39.013793
| 122
| 0.574981
|
47a2f0ce4b0fd2b720912c617794374f1c2ffa71
| 1,267
|
py
|
Python
|
pyscf/nao/test/test_0045_tddft_load_kernel_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 3
|
2021-02-28T00:52:53.000Z
|
2021-03-01T06:23:33.000Z
|
pyscf/nao/test/test_0045_tddft_load_kernel_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 36
|
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/nao/test/test_0045_tddft_load_kernel_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 4
|
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao import tddft_iter
import h5py
dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname, jcutoff=7, iter_broadening=1e-2, xc_code='RPA')
np.savetxt("kernel.txt", td.kernel)
np.save("kernel.npy", td.kernel)
hdf = h5py.File("kernel.hdf5", "w")
hdf.create_dataset("kernel_pack", data=td.kernel)
hdf.close()
class KnowValues(unittest.TestCase):
def test_load_kernel(self):
data_ref_nonin = np.loadtxt(dname+'/water.tddft_iter.omega.pxx.txt-ref')[:, 1]
data_ref_inter = np.loadtxt(dname+'/water.tddft_iter.omega.inter.pxx.txt-ref')[:, 1]
for form in ["txt", "npy", "hdf5"]:
td = tddft_iter(label='water', cd=dname, iter_broadening=1e-2, xc_code='RPA', load_kernel=True, kernel_fname = "kernel." + form, kernel_format = form, kernel_path_hdf5="kernel_pack")
omegas = np.linspace(0.0,2.0,150)+1j*td.eps
pxx = -td.comp_polariz_inter_xx(omegas).imag
data = np.array([omegas.real*27.2114, pxx])
np.savetxt('water.tddft_iter_rpa.omega.inter.pxx.txt', data.T, fmt=['%f','%f'])
self.assertTrue(np.allclose(data_ref_inter, pxx, rtol=1.0, atol=1e-05))
if __name__ == "__main__": unittest.main()
| 37.264706
| 188
| 0.706393
|
2675b097cf5e9478dcad9486baea6499b7103e03
| 15,343
|
py
|
Python
|
YOLO/.history/pytorch-yolo-v3/video_demo_20201105163826.py
|
jphacks/D_2003
|
60a5684d549862e85bdf758069518702d9925a48
|
[
"MIT"
] | 1
|
2020-11-07T07:58:13.000Z
|
2020-11-07T07:58:13.000Z
|
YOLO/.history/pytorch-yolo-v3/video_demo_20201105163826.py
|
jphacks/D_2003
|
60a5684d549862e85bdf758069518702d9925a48
|
[
"MIT"
] | null | null | null |
YOLO/.history/pytorch-yolo-v3/video_demo_20201105163826.py
|
jphacks/D_2003
|
60a5684d549862e85bdf758069518702d9925a48
|
[
"MIT"
] | 4
|
2020-11-02T02:51:45.000Z
|
2020-11-07T02:54:47.000Z
|
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
from preprocess import prep_image, inp_to_image
import pandas as pd
import random
import argparse
import pickle as pkl
import requests
from requests.auth import HTTPDigestAuth
import io
from PIL import Image, ImageDraw, ImageFilter
import play
#from pygame import mixer
#import winsound
camera_name = {
"north":0,
"south":2,
"east":1,
"west":3,
}
def prep_image(img, inp_dim):
    # Preprocess the image before feeding it to the CNN
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = cv2.resize(orig_im, (inp_dim, inp_dim))
img_ = img[:,:,::-1].transpose((2,0,1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim
def count(x, img, count):
    # Draw the detection result on the image
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
label = "{0}".format(classes[cls])
print("label:\n", label)
    # Count people without masks
if(label=='no-mask'):
count+=1
print(count)
return count
def write(x, img,camId):
global count
global point
p = [0,0]
    # Draw the detection result on the image
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
print(camId, "_c0:",c1)
print(camId, "_c1:",c2)
label = "{0}".format(classes[cls])
print("label:", label)
    # Count people without masks
if(label=='no-mask'):
count+=1
print(count)
p[0] = (c2[0]-c1[0])/2
p[1] = (c2[1]-c1[1])/2
point[camId].append(p)
print("point0",point[0])
print("point1",point[1])
color = random.choice(colors)
cv2.rectangle(img, c1, c2,color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2,color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
return img
def arg_parse():
    # Build the command-line arguments for this module
    parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # set up the arguments with ArgumentParser
parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.25)
    # confidence is the detection confidence threshold
    parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshold", default = 0.4)
    # nms_thresh is the NMS threshold
parser.add_argument("--reso", dest = 'reso', help =
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default = "160", type = str)
    # reso is the CNN input resolution; increasing it improves accuracy but reduces speed.
    return parser.parse_args() # parse the arguments and return them
def cvpaste(img, imgback, x, y, angle, scale):
# x and y are the distance from the center of the background image
r = img.shape[0]
c = img.shape[1]
rb = imgback.shape[0]
cb = imgback.shape[1]
hrb=round(rb/2)
hcb=round(cb/2)
hr=round(r/2)
hc=round(c/2)
# Copy the forward image and move to the center of the background image
imgrot = np.zeros((rb,cb,3),np.uint8)
imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]
# Rotation and scaling
M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Translation
M = np.float32([[1,0,x],[0,1,y]])
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Makeing mask
imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of the forward image in the background image
img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)
# Take only region of the forward image.
img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)
# Paste the forward image on the background image
imgpaste = cv2.add(img1_bg,img2_fg)
return imgpaste
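# Minimal illustration (not part of the original script): paste a small solid
# white patch onto a black canvas with cvpaste. The sizes, offsets, angle and
# scale below are arbitrary example values.
def _cvpaste_demo():
    fg = np.full((50, 50, 3), 255, np.uint8)   # 50x50 white patch
    bg = np.zeros((200, 300, 3), np.uint8)     # 300x200 black canvas
    return cvpaste(fg, bg, x=30, y=-20, angle=15, scale=1.0)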
def hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
h_min = min(im.shape[0] for im in im_list)
im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)
for im in im_list]
return cv2.hconcat(im_list_resize)
# def beep(freq, dur=100):
# winsound.Beep(freq, dur)
if __name__ == '__main__':
    # YOLO before fine-tuning
    # cfgfile = "cfg/yolov3.cfg" # config file
    # weightsfile = "weight/yolov3.weights" # weights file
    # classes = load_classes('data/coco.names') # list of class names
    # YOLO fine-tuned for mask detection
    cfgfile = "cfg/mask.cfg" # config file
    weightsfile = "weight/mask_1500.weights" # weights file
    classes = load_classes('data/mask.names') # list of class names
    num_classes = 80 # number of classes
    args = arg_parse() # get the arguments
    confidence = float(args.confidence) # get the confidence threshold
    nms_thesh = float(args.nms_thresh) # get the NMS threshold
    start = 0
    CUDA = torch.cuda.is_available() # whether CUDA is available
    max = 0 # maximum allowed number of people
    num_camera = 3 # number of cameras
    num_classes = 80 # number of classes
bbox_attrs = 5 + num_classes
model = [[] for i in range(num_camera)]
inp_dim = [[] for i in range(num_camera)]
cap = [[] for i in range(num_camera)]
point = [[] for i in range(num_camera)]
# output = [[] for i in range(num_camera)]
# output = torch.tensor(output)
# print("output_shape\n", output.shape)
for i in range(num_camera):
        model[i] = Darknet(cfgfile) # create the i-th model
        model[i].load_weights(weightsfile) # load the weights into the i-th model
model[i].net_info["height"] = args.reso
inp_dim[i] = int(model[i].net_info["height"])
assert inp_dim[i] % 32 == 0
assert inp_dim[i] > 32
    #mixer.init() # initialization
if CUDA:
for i in range(num_camera):
            model[i].cuda() # run the model on the GPU when CUDA is available
for i in range(num_camera):
model[i].eval()
    cap[0] = cv2.VideoCapture(0) # select camera (USB connection)
    cap[1] = cv2.VideoCapture(1) # select camera (USB connection)
    cap[2] = cv2.VideoCapture(2) # select camera (USB connection)
# cap = cv2.VideoCapture("movies/sample.mp4")
#cap = cv2.VideoCapture("movies/one_v2.avi")
# Use the next line if your camera has a username and password
# cap = cv2.VideoCapture('protocol://username:password@IP:port/1')
    #cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/1') # (network connection)
#cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/80')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4:80/video')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/camera-cgi/admin/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.5:80/snapshot.jpg?user=admin&pwd=admin&strm=0')
print('-1')
    # check that the cameras have started
for i in range(num_camera):
if not cap[i].isOpened():
if i < num_camera - 1:
                for j in range(num_camera - i - 1):
cap[i + j] = cap[i + j + 1]
cap.pop()
num_camera -= 1
    #assert cap.isOpened(), 'Cannot capture source' # check that the capture source could be opened
img1 = cv2.imread("images/phase_1.jpg")
img2 = cv2.imread("images/phase_2.jpg")
img3 = cv2.imread("images/phase_2_red.jpg")
img4 = cv2.imread("images/phase_3.jpg")
#mixer.music.load("voice/voice_3.m4a")
#print(img1)
frames = 0
    count_frame = 0 # frame counter
    flag = 0 # crowding state (0: not crowded, 1: crowded)
start = time.time()
print('-1')
    while (cap[i].isOpened() for i in range(num_camera)): # while the cameras are running
        count=0 # count the number of people
point = [[] for i in range(num_camera)]
ret = [[] for i in range(num_camera)]
frame = [[] for i in range(num_camera)]
img = [[] for i in range(num_camera)]
orig_im = [[] for i in range(num_camera)]
dim = [[] for i in range(num_camera)]
output0 = []
output1 = []
output2 = []
output3 = []
for i in range(num_camera):
            if not cap[i] == []:
                ret[i], frame[i] = cap[i].read() # grab the captured frame
if (ret[i] for i in range(num_camera)):
            # preprocess the captured frames for analysis
for i in range(num_camera):
                if not frame[i] == [] and not inp_dim[i] == []:
img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])
if CUDA:
for i in range(num_camera):
im_dim[i] = im_dim[i].cuda()
img[i] = img[i].cuda()
# for i in range(num_camera):
# output[i] = model[i](Variable(img[i]), CUDA)
if not img[0] == []:
output0 = model[0](Variable(img[0]), CUDA)
if not img[1] == []:
output1 = model[1](Variable(img[1]), CUDA)
# output2 = model[2](Variable(img[2]), CUDA)
# output3 = model[3](Variable(img[3]), CUDA)
#print("output:\n", output)
# output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)
if not output0 == []:
output0 = write_results(output0, confidence, num_classes, nms = True, nms_conf = nms_thesh)
if not output1 == []:
output1 = write_results(output1, confidence, num_classes, nms = True, nms_conf = nms_thesh)
# output2 = write_results(output2, confidence, num_classes, nms = True, nms_conf = nms_thesh)
# output3 = write_results(output3, confidence, num_classes, nms = True, nms_conf = nms_thesh)
# print("output", i, ":\n", output[i])
# print(output.shape)
"""
            # Display the FPS
            if (type(output[i]) == int for i in range(num_camera)):
                print("display")
                frames += 1
                print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                # Press the q key to stop the FPS display
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
for i in range(num_camera):
output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output[i][:,[1,3]] *= frame[i].shape[1]
output[i][:,[2,4]] *= frame[i].shape[0]
"""
            # # Display the FPS
            # if type(output0) == int:
            #     print("display")
            #     frames += 1
            #     print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
            # # Press the q key to stop the FPS display
# key = cv2.waitKey(1)
# if key & 0xFF == ord('q'):
# break
# continue
# for i in range(num_camera):
if not output0 == []:
output0[:,1:5] = torch.clamp(output0[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output0[:,[1,3]] *= frame[0].shape[1]
output0[:,[2,4]] *= frame[0].shape[0]
if not output1 == []:
output1[:,1:5] = torch.clamp(output1[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output1[:,[1,3]] *= frame[1].shape[1]
output1[:,[2,4]] *= frame[1].shape[0]
# if cap[2].isOpened():
# output2[:,1:5] = torch.clamp(output2[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
# output2[:,[1,3]] *= frame[i].shape[1]
# output2[:,[2,4]] *= frame[i].shape[0]
# if cap[3].isOpened():
# output3[:,1:5] = torch.clamp(output3[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
# output3[:,[1,3]] *= frame[i].shape[1]
# output3[:,[2,4]] *= frame[i].shape[0]
colors = pkl.load(open("pallete", "rb"))
            #count = lambda x: count(x, orig_im, count) # count the number of people
"""
for i in range(num_camera):
list(map(lambda x: write(x, orig_im[i]), output[i]))
print("count:\n",count)
"""
# for i in range(num_camera):
# list(map(lambda x: write(x, orig_im[i]), output))
if not orig_im[0] == []:
list(map(lambda x0: write(x0, orig_im[0],0), output0))
if not orig_im[1] == []:
list(map(lambda x1: write(x1, orig_im[1],1), output1))
# print("x0",x0)
# list(map(lambda x2: write(x2, orig_im[2],2), output2))
# list(map(lambda x3: write(x3, orig_im[3],3), output3))
# print("point0",point[0])
# print("point1",point[1])
print("count:\n",count)
print("count_frame", count_frame)
if count > max:
count_frame += 1
#print("-1")
if count_frame <= 50:
x=0
y=0
angle=20
scale=1.5
for i in range(num_camera):
if not orig_im[i] == []:
imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)
if flag == 1:
# play.googlehome()
flag += 1
#mixer.music.play(1)
elif count_frame <= 100:
x=-30
y=10
angle=20
scale=1.1
if count_frame%2==1:
for i in range(num_camera):
if not orig_im[i] == []:
imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)
else:
for i in range(num_camera):
if not orig_im[i] == []:
imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)
if flag == 2:
# play.googlehome()
flag += 1
else:
x=-30
y=0
angle=20
scale=1.5
for i in range(num_camera):
if not orig_im[i] == []:
imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)
                    if count_frame > 101: #<-- offset by 2 frames
                        print("\007") # warning sound
time.sleep(3)
if flag == 3:
# play.googlehome()
flag += 1
# cv2.imshow("frame", imgpaste)
else:
count_frame = 0
flag = 0
#print("-2")
# for i in range(num_camera):
            for i in range(num_camera):
                if orig_im[i] == []:
                    orig_im[i] = img
im_h_resize = hconcat_resize_min(orig_im)
cv2.imshow("frame", im_h_resize )
# play.googlehome()
key = cv2.waitKey(1)
            # press the q key to stop the video display
if key & 0xFF == ord('q'):
break
frames += 1
print("count_frame:\n", count_frame)
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
else:
break
| 35.516204
| 126
| 0.524148
|
47754147f12902b92fac59a09938726f6b10a5be
| 9,170
|
py
|
Python
|
hack/gen.py
|
akutz/go-interface-values-and-malloc
|
2077a43f46036a397d546f2858f54a7995231607
|
[
"Apache-2.0"
] | 24
|
2022-02-17T19:05:21.000Z
|
2022-03-27T10:20:04.000Z
|
hack/gen.py
|
x448/go-interface-values
|
478b2428e64a411baa6a2a574bb3585b61528029
|
[
"Apache-2.0"
] | 2
|
2022-02-08T08:28:31.000Z
|
2022-02-09T18:07:15.000Z
|
hack/gen.py
|
x448/go-interface-values
|
478b2428e64a411baa6a2a574bb3585b61528029
|
[
"Apache-2.0"
] | 4
|
2022-02-08T08:16:54.000Z
|
2022-02-20T12:14:09.000Z
|
#!/usr/bin/env python3
"""
Copyright 2022
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""generate
Generates several of the go source files.
"""
import subprocess
_HEADER = """/*
Copyright 2022
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// !! Generated code -- do not modify !!
//
package mem_test
"""
_INSTRUMENTED_TYPES = [
{
"type": "int",
"nonz": "nonZeroRandInt(_int_size)",
},
{
"type": "int8",
"nonz": "int8(nonZeroRandInt(8))",
},
{
"type": "int16",
"nonz": "int16(nonZeroRandInt(16))",
},
{
"type": "int32",
"nonz": "int32(nonZeroRandInt(32))",
},
{
"type": "int64",
"nonz": "int64(nonZeroRandInt(64))",
"nwln": 2,
},
{
"type": "uint",
"nonz": "uint(nonZeroRandInt(_int_size))",
},
{
"type": "uint8",
"nonz": "uint8(nonZeroRandInt(8))",
},
{
"type": "uint16",
"nonz": "uint16(nonZeroRandInt(16))",
},
{
"type": "uint32",
"nonz": "uint32(nonZeroRandInt(32))",
},
{
"type": "uint64",
"nonz": "uint64(nonZeroRandInt(64))",
"nwln": 2,
},
{
"type": "float32",
"nonz": "float32(nonZeroRandInt(32))",
},
{
"type": "float64",
"nonz": "float64(nonZeroRandInt(64))",
"nwln": 2,
},
{
"type": "complex64",
"nonz": "complex(float32(nonZeroRandInt(32)), float32(nonZeroRandInt(32)))",
},
{
"type": "complex128",
"nonz": "complex(float64(nonZeroRandInt(64)), float64(nonZeroRandInt(64)))",
"nwln": 2,
},
{
"type": "byte",
"nonz": "byte(nonZeroRandInt(8))",
},
{
"type": "bool",
"nonz": "nonConstBoolTrue()",
},
{
"type": "rune",
"nonz": "rune(nonZeroRandInt(32))",
},
{
"type": "string",
"nonz": "nonZeroString(50)",
"nwln": 2,
},
{
"name": "struct_int32_int32",
"type": "struct{ a, b int32 }",
"nonz": "{ a: int32(nonZeroRandInt(32)), b: int32(nonZeroRandInt(32)) }",
},
{
"name": "struct_int32_int64",
"type": "struct { a int32; b int64 }",
"nonz": "{ a: int32(nonZeroRandInt(32)), b: int64(nonZeroRandInt(64)) }",
},
{
"name": "struct_array_bytes_7",
"type": "struct{ a [7]byte }",
"nonz": "{ a: [7]byte{byte(nonZeroRandInt(8)), byte(nonZeroRandInt(8)), byte(nonZeroRandInt(8)), byte(nonZeroRandInt(8)), byte(nonZeroRandInt(8)), byte(nonZeroRandInt(8)), byte(nonZeroRandInt(8)) }}",
},
{
"name": "struct_byte_7",
"type": "struct{ a, b, c, d, e, f, g byte }",
"nonz": "{ a: byte(nonZeroRandInt(8)), b: byte(nonZeroRandInt(8)), c: byte(nonZeroRandInt(8)), d: byte(nonZeroRandInt(8)), e: byte(nonZeroRandInt(8)), f: byte(nonZeroRandInt(8)), g: byte(nonZeroRandInt(8)) }",
"nwln": 2,
},
]
_TYPES_TEST_PATH = "types_test.go"
_MEM_TEST_PATH = "bench_test.go"
_is_struct = lambda t: t["type"].startswith("struct")
_non_struct_types = [t for t in _INSTRUMENTED_TYPES if not _is_struct(t)]
_struct_types = [t for t in _INSTRUMENTED_TYPES if _is_struct(t)]
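# Illustration only (never called by the generator): _is_struct keys off the Go
# type string, which is how the scalar entries above are separated from the
# struct entries.
def _split_example():
    assert not _is_struct({"type": "int64", "nonz": "int64(nonZeroRandInt(64))"})
    assert _is_struct({"type": "struct{ a, b int32 }", "nonz": "{}"})
    return len(_non_struct_types), len(_struct_types)  # (18, 4) for the table above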
def go_fmt(p):
# Format the file.
subprocess.run(
["go", "fmt", p],
capture_output=True,
check=True,
)
def gen_types():
def _print(
f,
it,
is_zero=False,
is_struct=False,
is_wrapped=False,
is_benchmark_global=False,
):
t = it["type"]
        # Define the variable's name.
f.write("\t_")
if is_wrapped:
f.write("struct_")
f.write(t if "name" not in it else it["name"])
if not is_zero:
f.write("_n")
if is_benchmark_global:
f.write("_benchmark")
f.write(" ")
        # The variable's type declaration depends on whether this is a non-zero
        # value and on whether the type is a struct.
if not is_zero and not is_benchmark_global and (is_struct or is_wrapped):
f.write(" = ")
        # Define the variable's type.
if is_wrapped:
f.write("struct{ a ")
f.write(t)
if is_wrapped:
f.write(" }")
if not is_zero:
# Define the variable's value.
if not is_struct and not is_wrapped:
f.write(" = ")
if is_wrapped:
f.write("{ a: ")
f.write(it["nonz"])
if is_wrapped:
f.write(" }")
f.write("\n" * (1 if "nwln" not in it else it["nwln"]))
with open(_TYPES_TEST_PATH, "w") as f:
f.write(_HEADER)
f.write("var (\n")
# Define the variables for the zero values for the non-struct types.
for it in _non_struct_types:
_print(f, it, is_zero=True)
# Define the variables for the zero values for the non-struct types wrapped
# by a struct.
for it in _non_struct_types:
_print(f, it, is_zero=True, is_wrapped=True)
# Define the variables for the zero-values for the struct types.
for it in _struct_types:
_print(f, it, is_zero=True, is_struct=True)
# Define the variables for the non-zero values for the non-struct types.
for it in _non_struct_types:
_print(f, it)
# Define the variables for the non-zero values for the non-struct types
# wrapped by a struct.
for it in _non_struct_types:
_print(f, it, is_wrapped=True)
# Define the variables for the non-zero values for the struct types.
for it in _struct_types:
_print(f, it, is_struct=True)
# Define the variables for the zero values for the non-struct types
# used by the typed benchmarks.
for it in _non_struct_types:
_print(f, it, is_zero=True, is_benchmark_global=True)
# Define the variables for the zero values for the non-struct types
# wrapped by a struct and used by the typed benchmarks.
for it in _non_struct_types:
_print(f, it, is_zero=True, is_wrapped=True, is_benchmark_global=True)
# Define the variables for the zero-values for the struct types
# used by the typed benchmarks.
for it in _struct_types:
_print(f, it, is_zero=True, is_struct=True, is_benchmark_global=True)
f.write(")\n\n")
go_fmt(_TYPES_TEST_PATH)
def gen_bench():
s = """
b.Run("{0}", func(b *testing.B) {{
b.Logf("real(T)=%T", _{0})
b.Run("0", func(b *testing.B) {{
b.Run("h", func(b *testing.B) {{
for j := 0; j < b.N; j++ {{
_i = _{0}
}}
}})
b.Run("s", func(b *testing.B) {{
for j := 0; j < b.N; j++ {{
_{0}_benchmark = _{0}
}}
}})
}})
b.Run("n", func(b *testing.B) {{
b.Run("h", func(b *testing.B) {{
for j := 0; j < b.N; j++ {{
_i = _{0}_n
}}
}})
b.Run("s", func(b *testing.B) {{
for j := 0; j < b.N; j++ {{
_{0}_benchmark = _{0}_n
}}
}})
}})
}})
"""
def _print(f, it, is_wrapped=False):
t = it["type"] if "name" not in it else it["name"]
if is_wrapped:
t = "struct_" + t
f.write(s.format(t))
with open(_MEM_TEST_PATH, "w") as f:
f.write(_HEADER)
f.write('\nimport "testing"\n')
f.write("func BenchmarkMem(b *testing.B) {\n")
# Benchmarks for the non-struct types.
for it in _non_struct_types:
_print(f, it)
# Benchmarks for the non-struct types wrapped by a struct.
for it in _non_struct_types:
_print(f, it, is_wrapped=True)
# Benchmarks for the struct types.
for it in _struct_types:
_print(f, it)
f.write("}\n")
go_fmt(_MEM_TEST_PATH)
gen_types()
gen_bench()
| 27.87234
| 217
| 0.545474
|
71c64def6ef5ea5dd0401d07fbcaf8c4b291cce1
| 2,362
|
py
|
Python
|
src/config.py
|
youngerous/transformer
|
7b849dbca813ceff140f7261ad739c72a045faaa
|
[
"Apache-2.0"
] | 3
|
2021-05-21T03:58:49.000Z
|
2021-07-13T06:00:36.000Z
|
src/config.py
|
youngerous/transformer
|
7b849dbca813ceff140f7261ad739c72a045faaa
|
[
"Apache-2.0"
] | null | null | null |
src/config.py
|
youngerous/transformer
|
7b849dbca813ceff140f7261ad739c72a045faaa
|
[
"Apache-2.0"
] | null | null | null |
import argparse
def load_config():
parser = argparse.ArgumentParser()
# default hparams
parser.add_argument("--root-path", type=str, default="./src/data")
parser.add_argument("--ckpt-path", type=str, default="./src/checkpoints/")
parser.add_argument("--result-path", type=str, default="./src/results.csv")
parser.add_argument("--seed", type=int, default=42, help="Seed for reproducibility")
parser.add_argument("--workers", type=int, default=0)
parser.add_argument("--log-step", type=int, default=200)
parser.add_argument(
"--eval-ratio",
type=float,
default=0.0,
help="Evaluation will be done at the end of epoch if set to 0.0",
)
parser.add_argument(
"--amp", action="store_true", default=False, help="PyTorch(>=1.6.x) AMP"
)
# ddp hparams
parser.add_argument(
"--distributed", action="store_true", default=False, help="Whether to use ddp"
)
parser.add_argument("--dist-backend", type=str, default="nccl")
parser.add_argument("--dist-url", default="tcp://127.0.0.1:3456", type=str)
parser.add_argument(
"--world-size", type=int, default=1, help="Total number of processes to run"
)
parser.add_argument(
"--rank", type=int, default=-1, help="Local GPU rank (-1 if not using ddp)"
)
# training hparams
parser.add_argument("--epoch", type=int, default=5)
parser.add_argument("--batch-size", type=int, default=8)
parser.add_argument("--lr", type=float, default=5e-5)
parser.add_argument("--dropout", type=float, default=0.1)
parser.add_argument("--weight-decay", type=float, default=0.01)
parser.add_argument("--warmup-ratio", type=float, default=0.1)
parser.add_argument("--max-grad-norm", type=float, default=1.0)
parser.add_argument("--gradient-accumulation-step", type=int, default=1)
# model hparams
parser.add_argument("--n-enc-block", type=int, default=6)
parser.add_argument("--n-dec-block", type=int, default=6)
parser.add_argument("--hidden", type=int, default=512)
parser.add_argument("--fc-hidden", type=int, default=2048)
parser.add_argument(
"--num-head", type=int, default=8, help="Number of self-attention head"
)
parser.add_argument("--max-len", type=int, default=512)
args = parser.parse_args()
return args
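if __name__ == "__main__":
    # Quick sanity check (not part of the original module): parse the default
    # hyperparameters and print a few of them.
    hparams = load_config()
    print(hparams.lr, hparams.batch_size, hparams.distributed)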
| 40.033898
| 88
| 0.657494
|
2fe91c96f87a19b617285cd2409bd9c0519ed67b
| 3,638
|
py
|
Python
|
HARK/tests/test_core.py
|
HsinYiHung/HARK_HY
|
086c46af5bd037fe1ced6906c6ea917ed58b134f
|
[
"Apache-2.0"
] | null | null | null |
HARK/tests/test_core.py
|
HsinYiHung/HARK_HY
|
086c46af5bd037fe1ced6906c6ea917ed58b134f
|
[
"Apache-2.0"
] | null | null | null |
HARK/tests/test_core.py
|
HsinYiHung/HARK_HY
|
086c46af5bd037fe1ced6906c6ea917ed58b134f
|
[
"Apache-2.0"
] | null | null | null |
"""
This file implements unit tests for interpolation methods
"""
from HARK.core import HARKobject, distanceMetric, AgentType
import numpy as np
import unittest
class testdistanceMetric(unittest.TestCase):
def setUp(self):
self.list_a = [1.0, 2.1, 3]
self.list_b = [3.1, 4, -1.4]
self.list_c = [8.6, 9]
self.obj_a = HARKobject()
self.obj_b = HARKobject()
self.obj_c = HARKobject()
def test_list(self):
# same length
self.assertEqual(distanceMetric(self.list_a, self.list_b), 4.4)
# different length
self.assertEqual(distanceMetric(self.list_b, self.list_c), 1.0)
# sanity check, same objects
self.assertEqual(distanceMetric(self.list_b, self.list_b), 0.0)
def test_array(self):
# same length
self.assertEqual(
distanceMetric(np.array(self.list_a), np.array(self.list_b)), 4.4
)
# different length
self.assertEqual(
distanceMetric(np.array(self.list_b).reshape(1, 3), np.array(self.list_c)),
1.0,
)
# sanity check, same objects
self.assertEqual(
distanceMetric(np.array(self.list_b), np.array(self.list_b)), 0.0
)
def test_hark_object_distance(self):
self.obj_a.distance_criteria = ["var_1", "var_2", "var_3"]
self.obj_b.distance_criteria = ["var_1", "var_2", "var_3"]
self.obj_c.distance_criteria = ["var_5"]
# if attributes don't exist or don't match
self.assertEqual(distanceMetric(self.obj_a, self.obj_b), 1000.0)
self.assertEqual(distanceMetric(self.obj_a, self.obj_c), 1000.0)
# add single numbers to attributes
self.obj_a.var_1, self.obj_a.var_2, self.obj_a.var_3 = 0.1, 1, 2.1
self.obj_b.var_1, self.obj_b.var_2, self.obj_b.var_3 = 1.8, -1, 0.1
self.assertEqual(distanceMetric(self.obj_a, self.obj_b), 2.0)
# sanity check - same objects
self.assertEqual(distanceMetric(self.obj_a, self.obj_a), 0.0)
class testHARKobject(unittest.TestCase):
def setUp(self):
# similar test to distanceMetric
self.obj_a = HARKobject()
self.obj_b = HARKobject()
self.obj_c = HARKobject()
def test_distance(self):
self.obj_a.distance_criteria = ["var_1", "var_2", "var_3"]
self.obj_b.distance_criteria = ["var_1", "var_2", "var_3"]
self.obj_c.distance_criteria = ["var_5"]
self.obj_a.var_1, self.obj_a.var_2, self.obj_a.var_3 = [0.1], [1, 2], [2.1]
self.obj_b.var_1, self.obj_b.var_2, self.obj_b.var_3 = [1.8], [0, 0.1], [1.1]
self.assertEqual(self.obj_a.distance(self.obj_b), 1.9)
        # change the length of an attribute list
self.obj_b.var_1, self.obj_b.var_2, self.obj_b.var_3 = [1.8], [0, 0, 0.1], [1.1]
self.assertEqual(self.obj_a.distance(self.obj_b), 1.7)
# sanity check
self.assertEqual(self.obj_b.distance(self.obj_b), 0.0)
class testAgentType(unittest.TestCase):
def setUp(self):
self.agent = AgentType()
def test_solve(self):
self.agent.time_vary = ['vary_1']
self.agent.time_inv = ['inv_1']
self.agent.vary_1 = [1.1, 1.2, 1.3, 1.4]
self.agent.inv_1 = 1.05
# to test the superclass we create a dummy solveOnePeriod function
# for our agent, which doesn't do anything, instead of using a NullFunc
self.agent.solveOnePeriod = lambda vary_1: HARKobject()
self.agent.solve()
self.assertEqual(len(self.agent.solution), 4)
self.assertTrue(isinstance(self.agent.solution[0], HARKobject))
| 38.702128
| 88
| 0.630566
|
0d005040f44129d4069b7bd9d2eabfee08d462b7
| 665
|
py
|
Python
|
paper/tools/mse_gf.py
|
toros-astro/corral
|
75474b38ff366330d33644461a902d07374a5bbc
|
[
"BSD-3-Clause"
] | 4
|
2015-11-19T16:04:30.000Z
|
2021-05-13T06:42:27.000Z
|
paper/tools/mse_gf.py
|
toros-astro/corral
|
75474b38ff366330d33644461a902d07374a5bbc
|
[
"BSD-3-Clause"
] | 27
|
2015-10-20T16:28:02.000Z
|
2018-08-21T20:48:45.000Z
|
paper/tools/mse_gf.py
|
toros-astro/corral
|
75474b38ff366330d33644461a902d07374a5bbc
|
[
"BSD-3-Clause"
] | 5
|
2015-11-20T00:03:07.000Z
|
2019-07-15T00:39:53.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context(context='paper', font_scale=1.8)
mse = np.arange(40)
f20 = np.exp(mse/20.)
f13 = np.exp(mse/13.)
f10 = np.exp(mse/10.)
f5 = np.exp(mse/5.)
gf10 = 2./(1+f10)
gf13 = 2./(1+f13)
gf5 = 2./(1+f5)
gf20 = 2./(1+f20)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=20)
plt.plot(mse, gf20, 'r-.', label=r'$\tau=20$')
plt.plot(mse, gf13, 'b', label=r'$\tau=13$')
plt.plot(mse, gf10, 'g:', label=r'$\tau=10$')
plt.plot(mse, gf5, 'y--', label=r'$\tau=5$')
plt.ylabel(r'$2 \times [1+exp(MSE/\tau)]^{-1}$')
plt.xlabel('MSE Style errors')
plt.legend(loc=1)
plt.show()
| 21.451613
| 48
| 0.619549
|
56563ea32089e25c1db3365ebf229670134f3418
| 5,198
|
py
|
Python
|
src/mce_irl.py
|
andyruddh/irl-maxent
|
f1644ec4a2a31e3d99aa5d22c8f441c395da1de0
|
[
"MIT"
] | null | null | null |
src/mce_irl.py
|
andyruddh/irl-maxent
|
f1644ec4a2a31e3d99aa5d22c8f441c395da1de0
|
[
"MIT"
] | null | null | null |
src/mce_irl.py
|
andyruddh/irl-maxent
|
f1644ec4a2a31e3d99aa5d22c8f441c395da1de0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import gridworld as W
import maxent as M
import plot as P
import trajectory as T
import solver as S
import optimizer as O
import numpy as np
import matplotlib.pyplot as plt
import os
def setup_mdp(GRID_SIZE, p_slip, avoid_states):
"""
Set-up our MDP/GridWorld
"""
# create our world
world = W.IcyGridWorld(size=GRID_SIZE, p_slip=p_slip)
# set up the reward function
# reward = np.zeros(world.n_states)
reward = np.ones(world.n_states)
reward[-1] = 3.0
# reward[6] = 2.5
# Define some obstacles or avoid regions
for s in avoid_states:
reward[s] = 0
# set up terminal states
terminal = [GRID_SIZE**2-1]
# print(world.n_states)
# print(reward)
return world, reward, terminal
def generate_trajectories(world, reward, terminal):
"""
Generate some "expert" trajectories.
"""
# parameters
n_trajectories = 300
print("\nNumber of experts: %d\n" %(n_trajectories))
discount = 0.9
weighting = lambda x: x**5
# set up initial probabilities for trajectory generation
initial = np.zeros(world.n_states)
initial[0] = 1.0
# generate trajectories
value = S.value_iteration(world.p_transition, reward, discount)
policy = S.stochastic_policy_from_value(world, value, w=weighting)
policy_exec = T.stochastic_policy_adapter(policy)
tjs = list(T.generate_trajectories(n_trajectories, world, policy_exec, initial, terminal))
return tjs, policy
def maxent(world, terminal, trajectories, avoid_states=None):
"""
Maximum Entropy Inverse Reinforcement Learning
"""
# set up features: we use one feature vector per state
# features = W.state_features(world)
features = W.state_custom_features(world, avoid_states, terminal)
# choose our parameter initialization strategy:
# initialize parameters with constant
init = O.Constant(1.0)
# choose our optimization strategy:
# we select exponentiated gradient descent with linear learning-rate decay
optim = O.ExpSga(lr=O.linear_decay(lr0=0.2))
# actually do some inverse reinforcement learning
reward = M.irl(world.p_transition, features, terminal, trajectories, optim, init)
return reward
def maxent_causal(world, avoid_states, terminal, trajectories, discount=0.7):
"""
Maximum Causal Entropy Inverse Reinforcement Learning
"""
# set up features: we use one feature vector per state
features = W.state_custom_features(world, avoid_states, terminal)
# features = W.state_features(world)
# choose our parameter initialization strategy:
# initialize parameters with constant
init = O.Constant(1.0)
# choose our optimization strategy:
# we select exponentiated gradient descent with linear learning-rate decay
optim = O.ExpSga(lr=O.linear_decay(lr0=0.2))
# actually do some inverse reinforcement learning
reward = M.irl_causal(world.p_transition, features, terminal, trajectories, optim, init, discount)
return reward
def mce_irl(grid_size, p_slip, avoid_states):
cwd = os.getcwd()
fig_dir = "figs"
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
# common style arguments for plotting
style = {
'border': {'color': 'red', 'linewidth': 0.5},
'cmap': "Blues",
}
# set-up mdp
world, reward, terminal = setup_mdp(grid_size, p_slip, avoid_states)
# show our original reward
ax = plt.figure(num='Original Reward').add_subplot(111)
tt = P.plot_state_values(ax, world, reward, **style)
plt.colorbar(tt)
plt.draw()
plt.savefig(os.path.join(fig_dir, "gt_reward.png"))
print("\nGenerating expert trajectories ...\n")
# generate "expert" trajectories
trajectories, expert_policy = generate_trajectories(world, reward, terminal)
# show our expert policies
ax = plt.figure(num='Expert Trajectories and Policy').add_subplot(111)
P.plot_stochastic_policy(ax, world, expert_policy, **style)
for t in trajectories:
P.plot_trajectory(ax, world, t, lw=5, color='white', alpha=0.025)
plt.draw()
plt.savefig(os.path.join(fig_dir, "demonstrations.png"))
'''
print("ME-IRL ...")
# maximum entropy reinforcement learning (non-causal)
reward_maxent = maxent(world, terminal, trajectories)
# show the computed reward
ax = plt.figure(num='MaxEnt Reward').add_subplot(111)
P.plot_state_values(ax, world, reward_maxent, **style)
plt.draw()
'''
print("\nPerforming MCE-IRL ...\n")
    # maximum entropy IRL (non-causal); the causal variant below is commented out
# reward_maxcausal = maxent_causal(world, avoid_states, terminal, trajectories)
reward_maxcausal = maxent(world, terminal, trajectories, avoid_states)
# print(reward_maxcausal)
# show the computed reward
ax = plt.figure(num='MaxEnt Reward (Causal)').add_subplot(111)
tt = P.plot_state_values(ax, world, reward_maxcausal, **style)
plt.colorbar(tt)
plt.draw()
plt.savefig(os.path.join(fig_dir, "maxent_causal_reward.png"))
print("\nDone! Rewards learned\n")
# plt.show()
return (reward_maxcausal, world, terminal)
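if __name__ == '__main__':
    # Illustrative invocation only: the grid size, slip probability and avoid
    # states below are made-up values, not taken from any original experiment.
    mce_irl(grid_size=5, p_slip=0.2, avoid_states=[7, 12])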
| 30.046243
| 102
| 0.691227
|
4e85cfd4865cd2504e0383c6886e4007bdaa0655
| 435
|
py
|
Python
|
scripts/planck_data_info.py
|
veragluscevic/npoint-fgs
|
911a7c998cc2d8303bf7b59f028d194bb5ce2b09
|
[
"MIT"
] | null | null | null |
scripts/planck_data_info.py
|
veragluscevic/npoint-fgs
|
911a7c998cc2d8303bf7b59f028d194bb5ce2b09
|
[
"MIT"
] | null | null | null |
scripts/planck_data_info.py
|
veragluscevic/npoint-fgs
|
911a7c998cc2d8303bf7b59f028d194bb5ce2b09
|
[
"MIT"
] | null | null | null |
# for beam_file='HFI_RIMO_Beams-100pc_R2.00.fits'
BEAM_INDEX = {
'100': 3,
'143': 4,
'217': 5,
'353': 6,
'100P': 7,
'143P': 8,
'217P': 9,
'353P': 10,
}
#for maskfile='HFI_Mask_GalPlane-apo{}_2048_R2.00.fits'
MASK_FIELD = {
20: 0,
40: 1,
60: 2,
70: 3,
80: 4,
90: 5,
97: 6,
99: 7,
}
FG_SCALING = {
100: 1.,
143: 1.,
217: 1.,
353: 1.,
}
| 12.794118
| 55
| 0.441379
|
765495ec01abb0e8a98c2e2274626f69e46fd483
| 2,953
|
py
|
Python
|
pygazebo/msg/world_reset_pb2.py
|
CryptoCopter/pygazebo
|
f16704f3b59cb50a1390ef92fde283558fd71f8f
|
[
"Apache-2.0"
] | null | null | null |
pygazebo/msg/world_reset_pb2.py
|
CryptoCopter/pygazebo
|
f16704f3b59cb50a1390ef92fde283558fd71f8f
|
[
"Apache-2.0"
] | null | null | null |
pygazebo/msg/world_reset_pb2.py
|
CryptoCopter/pygazebo
|
f16704f3b59cb50a1390ef92fde283558fd71f8f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: world_reset.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='world_reset.proto',
package='gazebo.msgs',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x11world_reset.proto\x12\x0bgazebo.msgs\"T\n\nWorldReset\x12\x11\n\x03\x61ll\x18\x01 \x01(\x08:\x04true\x12\x18\n\ttime_only\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nmodel_only\x18\x03 \x01(\x08:\x05\x66\x61lse'
)
_WORLDRESET = _descriptor.Descriptor(
name='WorldReset',
full_name='gazebo.msgs.WorldReset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='all', full_name='gazebo.msgs.WorldReset.all', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time_only', full_name='gazebo.msgs.WorldReset.time_only', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_only', full_name='gazebo.msgs.WorldReset.model_only', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=118,
)
DESCRIPTOR.message_types_by_name['WorldReset'] = _WORLDRESET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WorldReset = _reflection.GeneratedProtocolMessageType('WorldReset', (_message.Message,), {
'DESCRIPTOR' : _WORLDRESET,
'__module__' : 'world_reset_pb2'
# @@protoc_insertion_point(class_scope:gazebo.msgs.WorldReset)
})
_sym_db.RegisterMessage(WorldReset)
# @@protoc_insertion_point(module_scope)
| 34.741176
| 236
| 0.758212
|
c476aea2890fe203a58963632f6592591e1c7a1c
| 569
|
py
|
Python
|
src/scion/service/test/orb_sources.py
|
scion-network/scion
|
9a778eeb3e4e690d1b1ebd19e5349314cbf7b0fc
|
[
"BSD-2-Clause"
] | null | null | null |
src/scion/service/test/orb_sources.py
|
scion-network/scion
|
9a778eeb3e4e690d1b1ebd19e5349314cbf7b0fc
|
[
"BSD-2-Clause"
] | null | null | null |
src/scion/service/test/orb_sources.py
|
scion-network/scion
|
9a778eeb3e4e690d1b1ebd19e5349314cbf7b0fc
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import subprocess
from pyon.public import CFG
antelope_path = CFG.get_safe("scion.antelope.path", "/opt/antelope/5.6")
cmd = ". " + antelope_path + "/setup.sh;orbstat -s ceusnexport.ucsd.edu:usarray | grep M40 | grep seconds | cut -d' ' -f1"
max_sources = 5
res = subprocess.check_output(cmd, shell=True)
all_sources = res.split()
#if max_sources:
# sources = all_sources[0:max_sources]
#else:
# sources = all_sources
sources = all_sources
print 'retrieved %i of %i M40 sources with seconds latency' % (len(sources), len(all_sources))
| 22.76
| 122
| 0.718805
|
13e356bab1f32bd858c618450780d7e66011e0de
| 7,428
|
py
|
Python
|
kitti_eval/depth_evaluation_utils.py
|
sakshikakde/sfm_dl
|
d39d7068e23fe44394a3d5694bba074f2f05edaf
|
[
"MIT"
] | null | null | null |
kitti_eval/depth_evaluation_utils.py
|
sakshikakde/sfm_dl
|
d39d7068e23fe44394a3d5694bba074f2f05edaf
|
[
"MIT"
] | null | null | null |
kitti_eval/depth_evaluation_utils.py
|
sakshikakde/sfm_dl
|
d39d7068e23fe44394a3d5694bba074f2f05edaf
|
[
"MIT"
] | null | null | null |
# Mostly based on the code written by Clement Godard:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
import numpy as np
# import pandas as pd
import os
import cv2
from collections import Counter
from scipy.interpolate import LinearNDInterpolator  # needed by lin_interp below
import pickle
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25 ).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred)**2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
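# Minimal illustration (not part of the original evaluation code): compare a
# synthetic ground-truth depth map with a prediction carrying a constant 5% bias.
def _compute_errors_demo():
    gt = np.random.uniform(1.0, 80.0, size=1000)   # strictly positive depths (metres)
    pred = gt * 1.05                               # prediction with a 5% relative bias
    abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_errors(gt, pred)
    return abs_rel, a1  # abs_rel is ~0.05 and a1 == 1.0 for this synthetic case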
###############################################################################
####################### KITTI
width_to_focal = dict()
width_to_focal[1242] = 721.5377
width_to_focal[1241] = 718.856
width_to_focal[1224] = 707.0493
width_to_focal[1238] = 718.3351
def load_gt_disp_kitti(path):
gt_disparities = []
for i in range(200):
disp = cv2.imread(path + "/training/disp_noc_0/" + str(i).zfill(6) + "_10.png", -1)
disp = disp.astype(np.float32) / 256
gt_disparities.append(disp)
return gt_disparities
def convert_disps_to_depths_kitti(gt_disparities, pred_disparities):
gt_depths = []
pred_depths = []
pred_disparities_resized = []
for i in range(len(gt_disparities)):
gt_disp = gt_disparities[i]
height, width = gt_disp.shape
pred_disp = pred_disparities[i]
pred_disp = width * cv2.resize(pred_disp, (width, height), interpolation=cv2.INTER_LINEAR)
pred_disparities_resized.append(pred_disp)
mask = gt_disp > 0
gt_depth = width_to_focal[width] * 0.54 / (gt_disp + (1.0 - mask))
pred_depth = width_to_focal[width] * 0.54 / pred_disp
gt_depths.append(gt_depth)
pred_depths.append(pred_depth)
return gt_depths, pred_depths, pred_disparities_resized
###############################################################################
####################### EIGEN
def read_text_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
lines = [l.rstrip() for l in lines]
return lines
def read_file_data(files, data_root):
gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
# camera_id = filename[-1] # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
# print(num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def lin_interp(shape, xyd):
# taken from https://github.com/hunse/kitti
m, n = shape
ij, d = xyd[:, 1::-1], xyd[:, 2]
f = LinearNDInterpolator(ij, d, fill_value=0)
J, I = np.meshgrid(np.arange(n), np.arange(m))
IJ = np.vstack([I.flatten(), J.flatten()]).T
disparity = f(IJ).reshape(shape)
return disparity
def read_calib_file(path):
# taken from https://github.com/hunse/kitti
float_chars = set("0123456789.e+- ")
data = {}
with open(path, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
value = value.strip()
data[key] = value
if float_chars.issuperset(value):
# try to cast to float array
try:
data[key] = np.array(list(map(float, value.split(' '))))
except ValueError:
# casting error: data[key] already eq. value, so pass
pass
return data
def get_focal_length_baseline(calib_dir, cam=2):
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
P2_rect = cam2cam['P_rect_02'].reshape(3,4)
P3_rect = cam2cam['P_rect_03'].reshape(3,4)
# cam 2 is left of camera 0 -6cm
# cam 3 is to the right +54cm
b2 = P2_rect[0,3] / -P2_rect[0,0]
b3 = P3_rect[0,3] / -P3_rect[0,0]
baseline = b3-b2
if cam==2:
focal_length = P2_rect[0,0]
elif cam==3:
focal_length = P3_rect[0,0]
return focal_length, baseline
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n-1) + colSub - 1
def generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False):
# load calibration files
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir + 'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3,3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
R_cam2rect[:3,:3] = cam2cam['R_rect_00'].reshape(3,3)
P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3,4)
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
# load velodyne points and remove all behind image plane (approximation)
# each row of the velodyne data is forward, left, up, reflectance
velo = load_velodyne_points(velo_file_name)
velo = velo[velo[:, 0] >= 0, :]
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:,:2] / velo_pts_im[:,2][..., np.newaxis]
if vel_depth:
velo_pts_im[:, 2] = velo[:, 0]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:,0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:,1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:,0] < im_shape[1]) & (velo_pts_im[:,1] < im_shape[0])
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
depth = np.zeros((im_shape))
depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds==dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth<0] = 0
if interp:
# interpolate the depth map to fill in holes
depth_interp = lin_interp(im_shape, velo_pts_im)
return depth, depth_interp
else:
return depth
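# Minimal usage sketch; the calibration directory, velodyne file and image
# shape below are placeholders following the KITTI raw-data layout.
if __name__ == '__main__':
    _calib_dir = '/path/to/kitti/2011_09_26/'
    _velo_file = _calib_dir + '2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin'
    _depth = generate_depth_map(_calib_dir, _velo_file, (375, 1242), cam=2, interp=False)
    print('depth map shape:', _depth.shape)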
| avg_line_length: 32.578947 | max_line_length: 98 | alphanum_fraction: 0.599623 |
hexsha: 9407a11ad6881e63f368e757cc5e9b0e8685cf29 | size: 8,914 | ext: py | lang: Python
max_stars: utils_pg.py | ruotianluo/neural-summ-cnndm-pytorch | 027b63107b748bc56356bd119b243cfdda684aa2 | ["MIT"] | count: 3 | 2018-10-22T23:03:40.000Z | 2018-10-23T09:45:32.000Z
max_issues: utils_pg.py | ruotianluo/neural-summ-cnndm-pytorch | 027b63107b748bc56356bd119b243cfdda684aa2 | ["MIT"] | count: null | null | null
max_forks: utils_pg.py | ruotianluo/neural-summ-cnndm-pytorch | 027b63107b748bc56356bd119b243cfdda684aa2 | ["MIT"] | count: null | null | null
# -*- coding: utf-8 -*-
#pylint: skip-file
import numpy as np
from numpy.random import random as rand
import cPickle as pickle
import sys
import os
import shutil
from copy import deepcopy
import random
import torch
from torch import nn
def init_seeds():
random.seed(123)
torch.manual_seed(123)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(123)
def init_lstm_weight(lstm):
for param in lstm.parameters():
if len(param.shape) >= 2: # weights
init_ortho_weight(param.data)
else: # bias
init_bias(param.data)
def init_gru_weight(gru):
for param in gru.parameters():
if len(param.shape) >= 2: # weights
init_ortho_weight(param.data)
else: # bias
init_bias(param.data)
def init_linear_weight(linear):
init_xavier_weight(linear.weight)
if linear.bias is not None:
init_bias(linear.bias)
def init_normal_weight(w):
nn.init.normal_(w, mean=0, std=0.01)
def init_uniform_weight(w):
nn.init.uniform_(w, -0.1, 0.1)
def init_ortho_weight(w):
nn.init.orthogonal_(w)
def init_xavier_weight(w):
nn.init.xavier_normal_(w)
def init_bias(b):
nn.init.constant_(b, 0.)
def rebuild_dir(path):
if os.path.exists(path):
try:
shutil.rmtree(path)
except OSError:
pass
os.mkdir(path)
def save_model(f, model, optimizer):
torch.save({"model_state_dict" : model.state_dict(),
"optimizer_state_dict" : optimizer.state_dict()},
f)
def load_model(f, model, optimizer):
checkpoint = torch.load(f)
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
return model, optimizer
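# Minimal sketch of the save/load round trip above; the tiny linear model,
# SGD optimizer and file path are illustrative stand-ins, not the project's
# actual summarization model.
def _checkpoint_roundtrip_example(path="/tmp/example_ckpt.pt"):
    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    save_model(path, model, optimizer)
    return load_model(path, model, optimizer)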
def sort_samples(x, len_x, mask_x, y, len_y, \
mask_y, oys, x_ext, y_ext, oovs):
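    # Sort the whole batch by source length in descending order (as typically
    # required for packed padded sequences) and re-index every tensor/list of
    # the batch with the same permutation.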
sorted_x_idx = np.argsort(len_x)[::-1]
sorted_x_len = np.array(len_x)[sorted_x_idx]
sorted_x = x[:, sorted_x_idx]
sorted_x_mask = mask_x[:, sorted_x_idx, :]
sorted_oovs = [oovs[i] for i in sorted_x_idx]
sorted_y_len = np.array(len_y)[sorted_x_idx]
sorted_y = y[:, sorted_x_idx]
sorted_y_mask = mask_y[:, sorted_x_idx, :]
sorted_oys = [oys[i] for i in sorted_x_idx]
sorted_x_ext = x_ext[:, sorted_x_idx]
sorted_y_ext = y_ext[:, sorted_x_idx]
return sorted_x, sorted_x_len, sorted_x_mask, sorted_y, \
sorted_y_len, sorted_y_mask, sorted_oys, \
sorted_x_ext, sorted_y_ext, sorted_oovs
def print_sent_dec(y_pred, y, y_mask, oovs, modules, consts, options, batch_size, lvt_dict = None):
print "golden truth and prediction samples:"
max_y_words = np.sum(y_mask, axis = 0)
max_y_words = max_y_words.reshape((batch_size))
max_num_docs = 16 if batch_size > 16 else batch_size
is_unicode = options["is_unicode"]
dict_size = len(modules["i2w"])
for idx_doc in range(max_num_docs):
print idx_doc + 1, "----------------------------------------------------------------------------------------------------"
sent_true= ""
for idx_word in range(max_y_words[idx_doc]):
i = y[idx_word, idx_doc] if options["has_learnable_w2v"] else np.argmax(y[idx_word, idx_doc])
if i in modules["i2w"]:
sent_true += modules["i2w"][i]
else:
sent_true += oovs[idx_doc][i - dict_size]
if not is_unicode:
sent_true += " "
if is_unicode:
print sent_true.encode("utf-8")
else:
print sent_true
print
sent_pred = ""
for idx_word in range(max_y_words[idx_doc]):
i = torch.argmax(y_pred[idx_word, idx_doc, :]).item()
if options["has_lvt_trick"]:
i = lvt_dict[i]
if i in modules["i2w"]:
sent_pred += modules["i2w"][i]
else:
sent_pred += oovs[idx_doc][i - dict_size]
if not is_unicode:
sent_pred += " "
if is_unicode:
print sent_pred.encode("utf-8")
else:
print sent_pred
print "----------------------------------------------------------------------------------------------------"
print
def write_summ(dst_path, summ_list, num_summ, options, i2w = None, score_list = None):
is_unicode = options["is_unicode"]
assert num_summ > 0
with open(dst_path, "w") as f_summ:
if num_summ == 1:
if score_list != None:
f_summ.write(str(score_list[0]))
f_summ.write("\t")
if i2w != None:
#for e in summ_list:
# print i2w[int(e)],
#print "\n"
if is_unicode:
s = "".join([i2w[int(e)] for e in summ_list]).encode("utf-8")
else:
s = " ".join([i2w[int(e)] for e in summ_list])
else:
s = " ".join(summ_list)
f_summ.write(s)
f_summ.write("\n")
else:
assert num_summ == len(summ_list)
if score_list != None:
assert num_summ == len(score_list)
for i in xrange(num_summ):
if score_list != None:
f_summ.write(str(score_list[i]))
f_summ.write("\t")
if i2w != None:
#for e in summ_list[i]:
# print i2w[int(e)],
#print "\n"
if is_unicode:
s = "".join([i2w[int(e)] for e in summ_list[i]]).encode("utf-8")
else:
s = " ".join([i2w[int(e)] for e in summ_list[i]])
else:
s = " ".join(summ_list[i])
f_summ.write(s)
f_summ.write("\n")
def write_for_rouge(fname, ref_sents, dec_words, cfg):
dec_sents = []
while len(dec_words) > 0:
try:
fst_period_idx = dec_words.index(".")
except ValueError:
fst_period_idx = len(dec_words)
sent = dec_words[:fst_period_idx + 1]
dec_words = dec_words[fst_period_idx + 1:]
dec_sents.append(' '.join(sent))
ref_file = "".join((cfg.cc.GROUND_TRUTH_PATH, fname))
decoded_file = "".join((cfg.cc.SUMM_PATH, fname))
with open(ref_file, "w") as f:
for idx, sent in enumerate(ref_sents):
sent = sent.strip()
f.write(sent) if idx == len(ref_sents) - 1 else f.write(sent + "\n")
with open(decoded_file, "w") as f:
for idx, sent in enumerate(dec_sents):
sent = sent.strip()
f.write(sent) if idx == len(dec_sents) - 1 else f.write(sent + "\n")
def write_summ_copy(dst_path, summ_list, num_summ, options, i2w = None, oovs=None, score_list = None):
assert num_summ > 0
with open(dst_path, "w") as f_summ:
if num_summ == 1:
if score_list != None:
f_summ.write(str(score_list[0]))
f_summ.write("\t")
if i2w != None:
'''
for e in summ_list:
e = int(e)
if e in i2w:
print i2w[e],
else:
print oovs[e - len(i2w)],
print "\n"
'''
s = []
for e in summ_list:
e = int(e)
if e in i2w:
s.append(i2w[e])
else:
s.append(oovs[e - len(i2w)])
s = " ".join(s)
else:
s = " ".join(summ_list)
f_summ.write(s)
f_summ.write("\n")
else:
assert num_summ == len(summ_list)
if score_list != None:
assert num_summ == len(score_list)
for i in xrange(num_summ):
if score_list != None:
f_summ.write(str(score_list[i]))
f_summ.write("\t")
if i2w != None:
'''
for e in summ_list[i]:
e = int(e)
if e in i2w:
print i2w[e],
else:
print oovs[e - len(i2w)],
print "\n"
'''
s = []
for e in summ_list[i]:
e = int(e)
if e in i2w:
s.append(i2w[e])
else:
s.append(oovs[e - len(i2w)])
s = " ".join(s)
else:
s = " ".join(summ_list[i])
f_summ.write(s)
f_summ.write("\n")
| avg_line_length: 33.137546 | max_line_length: 129 | alphanum_fraction: 0.4954 |
hexsha: 4a8bea19b4c3c8588c3bf0158c1afe9b34871a0c | size: 12,872 | ext: py | lang: Python
max_stars: google/cloud/errorreporting_v1beta1/services/error_group_service/transports/grpc_asyncio.py | LaudateCorpus1/python-error-reporting | b207f2cec4f5f3196e775ed35cd429f34f9c0bd1 | ["Apache-2.0"] | count: 17 | 2020-09-19T17:48:32.000Z | 2022-03-09T06:40:39.000Z
max_issues: google/cloud/errorreporting_v1beta1/services/error_group_service/transports/grpc_asyncio.py | LaudateCorpus1/python-error-reporting | b207f2cec4f5f3196e775ed35cd429f34f9c0bd1 | ["Apache-2.0"] | count: 67 | 2020-02-11T13:24:20.000Z | 2022-03-18T15:27:25.000Z
max_forks: google/cloud/errorreporting_v1beta1/services/error_group_service/transports/grpc_asyncio.py | LaudateCorpus1/python-error-reporting | b207f2cec4f5f3196e775ed35cd429f34f9c0bd1 | ["Apache-2.0"] | count: 6 | 2020-02-07T00:29:36.000Z | 2022-02-16T07:27:09.000Z
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.errorreporting_v1beta1.types import common
from google.cloud.errorreporting_v1beta1.types import error_group_service
from .base import ErrorGroupServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import ErrorGroupServiceGrpcTransport
class ErrorGroupServiceGrpcAsyncIOTransport(ErrorGroupServiceTransport):
"""gRPC AsyncIO backend transport for ErrorGroupService.
Service for retrieving and updating individual error groups.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "clouderrorreporting.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "clouderrorreporting.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def get_group(
self,
) -> Callable[[error_group_service.GetGroupRequest], Awaitable[common.ErrorGroup]]:
r"""Return a callable for the get group method over gRPC.
Get the specified group.
Returns:
Callable[[~.GetGroupRequest],
Awaitable[~.ErrorGroup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_group" not in self._stubs:
self._stubs["get_group"] = self.grpc_channel.unary_unary(
"/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/GetGroup",
request_serializer=error_group_service.GetGroupRequest.serialize,
response_deserializer=common.ErrorGroup.deserialize,
)
return self._stubs["get_group"]
@property
def update_group(
self,
) -> Callable[
[error_group_service.UpdateGroupRequest], Awaitable[common.ErrorGroup]
]:
r"""Return a callable for the update group method over gRPC.
Replace the data for the specified group.
Fails if the group does not exist.
Returns:
Callable[[~.UpdateGroupRequest],
Awaitable[~.ErrorGroup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_group" not in self._stubs:
self._stubs["update_group"] = self.grpc_channel.unary_unary(
"/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/UpdateGroup",
request_serializer=error_group_service.UpdateGroupRequest.serialize,
response_deserializer=common.ErrorGroup.deserialize,
)
return self._stubs["update_group"]
def close(self):
return self.grpc_channel.close()
__all__ = ("ErrorGroupServiceGrpcAsyncIOTransport",)
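# Minimal usage sketch (not part of the generated module). It assumes
# application default credentials are available; the group name below is a
# placeholder resource name.
import asyncio
async def _get_group_example(group_name: str) -> common.ErrorGroup:
    transport = ErrorGroupServiceGrpcAsyncIOTransport()
    try:
        request = error_group_service.GetGroupRequest(group_name=group_name)
        return await transport.get_group(request)
    finally:
        await transport.close()
if __name__ == "__main__":
    print(asyncio.run(_get_group_example("projects/my-project/groups/my-group")))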
| avg_line_length: 44.233677 | max_line_length: 93 | alphanum_fraction: 0.639372 |
hexsha: d99b69cd7e1b049fe6663ea634a948e0e7b74755 | size: 3,197 | ext: py | lang: Python
max_stars: canon.py | ipa-tys/ockeghem | a30ffa9e7ea7cf86b376ff5a9976a792bc725276 | ["MIT"] | count: null | null | null
max_issues: canon.py | ipa-tys/ockeghem | a30ffa9e7ea7cf86b376ff5a9976a792bc725276 | ["MIT"] | count: null | null | null
max_forks: canon.py | ipa-tys/ockeghem | a30ffa9e7ea7cf86b376ff5a9976a792bc725276 | ["MIT"] | count: null | null | null
from music21 import *
from midi2audio import FluidSynth
import random
import pdb
import sys
import argparse
import textwrap
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='generate canon antecedents.',
prog='canon.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''
Examples:
1.) Canon in the lower fourth:
python canon.py -o canon01 -t l4
''')
)
parser.add_argument('-t','--type', help='Type of canon', required=True)
parser.add_argument('-o','--output',help='File stem for output files',required=True)
parser.add_argument('--pdf', help='Generate sheet music PDF', action='store_true')
# todo: add difficulty levels (e.g. only a subset of steps, etc)
# let choose the soundfont
args = parser.parse_args()
filestem = args.output
canon_type = args.type
pdf_output = args.pdf
# todo: ficta notes B, Eb, and F-sharp are allowed
# after creating a new note, check that it has no melodic tritone and that the two lines are consonant
# check_comes_melodically(note1)
# check_canon_harmonically(note1)
# otherwise try ficta in comes and/or in dux
legalNoteNamesTenor = ['B-2','B2','C3','D3','E3','F3','G3','A3','B-3','B3','C4','D4','E4','F4','G4','A4']
legalNoteNamesSoprano = ['B-3','B3','C4','D4','E4','F4','G4','A4','B-4','B4','C5','D5'] # 'E5','F5','G5','A5']
legalNotes = []
for legalNote in legalNoteNamesSoprano:
note1 = note.Note(legalNote)
note1.duration = note.duration.Duration(4.0)
legalNotes.append(note1)
if canon_type == 'l8':
intervalNames = ['m-3','M-3','P-4','P1','m3','M3','P5']
if canon_type == 'l5':
intervalNames = ['m-3','M-3','P-5','P1','m2','M2','P4']
if canon_type == 'l4':
intervalNames = ['m-2','M-2','P-4','m2','M2','m3','M3','P5']
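    # Interval names follow music21 notation (quality plus signed size), e.g.
    # 'm-3' is a descending minor third and 'P5' an ascending perfect fifth;
    # each list above is the set of melodic steps allowed for that canon type.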
intervals = []
for intervalName in intervalNames:
intervals.append(interval.Interval(intervalName))
fs = FluidSynth('Papelmedia_Irina_Brochin.sf2')
note1 = note.Note('D4')
note1.duration = note.duration.Duration(4.0)
stream1 = stream.Stream()
stream1.append(note1)
    # draw random intervals until one lands on an allowed pitch, then append it
    for _ in range(10):
        legal = False
        while not legal:
            iv = random.choice(intervals)
            iv.noteStart = stream1[-1]
            if iv.noteEnd in legalNotes:
                legal = True
                note1 = iv.noteEnd
                note1.duration = note.duration.Duration(4.0)
        stream1.append(note1)
fp = stream1.write('midi', fp=filestem + '.mid')
if args.pdf:
stream1.show()
# stream1.show(filestem + '.pdf')
my_tempo = tempo.MetronomeMark(number=40)
stream1.insert(0, my_tempo)
cadence_midi = stream1.write('midi')
fs = FluidSynth('~/soundfonts/papelmedia_Irina_Brochin.sf2')
fs.midi_to_audio(cadence_midi, filestem + '.wav')
# todo: command line parser, e.g. with option for tempo, ornamentation etc.
# other canons, e.g. in upper octave, fifth, fourth, ...
# antecedent for canon around a cantus firmus
# try aubio for pitch detection
| avg_line_length: 33.652632 | max_line_length: 114 | alphanum_fraction: 0.618392 |
hexsha: 0c786a5b9646824715d086306d165d9e0d44c2fa | size: 8,725 | ext: py | lang: Python
max_stars: python/doc/conf.py | industrial-sloth/thunder | ea2a6b3cfdefb8d09441865e15ac65c6e2f01151 | ["Apache-2.0"] | count: null | null | null
max_issues: python/doc/conf.py | industrial-sloth/thunder | ea2a6b3cfdefb8d09441865e15ac65c6e2f01151 | ["Apache-2.0"] | count: null | null | null
max_forks: python/doc/conf.py | industrial-sloth/thunder | ea2a6b3cfdefb8d09441865e15ac65c6e2f01151 | ["Apache-2.0"] | count: null | null | null
# -*- coding: utf-8 -*-
#
# Thunder documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 16 17:00:45 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
import matplotlib as mpl
mpl.use("Agg")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
'ipython_directive',
'ipython_console_highlighting']
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Thunder'
copyright = u'2014, Jeremy Freeman'
html_show_copyright = False
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
sys.path.insert(0, os.path.abspath(os.path.pardir))
import thunder
version = thunder.__version__
# The full version, including alpha/beta/rc tags.
release = thunder.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','_templates']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_logo = "thunder_logo.png"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': " ",
'source_link_position': "footer",
'bootstrap_version': "3",
'bootswatch_theme': "simplex",
'navbar_sidebarrel': False,
'navbar_links': [("Tutorials", "tutorials"),("API", "api")]
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Thunderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Thunder.tex', u'Thunder Documentation',
u'Jeremy Freeman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'thunder', u'Thunder Documentation',
[u'Jeremy Freeman'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Thunder', u'Thunder Documentation',
u'Jeremy Freeman', 'Thunder', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| avg_line_length: 31.384892 | max_line_length: 80 | alphanum_fraction: 0.714842 |
hexsha: df22bcb9cd272e5d8175cde97861a1cd0c68c0ef | size: 4,703 | ext: py | lang: Python
max_stars: MSS_points.py | U-Shift/circuity-lisbon | fb55e201d336c5a10af989cd1e4ed2c3052fbeb7 | ["MIT"] | count: null | null | null
max_issues: MSS_points.py | U-Shift/circuity-lisbon | fb55e201d336c5a10af989cd1e4ed2c3052fbeb7 | ["MIT"] | count: null | null | null
max_forks: MSS_points.py | U-Shift/circuity-lisbon | fb55e201d336c5a10af989cd1e4ed2c3052fbeb7 | ["MIT"] | count: null | null | null
import pandas as pd
import geopandas as gpd
from tqdm import tqdm, trange
import random
from shapely.geometry import Point, Polygon
def generate_random(number, polygon):
points = []
aux = polygon.bounds
minx = aux['minx']
miny = aux['miny']
maxx = aux['maxx']
maxy = aux['maxy']
while len(points) < number:
pnt = Point(random.uniform(float(minx), float(maxx)), random.uniform(float(miny), float(maxy)))
if polygon.contains(pnt).any():
points.append(pnt)
return points
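# Minimal sketch of how generate_random() is used; the unit square below is a
# hypothetical stand-in for a census-zone geometry, wrapped in a GeoSeries
# because the function uses the GeoSeries forms of .bounds and .contains().
def _generate_random_example():
    square = gpd.GeoSeries([Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])])
    pts = generate_random(3, square)
    return all(square.contains(p).any() for p in pts)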
NUMBER_OF_POINTS_PER_ZONE = 5
OUTPUT_FILE = 'data/imob_generated_points.csv'
# Read IMOB data on OD
df = pd.read_csv('Data/IMOB/15.1_IMOB/BASE DADOS/AML/CSV/TBL_AML/TBL_viagens_OR_DE_AML.csv', sep=';')
df = df[df['DTCC_or11'].notna()]
df = df[df['DTCC_de11'].notna()]
df['DTCC_or11'] = df['DTCC_or11'].astype('int64')
df['FR_or11'] = df['FR_or11'].astype('int64')
df['Sec_or11'] = df['Sec_or11'].astype('int64')
df['SS_or11'] = df['SS_or11'].astype('int64')
df['DTCC_de11'] = df['DTCC_de11'].astype('int64')
df['FR_de11'] = df['FR_de11'].astype('int64')
df['Sec_de11'] = df['Sec_de11'].astype('int64')
df['SS_de11'] = df['SS_de11'].astype('int64')
df['Tipo_veiculo_2'] = df['Tipo_veiculo_2'].astype('category')
print('Original IMOB data shape: ', df.shape)
### Compute BRI for OD trips
df['BRI11_or'] = df['DTCC_or11'].astype('str').str.zfill(4) + \
df['FR_or11'].astype('str').str.zfill(2) + \
df['Sec_or11'].astype('str').str.zfill(3) + \
df['SS_or11'].astype('str').str.zfill(2)
df['BRI11_de'] = df['DTCC_de11'].astype('str').str.zfill(4) + \
df['FR_de11'].astype('str').str.zfill(2) + \
df['Sec_de11'].astype('str').str.zfill(3) + \
df['SS_de11'].astype('str').str.zfill(2)
df['BRI11_or'] = df['BRI11_or'].astype('int64')
df['BRI11_de'] = df['BRI11_de'].astype('int64')
### Filter for Lisbon municipaly instead of metropolitan area
mask_lisboa = (df['DTCC_or11'] == 1106) & (df['DTCC_de11'] == 1106)
df = df.loc[mask_lisboa]
print('Trips inside Lisbon\'s municipality:', df.shape[0])
# Divide into cycling, driving and walking trips
df_cycling = df[df['Tipo_veiculo_2'] == 'Cycling']
df_walking = df[df['Tipo_veiculo_2'] == 'Walking']
df_motorized = df[(df['Tipo_veiculo_2'] == 'passenger car - as driver') | \
(df['Tipo_veiculo_2'] == 'passenger car - as passenger') | \
(df['Tipo_veiculo_2'] == 'van/lorry/tractor/camper') | \
(df['Tipo_veiculo_2'] == 'Táxi (como passageiro)') | \
(df['Tipo_veiculo_2'] == 'motorcycle and moped')]
df_tp = df[(df['Tipo_veiculo_2'] == 'Regular train') | \
(df['Tipo_veiculo_2'] == 'Urban rail') | \
(df['Tipo_veiculo_2'] == 'Waterways') | \
(df['Tipo_veiculo_2'] == 'bus and coach - TE') | \
(df['Tipo_veiculo_2'] == 'bus and coach - TP')]
print('Amount of trips:',
'\n Cycling:', df_cycling.shape[0],
'\n Walking:', df_walking.shape[0],
'\n Motorized:', df_motorized.shape[0],
'\n TP:', df_tp.shape[0],)
# Read Lisbon's CAOP 2011 data
gdf = gpd.read_file("IMOB/lisboa2011/BGRI11_LISBOA.shp")
gdf['DTMN11'] = gdf['DTMN11'].astype('int64')
gdf['FR11'] = gdf['FR11'].astype('int64')
gdf['SEC11'] = gdf['SEC11'].astype('int64')
gdf['SS11'] = gdf['SS11'].astype('int64')
gdf['BGRI11'] = gdf['BGRI11'].astype('int64')
gdf['LUG11'] = gdf['LUG11'].astype('int64')
gdf_proj = gdf.to_crs(epsg=4326)
columns = ['point_A', 'point_B', 'vehicle', 'weekday', 'weight']
data = pd.DataFrame(columns=columns)
with tqdm(total=df.shape[0]*NUMBER_OF_POINTS_PER_ZONE) as t:
t.set_description('Generating random points ')
for i in range(df.shape[0]):
example = df.iloc[i]
example_or = example['BRI11_or']
example_de = example['BRI11_de']
mask_or = gdf_proj['BGRI11'] == example_or
mask_de = gdf_proj['BGRI11'] == example_de
example_or = gdf_proj.loc[mask_or]
example_de = gdf_proj.loc[mask_de]
for j in range(NUMBER_OF_POINTS_PER_ZONE):
t.update(1)
points_or = generate_random(1, example_or.geometry)[0]
points_de = generate_random(1, example_de.geometry)[0]
data_row = {}
data_row['point_A'] = [points_or.x, points_or.y]
data_row['point_B'] = [points_de.x, points_de.y]
data_row['vehicle'] = example['Tipo_veiculo_2']
data_row['weekday'] = example['Dia_da_semana']
data_row['weight'] = example['PESOFIN']
            data = data.append(data_row, ignore_index=True, sort=False)  # note: DataFrame.append was removed in pandas 2.0; newer pandas needs pd.concat
data.to_csv(OUTPUT_FILE)
| avg_line_length: 36.457364 | max_line_length: 103 | alphanum_fraction: 0.613863 |
hexsha: d7bb7313465f6c274eaad5056521f69857e0d1dc | size: 81,664 | ext: py | lang: Python
max_stars: manim/mobject/types/vectorized_mobject.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | ["MIT"] | count: 2 | 2022-03-31T08:31:00.000Z | 2022-03-31T08:31:43.000Z
max_issues: manim/mobject/types/vectorized_mobject.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | ["MIT"] | count: null | null | null
max_forks: manim/mobject/types/vectorized_mobject.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | ["MIT"] | count: null | null | null
"""Mobjects that use vector graphics."""
__all__ = [
"VMobject",
"VGroup",
"VDict",
"VectorizedPoint",
"CurvesAsSubmobjects",
"DashedVMobject",
]
import itertools as it
import sys
import typing
from typing import Optional, Sequence, Union
import colour
import numpy as np
from PIL.Image import Image
from manim.mobject.opengl.opengl_compatibility import ConvertToOpenGL
from manim.mobject.opengl.opengl_vectorized_mobject import OpenGLVMobject
from manim.mobject.three_d.three_d_utils import (
get_3d_vmob_gradient_start_and_end_points,
)
from ... import config
from ...constants import *
from ...mobject.mobject import Mobject
from ...utils.bezier import (
bezier,
get_smooth_handle_points,
integer_interpolate,
interpolate,
partial_bezier_points,
proportions_along_bezier_curve_for_point,
)
from ...utils.color import BLACK, WHITE, color_to_rgba
from ...utils.deprecation import deprecated
from ...utils.iterables import make_even, stretch_array_to_length, tuplify
from ...utils.space_ops import rotate_vector, shoelace_direction
# TODO
# - Change cubic curve groups to have 4 points instead of 3
# - Change sub_path idea accordingly
# - No more mark_paths_closed, instead have the camera test
# if last point in close to first point
# - Think about length of self.points. Always 0 or 1 mod 4?
# That's kind of weird.
class VMobject(Mobject):
"""A vectorized mobject.
Parameters
----------
    background_stroke_color
        The purpose of background stroke is to have something
        that won't overlap fill, e.g. for text against some
        textured background.
    sheen_factor
        When a color c is set, a second color is computed by
        interpolating c towards WHITE by sheen_factor, and the
        display will gradient to this secondary color in the
        direction of sheen_direction.
    close_new_points
        Indicates that it will not be displayed, but
        that it should count in parent mobject's path
    tolerance_for_point_equality
        This is within a pixel
"""
sheen_factor = 0.0
def __init__(
self,
fill_color=None,
fill_opacity=0.0,
stroke_color=None,
stroke_opacity=1.0,
stroke_width=DEFAULT_STROKE_WIDTH,
background_stroke_color=BLACK,
background_stroke_opacity=1.0,
background_stroke_width=0,
sheen_factor=0.0,
sheen_direction=UL,
close_new_points=False,
pre_function_handle_to_anchor_scale_factor=0.01,
make_smooth_after_applying_functions=False,
background_image=None,
shade_in_3d=False,
# TODO, do we care about accounting for varying zoom levels?
tolerance_for_point_equality=1e-6,
n_points_per_cubic_curve=4,
**kwargs,
):
self.fill_opacity = fill_opacity
self.stroke_opacity = stroke_opacity
self.stroke_width = stroke_width
self.background_stroke_color = background_stroke_color
self.background_stroke_opacity = background_stroke_opacity
self.background_stroke_width = background_stroke_width
self.sheen_factor = sheen_factor
self.sheen_direction = sheen_direction
self.close_new_points = close_new_points
self.pre_function_handle_to_anchor_scale_factor = (
pre_function_handle_to_anchor_scale_factor
)
self.make_smooth_after_applying_functions = make_smooth_after_applying_functions
self.background_image = background_image
self.shade_in_3d = shade_in_3d
self.tolerance_for_point_equality = tolerance_for_point_equality
self.n_points_per_cubic_curve = n_points_per_cubic_curve
super().__init__(**kwargs)
if fill_color:
self.fill_color = fill_color
if stroke_color:
self.stroke_color = stroke_color
def get_group_class(self):
return VGroup
# Colors
def init_colors(self, propagate_colors=True):
self.set_fill(
color=self.fill_color,
opacity=self.fill_opacity,
family=propagate_colors,
)
self.set_stroke(
color=self.stroke_color,
width=self.stroke_width,
opacity=self.stroke_opacity,
family=propagate_colors,
)
self.set_background_stroke(
color=self.background_stroke_color,
width=self.background_stroke_width,
opacity=self.background_stroke_opacity,
family=propagate_colors,
)
self.set_sheen(
factor=self.sheen_factor,
direction=self.sheen_direction,
family=propagate_colors,
)
if not propagate_colors:
for submobject in self.submobjects:
submobject.init_colors(propagate_colors=False)
return self
def generate_rgbas_array(self, color, opacity):
"""
First arg can be either a color, or a tuple/list of colors.
Likewise, opacity can either be a float, or a tuple of floats.
If self.sheen_factor is not zero, and only
        one color was passed in, a second, slightly lighter color
will automatically be added for the gradient
"""
colors = [c if (c is not None) else BLACK for c in tuplify(color)]
opacities = [o if (o is not None) else 0 for o in tuplify(opacity)]
rgbas = np.array(
[color_to_rgba(c, o) for c, o in zip(*make_even(colors, opacities))],
)
sheen_factor = self.get_sheen_factor()
if sheen_factor != 0 and len(rgbas) == 1:
light_rgbas = np.array(rgbas)
light_rgbas[:, :3] += sheen_factor
np.clip(light_rgbas, 0, 1, out=light_rgbas)
rgbas = np.append(rgbas, light_rgbas, axis=0)
return rgbas
def update_rgbas_array(self, array_name, color=None, opacity=None):
rgbas = self.generate_rgbas_array(color, opacity)
if not hasattr(self, array_name):
setattr(self, array_name, rgbas)
return self
# Match up current rgbas array with the newly calculated
# one. 99% of the time they'll be the same.
curr_rgbas = getattr(self, array_name)
if len(curr_rgbas) < len(rgbas):
curr_rgbas = stretch_array_to_length(curr_rgbas, len(rgbas))
setattr(self, array_name, curr_rgbas)
elif len(rgbas) < len(curr_rgbas):
rgbas = stretch_array_to_length(rgbas, len(curr_rgbas))
# Only update rgb if color was not None, and only
# update alpha channel if opacity was passed in
if color is not None:
curr_rgbas[:, :3] = rgbas[:, :3]
if opacity is not None:
curr_rgbas[:, 3] = rgbas[:, 3]
return self
def set_fill(
self,
color: Optional[str] = None,
opacity: Optional[float] = None,
family: bool = True,
):
"""Set the fill color and fill opacity of a :class:`VMobject`.
Parameters
----------
color
Fill color of the :class:`VMobject`.
opacity
Fill opacity of the :class:`VMobject`.
family
If ``True``, the fill color of all submobjects is also set.
Returns
-------
:class:`VMobject`
``self``
Examples
--------
.. manim:: SetFill
:save_last_frame:
class SetFill(Scene):
def construct(self):
square = Square().scale(2).set_fill(WHITE,1)
circle1 = Circle().set_fill(GREEN,0.8)
circle2 = Circle().set_fill(YELLOW) # No fill_opacity
circle3 = Circle().set_fill(color = '#FF2135', opacity = 0.2)
group = Group(circle1,circle2,circle3).arrange()
self.add(square)
self.add(group)
See Also
--------
:meth:`~.VMobject.set_style`
"""
if family:
for submobject in self.submobjects:
submobject.set_fill(color, opacity, family)
self.update_rgbas_array("fill_rgbas", color, opacity)
if opacity is not None:
self.fill_opacity = opacity
return self
def set_stroke(
self,
color=None,
width=None,
opacity=None,
background=False,
family=True,
):
if family:
for submobject in self.submobjects:
submobject.set_stroke(color, width, opacity, background, family)
if background:
array_name = "background_stroke_rgbas"
width_name = "background_stroke_width"
opacity_name = "background_stroke_opacity"
else:
array_name = "stroke_rgbas"
width_name = "stroke_width"
opacity_name = "stroke_opacity"
self.update_rgbas_array(array_name, color, opacity)
if width is not None:
setattr(self, width_name, width)
if opacity is not None:
setattr(self, opacity_name, opacity)
if color is not None and background:
self.background_stroke_color = color
return self
def set_background_stroke(self, **kwargs):
kwargs["background"] = True
self.set_stroke(**kwargs)
return self
def set_style(
self,
fill_color=None,
fill_opacity=None,
stroke_color=None,
stroke_width=None,
stroke_opacity=None,
background_stroke_color=None,
background_stroke_width=None,
background_stroke_opacity=None,
sheen_factor=None,
sheen_direction=None,
background_image=None,
family=True,
):
self.set_fill(color=fill_color, opacity=fill_opacity, family=family)
self.set_stroke(
color=stroke_color,
width=stroke_width,
opacity=stroke_opacity,
family=family,
)
self.set_background_stroke(
color=background_stroke_color,
width=background_stroke_width,
opacity=background_stroke_opacity,
family=family,
)
if sheen_factor:
self.set_sheen(
factor=sheen_factor,
direction=sheen_direction,
family=family,
)
if background_image:
self.color_using_background_image(background_image)
return self
def get_style(self, simple=False):
ret = {
"stroke_opacity": self.get_stroke_opacity(),
"stroke_width": self.get_stroke_width(),
}
if simple:
ret["fill_color"] = colour.rgb2hex(self.get_fill_color().get_rgb())
ret["fill_opacity"] = self.get_fill_opacity()
ret["stroke_color"] = colour.rgb2hex(self.get_stroke_color().get_rgb())
else:
ret["fill_color"] = self.get_fill_colors()
ret["fill_opacity"] = self.get_fill_opacities()
ret["stroke_color"] = self.get_stroke_colors()
ret["background_stroke_color"] = self.get_stroke_colors(background=True)
ret["background_stroke_width"] = self.get_stroke_width(background=True)
ret["background_stroke_opacity"] = self.get_stroke_opacity(background=True)
ret["sheen_factor"] = self.get_sheen_factor()
ret["sheen_direction"] = self.get_sheen_direction()
ret["background_image"] = self.get_background_image()
return ret
def match_style(self, vmobject, family=True):
self.set_style(**vmobject.get_style(), family=False)
if family:
# Does its best to match up submobject lists, and
# match styles accordingly
submobs1, submobs2 = self.submobjects, vmobject.submobjects
if len(submobs1) == 0:
return self
elif len(submobs2) == 0:
submobs2 = [vmobject]
for sm1, sm2 in zip(*make_even(submobs1, submobs2)):
sm1.match_style(sm2)
return self
def set_color(self, color, family=True):
self.set_fill(color, family=family)
self.set_stroke(color, family=family)
return self
def set_opacity(self, opacity, family=True):
self.set_fill(opacity=opacity, family=family)
self.set_stroke(opacity=opacity, family=family)
self.set_stroke(opacity=opacity, family=family, background=True)
return self
def fade(self, darkness=0.5, family=True):
factor = 1.0 - darkness
self.set_fill(opacity=factor * self.get_fill_opacity(), family=False)
self.set_stroke(opacity=factor * self.get_stroke_opacity(), family=False)
self.set_background_stroke(
opacity=factor * self.get_stroke_opacity(background=True),
family=False,
)
super().fade(darkness, family)
return self
def get_fill_rgbas(self):
try:
return self.fill_rgbas
except AttributeError:
return np.zeros((1, 4))
def get_fill_color(self):
"""
If there are multiple colors (for gradient)
this returns the first one
"""
return self.get_fill_colors()[0]
fill_color = property(get_fill_color, set_fill)
def get_fill_opacity(self):
"""
If there are multiple opacities, this returns the
first
"""
return self.get_fill_opacities()[0]
def get_fill_colors(self):
return [
colour.Color(rgb=rgba[:3]) if rgba.any() else None
for rgba in self.get_fill_rgbas()
]
def get_fill_opacities(self):
return self.get_fill_rgbas()[:, 3]
def get_stroke_rgbas(self, background=False):
try:
if background:
rgbas = self.background_stroke_rgbas
else:
rgbas = self.stroke_rgbas
return rgbas
except AttributeError:
return np.zeros((1, 4))
def get_stroke_color(self, background=False):
return self.get_stroke_colors(background)[0]
stroke_color = property(get_stroke_color, set_stroke)
def get_stroke_width(self, background=False):
if background:
width = self.background_stroke_width
else:
width = self.stroke_width
if isinstance(width, str):
width = int(width)
return max(0, width)
def get_stroke_opacity(self, background=False):
return self.get_stroke_opacities(background)[0]
def get_stroke_colors(self, background=False):
return [
colour.Color(rgb=rgba[:3]) if rgba.any() else None
for rgba in self.get_stroke_rgbas(background)
]
def get_stroke_opacities(self, background=False):
return self.get_stroke_rgbas(background)[:, 3]
def get_color(self):
if np.all(self.get_fill_opacities() == 0):
return self.get_stroke_color()
return self.get_fill_color()
color = property(get_color, set_color)
def set_sheen_direction(self, direction: np.ndarray, family=True):
"""Sets the direction of the applied sheen.
Parameters
----------
direction : :class:`numpy.ndarray`, optional
Direction from where the gradient is applied.
Examples
--------
Normal usage::
Circle().set_sheen_direction(UP)
See Also
--------
:meth:`~.VMobject.set_sheen`
:meth:`~.VMobject.rotate_sheen_direction`
"""
direction = np.array(direction)
if family:
for submob in self.get_family():
submob.sheen_direction = direction
else:
self.sheen_direction = direction
return self
    def rotate_sheen_direction(self, angle: float, axis: np.ndarray = OUT, family=True):
"""Rotates the direction of the applied sheen.
Parameters
----------
angle : :class:`float`
Angle by which the direction of sheen is rotated.
axis : :class:`numpy.ndarray`
Axis of rotation.
Examples
--------
Normal usage::
Circle().set_sheen_direction(UP).rotate_sheen_direction(PI)
See Also
--------
:meth:`~.VMobject.set_sheen_direction`
"""
if family:
for submob in self.get_family():
submob.sheen_direction = rotate_vector(
submob.sheen_direction,
angle,
axis,
)
else:
self.sheen_direction = rotate_vector(self.sheen_direction, angle, axis)
return self
def set_sheen(self, factor, direction: np.ndarray = None, family=True):
"""Applies a color gradient from a direction.
Parameters
----------
factor : :class:`float`
The extent of lustre/gradient to apply. If negative, the gradient
starts from black, if positive the gradient starts from white and
changes to the current color.
direction : :class:`numpy.ndarray`, optional
Direction from where the gradient is applied.
Examples
--------
.. manim:: SetSheen
:save_last_frame:
class SetSheen(Scene):
def construct(self):
circle = Circle(fill_opacity=1).set_sheen(-0.3, DR)
self.add(circle)
"""
if family:
for submob in self.submobjects:
submob.set_sheen(factor, direction, family)
self.sheen_factor = factor
if direction is not None:
# family set to false because recursion will
# already be handled above
self.set_sheen_direction(direction, family=False)
# Reset color to put sheen_factor into effect
if factor != 0:
self.set_stroke(self.get_stroke_color(), family=family)
self.set_fill(self.get_fill_color(), family=family)
return self
def get_sheen_direction(self):
return np.array(self.sheen_direction)
def get_sheen_factor(self):
return self.sheen_factor
def get_gradient_start_and_end_points(self):
if self.shade_in_3d:
return get_3d_vmob_gradient_start_and_end_points(self)
else:
direction = self.get_sheen_direction()
c = self.get_center()
bases = np.array(
[self.get_edge_center(vect) - c for vect in [RIGHT, UP, OUT]],
).transpose()
offset = np.dot(bases, direction)
return (c - offset, c + offset)
def color_using_background_image(self, background_image: Union[Image, str]):
self.background_image = background_image
self.set_color(WHITE)
for submob in self.submobjects:
submob.color_using_background_image(background_image)
return self
def get_background_image(self) -> Union[Image, str]:
return self.background_image
def match_background_image(self, vmobject):
self.color_using_background_image(vmobject.get_background_image())
return self
def set_shade_in_3d(self, value=True, z_index_as_group=False):
for submob in self.get_family():
submob.shade_in_3d = value
if z_index_as_group:
submob.z_index_group = self
return self
def set_points(self, points):
self.points = np.array(points)
return self
def set_anchors_and_handles(
self,
anchors1: Sequence[float],
handles1: Sequence[float],
handles2: Sequence[float],
anchors2: Sequence[float],
):
"""Given two sets of anchors and handles, process them to set them as anchors
and handles of the VMobject.
anchors1[i], handles1[i], handles2[i] and anchors2[i] define the i-th bezier
curve of the vmobject. There are four hardcoded parameters and this is a
problem as it makes the number of points per cubic curve unchangeable from 4
(two anchors and two handles).
Returns
-------
:class:`VMobject`
``self``
"""
assert len(anchors1) == len(handles1) == len(handles2) == len(anchors2)
nppcc = self.n_points_per_cubic_curve # 4
total_len = nppcc * len(anchors1)
self.points = np.zeros((total_len, self.dim))
# the following will, from the four sets, dispatch them in points such that
# self.points = [
# anchors1[0], handles1[0], handles2[0], anchors1[0], anchors1[1],
# handles1[1], ...
# ]
arrays = [anchors1, handles1, handles2, anchors2]
for index, array in enumerate(arrays):
self.points[index::nppcc] = array
return self
def clear_points(self):
self.points = np.zeros((0, self.dim))
def append_points(self, new_points):
# TODO, check that number new points is a multiple of 4?
# or else that if len(self.points) % 4 == 1, then
# len(new_points) % 4 == 3?
self.points = np.append(self.points, new_points, axis=0)
return self
def start_new_path(self, point):
# TODO, make sure that len(self.points) % 4 == 0?
self.append_points([point])
return self
def add_cubic_bezier_curve(
self,
anchor1: np.ndarray,
handle1: np.ndarray,
handle2: np.ndarray,
anchor2,
) -> None:
# TODO, check the len(self.points) % 4 == 0?
self.append_points([anchor1, handle1, handle2, anchor2])
def add_cubic_bezier_curve_to(
self,
handle1: np.ndarray,
handle2: np.ndarray,
anchor: np.ndarray,
):
"""Add cubic bezier curve to the path.
        NOTE: the first anchor is not a parameter; by default it is the end of the last sub-path.
Parameters
----------
handle1 : np.ndarray
first handle
handle2 : np.ndarray
second handle
anchor : np.ndarray
anchor
Returns
-------
:class:`VMobject`
``self``
"""
self.throw_error_if_no_points()
new_points = [handle1, handle2, anchor]
if self.has_new_path_started():
self.append_points(new_points)
else:
self.append_points([self.get_last_point()] + new_points)
return self
def add_quadratic_bezier_curve_to(
self,
handle: np.ndarray,
anchor: np.ndarray,
):
"""Add Quadratic bezier curve to the path.
Returns
-------
:class:`VMobject`
``self``
"""
# How does one approximate a quadratic with a cubic?
# refer to the Wikipedia page on Bezier curves
# https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Degree_elevation, accessed Jan 20, 2021
# 1. Copy the end points, and then
# 2. Place the 2 middle control points 2/3 along the line segments
# from the end points to the quadratic curve's middle control point.
# I think that's beautiful.
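        # Concretely, for quadratic control points q0 (last point), q1 (handle)
        # and q2 (anchor), the equivalent cubic has control points
        #   c0 = q0,  c1 = (1/3) q0 + (2/3) q1,  c2 = (2/3) q1 + (1/3) q2,  c3 = q2,
        # which is exactly the 2/3 : 1/3 mix passed below.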
self.add_cubic_bezier_curve_to(
2 / 3 * handle + 1 / 3 * self.get_last_point(),
2 / 3 * handle + 1 / 3 * anchor,
anchor,
)
return self
def add_line_to(self, point: np.ndarray):
"""Add a straight line from the last point of VMobject to the given point.
Parameters
----------
point : np.ndarray
end of the straight line.
Returns
-------
:class:`VMobject`
``self``
"""
nppcc = self.n_points_per_cubic_curve
self.add_cubic_bezier_curve_to(
*(
interpolate(self.get_last_point(), point, a)
for a in np.linspace(0, 1, nppcc)[1:]
)
)
return self
def add_smooth_curve_to(self, *points: np.array):
"""Creates a smooth curve from given points and add it to the VMobject. If two points are passed in, the first is interpreted
as a handle, the second as an anchor.
Parameters
----------
points: np.array
Points (anchor and handle, or just anchor) to add a smooth curve from
Returns
-------
:class:`VMobject`
``self``
Raises
------
ValueError
If 0 or more than 2 points are given.
"""
# TODO remove the value error and just add two parameters with one optional
if len(points) == 1:
handle2 = None
new_anchor = points[0]
elif len(points) == 2:
handle2, new_anchor = points
else:
name = sys._getframe(0).f_code.co_name
raise ValueError(f"Only call {name} with 1 or 2 points")
if self.has_new_path_started():
self.add_line_to(new_anchor)
else:
self.throw_error_if_no_points()
last_h2, last_a2 = self.points[-2:]
last_tangent = last_a2 - last_h2
handle1 = last_a2 + last_tangent
if handle2 is None:
to_anchor_vect = new_anchor - last_a2
new_tangent = rotate_vector(last_tangent, PI, axis=to_anchor_vect)
handle2 = new_anchor - new_tangent
self.append_points([last_a2, handle1, handle2, new_anchor])
return self
def has_new_path_started(self):
nppcc = self.n_points_per_cubic_curve # 4
# A new path starting is defined by a control point which is not part of a bezier subcurve.
return len(self.points) % nppcc == 1
def get_last_point(self):
return self.points[-1]
def is_closed(self):
# TODO use consider_points_equals_2d ?
return self.consider_points_equals(self.points[0], self.points[-1])
def add_points_as_corners(self, points: np.ndarray) -> "VMobject":
for point in points:
self.add_line_to(point)
return points
def set_points_as_corners(self, points: Sequence[float]):
"""Given an array of points, set them as corner of the vmobject.
To achieve that, this algorithm sets handles aligned with the anchors such that the resultant bezier curve will be the segment
between the two anchors.
Parameters
----------
points : Iterable[float]
Array of points that will be set as corners.
Returns
-------
:class:`VMobject`
``self``
"""
nppcc = self.n_points_per_cubic_curve
points = np.array(points)
# This will set the handles aligned with the anchors.
        # That is, each bezier curve becomes the segment between its two anchors, with the handles lying on that segment.
self.set_anchors_and_handles(
*(interpolate(points[:-1], points[1:], a) for a in np.linspace(0, 1, nppcc))
)
return self
def set_points_smoothly(self, points):
self.set_points_as_corners(points)
self.make_smooth()
return self
def change_anchor_mode(self, mode: str):
"""Changes the anchor mode of the bezier curves. This will modify the handles.
There can be only two modes, "jagged", and "smooth".
Returns
-------
:class:`VMobject`
``self``
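Examples
--------
A minimal sketch (``Square`` is the usual Manim mobject)::
    sq = Square()
    sq.change_anchor_mode("smooth")  # equivalently: sq.make_smooth()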
"""
assert mode in ["jagged", "smooth"]
nppcc = self.n_points_per_cubic_curve
for submob in self.family_members_with_points():
subpaths = submob.get_subpaths()
submob.clear_points()
# A subpath can be composed of several bezier curves.
for subpath in subpaths:
# This will retrieve the anchors of the subpath by selecting every nth element in the array subpath
# The append is needed as the last element is not reached when slicing with numpy.
anchors = np.append(subpath[::nppcc], subpath[-1:], 0)
if mode == "smooth":
h1, h2 = get_smooth_handle_points(anchors)
elif mode == "jagged":
# The following will make the handles aligned with the anchors, thus making the bezier curve a segment
a1 = anchors[:-1]
a2 = anchors[1:]
h1 = interpolate(a1, a2, 1.0 / 3)
h2 = interpolate(a1, a2, 2.0 / 3)
new_subpath = np.array(subpath)
new_subpath[1::nppcc] = h1
new_subpath[2::nppcc] = h2
submob.append_points(new_subpath)
return self
def make_smooth(self):
return self.change_anchor_mode("smooth")
def make_jagged(self):
return self.change_anchor_mode("jagged")
def add_subpath(self, points: np.ndarray):
assert len(points) % 4 == 0
self.points = np.append(self.points, points, axis=0)
return self
def append_vectorized_mobject(self, vectorized_mobject):
new_points = list(vectorized_mobject.points)
if self.has_new_path_started():
# Remove last point, which is starting
# a new path
self.points = self.points[:-1]
self.append_points(new_points)
def apply_function(self, function):
factor = self.pre_function_handle_to_anchor_scale_factor
self.scale_handle_to_anchor_distances(factor)
super().apply_function(function)
self.scale_handle_to_anchor_distances(1.0 / factor)
if self.make_smooth_after_applying_functions:
self.make_smooth()
return self
def rotate(
self,
angle: float,
axis: np.ndarray = OUT,
about_point: Optional[Sequence[float]] = None,
**kwargs,
):
self.rotate_sheen_direction(angle, axis)
super().rotate(angle, axis, about_point, **kwargs)
return self
def scale_handle_to_anchor_distances(self, factor: float):
"""If the distance between a given handle point H and its associated
anchor point A is d, then it changes H to be a distance factor*d
away from A, without changing the direction of the line from A to H.
This is mostly useful in the context of applying a (differentiable)
function, to preserve tangency properties. One would pull all the
handles closer to their anchors, apply the function then push them out
again.
Parameters
----------
factor
The factor used for scaling.
Returns
-------
:class:`VMobject`
``self``
"""
for submob in self.family_members_with_points():
if len(submob.points) < self.n_points_per_cubic_curve:
# The case that a bezier quad is not complete (there is no bezier curve, as there are not enough control points).
continue
a1, h1, h2, a2 = submob.get_anchors_and_handles()
a1_to_h1 = h1 - a1
a2_to_h2 = h2 - a2
new_h1 = a1 + factor * a1_to_h1
new_h2 = a2 + factor * a2_to_h2
submob.set_anchors_and_handles(a1, new_h1, new_h2, a2)
return self
#
def consider_points_equals(self, p0, p1):
return np.allclose(p0, p1, atol=self.tolerance_for_point_equality)
def consider_points_equals_2d(self, p0: np.ndarray, p1: np.ndarray) -> bool:
"""Determine if two points are close enough to be considered equal.
This uses the algorithm from np.isclose(), but expanded here for the
2D point case. NumPy is overkill for such a small question.
Parameters
----------
p0 : np.ndarray
first point
p1 : np.ndarray
second point
Returns
-------
bool
whether the two points are considered close.
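Examples
--------
A minimal sketch (``tolerance_for_point_equality`` keeps its default value)::
    vmob = VMobject()
    p0 = np.array([1.0, 2.0, 0.0])
    p1 = np.array([1.0, 2.0, 0.0])
    vmob.consider_points_equals_2d(p0, p1)  # True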
"""
rtol = 1.0e-5 # default from np.isclose()
atol = self.tolerance_for_point_equality
if abs(p0[0] - p1[0]) > atol + rtol * abs(p1[0]):
return False
if abs(p0[1] - p1[1]) > atol + rtol * abs(p1[1]):
return False
return True
# Information about line
def get_cubic_bezier_tuples_from_points(self, points):
return np.array(list(self.gen_cubic_bezier_tuples_from_points(points)))
def gen_cubic_bezier_tuples_from_points(self, points: np.ndarray) -> typing.Tuple:
"""Returns the bezier tuples from an array of points.
self.points is a list of the anchors and handles of the bezier curves of the mobject (i.e. [anchor1, handle1, handle2, anchor2, anchor3, ...]).
This algorithm retrieves them by grouping the points n at a time, where n is the number of control points
of the bezier curve.
Parameters
----------
points : np.ndarray
Points from which control points will be extracted.
Returns
-------
typing.Tuple
Bezier control points.
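Examples
--------
A minimal sketch (``vmob`` stands for any :class:`VMobject` instance): eight points
yield two bezier tuples of four control points each; any trailing remainder is dropped::
    points = np.zeros((8, 3))
    quads = list(vmob.gen_cubic_bezier_tuples_from_points(points))
    # len(quads) == 2 and each quad has shape (4, 3)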
"""
nppcc = self.n_points_per_cubic_curve
remainder = len(points) % nppcc
points = points[: len(points) - remainder]
# Basically take every nppcc element.
return (points[i : i + nppcc] for i in range(0, len(points), nppcc))
def get_cubic_bezier_tuples(self):
return self.get_cubic_bezier_tuples_from_points(self.points)
def _gen_subpaths_from_points(
self,
points: np.ndarray,
filter_func: typing.Callable[[int], bool],
) -> typing.Tuple:
"""Given an array of points defining the bezier curves of the vmobject, return subpaths formed by these points.
Here, two bezier curves form a path if at least two of their anchors are evaluated True by the relation defined by filter_func.
The algorithm iterates over every bezier tuple (anchors and handles) in ``self.points`` (by regrouping each n elements, where
n is the number of points per cubic curve), and evaluates the relation between two anchors with filter_func.
NOTE : The filter_func takes an int n as parameter, and will evaluate the relation between points[n] and points[n - 1]. This should probably be changed so
the function takes two points as parameters.
Parameters
----------
points : np.ndarray
points defining the bezier curve.
filter_func : typing.Callable[[int], bool]
Filter-func defining the relation.
Returns
-------
typing.Tuple
subpaths formed by the points.
"""
nppcc = self.n_points_per_cubic_curve
filtered = filter(filter_func, range(nppcc, len(points), nppcc))
split_indices = [0] + list(filtered) + [len(points)]
return (
points[i1:i2]
for i1, i2 in zip(split_indices, split_indices[1:])
if (i2 - i1) >= nppcc
)
def get_subpaths_from_points(self, points):
return list(
self._gen_subpaths_from_points(
points,
lambda n: not self.consider_points_equals(points[n - 1], points[n]),
),
)
def gen_subpaths_from_points_2d(self, points):
return self._gen_subpaths_from_points(
points,
lambda n: not self.consider_points_equals_2d(points[n - 1], points[n]),
)
def get_subpaths(self) -> typing.Tuple:
"""Returns subpaths formed by the curves of the VMobject.
Subpaths are ranges of curves with each pair of consecutive curves having their end/start points coincident.
Returns
-------
typing.Tuple
subpaths.
"""
return self.get_subpaths_from_points(self.points)
def get_nth_curve_points(self, n: int) -> np.ndarray:
"""Returns the points defining the nth curve of the vmobject.
Parameters
----------
n : int
index of the desired bezier curve.
Returns
-------
np.ndarray
points defining the nth bezier curve (anchors and handles)
"""
assert n < self.get_num_curves()
nppcc = self.n_points_per_cubic_curve
return self.points[nppcc * n : nppcc * (n + 1)]
def get_nth_curve_function(self, n: int) -> typing.Callable[[float], np.ndarray]:
"""Returns the expression of the nth curve.
Parameters
----------
n : int
index of the desired curve.
Returns
-------
typing.Callable[[float], np.ndarray]
expression of the nth bezier curve.
"""
return bezier(self.get_nth_curve_points(n))
def get_nth_curve_length_pieces(
self,
n: int,
sample_points: Optional[int] = None,
) -> np.ndarray:
"""Returns the array of short line lengths used for length approximation.
Parameters
----------
n
The index of the desired curve.
sample_points
The number of points to sample to find the length.
Returns
-------
np.ndarray
The short length-pieces of the nth curve.
"""
if sample_points is None:
sample_points = 10
curve = self.get_nth_curve_function(n)
points = np.array([curve(a) for a in np.linspace(0, 1, sample_points)])
diffs = points[1:] - points[:-1]
norms = np.apply_along_axis(np.linalg.norm, 1, diffs)
return norms
def get_nth_curve_length(
self,
n: int,
sample_points: Optional[int] = None,
) -> float:
"""Returns the (approximate) length of the nth curve.
Parameters
----------
n
The index of the desired curve.
sample_points
The number of points to sample to find the length.
Returns
-------
length : :class:`float`
The length of the nth curve.
"""
_, length = self.get_nth_curve_function_with_length(n, sample_points)
return length
def get_nth_curve_function_with_length(
self,
n: int,
sample_points: Optional[int] = None,
) -> typing.Tuple[typing.Callable[[float], np.ndarray], float]:
"""Returns the expression of the nth curve along with its (approximate) length.
Parameters
----------
n
The index of the desired curve.
sample_points
The number of points to sample to find the length.
Returns
-------
curve : typing.Callable[[float], np.ndarray]
The function for the nth curve.
length : :class:`float`
The length of the nth curve.
"""
curve = self.get_nth_curve_function(n)
norms = self.get_nth_curve_length_pieces(n, sample_points=sample_points)
length = np.sum(norms)
return curve, length
def get_num_curves(self) -> int:
"""Returns the number of curves of the vmobject.
Returns
-------
int
number of curves of the vmobject.
"""
nppcc = self.n_points_per_cubic_curve
return len(self.points) // nppcc
def get_curve_functions(
self,
) -> typing.Iterable[typing.Callable[[float], np.ndarray]]:
"""Gets the functions for the curves of the mobject.
Returns
-------
typing.Iterable[typing.Callable[[float], np.ndarray]]
The functions for the curves.
"""
num_curves = self.get_num_curves()
for n in range(num_curves):
yield self.get_nth_curve_function(n)
def get_curve_functions_with_lengths(
self, **kwargs
) -> typing.Iterable[typing.Tuple[typing.Callable[[float], np.ndarray], float]]:
"""Gets the functions and lengths of the curves for the mobject.
Parameters
----------
**kwargs
The keyword arguments passed to :meth:`get_nth_curve_function_with_length`
Returns
-------
typing.Iterable[typing.Tuple[typing.Callable[[float], np.ndarray], float]]
The functions and lengths of the curves.
"""
num_curves = self.get_num_curves()
for n in range(num_curves):
yield self.get_nth_curve_function_with_length(n, **kwargs)
def point_from_proportion(self, alpha: float) -> np.ndarray:
"""Gets the point at a proportion along the path of the :class:`VMobject`.
Parameters
----------
alpha
The proportion along the path of the :class:`VMobject`.
Returns
-------
:class:`numpy.ndarray`
The point on the :class:`VMobject`.
Raises
------
:exc:`ValueError`
If ``alpha`` is not between 0 and 1.
:exc:`Exception`
If the :class:`VMobject` has no points.
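Examples
--------
A minimal sketch (``Circle`` is the usual Manim mobject)::
    circle = Circle()
    start = circle.point_from_proportion(0)      # first point of the path
    halfway = circle.point_from_proportion(0.5)  # point halfway along the arc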
"""
if alpha < 0 or alpha > 1:
raise ValueError(f"Alpha {alpha} not between 0 and 1.")
self.throw_error_if_no_points()
if alpha == 1:
return self.points[-1]
curves_and_lengths = tuple(self.get_curve_functions_with_lengths())
target_length = alpha * sum(length for _, length in curves_and_lengths)
current_length = 0
for curve, length in curves_and_lengths:
if current_length + length >= target_length:
if length != 0:
residue = (target_length - current_length) / length
else:
residue = 0
return curve(residue)
current_length += length
def proportion_from_point(
self,
point: typing.Iterable[typing.Union[float, int]],
) -> float:
"""Returns the proportion along the path of the :class:`VMobject`
a particular given point is at.
Parameters
----------
point
The Cartesian coordinates of the point which may or may not lie on the :class:`VMobject`
Returns
-------
float
The proportion along the path of the :class:`VMobject`.
Raises
------
:exc:`ValueError`
If ``point`` does not lie on the curve.
:exc:`Exception`
If the :class:`VMobject` has no points.
"""
self.throw_error_if_no_points()
# Iterate over each bezier curve that the ``VMobject`` is composed of, checking
# if the point lies on that curve. If it does not lie on that curve, add
# the whole length of the curve to ``target_length`` and move onto the next
# curve. If the point does lie on the curve, add how far along the curve
# the point is to ``target_length``.
# Then, divide ``target_length`` by the total arc length of the shape to get
# the proportion along the ``VMobject`` the point is at.
num_curves = self.get_num_curves()
total_length = self.get_arc_length()
target_length = 0
for n in range(num_curves):
control_points = self.get_nth_curve_points(n)
length = self.get_nth_curve_length(n)
proportions_along_bezier = proportions_along_bezier_curve_for_point(
point,
control_points,
)
if len(proportions_along_bezier) > 0:
proportion_along_nth_curve = max(proportions_along_bezier)
target_length += length * proportion_along_nth_curve
break
target_length += length
else:
raise ValueError(f"Point {point} does not lie on this curve.")
alpha = target_length / total_length
return alpha
def get_anchors_and_handles(self) -> typing.Iterable[np.ndarray]:
"""Returns anchors1, handles1, handles2, anchors2,
where (anchors1[i], handles1[i], handles2[i], anchors2[i])
will be four points defining a cubic bezier curve
for any i in range(0, len(anchors1))
Returns
-------
typing.Iterable[np.ndarray]
Iterable of the anchors and handles.
"""
nppcc = self.n_points_per_cubic_curve
return [self.points[i::nppcc] for i in range(nppcc)]
def get_start_anchors(self) -> np.ndarray:
"""Returns the start anchors of the bezier curves.
Returns
-------
np.ndarray
Starting anchors
"""
return self.points[0 :: self.n_points_per_cubic_curve]
def get_end_anchors(self) -> np.ndarray:
"""Return the starting anchors of the bezier curves.
Returns
-------
np.ndarray
Starting anchors
"""
nppcc = self.n_points_per_cubic_curve
return self.points[nppcc - 1 :: nppcc]
def get_anchors(self) -> np.ndarray:
"""Returns the anchors of the curves forming the VMobject.
Returns
-------
np.ndarray
The anchors.
"""
if self.points.shape[0] == 1:
return self.points
return np.array(
list(it.chain(*zip(self.get_start_anchors(), self.get_end_anchors()))),
)
def get_points_defining_boundary(self):
# Probably returns all anchors, but this is weird regarding the name of the method.
return np.array(list(it.chain(*(sm.get_anchors() for sm in self.get_family()))))
def get_arc_length(self, sample_points_per_curve: Optional[int] = None) -> float:
"""Return the approximated length of the whole curve.
Parameters
----------
sample_points_per_curve
Number of sample points per curve used to approximate the length. More points result in a better approximation.
Returns
-------
float
The length of the :class:`VMobject`.
"""
return sum(
length
for _, length in self.get_curve_functions_with_lengths(
sample_points=sample_points_per_curve,
)
)
# Alignment
def align_points(self, vmobject: "VMobject"):
"""Adds points to self and vmobject so that they both have the same number of subpaths, with
corresponding subpaths each containing the same number of points.
Points are added either by subdividing curves evenly along the subpath, or by creating new subpaths consisting
of a single point repeated.
Parameters
----------
vmobject
The object to align points with.
Returns
-------
:class:`VMobject`
``self``
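Examples
--------
A minimal sketch (``Square`` and ``Circle`` are the usual Manim mobjects)::
    square, circle = Square(), Circle()
    square.align_points(circle)
    # both mobjects now hold the same number of points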
"""
self.align_rgbas(vmobject)
# TODO: This shortcut can be a bit over eager. What if they have the same length, but different subpath lengths?
if self.get_num_points() == vmobject.get_num_points():
return
for mob in self, vmobject:
# If there are no points, add one to
# wherever the "center" is
if mob.has_no_points():
mob.start_new_path(mob.get_center())
# If there's only one point, turn it into
# a null curve
if mob.has_new_path_started():
mob.add_line_to(mob.get_last_point())
# Figure out what the subpaths are
subpaths1 = self.get_subpaths()
subpaths2 = vmobject.get_subpaths()
n_subpaths = max(len(subpaths1), len(subpaths2))
# Start building new ones
new_path1 = np.zeros((0, self.dim))
new_path2 = np.zeros((0, self.dim))
nppcc = self.n_points_per_cubic_curve
def get_nth_subpath(path_list, n):
if n >= len(path_list):
# Create a null path at the very end
return [path_list[-1][-1]] * nppcc
path = path_list[n]
# Check for useless points at the end of the path and remove them
# https://github.com/ManimCommunity/manim/issues/1959
while len(path) > nppcc:
# If the last nppcc points are all equal to the preceding point
if self.consider_points_equals(path[-nppcc:], path[-nppcc - 1]):
path = path[:-nppcc]
else:
break
return path
for n in range(n_subpaths):
# For each pair of subpaths, add points until they are the same length
sp1 = get_nth_subpath(subpaths1, n)
sp2 = get_nth_subpath(subpaths2, n)
diff1 = max(0, (len(sp2) - len(sp1)) // nppcc)
diff2 = max(0, (len(sp1) - len(sp2)) // nppcc)
sp1 = self.insert_n_curves_to_point_list(diff1, sp1)
sp2 = self.insert_n_curves_to_point_list(diff2, sp2)
new_path1 = np.append(new_path1, sp1, axis=0)
new_path2 = np.append(new_path2, sp2, axis=0)
self.set_points(new_path1)
vmobject.set_points(new_path2)
return self
def insert_n_curves(self, n: int):
"""Inserts n curves to the bezier curves of the vmobject.
Parameters
----------
n
Number of curves to insert.
Returns
-------
:class:`VMobject`
``self``
"""
new_path_point = None
if self.has_new_path_started():
new_path_point = self.get_last_point()
new_points = self.insert_n_curves_to_point_list(n, self.points)
self.set_points(new_points)
if new_path_point is not None:
self.append_points([new_path_point])
return self
def insert_n_curves_to_point_list(self, n: int, points: np.ndarray) -> np.ndarray:
"""Given an array of k points defining a bezier curves (anchors and handles), returns points defining exactly k + n bezier curves.
Parameters
----------
n : int
Number of desired curves.
points : np.ndarray
Starting points.
Returns
-------
np.ndarray
Points generated.
"""
if len(points) == 1:
nppcc = self.n_points_per_cubic_curve
return np.repeat(points, nppcc * n, 0)
bezier_quads = self.get_cubic_bezier_tuples_from_points(points)
curr_num = len(bezier_quads)
target_num = curr_num + n
# This is an array with values ranging from 0
# up to curr_num, with repeats such that
# its total length is target_num. For example,
# with curr_num = 10, target_num = 15, this would
# be [0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9]
repeat_indices = (np.arange(target_num, dtype="i") * curr_num) // target_num
# If the nth term of this list is k, it means
# that the nth curve of our path should be split
# into k pieces.
# In the above example our array had the following elements
# [0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9]
# We have two 0s, one 1, two 2s and so on.
# The split factors array would hence be:
# [2, 1, 2, 1, 2, 1, 2, 1, 2, 1]
split_factors = np.zeros(curr_num, dtype="i")
for val in repeat_indices:
split_factors[val] += 1
new_points = np.zeros((0, self.dim))
for quad, sf in zip(bezier_quads, split_factors):
# What was once a single cubic curve defined
# by "quad" will now be broken into sf
# smaller cubic curves
alphas = np.linspace(0, 1, sf + 1)
for a1, a2 in zip(alphas, alphas[1:]):
new_points = np.append(
new_points,
partial_bezier_points(quad, a1, a2),
axis=0,
)
return new_points
def align_rgbas(self, vmobject):
attrs = ["fill_rgbas", "stroke_rgbas", "background_stroke_rgbas"]
for attr in attrs:
a1 = getattr(self, attr)
a2 = getattr(vmobject, attr)
if len(a1) > len(a2):
new_a2 = stretch_array_to_length(a2, len(a1))
setattr(vmobject, attr, new_a2)
elif len(a2) > len(a1):
new_a1 = stretch_array_to_length(a1, len(a2))
setattr(self, attr, new_a1)
return self
def get_point_mobject(self, center=None):
if center is None:
center = self.get_center()
point = VectorizedPoint(center)
point.match_style(self)
return point
def interpolate_color(self, mobject1, mobject2, alpha):
attrs = [
"fill_rgbas",
"stroke_rgbas",
"background_stroke_rgbas",
"stroke_width",
"background_stroke_width",
"sheen_direction",
"sheen_factor",
]
for attr in attrs:
setattr(
self,
attr,
interpolate(getattr(mobject1, attr), getattr(mobject2, attr), alpha),
)
if alpha == 1.0:
setattr(self, attr, getattr(mobject2, attr))
def pointwise_become_partial(
self,
vmobject: "VMobject",
a: float,
b: float,
):
"""Given two bounds a and b, transforms the points of the self vmobject into the points of the vmobject
passed as parameter with respect to the bounds. Points here stand for control points of the bezier curves (anchors and handles)
Parameters
----------
vmobject : VMobject
The vmobject that will serve as a model.
a : float
The lower bound.
b : float
The upper bound.
Returns
-------
:class:`VMobject`
``self``
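Examples
--------
A minimal sketch (``Circle`` is the usual Manim mobject)::
    arc = VMobject()
    arc.pointwise_become_partial(Circle(), 0, 0.25)
    # ``arc`` now traces the first quarter of the circle's path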
"""
assert isinstance(vmobject, VMobject)
# Partial curve includes three portions:
# - A middle section, which matches the curve exactly
# - A start, which is some ending portion of an inner cubic
# - An end, which is the starting portion of a later inner cubic
if a <= 0 and b >= 1:
self.set_points(vmobject.points)
return self
bezier_quads = vmobject.get_cubic_bezier_tuples()
num_cubics = len(bezier_quads)
# The following two lines will compute which bezier curves of the given mobject need to be processed.
# The residue indicates the proportion of the selected bezier curve that has to be kept.
# E.g. if lower_index is 3 and lower_residue is 0.4, then the algorithm will append to the points 0.4 of the third bezier curve.
lower_index, lower_residue = integer_interpolate(0, num_cubics, a)
upper_index, upper_residue = integer_interpolate(0, num_cubics, b)
self.clear_points()
if num_cubics == 0:
return self
if lower_index == upper_index:
self.append_points(
partial_bezier_points(
bezier_quads[lower_index],
lower_residue,
upper_residue,
),
)
else:
self.append_points(
partial_bezier_points(bezier_quads[lower_index], lower_residue, 1),
)
for quad in bezier_quads[lower_index + 1 : upper_index]:
self.append_points(quad)
self.append_points(
partial_bezier_points(bezier_quads[upper_index], 0, upper_residue),
)
return self
def get_subcurve(self, a: float, b: float) -> "VMobject":
"""Returns the subcurve of the VMobject between the interval [a, b].
The curve is a VMobject itself.
Parameters
----------
a
The lower bound.
b
The upper bound.
Returns
-------
VMobject
The subcurve of the interval [a, b].
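Examples
--------
A minimal sketch (``Circle`` is the usual Manim mobject)::
    half = Circle().get_subcurve(0, 0.5)
    # ``half`` is a new VMobject tracing half of the circle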
"""
if self.is_closed() and a > b:
vmob = self.copy()
vmob.pointwise_become_partial(self, a, 1)
vmob2 = self.copy()
vmob2.pointwise_become_partial(self, 0, b)
vmob.append_vectorized_mobject(vmob2)
else:
vmob = self.copy()
vmob.pointwise_become_partial(self, a, b)
return vmob
def get_direction(self):
"""Uses :func:`~.space_ops.shoelace_direction` to calculate the direction.
The direction of points determines in which direction the
object is drawn, clockwise or counterclockwise.
Examples
--------
The default direction of a :class:`~.Circle` is counterclockwise::
>>> from manim import Circle
>>> Circle().get_direction()
'CCW'
Returns
-------
:class:`str`
Either ``"CW"`` or ``"CCW"``.
"""
return shoelace_direction(self.get_start_anchors())
def reverse_direction(self):
"""Reverts the point direction by inverting the point order.
Returns
-------
:class:`VMobject`
Returns self.
Examples
--------
.. manim:: ChangeOfDirection
class ChangeOfDirection(Scene):
def construct(self):
ccw = RegularPolygon(5)
ccw.shift(LEFT)
cw = RegularPolygon(5)
cw.shift(RIGHT).reverse_direction()
self.play(Create(ccw), Create(cw),
run_time=4)
"""
self.points = self.points[::-1]
return self
def force_direction(self, target_direction):
"""Makes sure that points are either directed clockwise or
counterclockwise.
Parameters
----------
target_direction : :class:`str`
Either ``"CW"`` or ``"CCW"``.
"""
if target_direction not in ("CW", "CCW"):
raise ValueError('Invalid input for force_direction. Use "CW" or "CCW"')
if self.get_direction() != target_direction:
# Since we already assured the input is CW or CCW,
# and the directions don't match, we just reverse
self.reverse_direction()
return self
class VGroup(VMobject, metaclass=ConvertToOpenGL):
"""A group of vectorized mobjects.
This can be used to group multiple :class:`~.VMobject` instances together
in order to scale, move, ... them together.
Examples
--------
To add :class:`~.VMobject`s to a :class:`~.VGroup`, you can either use the
:meth:`~.VGroup.add` method, or use the `+` and `+=` operators. Similarly, you
can subtract elements of a VGroup via :meth:`~.VGroup.remove` method, or
`-` and `-=` operators:
>>> from manim import Triangle, Square, VGroup
>>> vg = VGroup()
>>> triangle, square = Triangle(), Square()
>>> vg.add(triangle)
VGroup(Triangle)
>>> vg + square # a new VGroup is constructed
VGroup(Triangle, Square)
>>> vg # not modified
VGroup(Triangle)
>>> vg += square; vg # modifies vg
VGroup(Triangle, Square)
>>> vg.remove(triangle)
VGroup(Square)
>>> vg - square; # a new VGroup is constructed
VGroup()
>>> vg # not modified
VGroup(Square)
>>> vg -= square; vg # modifies vg
VGroup()
.. manim:: ArcShapeIris
:save_last_frame:
class ArcShapeIris(Scene):
def construct(self):
colors = [DARK_BROWN, BLUE_E, BLUE_D, BLUE_A, TEAL_B, GREEN_B, YELLOW_E]
radius = [1 + rad * 0.1 for rad in range(len(colors))]
circles_group = VGroup()
# zip(radius, color) makes the iterator [(radius[i], color[i]) for i in range(radius)]
circles_group.add(*[Circle(radius=rad, stroke_width=10, color=col)
for rad, col in zip(radius, colors)])
self.add(circles_group)
"""
def __init__(self, *vmobjects, **kwargs):
super().__init__(**kwargs)
self.add(*vmobjects)
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ ", ".join(str(mob) for mob in self.submobjects)
+ ")"
)
def __str__(self):
return (
f"{self.__class__.__name__} of {len(self.submobjects)} "
f"submobject{'s' if len(self.submobjects) > 0 else ''}"
)
def add(self, *vmobjects):
"""Checks if all passed elements are an instance of VMobject and then add them to submobjects
Parameters
----------
vmobjects : :class:`~.VMobject`
List of VMobject to add
Returns
-------
:class:`VGroup`
Raises
------
TypeError
If one element of the list is not an instance of VMobject
Examples
--------
.. manim:: AddToVGroup
class AddToVGroup(Scene):
def construct(self):
circle_red = Circle(color=RED)
circle_green = Circle(color=GREEN)
circle_blue = Circle(color=BLUE)
circle_red.shift(LEFT)
circle_blue.shift(RIGHT)
gr = VGroup(circle_red, circle_green)
gr2 = VGroup(circle_blue) # Constructor uses add directly
self.add(gr,gr2)
self.wait()
gr += gr2 # Add group to another
self.play(
gr.animate.shift(DOWN),
)
gr -= gr2 # Remove group
self.play( # Animate groups separately
gr.animate.shift(LEFT),
gr2.animate.shift(UP),
)
self.play( #Animate groups without modification
(gr+gr2).animate.shift(RIGHT)
)
self.play( # Animate group without component
(gr-circle_red).animate.shift(RIGHT)
)
"""
if not all(isinstance(m, (VMobject, OpenGLVMobject)) for m in vmobjects):
raise TypeError("All submobjects must be of type VMobject")
return super().add(*vmobjects)
def __add__(self, vmobject):
return VGroup(*self.submobjects, vmobject)
def __iadd__(self, vmobject):
return self.add(vmobject)
def __sub__(self, vmobject):
copy = VGroup(*self.submobjects)
copy.remove(vmobject)
return copy
def __isub__(self, vmobject):
return self.remove(vmobject)
def __setitem__(self, key: int, value: Union[VMobject, typing.Sequence[VMobject]]):
"""Override the [] operator for item assignment.
Parameters
----------
key
The index of the submobject to be assigned
value
The vmobject value to assign to the key
Returns
-------
None
Examples
--------
Normal usage::
>>> vgroup = VGroup(VMobject())
>>> new_obj = VMobject()
>>> vgroup[0] = new_obj
"""
if not all(isinstance(m, (VMobject, OpenGLVMobject)) for m in value):
raise TypeError("All submobjects must be of type VMobject")
self.submobjects[key] = value
class VDict(VMobject, metaclass=ConvertToOpenGL):
"""A VGroup-like class, also offering submobject access by
key, like a python dict
Parameters
----------
mapping_or_iterable : Union[:class:`Mapping`, Iterable[Tuple[Hashable, :class:`~.VMobject`]]], optional
The parameter specifying the key-value mapping of keys and mobjects.
show_keys : :class:`bool`, optional
Whether to also display the key associated with
the mobject. This might be useful when debugging,
especially when there are a lot of mobjects in the
:class:`VDict`. Defaults to False.
kwargs : Any
Other arguments to be passed to `Mobject`.
Attributes
----------
show_keys : :class:`bool`
Whether to also display the key associated with
the mobject. This might be useful when debugging,
especially when there are a lot of mobjects in the
:class:`VDict`. When displayed, the key is towards
the left of the mobject.
Defaults to False.
submob_dict : :class:`dict`
Is the actual python dictionary that is used to bind
the keys to the mobjects.
Examples
--------
.. manim:: ShapesWithVDict
class ShapesWithVDict(Scene):
def construct(self):
square = Square().set_color(RED)
circle = Circle().set_color(YELLOW).next_to(square, UP)
# create dict from list of tuples each having key-mobject pair
pairs = [("s", square), ("c", circle)]
my_dict = VDict(pairs, show_keys=True)
# display it just like a VGroup
self.play(Create(my_dict))
self.wait()
text = Tex("Some text").set_color(GREEN).next_to(square, DOWN)
# add a key-value pair by wrapping it in a single-element list of tuple
# after attrs branch is merged, it will be easier like `.add(t=text)`
my_dict.add([("t", text)])
self.wait()
rect = Rectangle().next_to(text, DOWN)
# can also do key assignment like a python dict
my_dict["r"] = rect
# access submobjects like a python dict
my_dict["t"].set_color(PURPLE)
self.play(my_dict["t"].animate.scale(3))
self.wait()
# also supports python dict styled reassignment
my_dict["t"] = Tex("Some other text").set_color(BLUE)
self.wait()
# remove submobject by key
my_dict.remove("t")
self.wait()
self.play(Uncreate(my_dict["s"]))
self.wait()
self.play(FadeOut(my_dict["c"]))
self.wait()
self.play(FadeOut(my_dict["r"], shift=DOWN))
self.wait()
# you can also make a VDict from an existing dict of mobjects
plain_dict = {
1: Integer(1).shift(DOWN),
2: Integer(2).shift(2 * DOWN),
3: Integer(3).shift(3 * DOWN),
}
vdict_from_plain_dict = VDict(plain_dict)
vdict_from_plain_dict.shift(1.5 * (UP + LEFT))
self.play(Create(vdict_from_plain_dict))
# you can even use zip
vdict_using_zip = VDict(zip(["s", "c", "r"], [Square(), Circle(), Rectangle()]))
vdict_using_zip.shift(1.5 * RIGHT)
self.play(Create(vdict_using_zip))
self.wait()
"""
def __init__(self, mapping_or_iterable={}, show_keys=False, **kwargs):
super().__init__(**kwargs)
self.show_keys = show_keys
self.submob_dict = {}
self.add(mapping_or_iterable)
def __repr__(self):
return __class__.__name__ + "(" + repr(self.submob_dict) + ")"
def add(self, mapping_or_iterable):
"""Adds the key-value pairs to the :class:`VDict` object.
Also, it internally adds the value to the `submobjects` :class:`list`
of :class:`~.Mobject`, which is responsible for actual on-screen display.
Parameters
----------
mapping_or_iterable : Union[:class:`Mapping`, Iterable[Tuple[Hashable, :class:`~.VMobject`]]], optional
The parameter specifying the key-value mapping of keys and mobjects.
Returns
-------
:class:`VDict`
Returns the :class:`VDict` object on which this method was called.
Examples
--------
Normal usage::
square_obj = Square()
my_dict.add([('s', square_obj)])
"""
for key, value in dict(mapping_or_iterable).items():
self.add_key_value_pair(key, value)
return self
def remove(self, key):
"""Removes the mobject from the :class:`VDict` object having the key `key`
Also, it internally removes the mobject from the `submobjects` :class:`list`
of :class:`~.Mobject`, (which is responsible for removing it from the screen)
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be removed.
Returns
-------
:class:`VDict`
Returns the :class:`VDict` object on which this method was called.
Examples
--------
Normal usage::
my_dict.remove('square')
"""
if key not in self.submob_dict:
raise KeyError("The given key '%s' is not present in the VDict" % str(key))
super().remove(self.submob_dict[key])
del self.submob_dict[key]
return self
def __getitem__(self, key):
"""Override the [] operator for item retrieval.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be accessed
Returns
-------
:class:`VMobject`
The submobject corresponding to the key `key`
Examples
--------
Normal usage::
self.play(Create(my_dict['s']))
"""
submob = self.submob_dict[key]
return submob
def __setitem__(self, key, value):
"""Override the [] operator for item assignment.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be assigned
value : :class:`VMobject`
The submobject to bind the key to
Returns
-------
None
Examples
--------
Normal usage::
square_obj = Square()
my_dict['sq'] = square_obj
"""
if key in self.submob_dict:
self.remove(key)
self.add([(key, value)])
def __delitem__(self, key):
"""Override the del operator for deleting an item.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be deleted
Returns
-------
None
Examples
--------
::
>>> from manim import *
>>> my_dict = VDict({'sq': Square()})
>>> 'sq' in my_dict
True
>>> del my_dict['sq']
>>> 'sq' in my_dict
False
Notes
-----
Removing an item from a VDict does not remove that item from any Scene
that the VDict is part of.
"""
del self.submob_dict[key]
def __contains__(self, key):
"""Override the in operator.
Parameters
----------
key : :class:`typing.Hashable`
The key to check membership of.
Returns
-------
:class:`bool`
Examples
--------
::
>>> from manim import *
>>> my_dict = VDict({'sq': Square()})
>>> 'sq' in my_dict
True
"""
return key in self.submob_dict
def get_all_submobjects(self):
"""To get all the submobjects associated with a particular :class:`VDict` object
Returns
-------
:class:`dict_values`
All the submobjects associated with the :class:`VDict` object
Examples
--------
Normal usage::
for submob in my_dict.get_all_submobjects():
self.play(Create(submob))
"""
submobjects = self.submob_dict.values()
return submobjects
def add_key_value_pair(self, key, value):
"""A utility function used by :meth:`add` to add the key-value pair
to :attr:`submob_dict`. Not really meant to be used externally.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be added.
value : :class:`~.VMobject`
The mobject associated with the key
Returns
-------
None
Raises
------
TypeError
If the value is not an instance of VMobject
Examples
--------
Normal usage::
square_obj = Square()
self.add_key_value_pair('s', square_obj)
"""
if not isinstance(value, (VMobject, OpenGLVMobject)):
raise TypeError("All submobjects must be of type VMobject")
mob = value
if self.show_keys:
# This import is here and not at the top to avoid circular import
from manim.mobject.text.tex_mobject import Tex
key_text = Tex(str(key)).next_to(value, LEFT)
mob.add(key_text)
self.submob_dict[key] = mob
super().add(value)
class VectorizedPoint(VMobject, metaclass=ConvertToOpenGL):
def __init__(
self,
location=ORIGIN,
color=BLACK,
fill_opacity=0,
stroke_width=0,
artificial_width=0.01,
artificial_height=0.01,
**kwargs,
):
self.artificial_width = artificial_width
self.artificial_height = artificial_height
super().__init__(
color=color,
fill_opacity=fill_opacity,
stroke_width=stroke_width,
**kwargs,
)
self.set_points(np.array([location]))
basecls = OpenGLVMobject if config.renderer == "opengl" else VMobject
@basecls.width.getter
def width(self):
return self.artificial_width
@basecls.height.getter
def height(self):
return self.artificial_height
def get_location(self):
return np.array(self.points[0])
def set_location(self, new_loc):
self.set_points(np.array([new_loc]))
class CurvesAsSubmobjects(VGroup):
"""Convert a curve's elements to submobjects.
Examples
--------
.. manim:: LineGradientExample
:save_last_frame:
class LineGradientExample(Scene):
def construct(self):
curve = ParametricFunction(lambda t: [t, np.sin(t), 0], t_range=[-PI, PI, 0.01], stroke_width=10)
new_curve = CurvesAsSubmobjects(curve)
new_curve.set_color_by_gradient(BLUE, RED)
self.add(new_curve.shift(UP), curve)
"""
def __init__(self, vmobject, **kwargs):
super().__init__(**kwargs)
tuples = vmobject.get_cubic_bezier_tuples()
for tup in tuples:
part = VMobject()
part.set_points(tup)
part.match_style(vmobject)
self.add(part)
class DashedVMobject(VMobject, metaclass=ConvertToOpenGL):
"""A :class:`VMobject` composed of dashes instead of lines.
Parameters
----------
vmobject
The object that will get dashed
num_dashes
Number of dashes to add.
dashed_ratio
Ratio of dash to empty space.
dash_offset
Shifts the starting point of dashes along the
path. Value 1 shifts by one full dash length.
equal_lengths
If ``True``, dashes will be (approximately) equally long.
If ``False``, dashes will be split evenly in the curve's
input t variable (legacy behavior).
Examples
--------
.. manim:: DashedVMobjectExample
:save_last_frame:
class DashedVMobjectExample(Scene):
def construct(self):
r = 0.5
top_row = VGroup() # Increasing num_dashes
for dashes in range(1, 12):
circ = DashedVMobject(Circle(radius=r, color=WHITE), num_dashes=dashes)
top_row.add(circ)
middle_row = VGroup() # Increasing dashed_ratio
for ratio in np.arange(1 / 11, 1, 1 / 11):
circ = DashedVMobject(
Circle(radius=r, color=WHITE), dashed_ratio=ratio
)
middle_row.add(circ)
func1 = FunctionGraph(lambda t: t**5,[-1,1],color=WHITE)
func_even = DashedVMobject(func1,num_dashes=6,equal_lengths=True)
func_stretched = DashedVMobject(func1, num_dashes=6, equal_lengths=False)
bottom_row = VGroup(func_even,func_stretched)
top_row.arrange(buff=0.3)
middle_row.arrange()
bottom_row.arrange(buff=1)
everything = VGroup(top_row, middle_row, bottom_row).arrange(DOWN, buff=1)
self.add(everything)
"""
def __init__(
self,
vmobject,
num_dashes=15,
dashed_ratio=0.5,
dash_offset=0,
color=WHITE,
equal_lengths=True,
**kwargs,
):
self.dashed_ratio = dashed_ratio
self.num_dashes = num_dashes
super().__init__(color=color, **kwargs)
r = self.dashed_ratio
n = self.num_dashes
if n > 0:
# Assuming total length is 1
dash_len = r / n
if vmobject.is_closed():
void_len = (1 - r) / n
else:
if n == 1:
void_len = 1 - r
else:
void_len = (1 - r) / (n - 1)
period = dash_len + void_len
phase_shift = (dash_offset % 1) * period
if vmobject.is_closed():
# closed curves have equal amount of dashes and voids
pattern_len = 1
else:
# open curves start and end with a dash, so the whole dash pattern with the last void is longer
pattern_len = 1 + void_len
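# Worked example (illustrative): with dashed_ratio = 0.5, num_dashes = 2 and
# dash_offset = 0 on an open curve, dash_len = 0.25,
# void_len = (1 - 0.5) / (2 - 1) = 0.5, so period = 0.75 and pattern_len = 1.5;
# the resulting dashes cover [0, 0.25] and [0.75, 1.0], i.e. the curve starts
# and ends with a dash.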
dash_starts = [((i * period + phase_shift) % pattern_len) for i in range(n)]
dash_ends = [
((i * period + dash_len + phase_shift) % pattern_len) for i in range(n)
]
# closed shapes can handle overflow at the 0-point
# open shapes need special treatment for it
if not vmobject.is_closed():
# because the phase shift is in the [0, 1] range, it is always the last dash element that needs attention for overflow
# if an entire dash moves out of the shape end:
if dash_ends[-1] > 1 and dash_starts[-1] > 1:
# remove the last element since it is out-of-bounds
dash_ends.pop()
dash_starts.pop()
elif dash_ends[-1] < dash_len: # if it overflowed
if (
dash_starts[-1] < 1
): # if the beginning of the piece is still in range
dash_starts.append(0)
dash_ends.append(dash_ends[-1])
dash_ends[-2] = 1
else:
dash_starts[-1] = 0
elif dash_starts[-1] > (1 - dash_len):
dash_ends[-1] = 1
if equal_lengths:
# calculate the entire length by adding up short line-pieces
norms = np.array(0)
for k in range(vmobject.get_num_curves()):
norms = np.append(norms, vmobject.get_nth_curve_length_pieces(k))
# add up length-pieces in array form
length_vals = np.cumsum(norms)
ref_points = np.linspace(0, 1, length_vals.size)
curve_length = length_vals[-1]
self.add(
*(
vmobject.get_subcurve(
np.interp(
dash_starts[i] * curve_length,
length_vals,
ref_points,
),
np.interp(
dash_ends[i] * curve_length,
length_vals,
ref_points,
),
)
for i in range(len(dash_starts))
)
)
else:
self.add(
*(
vmobject.get_subcurve(
dash_starts[i],
dash_ends[i],
)
for i in range(len(dash_starts))
)
)
# Family is already taken care of by get_subcurve
# implementation
if config.renderer == "opengl":
self.match_style(vmobject, recurse=False)
else:
self.match_style(vmobject, family=False)
| 33.496308
| 162
| 0.568439
|
b40cfd4d1c2fdb6112a7bb776e25c15d983598ff
| 889
|
py
|
Python
|
lldb/packages/Python/lldbsuite/test/python_api/default-constructor/sb_listener.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
lldb/packages/Python/lldbsuite/test/python_api/default-constructor/sb_listener.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
lldb/packages/Python/lldbsuite/test/python_api/default-constructor/sb_listener.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
"""
Fuzz tests an object after the default construction to make sure it does not crash lldb.
"""
import lldb
def fuzz_obj(obj):
obj.AddEvent(lldb.SBEvent())
obj.StartListeningForEvents(lldb.SBBroadcaster(), 0xffffffff)
obj.StopListeningForEvents(lldb.SBBroadcaster(), 0xffffffff)
event = lldb.SBEvent()
broadcaster = lldb.SBBroadcaster()
obj.WaitForEvent(5, event)
obj.WaitForEventForBroadcaster(5, broadcaster, event)
obj.WaitForEventForBroadcasterWithType(5, broadcaster, 0xffffffff, event)
obj.PeekAtNextEvent(event)
obj.PeekAtNextEventForBroadcaster(broadcaster, event)
obj.PeekAtNextEventForBroadcasterWithType(broadcaster, 0xffffffff, event)
obj.GetNextEvent(event)
obj.GetNextEventForBroadcaster(broadcaster, event)
obj.GetNextEventForBroadcasterWithType(broadcaster, 0xffffffff, event)
obj.HandleBroadcastEvent(event)
| 37.041667
| 88
| 0.777278
|
c679731725cf02fdeb21808aa6b0562cceaedca6
| 400
|
py
|
Python
|
tests/apps/app1/overrides/orm/metaclass.py
|
coboyoshi/uvicore
|
9cfdeeac83000b156fe48f068b4658edaf51c8de
|
[
"MIT"
] | 11
|
2021-03-22T22:07:49.000Z
|
2022-03-08T16:18:33.000Z
|
tests/apps/app1/overrides/orm/metaclass.py
|
coboyoshi/uvicore
|
9cfdeeac83000b156fe48f068b4658edaf51c8de
|
[
"MIT"
] | 12
|
2021-03-04T05:51:24.000Z
|
2021-09-22T05:16:18.000Z
|
tests/apps/app1/overrides/orm/metaclass.py
|
coboyoshi/uvicore
|
9cfdeeac83000b156fe48f068b4658edaf51c8de
|
[
"MIT"
] | 2
|
2021-03-25T14:49:56.000Z
|
2021-11-17T23:20:29.000Z
|
from typing import Any
from uvicore.support.dumper import dd, dump
from uvicore.orm.query import QueryBuilder
# Parent
from uvicore.orm.metaclass import ModelMetaclass as X
class ModelMetaclass(X):
async def find(entity, id: Any) -> Any:
"""Query builder passthrough"""
dump('find override----------------------------------')
return await QueryBuilder(entity).find(id)
| 28.571429
| 63
| 0.66
|
f820eaa3528b4620f596e0cf982d4639e6e1d25f
| 9,726
|
py
|
Python
|
paz/evaluation/detection.py
|
niqbal996/paz
|
f27205907367415d5b21f90e1a1d1d1ce598e889
|
[
"MIT"
] | 300
|
2020-10-29T08:02:05.000Z
|
2022-03-30T21:47:32.000Z
|
paz/evaluation/detection.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 30
|
2020-10-29T12:40:32.000Z
|
2022-03-31T14:06:35.000Z
|
paz/evaluation/detection.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 62
|
2020-10-29T12:34:13.000Z
|
2022-03-29T05:21:45.000Z
|
import numpy as np
from ..backend.boxes import compute_ious
from ..backend.image import load_image
def compute_matches(dataset, detector, class_to_arg, iou_thresh=0.5):
"""
Arguments:
dataset: List of dictionaries containing 'image' as key and a
numpy array representing an image as value.
detector : Function for performing inference
class_to_arg: Dict. of class names and their id
iou_thresh (float): A prediction is correct if its Intersection over
Union with the ground truth is above this value.
Returns:
num_positives: Dict. containing number of positives for each class
score: Dict. containing matching scores of boxes for each class
match: Dict. containing match/non-match info of boxes in each class
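Example (illustrative; my_dataset, my_detector and my_class_to_arg are
placeholders for user-provided objects)::
    num_positives, score, match = compute_matches(
        my_dataset, my_detector, my_class_to_arg, iou_thresh=0.5)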
"""
# classes_count = len(np.unique(np.concatenate(ground_truth_class_args)))
num_classes = len(class_to_arg)
num_positives = {label_id: 0 for label_id in range(1, num_classes + 1)}
score = {label_id: [] for label_id in range(1, num_classes + 1)}
match = {label_id: [] for label_id in range(1, num_classes + 1)}
for sample in dataset:
# obtaining ground truths
ground_truth_boxes = np.array(sample['boxes'][:, :4])
ground_truth_class_args = np.array(sample['boxes'][:, 4])
if 'difficulties' in sample.keys():
difficulties = np.array(sample['difficulties'])
else:
difficulties = None
# obtaining predictions
image = load_image(sample['image'])
results = detector(image)
predicted_boxes, predicted_class_args, predicted_scores = [], [], []
for box2D in results['boxes2D']:
predicted_scores.append(box2D.score)
predicted_class_args.append(class_to_arg[box2D.class_name])
predicted_boxes.append(list(box2D.coordinates))
predicted_boxes = np.array(predicted_boxes, dtype=np.float32)
predicted_class_args = np.array(predicted_class_args)
predicted_scores = np.array(predicted_scores, dtype=np.float32)
# setting difficulties to ``Easy`` if they are None
if difficulties is None:
difficulties = np.zeros(len(ground_truth_boxes), dtype=bool)
# iterating over each class present in the image
class_args = np.concatenate(
(predicted_class_args, ground_truth_class_args))
class_args = np.unique(class_args).astype(int)
for class_arg in class_args:
# masking predictions by class
class_mask = class_arg == predicted_class_args
class_predicted_boxes = predicted_boxes[class_mask]
class_predicted_scores = predicted_scores[class_mask]
# sort score from maximum to minimum for masked predictions
sorted_args = class_predicted_scores.argsort()[::-1]
class_predicted_boxes = class_predicted_boxes[sorted_args]
class_predicted_scores = class_predicted_scores[sorted_args]
# masking ground truths by class
class_mask = class_arg == ground_truth_class_args
class_ground_truth_boxes = ground_truth_boxes[class_mask]
class_difficulties = difficulties[class_mask]
# the number of positives equals the number of easy boxes
num_easy = np.logical_not(class_difficulties).sum()
num_positives[class_arg] = num_positives[class_arg] + num_easy
# add all predicted scores to scores
score[class_arg].extend(class_predicted_scores)
# if there are no predicted boxes for this class, continue
if len(class_predicted_boxes) == 0:
continue
# if there are no ground truth boxes, continue but add zeros as matches
if len(class_ground_truth_boxes) == 0:
match[class_arg].extend((0,) * len(class_predicted_boxes))
continue
# evaluation on VOC follows integer typed bounding boxes.
class_predicted_boxes = class_predicted_boxes.copy()
class_predicted_boxes[:, 2:] = (
class_predicted_boxes[:, 2:] + 1)
class_ground_truth_boxes = class_ground_truth_boxes.copy()
class_ground_truth_boxes[:, 2:] = (
class_ground_truth_boxes[:, 2:] + 1)
ious = compute_ious(
class_predicted_boxes, class_ground_truth_boxes)
ground_truth_args = ious.argmax(axis=1)
# set -1 if there is no matching ground truth
ground_truth_args[ious.max(axis=1) < iou_thresh] = -1
selected = np.zeros(len(class_ground_truth_boxes), dtype=bool)
for ground_truth_arg in ground_truth_args:
if ground_truth_arg >= 0:
if class_difficulties[ground_truth_arg]:
match[class_arg].append(-1)
else:
if not selected[ground_truth_arg]:
match[class_arg].append(1)
else:
match[class_arg].append(0)
selected[ground_truth_arg] = True
else:
match[class_arg].append(0)
return num_positives, score, match
def calculate_relevance_metrics(num_positives, scores, matches):
"""Calculates precision and recall.
Arguments:
num_positives: Dict. with number of positives for each class
scores: Dict. with matching scores of boxes for each class
matches: Dict. with match/non-match info for boxes for each class
Returns:
precision: Dict. with precision values per class
recall : Dict. with recall values per class
"""
num_classes = max(num_positives.keys()) + 1
precision, recall = [None] * num_classes, [None] * num_classes
for class_arg in num_positives.keys():
class_positive_matches = np.array(matches[class_arg], dtype=np.int8)
class_scores = np.array(scores[class_arg])
order = class_scores.argsort()[::-1]
class_positive_matches = class_positive_matches[order]
true_positives = np.cumsum(class_positive_matches == 1)
false_positives = np.cumsum(class_positive_matches == 0)
precision[class_arg] = (
true_positives / (false_positives + true_positives))
if num_positives[class_arg] > 0:
recall[class_arg] = true_positives / num_positives[class_arg]
return precision, recall
def calculate_average_precisions(precision, recall, use_07_metric=False):
"""Calculate average precisions based based on PASCAL VOC evaluation
Arguments:
num_positives: Dict. with number of positives for each class
scores: Dict. with matching scores of boxes for each class
matches: Dict. wth match/non-match info for boxes for each class
Returns:
"""
num_classes = len(precision)
average_precisions = np.empty(num_classes)
for class_arg in range(num_classes):
if precision[class_arg] is None or recall[class_arg] is None:
average_precisions[class_arg] = np.nan
continue
if use_07_metric:
# 11 point metric
average_precisions[class_arg] = 0
for t in np.arange(0., 1.1, 0.1):
if np.sum(recall[class_arg] >= t) == 0:
p_interpolation = 0
else:
p_interpolation = np.max(
np.nan_to_num(
precision[class_arg]
)[recall[class_arg] >= t]
)
average_precision_class = average_precisions[class_arg]
average_precision_class = (average_precision_class +
(p_interpolation / 11))
average_precisions[class_arg] = average_precision_class
else:
# first append sentinel values at the end
average_precision = np.concatenate(
([0], np.nan_to_num(precision[class_arg]), [0]))
average_recall = np.concatenate(([0], recall[class_arg], [1]))
average_precision = np.maximum.accumulate(
average_precision[::-1])[::-1]
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
recall_change_arg = np.where(
average_recall[1:] != average_recall[:-1])[0]
# and sum (\Delta recall) * precision
average_precisions[class_arg] = np.sum(
(average_recall[recall_change_arg + 1] -
average_recall[recall_change_arg]) *
average_precision[recall_change_arg + 1])
return average_precisions
def evaluateMAP(detector, dataset, class_to_arg, iou_thresh=0.5,
use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
Arguments:
dataset: List of dictionaries containing 'image' as key and a
numpy array representing an image as value.
detector : Function for performing inference
class_to_arg: Dict. of class names and their id
iou_thresh: Float indicating intersection over union threshold for
assigning a prediction as correct.
Returns:
Dict. with 'ap' (average precision per class) and 'map' (mean average precision).
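Example (illustrative; my_detector, my_dataset and my_class_to_arg are
placeholders for user-provided objects)::
    results = evaluateMAP(my_detector, my_dataset, my_class_to_arg)
    mean_average_precision = results['map']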
"""
positives, score, match = compute_matches(
dataset, detector, class_to_arg, iou_thresh)
precision, recall = calculate_relevance_metrics(positives, score, match)
average_precisions = calculate_average_precisions(
precision, recall, use_07_metric)
return {'ap': average_precisions, 'map': np.nanmean(average_precisions)}
| 47.213592
| 77
| 0.634176
|
0a76edbd926d66f160f7e9865fee900d8ea34ce0
| 936
|
py
|
Python
|
freeCodeCamp/01-scientific-computing-with-python/src/04-strings.py
|
aysedemirel/python-journey
|
16abd1729c2d999aedb2b4db5ade6c14aca7a23f
|
[
"MIT"
] | 1
|
2021-02-27T14:22:53.000Z
|
2021-02-27T14:22:53.000Z
|
freeCodeCamp/01-scientific-computing-with-python/src/04-strings.py
|
aysedemirel/python-journey
|
16abd1729c2d999aedb2b4db5ade6c14aca7a23f
|
[
"MIT"
] | null | null | null |
freeCodeCamp/01-scientific-computing-with-python/src/04-strings.py
|
aysedemirel/python-journey
|
16abd1729c2d999aedb2b4db5ade6c14aca7a23f
|
[
"MIT"
] | null | null | null |
hello = "Hello"
world = "World"
hello_world = hello + " " + world
print(hello_world)
num = '123'
num = int(num) + 1
print(num)
text = "Sample Text For Python"
print(text[0:6])
print(text[7:11])
print(text[12:15])
print(text[16:22])
fruit = "banana"
print('n' in fruit)
print('m' in fruit)
print("Hello Wolrd".lower())
print("Hello Wolrd".upper())
print("hello world".capitalize())
hello = "Hello World"
print(type(hello))
dir(hello)
str = "Hello world"
isFound = str.find('wo')
print(isFound)
str = "Hello Ayse"
print(str)
replace_str = str.replace("Ayse","Mustafa")
print(replace_str)
str = " Hello World "
print(str.lstrip())
print(str.rstrip())
print(str.strip())
str = "Please have a nice day"
print(str.startswith('Please'))
print(str.startswith('please'))
data = "hello there,i'm Ayse"
comma = data.find(',')
empty = data.find(' ',comma)
text = data[comma+1: empty]
print(comma)
print(empty)
print(text)
| 16.714286
| 43
| 0.665598
|
49ed777f2fb60bb228c99ec5ce931550c38380c9
| 2,036
|
py
|
Python
|
pyports.py
|
dunker4o/PyPortS
|
394e155c681764fd263aa667f9645264629bd071
|
[
"MIT"
] | null | null | null |
pyports.py
|
dunker4o/PyPortS
|
394e155c681764fd263aa667f9645264629bd071
|
[
"MIT"
] | null | null | null |
pyports.py
|
dunker4o/PyPortS
|
394e155c681764fd263aa667f9645264629bd071
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 3 23:05:47 2019
@author: Borko
"""
# 1. Parse input from the console:
# 1) IP Address
# 2) Port range - optional
# 3) IPv - optional
# 4) TCP/UDP - optional
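# Example invocation (the addresses are placeholders):
#   python pyports.py 192.168.1.1 192.168.1.10 -p 100 -t 1
# scans ports 1-100 on each of the ten addresses with a 1 second timeout per port.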
import argparse
import ipaddress
import socket
parser = argparse.ArgumentParser(description="A simple Python Port Scanner \
that checks which ports in the given range are open.")
parser.add_argument("first_ip", type=str, help="Starting IP address of the \
scan.")
parser.add_argument("last_ip", type=str, help="Last IP address to scan.")
parser.add_argument("-p", "--port", type=int, help="Set the last port to scan.\
Default is 1024.", required=False, default=1024)
'''
parser.add_argument("-v", "--version", type=int, help="Specify if you are \
testing IPv4 or IPv6 addresses. IPv4 is default.",
required=False, default=4)
parser.add_argument("-u", "--udp", help="Selects UDP as the underlying \
protocol for testing.")
'''
parser.add_argument("-t", "--timeout", help="Set the timeout in seconds for \
each port scan. Default is 3.", default=3, type=int)
args = parser.parse_args()
# 2. Loop through the IP range and check for connections.
# 3. Print results
start_ip = ipaddress.IPv4Address(args.first_ip)
end_ip = ipaddress.IPv4Address(args.last_ip)
for current_ip in range(int(start_ip), int(end_ip)+1):
print("\r\n" + "***"*15)
print("Open ports on {}:".format(ipaddress.IPv4Address(current_ip)))
none = True
for port in range(1, args.port+1):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(args.timeout)
connection = sock.connect_ex((str(ipaddress.IPv4Address(current_ip)), port))
if connection == 0:
print("Port {} is OPEN!".format(port))
none = False
sock.close()
if none:
print("No open ports found.")
print("---"*15)
| 33.377049
| 85
| 0.611493
|
77b0ac2ef14accf667b6c72d4de635696eb68e42
| 1,271
|
py
|
Python
|
bann/b_test_train_prepare/functions/test/one_cl_stats/acc_calc.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_test_train_prepare/functions/test/one_cl_stats/acc_calc.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_test_train_prepare/functions/test/one_cl_stats/acc_calc.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
.. moduleauthor:: Artur Lissin
.. moduleauthor:: Fabian Tann
"""
from typing import Tuple, List, Dict
from bann.b_test_train_prepare.container.test.rttff_c import RtTfF
from bann.b_test_train_prepare.functions.test.one_cl_stats.merge_rttff import calc_one_class_stats
def _acc_calc(fp_c: int, tn_c: int, tp_c: int, fn_c: int, /) -> float:
try:
res = (tp_c + tn_c) / (tp_c + tn_c + fp_c + fn_c)
except ZeroDivisionError as ex_z:
print(ex_z)
res = -1
return res
def merge_one_class_acc(data: List[Tuple[Dict[int, RtTfF], ...]], class_num: int,
step_cnt: int, /) -> Tuple[str, ...]:
rev_list, classes_list = calc_one_class_stats(class_num, step_cnt, data)
res = [
(str(cl_id), str(_acc_calc(
cl_el[rev_list[cl_id][1]].r_fp,
cl_el[rev_list[cl_id][1]].r_tn,
cl_el[rev_list[cl_id][1]].r_tp,
cl_el[rev_list[cl_id][1]].r_fn
)))
for cl_id, cl_el in enumerate(classes_list)
]
cross_tab = [
"\"OneClass_ACC\": {",
"\"ClassID\": [" + ','.join(re_t[0] for re_t in res) + "],",
"\"ACC\": [" + ','.join(re_t[1] for re_t in res) + "]"
"}"
]
return tuple(cross_tab)
| 31
| 98
| 0.583792
|
1e8f48a070611060d25e3fbb8844c49eb0751e6b
| 901
|
py
|
Python
|
spikeinterface/extractors/neoextractors/spike2.py
|
khl02007/spikeinterface
|
a29739a2ecc1d27af0fdb7adfed895aa9fdd0be4
|
[
"MIT"
] | 116
|
2019-07-12T14:33:43.000Z
|
2022-03-29T01:10:00.000Z
|
spikeinterface/extractors/neoextractors/spike2.py
|
khl02007/spikeinterface
|
a29739a2ecc1d27af0fdb7adfed895aa9fdd0be4
|
[
"MIT"
] | 424
|
2019-07-15T13:29:34.000Z
|
2022-03-30T13:30:45.000Z
|
spikeinterface/extractors/neoextractors/spike2.py
|
khl02007/spikeinterface
|
a29739a2ecc1d27af0fdb7adfed895aa9fdd0be4
|
[
"MIT"
] | 60
|
2019-08-26T11:59:07.000Z
|
2022-03-24T20:05:38.000Z
|
from .neobaseextractor import NeoBaseRecordingExtractor, NeoBaseSortingExtractor
class Spike2RecordingExtractor(NeoBaseRecordingExtractor):
"""
Class for reading spike2 smr files.
    smrx files are not supported by this extractor; use CedRecordingExtractor instead.
Based on neo.rawio.Spike2RawIO
Parameters
----------
file_path: str
        The smr file to load the recording from.
stream_id: str or None
"""
mode = 'file'
NeoRawIOClass = 'Spike2RawIO'
def __init__(self, file_path, stream_id=None):
neo_kwargs = {'filename': file_path}
NeoBaseRecordingExtractor.__init__(self, stream_id=stream_id, **neo_kwargs)
self._kwargs = {'file_path': str(file_path), 'stream_id': stream_id}
def read_spike2(*args, **kwargs):
recording = Spike2RecordingExtractor(*args, **kwargs)
return recording
read_spike2.__doc__ = Spike2RecordingExtractor.__doc__
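# Example usage (added sketch; 'my_recording.smr' is a hypothetical path):
# recording = read_spike2('my_recording.smr')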
| 27.30303
| 83
| 0.705882
|
ccd3f2d84a301cf43ea7cbd885d3d8dc149547ff
| 3,805
|
bzl
|
Python
|
bazel/dependency_imports.bzl
|
Zerpet/envoy
|
1533857904db361de3ab7d10167f12310c6e6abf
|
[
"Apache-2.0"
] | null | null | null |
bazel/dependency_imports.bzl
|
Zerpet/envoy
|
1533857904db361de3ab7d10167f12310c6e6abf
|
[
"Apache-2.0"
] | null | null | null |
bazel/dependency_imports.bzl
|
Zerpet/envoy
|
1533857904db361de3ab7d10167f12310c6e6abf
|
[
"Apache-2.0"
] | null | null | null |
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains_config")
load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties")
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies")
load("@rules_fuzzing//fuzzing:repositories.bzl", "rules_fuzzing_dependencies")
load("@upb//bazel:workspace_deps.bzl", "upb_deps")
load("@rules_rust//rust:repositories.bzl", "rust_repositories")
load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install = "pip_install")
load("@configs_pip3//:requirements.bzl", configs_pip_install = "pip_install")
load("@headersplit_pip3//:requirements.bzl", headersplit_pip_install = "pip_install")
load("@kafka_pip3//:requirements.bzl", kafka_pip_install = "pip_install")
load("@protodoc_pip3//:requirements.bzl", protodoc_pip_install = "pip_install")
load("@thrift_pip3//:requirements.bzl", thrift_pip_install = "pip_install")
load("@fuzzing_pip3//:requirements.bzl", fuzzing_pip_install = "pip_install")
load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies")
load("@proxy_wasm_rust_sdk//bazel:dependencies.bzl", "proxy_wasm_rust_sdk_dependencies")
# go version for rules_go
GO_VERSION = "1.15.5"
def envoy_dependency_imports(go_version = GO_VERSION):
rules_foreign_cc_dependencies()
go_rules_dependencies()
go_register_toolchains(go_version)
rbe_toolchains_config()
gazelle_dependencies()
apple_rules_dependencies()
rust_repositories()
upb_deps()
antlr_dependencies(472)
proxy_wasm_rust_sdk_dependencies()
rules_fuzzing_dependencies(
oss_fuzz = True,
honggfuzz = False,
)
custom_exec_properties(
name = "envoy_large_machine_exec_property",
constants = {
"LARGE_MACHINE": create_rbe_exec_properties_dict(labels = dict(size = "large")),
},
)
# These dependencies, like most of the Go in this repository, exist only for the API.
go_repository(
name = "org_golang_google_grpc",
build_file_proto_mode = "disable",
importpath = "google.golang.org/grpc",
sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=",
version = "v1.29.1",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
sum = "h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=",
version = "v0.0.0-20190813141303-74dc4d7220e7",
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
sum = "h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=",
version = "v0.3.0",
)
go_repository(
name = "com_github_spf13_afero",
importpath = "github.com/spf13/afero",
sum = "h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc=",
version = "v1.3.4",
)
go_repository(
name = "com_github_lyft_protoc_gen_star",
importpath = "github.com/lyft/protoc-gen-star",
sum = "h1:sImehRT+p7lW9n6R7MQc5hVgzWGEkDVZU4AsBQ4Isu8=",
version = "v0.5.1",
)
go_repository(
name = "com_github_iancoleman_strcase",
importpath = "github.com/iancoleman/strcase",
sum = "h1:ux/56T2xqZO/3cP1I2F86qpeoYPCOzk+KF/UH/Ar+lk=",
version = "v0.0.0-20180726023541-3605ed457bf7",
)
config_validation_pip_install()
configs_pip_install()
headersplit_pip_install()
kafka_pip_install()
protodoc_pip_install()
thrift_pip_install()
fuzzing_pip_install()
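# Example usage (added sketch): a consuming WORKSPACE would typically call
# load("@envoy//bazel:dependency_imports.bzl", "envoy_dependency_imports")
# and then envoy_dependency_imports() after the repository definitions are loaded.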
| 41.358696
| 129
| 0.716426
|
868a0863ffecc9c4156a7e2dfd05bb2ff74a28fa
| 1,423
|
py
|
Python
|
main.py
|
nathanjmcdougall/convergence-plot
|
0f2173094549d107de0b86a7d3e2bafc9bba70b9
|
[
"MIT"
] | null | null | null |
main.py
|
nathanjmcdougall/convergence-plot
|
0f2173094549d107de0b86a7d3e2bafc9bba70b9
|
[
"MIT"
] | null | null | null |
main.py
|
nathanjmcdougall/convergence-plot
|
0f2173094549d107de0b86a7d3e2bafc9bba70b9
|
[
"MIT"
] | null | null | null |
"""A script to create nice animations of Julia sets for families of functions.
"""
from matplotlib.animation import ArtistAnimation
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from plotting import plot_recursions_convergence
if __name__ == '__main__':
xlim = [-2, 2]
ylim = [-2, 2]
extent = xlim + ylim
PIXELS_PER_AXIS = 2000
THRESHOLD = 2
MAX_IT = 20
def julia_func(theta):
"""Returns a julia function for the given parameter a.
"""
constant = 0.2*np.exp(theta*1j)
return lambda z: z**4 - 1.3*z + constant
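    # Note (added): julia_func above returns the map z -> z**4 - 1.3*z + c with
    # c = 0.2*exp(1j*theta), so each animation frame corresponds to a different
    # point c on a circle of radius 0.2 in the complex plane.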
# TODO(NAMC) incorporate the more efficient method in this SO answer:
# https://stackoverflow.com/a/15883620/10931340
fig, ax = plt.subplots(figsize=(10, 10))
frames = tqdm(np.linspace(0, 2*np.pi, num=100))
ims = []
for frame in frames:
im = plot_recursions_convergence(
ax,
func=julia_func(frame),
threshold=THRESHOLD,
max_it=MAX_IT,
extent=extent,
pixels_per_axis=PIXELS_PER_AXIS,
cmap='inferno'
)
ax.axis('off')
ims.append([im])
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
fig.set_size_inches(19.20, 10.80, True)
ani = ArtistAnimation(fig, ims, interval=30, blit=True)
ani.save("animation.mp4", dpi=100)
plt.show()
| 27.901961
| 83
| 0.624034
|
a082fe4d9a593477a3e9ed582e36bb9a13e715da
| 4,371
|
py
|
Python
|
prune/pruning_method_transposable_block_l1.py
|
itayhubara/AcceleratedSparseNeuralTraining
|
425897dec9c7ef185841d7000c4418ebb1c95896
|
[
"Apache-2.0"
] | 3
|
2021-11-24T12:46:11.000Z
|
2021-11-25T01:45:14.000Z
|
prune/pruning_method_transposable_block_l1.py
|
itayhubara/AcceleratedSparseNeuralTraining
|
425897dec9c7ef185841d7000c4418ebb1c95896
|
[
"Apache-2.0"
] | null | null | null |
prune/pruning_method_transposable_block_l1.py
|
itayhubara/AcceleratedSparseNeuralTraining
|
425897dec9c7ef185841d7000c4418ebb1c95896
|
[
"Apache-2.0"
] | null | null | null |
from pulp import *
from tqdm import tqdm
from multiprocessing import Pool
from common.timer import Timer
from prune.pruning_method_utils import *
import re
import numpy as np
import torch
import torch.nn.utils.prune as prune
class PruningMethodTransposableBlockL1(prune.BasePruningMethod):
PRUNING_TYPE = 'unstructured' # pruning type "structured" refers to channels
RUN_SPEED_TEST = False
def __init__(self, block_size, topk, optimize_transposed=False, n_workers=None, with_tqdm=True):
super(PruningMethodTransposableBlockL1, self).__init__()
assert topk <= block_size
assert n_workers is None or n_workers > 0
self.bs = block_size
self.topk = topk
self.optimize_transposed = optimize_transposed
self.n_workers = n_workers
self.with_tqdm = with_tqdm
        # used for multiprocessing in order to avoid serializing/deserializing tensors, etc.
self.mp_tensor, self.mp_mask = None, None
def ip_transpose(self, data):
prob = LpProblem('TransposableMask', LpMaximize)
combinations = []
magnitude_loss = {}
indicators = {}
bs = self.bs
for r in range(bs):
for c in range(bs):
combinations.append('ind' + '_{}r_{}c'.format(r, c))
magnitude_loss['ind' + '_{}r_{}c'.format(r, c)] = abs(data[r, c])
indicators['ind' + '_{}r_{}c'.format(r, c)] = \
LpVariable('ind' + '_{}r_{}c'.format(r, c), 0, 1, LpInteger)
prob += lpSum([indicators[ind] * magnitude_loss[ind] for ind in magnitude_loss.keys()])
for r in range(bs):
prob += lpSum([indicators[key] for key in combinations if '_{}r'.format(r) in key]) == self.topk
for c in range(bs):
prob += lpSum([indicators[key] for key in combinations if '_{}c'.format(c) in key]) == self.topk
solver = LpSolverDefault
solver.msg = False
prob.solve(solver)
assert prob.status != -1, 'Infeasible'
mask = np.zeros([self.bs, self.bs])
for v in prob.variables():
if 'ind' in v.name:
rc = re.findall(r'\d+', v.name)
mask[int(rc[0]), int(rc[1])] = v.varValue
return mask
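    # Note (added): ip_transpose above solves a small integer program that keeps
    # exactly `topk` entries per row and per column of a bs x bs block, choosing
    # the entries with the largest total magnitude; the returned 0/1 matrix is
    # the pruning mask for that block.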
def get_mask_iter(self, c):
co, inners = self.mp_tensor.shape
block_numel = self.bs ** 2
n_blocks = inners // block_numel
for j in range(n_blocks):
offset = j * block_numel
w_block = self.mp_tensor[c, offset:offset + block_numel].reshape(self.bs, self.bs)
w_block = w_block + w_block.T if self.optimize_transposed else w_block
mask_block = self.ip_transpose(w_block).reshape(-1)
self.mp_mask[c, offset:offset + block_numel] = torch.from_numpy(mask_block)
def get_mask(self, t):
self.mp_tensor = t
self.mp_mask = torch.zeros_like(t)
co, inners = t.shape
n_blocks = inners // (self.bs ** 2)
if self.RUN_SPEED_TEST:
self.RUN_SPEED_TEST = False
            with Timer() as timer:
                self.get_mask_iter(0)
            elapsed = timer.total().total_seconds()
print('Single core speed test: blocks={} secs={} block-time={}'.format(n_blocks, elapsed, elapsed/n_blocks))
p = Pool(self.n_workers)
n_iterations = co
bar = tqdm(total=n_iterations, ncols=80) if self.with_tqdm else None
bar.set_postfix_str('n_processes={}, blocks/iter={}'.format(p._processes, n_blocks)) if self.with_tqdm else None
block_indexes = range(co)
for _ in p.imap_unordered(self.get_mask_iter, block_indexes):
bar.update(1) if self.with_tqdm else None
bar.close() if self.with_tqdm else None
p.close()
return self.mp_mask
def compute_mask(self, t, default_mask):
# permute and pad
validate_tensor_shape_2d_4d(t)
t_masked = t.clone().detach().mul_(default_mask)
t_permuted = permute_to_nhwc(t_masked)
pad_to = self.bs ** 2
t_padded = pad_inner_dims(t_permuted, pad_to)
t = t_padded.data.abs().to(t)
# compute mask
mask = self.get_mask(t)
# restore to original shape
block_mask = clip_padding(mask, t_permuted.shape).reshape(t_permuted.shape)
block_mask = permute_to_nchw(block_mask)
return block_mask
| 39.026786
| 120
| 0.616106
|
23a3bae65bfa7b4b4888b40642f0506cc7173b75
| 1,097
|
py
|
Python
|
kubernetes/test/test_v2beta1_cross_version_object_reference.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 2
|
2020-06-21T08:03:18.000Z
|
2020-06-21T09:53:29.000Z
|
kubernetes/test/test_v2beta1_cross_version_object_reference.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v2beta1_cross_version_object_reference.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 1
|
2020-12-10T07:28:08.000Z
|
2020-12-10T07:28:08.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v2beta1_cross_version_object_reference import V2beta1CrossVersionObjectReference # noqa: E501
from kubernetes.client.rest import ApiException
class TestV2beta1CrossVersionObjectReference(unittest.TestCase):
"""V2beta1CrossVersionObjectReference unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV2beta1CrossVersionObjectReference(self):
"""Test V2beta1CrossVersionObjectReference"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v2beta1_cross_version_object_reference.V2beta1CrossVersionObjectReference() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.425
| 132
| 0.758432
|
0a6a8ad142eda6a4f975fd848ab05b249e2dac30
| 168
|
py
|
Python
|
mara_superset/cli.py
|
leo-schick/mara-superset
|
359adcab3c2ac32283bd465901ceeb768d436557
|
[
"MIT"
] | 3
|
2021-12-14T18:01:57.000Z
|
2022-01-01T10:17:42.000Z
|
mara_superset/cli.py
|
leo-schick/mara-superset
|
359adcab3c2ac32283bd465901ceeb768d436557
|
[
"MIT"
] | null | null | null |
mara_superset/cli.py
|
leo-schick/mara-superset
|
359adcab3c2ac32283bd465901ceeb768d436557
|
[
"MIT"
] | null | null | null |
import click
@click.command()
def update_metadata():
"""Sync schema definitions from Mara to Superset"""
from . import metadata
metadata.update_metadata()
| 21
| 55
| 0.720238
|
2d2cbeb79f650476ca7348776e39f0b55c32e0e9
| 1,136
|
py
|
Python
|
setup.py
|
cycomanic/pelican-perpagepublications
|
75657a708996fee8ad47759a0d92a3498a05f50a
|
[
"Unlicense"
] | null | null | null |
setup.py
|
cycomanic/pelican-perpagepublications
|
75657a708996fee8ad47759a0d92a3498a05f50a
|
[
"Unlicense"
] | null | null | null |
setup.py
|
cycomanic/pelican-perpagepublications
|
75657a708996fee8ad47759a0d92a3498a05f50a
|
[
"Unlicense"
] | null | null | null |
import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: Public Domain
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
LONG_DESCRIPTION = """\
Requirements
============
pelican\_perpagepublications requires pybtex.
Creates per-page/per-post scientific publication lists from BibTeX databases for different publication types, given in the page or post metadata. This plugin is an extension of the Pelican BibTeX plugin by Vlad Vene.
"""
setup(
name='pelican_perpagepublications',
description='Create per page/post publication lists with BibTeX in Pelican',
long_description=LONG_DESCRIPTION,
version=pelican_bibtex.__version__,
author='Jochen Schroeder',
author_email='jochen.schroeder@jochenschroeder.com',
url='https://pypi.python.org/pypi/pelican_perpagepublications',
py_modules=['pelican_perpagepublications'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f]
)
| 30.702703
| 190
| 0.765845
|
92a53e19f501124e4a55f27ca617dbf03d3931d0
| 4,429
|
py
|
Python
|
app/recipe/tests/test_ingredients_api.py
|
AndreWicaksono/recipe-app-api
|
a134306ece5b74ff83b3aea4d8a64225caeaa07c
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_ingredients_api.py
|
AndreWicaksono/recipe-app-api
|
a134306ece5b74ff83b3aea4d8a64225caeaa07c
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_ingredients_api.py
|
AndreWicaksono/recipe-app-api
|
a134306ece5b74ff83b3aea4d8a64225caeaa07c
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Test the publicly available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required to access the endpoint"""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test the private ingredients API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'administrator@andrewicaksono.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
"""Test that ingredients for the authenticated user are returned"""
user2 = get_user_model().objects.create_user(
'moderator@andrewicaksono.com',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredients_successful(self):
"""Test create a new ingredient"""
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredients_invalid(self):
"""Test creating invalid ingredient fails"""
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipes(self):
"""Test filtering ingredients by those assigned to recipes"""
ingredient1 = Ingredient.objects.create(
user=self.user, name='Apple'
)
ingredient2 = Ingredient.objects.create(
user=self.user, name='Turkey'
)
recipe = Recipe.objects.create(
title='Apple Crumble',
time_minutes=5,
price=10,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredients_assigned_unique(self):
"""Test filtering ingredients by assigned returns unique items"""
ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name='Cheese')
recipe1 = Recipe.objects.create(
title='Eggs Benedict',
time_minutes=30,
price=12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Coriander Eggs on Toast',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
| 33.55303
| 78
| 0.658162
|
86ff11f0c9cf93e9e206f9d7ee5a6ffb6643a0b3
| 6,376
|
py
|
Python
|
faang-codingexercises/codechallenge_017.py
|
veilair/code-development
|
953eef62204809f3ea0715a6ace5c5b7c1ff5115
|
[
"MIT"
] | 1
|
2022-02-20T00:48:20.000Z
|
2022-02-20T00:48:20.000Z
|
faang-codingexercises/codechallenge_017.py
|
exajobs/coding-interview-collection
|
3daa3863b4a72d8959530edb9ecc9161a93ee99e
|
[
"MIT"
] | null | null | null |
faang-codingexercises/codechallenge_017.py
|
exajobs/coding-interview-collection
|
3daa3863b4a72d8959530edb9ecc9161a93ee99e
|
[
"MIT"
] | null | null | null |
'''
Date: 12/29/2018
Problem description:
===================
This problem was asked by Jane Street.
Suppose you are given a table of currency exchange rates, represented as a 2D array.
Determine whether there is a possible arbitrage: that is, whether there is some sequence
of trades you can make, starting with some amount A of any currency, so that you can
end up with some amount greater than A of that currency.
There are no transaction costs and you can trade fractional quantities.
Some research:
=============
Arbitrage involves three immediate transactions of buy low sell high.
For example, we use 2 US dollars to buy 1 British pound sterling,
then use that pound to buy 1.50 euros, and then use the 1.50 euros
to buy $2.50. By trading this way we have gained $0.50.
In actuality, arbitrage takes advantage of market inefficiency (a delay in information
sharing) to trade for a fractional gain.
Below is the actual currency exchange rate table on 12-31-2018
US Dollar 1.00 USD inv. 1.00 USD
--------- -------- -------------
Euro 0.870903 1.148233
British Pound 0.783992 1.275523
Indian Rupee 69.605725 0.014367
Australian Dollar 1.420549 0.703953
Canadian Dollar 1.363172 0.733583
Singapore Dollar 1.362844 0.733760
Swiss Franc 0.983397 1.016883
Malaysian Ringgit 4.132583 0.241979
Japanese Yen 109.558545 0.009128
Chinese Renminbi 6.874934 0.145456
Given:
USCurrencyEquivalent = {
"Euro": 0.870903,
"British Pound": 0.783992,
"Indian Rupee": 69.605725,
"Australian Dollar": 1.420549,
"Canadian Dollar": 1.363172,
"Singapore Dollar": 1.362844,
"Swiss Franc": 0.983397,
"Malaysian Ringgit": 4.132583,
"Japanese Yen": 109.558545,
"Chinese Renminbi": 6.874934
}
Then figure out the inversion rate,
# e.g. 1 Euro = 1.148233 US
USInversionRate = {
"Euro": 1.148233,
"British Pound": 1.275523,
"Indian Rupee": 0.014367,
"Australian Dollar": 0.703953,
"Canadian Dollar": 0.733583,
"Singapore Dollar": 0.733760,
"Swiss Franc": 1.016883,
"Malaysian Ringgit": 0.241979,
"Japanese Yen": 0.009128,
"Chinese Renminbi": 0.145456
}
The best marginal gain is 1 * (lowest inversion rate / highest inversion rate)
e.g. 1US * (0.009128Yen/1.275523Pound) = 0.007156280208196952US
If we invest 1000US buying Pound then Yen then back to US dollars, we gain 1000 * 0.007156280208196952 = 7.156$
Algorithm:
=========
Input: A dictionary of USCurrencyEquivalent, and an investment number in US dollars
Output: Gain in US dollars (decimal value)
Pseudo code:
1. Check for valid input
2. Convert dictionary into inversion hash table
3. Find the highest ratio in the hash table. i.e. lowest/highest
4. Output the InvestAmount * (ratio)
'''
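# Worked check of the ratio used below (added for illustration):
# min inversion rate = 1/109.558545 (Japanese Yen), max inversion rate = 1/0.783992
# (British Pound), so the gain per invested dollar is
# (1/109.558545) / (1/0.783992) = 0.783992/109.558545 ~= 0.0071559,
# i.e. roughly 7,155.92 USD on a 1,000,000 USD stake, matching the run-time output below.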
import unittest
def invert(func):
def inner(dict_of_currencies):
for k in dict_of_currencies:
#print (k, 1/dict_of_currencies[k])
dict_of_currencies[k] = 1 / dict_of_currencies[k]
return func(dict_of_currencies)
return inner
@invert
def inversionRatio(USCurrencyEquivalent={}):
return USCurrencyEquivalent
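# Note (added): the `invert` decorator above replaces every rate in the passed
# dictionary with its reciprocal in place, so `inversionRatio` returns the same
# dict object holding the inverted rates.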
def gainArbitrage(USCurrencyEquivalent, AmountUSD):
inversionHash = inversionRatio(USCurrencyEquivalent)
# step1
maxrate = max([inversionHash[k] for k in inversionHash])
XAmount = AmountUSD/maxrate
XCurrencyName = [k for k,v in inversionHash.items() if v == maxrate]
print("Step1: Trade {} USD for {} {}".format(AmountUSD, XAmount, str(XCurrencyName[0])))
# step2
minrate = min([inversionHash[k] for k in inversionHash])
YAmount = XAmount/minrate
YCurrencyName = [k for k,v in inversionHash.items() if v == minrate]
print("Step2: Trade {} {} for {} {}".format(XAmount, str(XCurrencyName[0]), YAmount, str(YCurrencyName[0])))
# step3
ZAmount = AmountUSD + AmountUSD *(minrate/maxrate)
print("Step3: Trade {} {} back to {} USD".format(YAmount, str(YCurrencyName[0]), ZAmount))
return AmountUSD * (minrate/maxrate)
class TestArbitrage(unittest.TestCase):
def test_code(self):
InvestAmount = 1000000
USCurrencyEquivalent = {
"Euro": 0.870903,
"British Pound": 0.783992,
"Indian Rupee": 69.605725,
"Australian Dollar": 1.420549,
"Canadian Dollar": 1.363172,
"Singapore Dollar": 1.362844,
"Swiss Franc": 0.983397,
"Malaysian Ringgit": 4.132583,
"Japanese Yen": 109.558545,
"Chinese Renminbi": 6.874934 }
self.assertEqual(gainArbitrage(USCurrencyEquivalent, InvestAmount) == 7155.91832658968, True)
if __name__ == '__main__':
Amount = 1000000 # US dollars
USCurrencyEquivalent = {
"Euro": 0.870903,
"British Pound": 0.783992,
"Indian Rupee": 69.605725,
"Australian Dollar": 1.420549,
"Canadian Dollar": 1.363172,
"Singapore Dollar": 1.362844,
"Swiss Franc": 0.983397,
"Malaysian Ringgit": 4.132583,
"Japanese Yen": 109.558545,
"Chinese Renminbi": 6.874934
}
print("Gain from arbitrage trades: {} USD".format(gainArbitrage(USCurrencyEquivalent, Amount)))
unittest.main()
'''
Run-time output:
===============
markn@u17101vaio:~/devel/python-prj/DailyCodingChallenge$ python codechallenge_017.py
Step1: Trade 1000000 USD for 783992.0000000001 British Pound
Step2: Trade 783992.0000000001 British Pound for 85893022.81164001Japanese Yen
Step3: Trade 85893022.81164001 Japanese Yen back to 1007155.9183265896 USD
Gain from arbitrage trades: 7155.91832658968 USD
markn@u17101vaio:~/devel/python-prj/DailyCodingChallenge$ pytest codechallenge_017.py
===================================== test session starts =====================================
platform linux -- Python 3.6.4, pytest-4.0.2, py-1.7.0, pluggy-0.8.0
rootdir: /home/markn/devel/python-prj/DailyCodingChallenge, inifile:
collected 1 item
codechallenge_017.py . [100%]
================================== 1 passed in 0.65 seconds ===================================
'''
| 36.227273
| 112
| 0.638331
|
6776a9e1a90e18fdc5722cb3e8e4959c79771638
| 2,108
|
py
|
Python
|
core/models.py
|
Armestrong/resale_api
|
75a798f2aa95b7d316e145f3811fa05537c606df
|
[
"MIT"
] | null | null | null |
core/models.py
|
Armestrong/resale_api
|
75a798f2aa95b7d316e145f3811fa05537c606df
|
[
"MIT"
] | null | null | null |
core/models.py
|
Armestrong/resale_api
|
75a798f2aa95b7d316e145f3811fa05537c606df
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import BaseUserManager, \
AbstractBaseUser, PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates a nee super user """
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
""" Custom user model that using email instead of username """
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class RealEstate(models.Model):
name = models.CharField(max_length=255)
address = models.CharField(max_length=255)
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
def __str__(self):
return self.name
class Property(models.Model):
"""Property object"""
name = models.CharField(max_length=255)
address = models.CharField(max_length=255)
description = models.CharField(max_length=255)
features = models.CharField(max_length=255, blank=True)
status = models.BooleanField(default=False)
type = models.CharField(max_length=255)
finality = models.CharField(max_length=255, blank=True)
real_estates = models.ManyToManyField('RealEstate')
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
def __str__(self):
return self.name
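# Example usage (added sketch, e.g. from a Django shell; credentials are illustrative):
# user = User.objects.create_user(email='user@example.com', password='secret')
# admin = User.objects.create_superuser(email='admin@example.com', password='secret')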
| 31.462687
| 76
| 0.686433
|
e29cc2c51658b00bdbc2b22c8cd108383155a434
| 3,113
|
py
|
Python
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/tests/python/unittest/test_autotvm_measure.py
|
mengkai94/training_results_v0.6
|
43dc3e250f8da47b5f8833197d74cb8cf1004fc9
|
[
"Apache-2.0"
] | 64
|
2021-05-02T14:42:34.000Z
|
2021-05-06T01:35:03.000Z
|
tests/python/unittest/test_autotvm_measure.py
|
clhne/tvm
|
d59320c764bd09474775e1b292f3c05c27743d24
|
[
"Apache-2.0"
] | 23
|
2019-07-29T05:21:52.000Z
|
2020-08-31T18:51:42.000Z
|
tests/python/unittest/test_autotvm_measure.py
|
clhne/tvm
|
d59320c764bd09474775e1b292f3c05c27743d24
|
[
"Apache-2.0"
] | 51
|
2019-07-12T05:10:25.000Z
|
2021-07-28T16:19:06.000Z
|
"""Test builder and runner"""
import logging
import time
import numpy as np
import tvm
from tvm import autotvm
from test_autotvm_common import get_sample_task, bad_matmul
from tvm.autotvm.measure.measure import Runner, MeasureResult, MeasureErrorNo
def test_task_tuner_without_measurement():
"""test task and tuner without measurement"""
task, target = get_sample_task()
class DummyRunner(Runner):
def __init__(self):
super(DummyRunner, self).__init__(1, 1)
def run(self, measure_inputs, build_results):
return [MeasureResult((np.random.random(),), 0, 0.2, time.time())
for _ in range(len(measure_inputs))]
def get_build_kwargs(self):
return {}
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=DummyRunner()
)
logging.info("%s", task.config_space)
for tuner_class in [autotvm.tuner.RandomTuner,
autotvm.tuner.GridSearchTuner,
autotvm.tuner.GATuner,
autotvm.tuner.XGBTuner]:
tuner = tuner_class(task)
tuner.tune(n_trial=10, measure_option=measure_option)
assert tuner.best_flops > 1
def test_check_correctness():
task, target = get_sample_task()
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(check_correctness=True)
)
def _callback_correct(tuner, measure_inputs, measure_results):
for inp, res in zip(measure_inputs, measure_results):
assert res.error_no == 0
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(n_trial=2, measure_option=measure_option,
callbacks=[_callback_correct])
# a bad template
n = 128
target = tvm.target.create("llvm -device=bad_device")
task = autotvm.task.create(bad_matmul, args=(n, n, n, 'float32'), target=target)
def _callback_wrong(tuner, measure_inputs, measure_results):
for inp, res in zip(measure_inputs, measure_results):
assert res.error_no == MeasureErrorNo.WRONG_ANSWER
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(n_trial=2, measure_option=measure_option,
callbacks=[_callback_wrong])
def test_min_repeat_ms():
task, target = get_sample_task()
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(number=1, min_repeat_ms=100)
)
def _callback(tuner, measure_inputs, measure_results):
for inp, res in zip(measure_inputs, measure_results):
if res.error_no != 0:
continue
assert 1000 * np.mean(res.costs) * \
measure_option['runner'].cur_number >= 100
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(n_trial=5, measure_option=measure_option,
callbacks=[_callback])
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
test_task_tuner_without_measurement()
test_check_correctness()
test_min_repeat_ms()
| 31.765306
| 84
| 0.668166
|
f646ddf258814d6277ec173cf1f790481709dd22
| 30,980
|
py
|
Python
|
pymc3_ext/variational/inference.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
pymc3_ext/variational/inference.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
pymc3_ext/variational/inference.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
import logging
import warnings
import collections
import numpy as np
from fastprogress.fastprogress import progress_bar
import pymc3_ext as pm
from pymc3_ext.variational import test_functions
from pymc3_ext.variational.approximations import (
MeanField,
FullRank,
Empirical,
NormalizingFlow,
)
from pymc3_ext.variational.operators import KL, KSD
from . import opvi
logger = logging.getLogger(__name__)
__all__ = [
"ADVI",
"FullRankADVI",
"SVGD",
"ASVGD",
"NFVI",
"Inference",
"ImplicitGradient",
"KLqp",
"fit",
]
State = collections.namedtuple("State", "i,step,callbacks,score")
class Inference:
r"""**Base class for Variational Inference**
Communicates Operator, Approximation and Test Function to build Objective Function
Parameters
----------
op : Operator class
approx : Approximation class or instance
tf : TestFunction instance
model : Model
PyMC3 Model
kwargs : kwargs passed to :class:`Operator`
"""
def __init__(self, op, approx, tf, **kwargs):
self.hist = np.asarray(())
self.objective = op(approx, **kwargs)(tf)
self.state = None
approx = property(lambda self: self.objective.approx)
def _maybe_score(self, score):
returns_loss = self.objective.op.returns_loss
if score is None:
score = returns_loss
elif score and not returns_loss:
warnings.warn(
"method `fit` got `score == True` but %s "
"does not return loss. Ignoring `score` argument" % self.objective.op
)
score = False
else:
pass
return score
def run_profiling(self, n=1000, score=None, **kwargs):
score = self._maybe_score(score)
fn_kwargs = kwargs.pop("fn_kwargs", dict())
fn_kwargs["profile"] = True
step_func = self.objective.step_function(
score=score, fn_kwargs=fn_kwargs, **kwargs
)
progress = progress_bar(range(n))
try:
for _ in progress:
step_func()
except KeyboardInterrupt:
pass
return step_func.profile
def fit(self, n=10000, score=None, callbacks=None, progressbar=True, **kwargs):
"""Perform Operator Variational Inference
Parameters
----------
n : int
number of iterations
score : bool
evaluate loss on each iteration or not
callbacks : list[function : (Approximation, losses, i) -> None]
calls provided functions after each iteration step
progressbar : bool
whether to show progressbar or not
Other Parameters
----------------
obj_n_mc : `int`
Number of monte carlo samples used for approximation of objective gradients
tf_n_mc : `int`
Number of monte carlo samples used for approximation of test function gradients
obj_optimizer : function (grads, params) -> updates
Optimizer that is used for objective params
test_optimizer : function (grads, params) -> updates
Optimizer that is used for test function params
more_obj_params : `list`
Add custom params for objective optimizer
more_tf_params : `list`
Add custom params for test function optimizer
more_updates : `dict`
Add custom updates to resulting updates
total_grad_norm_constraint : `float`
Bounds gradient norm, prevents exploding gradient problem
fn_kwargs : `dict`
Add kwargs to theano.function (e.g. `{'profile': True}`)
more_replacements : `dict`
Apply custom replacements before calculating gradients
Returns
-------
:class:`Approximation`
"""
if callbacks is None:
callbacks = []
score = self._maybe_score(score)
step_func = self.objective.step_function(score=score, **kwargs)
if progressbar:
progress = progress_bar(range(n), display=progressbar)
else:
progress = range(n)
if score:
state = self._iterate_with_loss(0, n, step_func, progress, callbacks)
else:
state = self._iterate_without_loss(0, n, step_func, progress, callbacks)
# hack to allow pm.fit() access to loss hist
self.approx.hist = self.hist
self.state = state
return self.approx
def _iterate_without_loss(self, s, _, step_func, progress, callbacks):
i = 0
try:
for i in progress:
step_func()
current_param = self.approx.params[0].get_value()
if np.isnan(current_param).any():
name_slc = []
tmp_hold = list(range(current_param.size))
vmap = self.approx.groups[0].bij.ordering.vmap
for vmap_ in vmap:
slclen = len(tmp_hold[vmap_.slc])
for j in range(slclen):
name_slc.append((vmap_.var, j))
index = np.where(np.isnan(current_param))[0]
errmsg = ["NaN occurred in optimization. "]
suggest_solution = (
"Try tracking this parameter: "
"http://docs.pymc.io/notebooks/variational_api_quickstart.html#Tracking-parameters"
)
try:
for ii in index:
errmsg.append(
"The current approximation of RV `{}`.ravel()[{}]"
" is NaN.".format(*name_slc[ii])
)
errmsg.append(suggest_solution)
except IndexError:
pass
raise FloatingPointError("\n".join(errmsg))
for callback in callbacks:
callback(self.approx, None, i + s + 1)
except (KeyboardInterrupt, StopIteration) as e:
if isinstance(e, StopIteration):
logger.info(str(e))
return State(i + s, step=step_func, callbacks=callbacks, score=False)
def _iterate_with_loss(self, s, n, step_func, progress, callbacks):
def _infmean(input_array):
"""Return the mean of the finite values of the array"""
input_array = input_array[np.isfinite(input_array)].astype("float64")
if len(input_array) == 0:
return np.nan
else:
return np.mean(input_array)
scores = np.empty(n)
scores[:] = np.nan
i = 0
try:
for i in progress:
e = step_func()
if np.isnan(e): # pragma: no cover
scores = scores[:i]
self.hist = np.concatenate([self.hist, scores])
current_param = self.approx.params[0].get_value()
name_slc = []
tmp_hold = list(range(current_param.size))
vmap = self.approx.groups[0].bij.ordering.vmap
for vmap_ in vmap:
slclen = len(tmp_hold[vmap_.slc])
for j in range(slclen):
name_slc.append((vmap_.var, j))
index = np.where(np.isnan(current_param))[0]
errmsg = ["NaN occurred in optimization. "]
suggest_solution = (
"Try tracking this parameter: "
"http://docs.pymc.io/notebooks/variational_api_quickstart.html#Tracking-parameters"
)
try:
for ii in index:
errmsg.append(
"The current approximation of RV `{}`.ravel()[{}]"
" is NaN.".format(*name_slc[ii])
)
errmsg.append(suggest_solution)
except IndexError:
pass
raise FloatingPointError("\n".join(errmsg))
scores[i] = e
if i % 10 == 0:
                    avg_loss = _infmean(scores[max(0, i - 1000): i + 1])
                    if hasattr(progress, 'comment'):
                        progress.comment = "Average Loss = {:,.5g}".format(avg_loss)
for callback in callbacks:
callback(self.approx, scores[: i + 1], i + s + 1)
except (KeyboardInterrupt, StopIteration) as e: # pragma: no cover
# do not print log on the same line
scores = scores[:i]
if isinstance(e, StopIteration):
logger.info(str(e))
if n < 10:
logger.info(
"Interrupted at {:,d} [{:.0f}%]: Loss = {:,.5g}".format(
i, 100 * i // n, scores[i]
)
)
else:
avg_loss = _infmean(scores[min(0, i - 1000): i + 1])
logger.info(
"Interrupted at {:,d} [{:.0f}%]: Average Loss = {:,.5g}".format(
i, 100 * i // n, avg_loss
)
)
else:
if n < 10:
logger.info("Finished [100%]: Loss = {:,.5g}".format(scores[-1]))
else:
avg_loss = _infmean(scores[max(0, i - 1000): i + 1])
logger.info("Finished [100%]: Average Loss = {:,.5g}".format(avg_loss))
self.hist = np.concatenate([self.hist, scores])
return State(i + s, step=step_func, callbacks=callbacks, score=True)
def refine(self, n, progressbar=True):
"""Refine the solution using the last compiled step function
"""
if self.state is None:
raise TypeError("Need to call `.fit` first")
i, step, callbacks, score = self.state
if progressbar:
progress = progress_bar(n, display=progressbar)
else:
progress = range(n) # This is a guess at what progress_bar(n) does.
if score:
state = self._iterate_with_loss(i, n, step, progress, callbacks)
else:
state = self._iterate_without_loss(i, n, step, progress, callbacks)
self.state = state
class KLqp(Inference):
"""**Kullback Leibler Divergence Inference**
General approach to fit Approximations that define :math:`logq`
by maximizing ELBO (Evidence Lower Bound). In some cases
rescaling the regularization term KL may be beneficial
.. math::
ELBO_\beta = \log p(D|\theta) - \beta KL(q||p)
Parameters
----------
approx : :class:`Approximation`
Approximation to fit, it is required to have `logQ`
beta : float
Scales the regularization term in ELBO (see Christopher P. Burgess et al., 2017)
References
----------
- Christopher P. Burgess et al. (NIPS, 2017)
Understanding disentangling in :math:`\beta`-VAE
arXiv preprint 1804.03599
"""
def __init__(self, approx, beta=1.0):
super().__init__(KL, approx, None, beta=beta)
class ADVI(KLqp):
r"""**Automatic Differentiation Variational Inference (ADVI)**
This class implements the meanfield ADVI, where the variational
posterior distribution is assumed to be spherical Gaussian without
correlation of parameters and fit to the true posterior distribution.
The means and standard deviations of the variational posterior are referred
to as variational parameters.
For explanation, we classify random variables in probabilistic models into
three types. Observed random variables
:math:`{\cal Y}=\{\mathbf{y}_{i}\}_{i=1}^{N}` are :math:`N` observations.
Each :math:`\mathbf{y}_{i}` can be a set of observed random variables,
i.e., :math:`\mathbf{y}_{i}=\{\mathbf{y}_{i}^{k}\}_{k=1}^{V_{o}}`, where
:math:`V_{k}` is the number of the types of observed random variables
in the model.
The next ones are global random variables
:math:`\Theta=\{\theta^{k}\}_{k=1}^{V_{g}}`, which are used to calculate
the probabilities for all observed samples.
The last ones are local random variables
:math:`{\cal Z}=\{\mathbf{z}_{i}\}_{i=1}^{N}`, where
:math:`\mathbf{z}_{i}=\{\mathbf{z}_{i}^{k}\}_{k=1}^{V_{l}}`.
These RVs are used only in AEVB.
The goal of ADVI is to approximate the posterior distribution
:math:`p(\Theta,{\cal Z}|{\cal Y})` by variational posterior
:math:`q(\Theta)\prod_{i=1}^{N}q(\mathbf{z}_{i})`. All of these terms
are normal distributions (mean-field approximation).
:math:`q(\Theta)` is parametrized with its means and standard deviations.
These parameters are denoted as :math:`\gamma`. While :math:`\gamma` is
a constant, the parameters of :math:`q(\mathbf{z}_{i})` are dependent on
each observation. Therefore these parameters are denoted as
:math:`\xi(\mathbf{y}_{i}; \nu)`, where :math:`\nu` is the parameters
of :math:`\xi(\cdot)`. For example, :math:`\xi(\cdot)` can be a
multilayer perceptron or convolutional neural network.
In addition to :math:`\xi(\cdot)`, we can also include deterministic
mappings for the likelihood of observations. We denote the parameters of
the deterministic mappings as :math:`\eta`. An example of such mappings is
the deconvolutional neural network used in the convolutional VAE example
in the PyMC3 notebook directory.
This function maximizes the evidence lower bound (ELBO)
:math:`{\cal L}(\gamma, \nu, \eta)` defined as follows:
.. math::
{\cal L}(\gamma,\nu,\eta) & =
\mathbf{c}_{o}\mathbb{E}_{q(\Theta)}\left[
\sum_{i=1}^{N}\mathbb{E}_{q(\mathbf{z}_{i})}\left[
\log p(\mathbf{y}_{i}|\mathbf{z}_{i},\Theta,\eta)
\right]\right] \\ &
- \mathbf{c}_{g}KL\left[q(\Theta)||p(\Theta)\right]
- \mathbf{c}_{l}\sum_{i=1}^{N}
KL\left[q(\mathbf{z}_{i})||p(\mathbf{z}_{i})\right],
where :math:`KL[q(v)||p(v)]` is the Kullback-Leibler divergence
.. math::
KL[q(v)||p(v)] = \int q(v)\log\frac{q(v)}{p(v)}dv,
:math:`\mathbf{c}_{o/g/l}` are vectors for weighting each term of ELBO.
More precisely, we can write each of the terms in ELBO as follows:
.. math::
\mathbf{c}_{o}\log p(\mathbf{y}_{i}|\mathbf{z}_{i},\Theta,\eta) & = &
\sum_{k=1}^{V_{o}}c_{o}^{k}
\log p(\mathbf{y}_{i}^{k}|
{\rm pa}(\mathbf{y}_{i}^{k},\Theta,\eta)) \\
\mathbf{c}_{g}KL\left[q(\Theta)||p(\Theta)\right] & = &
\sum_{k=1}^{V_{g}}c_{g}^{k}KL\left[
q(\theta^{k})||p(\theta^{k}|{\rm pa(\theta^{k})})\right] \\
\mathbf{c}_{l}KL\left[q(\mathbf{z}_{i}||p(\mathbf{z}_{i})\right] & = &
\sum_{k=1}^{V_{l}}c_{l}^{k}KL\left[
q(\mathbf{z}_{i}^{k})||
p(\mathbf{z}_{i}^{k}|{\rm pa}(\mathbf{z}_{i}^{k}))\right],
where :math:`{\rm pa}(v)` denotes the set of parent variables of :math:`v`
in the directed acyclic graph of the model.
When using mini-batches, :math:`c_{o}^{k}` and :math:`c_{l}^{k}` should be
set to :math:`N/M`, where :math:`M` is the number of observations in each
mini-batch. This is done with supplying `total_size` parameter to
observed nodes (e.g. :code:`Normal('x', 0, 1, observed=data, total_size=10000)`).
In this case it is possible to automatically determine appropriate scaling for :math:`logp`
of observed nodes. Interesting to note that it is possible to have two independent
observed variables with different `total_size` and iterate them independently
during inference.
For working with ADVI, we need to give
- The probabilistic model
`model` with three types of RVs (`observed_RVs`,
`global_RVs` and `local_RVs`).
- (optional) Minibatches
The tensors to which mini-bathced samples are supplied are
handled separately by using callbacks in :func:`Inference.fit` method
that change storage of shared theano variable or by :func:`pymc3_ext.generator`
that automatically iterates over minibatches and defined beforehand.
- (optional) Parameters of deterministic mappings
They have to be passed along with other params to :func:`Inference.fit` method
as `more_obj_params` argument.
For more information concerning training stage please reference
:func:`pymc3_ext.variational.opvi.ObjectiveFunction.step_function`
Parameters
----------
local_rv : dict[var->tuple]
mapping {model_variable -> approx params}
Local Vars are used for Autoencoding Variational Bayes
See (AEVB; Kingma and Welling, 2014) for details
model : :class:`pymc3.Model`
PyMC3 model for inference
random_seed : None or int
leave None to use package global RandomStream or other
valid value to create instance specific one
start : `Point`
starting point for inference
References
----------
- Kucukelbir, A., Tran, D., Ranganath, R., Gelman, A.,
and Blei, D. M. (2016). Automatic Differentiation Variational
Inference. arXiv preprint arXiv:1603.00788.
- Geoffrey Roeder, Yuhuai Wu, David Duvenaud, 2016
Sticking the Landing: A Simple Reduced-Variance Gradient for ADVI
approximateinference.org/accepted/RoederEtAl2016.pdf
- Kingma, D. P., & Welling, M. (2014).
Auto-Encoding Variational Bayes. stat, 1050, 1.
"""
def __init__(self, *args, **kwargs):
super().__init__(MeanField(*args, **kwargs))
class FullRankADVI(KLqp):
r"""**Full Rank Automatic Differentiation Variational Inference (ADVI)**
Parameters
----------
local_rv : dict[var->tuple]
mapping {model_variable -> approx params}
Local Vars are used for Autoencoding Variational Bayes
See (AEVB; Kingma and Welling, 2014) for details
model : :class:`pymc3.Model`
PyMC3 model for inference
random_seed : None or int
leave None to use package global RandomStream or other
valid value to create instance specific one
start : `Point`
starting point for inference
References
----------
- Kucukelbir, A., Tran, D., Ranganath, R., Gelman, A.,
and Blei, D. M. (2016). Automatic Differentiation Variational
Inference. arXiv preprint arXiv:1603.00788.
- Geoffrey Roeder, Yuhuai Wu, David Duvenaud, 2016
Sticking the Landing: A Simple Reduced-Variance Gradient for ADVI
approximateinference.org/accepted/RoederEtAl2016.pdf
- Kingma, D. P., & Welling, M. (2014).
Auto-Encoding Variational Bayes. stat, 1050, 1.
"""
def __init__(self, *args, **kwargs):
super().__init__(FullRank(*args, **kwargs))
class ImplicitGradient(Inference):
"""**Implicit Gradient for Variational Inference**
**not suggested to use**
An approach to fit arbitrary approximation by computing kernel based gradient
By default RBF kernel is used for gradient estimation. Default estimator is
Kernelized Stein Discrepancy with temperature equal to 1. This temperature works
only for large number of samples. Larger temperature is needed for small number of
samples but there is no theoretical approach to choose the best one in such case.
"""
def __init__(self, approx, estimator=KSD, kernel=test_functions.rbf, **kwargs):
super().__init__(op=estimator, approx=approx, tf=kernel, **kwargs)
class SVGD(ImplicitGradient):
r"""**Stein Variational Gradient Descent**
    This inference is based on Kernelized Stein Discrepancy;
    its main idea is to move initial noisy particles so that
    they fit the target distribution best.
Algorithm is outlined below
*Input:* A target distribution with density function :math:`p(x)`
and a set of initial particles :math:`\{x^0_i\}^n_{i=1}`
*Output:* A set of particles :math:`\{x^{*}_i\}^n_{i=1}` that approximates the target distribution.
.. math::
x_i^{l+1} &\leftarrow x_i^{l} + \epsilon_l \hat{\phi}^{*}(x_i^l) \\
\hat{\phi}^{*}(x) &= \frac{1}{n}\sum^{n}_{j=1}[k(x^l_j,x) \nabla_{x^l_j} logp(x^l_j)+ \nabla_{x^l_j} k(x^l_j,x)]
Parameters
----------
n_particles : `int`
number of particles to use for approximation
jitter : `float`
noise sd for initial point
model : :class:`pymc3.Model`
PyMC3 model for inference
kernel : `callable`
kernel function for KSD :math:`f(histogram) -> (k(x,.), \nabla_x k(x,.))`
temperature : float
parameter responsible for exploration, higher temperature gives more broad posterior estimate
start : `dict`
initial point for inference
random_seed : None or int
leave None to use package global RandomStream or other
valid value to create instance specific one
start : `Point`
starting point for inference
kwargs : other keyword arguments passed to estimator
References
----------
- Qiang Liu, Dilin Wang (2016)
Stein Variational Gradient Descent: A General Purpose Bayesian Inference Algorithm
arXiv:1608.04471
- Yang Liu, Prajit Ramachandran, Qiang Liu, Jian Peng (2017)
Stein Variational Policy Gradient
arXiv:1704.02399
"""
def __init__(
self,
n_particles=100,
jitter=1,
model=None,
start=None,
random_seed=None,
estimator=KSD,
kernel=test_functions.rbf,
**kwargs,
):
if kwargs.get("local_rv") is not None:
raise opvi.AEVBInferenceError("SVGD does not support local groups")
empirical = Empirical(
size=n_particles,
jitter=jitter,
start=start,
model=model,
random_seed=random_seed,
)
super().__init__(approx=empirical, estimator=estimator, kernel=kernel, **kwargs)
class ASVGD(ImplicitGradient):
r"""**Amortized Stein Variational Gradient Descent**
**not suggested to use**
    This inference is based on Kernelized Stein Discrepancy;
    its main idea is to move initial noisy particles so that
    they fit the target distribution best.
Algorithm is outlined below
*Input:* Parametrized random generator :math:`R_{\theta}`
*Output:* :math:`R_{\theta^{*}}` that approximates the target distribution.
.. math::
\Delta x_i &= \hat{\phi}^{*}(x_i) \\
\hat{\phi}^{*}(x) &= \frac{1}{n}\sum^{n}_{j=1}[k(x_j,x) \nabla_{x_j} logp(x_j)+ \nabla_{x_j} k(x_j,x)] \\
\Delta_{\theta} &= \frac{1}{n}\sum^{n}_{i=1}\Delta x_i\frac{\partial x_i}{\partial \theta}
Parameters
----------
approx : :class:`Approximation`
default is :class:`FullRank` but can be any
kernel : `callable`
kernel function for KSD :math:`f(histogram) -> (k(x,.), \nabla_x k(x,.))`
model : :class:`Model`
kwargs : kwargs for gradient estimator
References
----------
- Dilin Wang, Yihao Feng, Qiang Liu (2016)
Learning to Sample Using Stein Discrepancy
http://bayesiandeeplearning.org/papers/BDL_21.pdf
- Dilin Wang, Qiang Liu (2016)
Learning to Draw Samples: With Application to Amortized MLE for Generative Adversarial Learning
arXiv:1611.01722
- Yang Liu, Prajit Ramachandran, Qiang Liu, Jian Peng (2017)
Stein Variational Policy Gradient
arXiv:1704.02399
"""
def __init__(self, approx=None, estimator=KSD, kernel=test_functions.rbf, **kwargs):
warnings.warn(
"You are using experimental inference Operator. "
"It requires careful choice of temperature, default is 1. "
"Default temperature works well for low dimensional problems and "
"for significant `n_obj_mc`. Temperature > 1 gives more exploration "
"power to algorithm, < 1 leads to undesirable results. Please take "
"it in account when looking at inference result. Posterior variance "
"is often **underestimated** when using temperature = 1."
)
if approx is None:
approx = FullRank(
model=kwargs.pop("model", None), local_rv=kwargs.pop("local_rv", None)
)
super().__init__(estimator=estimator, approx=approx, kernel=kernel, **kwargs)
def fit(
self,
n=10000,
score=None,
callbacks=None,
progressbar=True,
obj_n_mc=500,
**kwargs,
):
return super().fit(
n=n,
score=score,
callbacks=callbacks,
progressbar=progressbar,
obj_n_mc=obj_n_mc,
**kwargs,
)
def run_profiling(self, n=1000, score=None, obj_n_mc=500, **kwargs):
return super().run_profiling(n=n, score=score, obj_n_mc=obj_n_mc, **kwargs)
class NFVI(KLqp):
r"""**Normalizing Flow based :class:`KLqp` inference**
Normalizing flow is a series of invertible transformations on initial distribution.
.. math::
z_K = f_K \circ \dots \circ f_2 \circ f_1(z_0)
In that case we can compute tractable density for the flow.
.. math::
\ln q_K(z_K) = \ln q_0(z_0) - \sum_{k=1}^{K}\ln \left|\frac{\partial f_k}{\partial z_{k-1}}\right|
Every :math:`f_k` here is a parametric function with defined determinant.
We can choose every step here. For example the here is a simple flow
is an affine transform:
.. math::
z = loc(scale(z_0)) = \mu + \sigma * z_0
Here we get mean field approximation if :math:`z_0 \sim \mathcal{N}(0, 1)`
**Flow Formulas**
    In PyMC3 there is a flexible way to define flows with formulas. We have 5 of them at the moment:
- Loc (:code:`loc`): :math:`z' = z + \mu`
- Scale (:code:`scale`): :math:`z' = \sigma * z`
- Planar (:code:`planar`): :math:`z' = z + u * \tanh(w^T z + b)`
- Radial (:code:`radial`): :math:`z' = z + \beta (\alpha + (z-z_r))^{-1}(z-z_r)`
- Householder (:code:`hh`): :math:`z' = H z`
    Formula can be written as a string, e.g. `'scale-loc'`, `'scale-hh*4-loc'`, `'planar*10'`.
Every step is separated with `'-'`, repeated flow is marked with `'*'` producing `'flow*repeats'`.
Parameters
----------
flow : str|AbstractFlow
formula or initialized Flow, default is `'scale-loc'` that
is identical to MeanField
model : :class:`pymc3.Model`
PyMC3 model for inference
random_seed : None or int
leave None to use package global RandomStream or other
valid value to create instance specific one
"""
def __init__(self, *args, **kwargs):
super().__init__(NormalizingFlow(*args, **kwargs))
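# Example (added sketch; `model` is a hypothetical PyMC3 model): NFVI('scale-hh*2-loc', model=model)
# builds a flow with a scaling step, two Householder steps and a location shift;
# the default 'scale-loc' formula is equivalent to the MeanField approximation,
# as described in the class docstring above.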
def fit(
n=10000,
local_rv=None,
method="advi",
model=None,
random_seed=None,
start=None,
inf_kwargs=None,
**kwargs,
):
r"""Handy shortcut for using inference methods in functional way
Parameters
----------
n : `int`
number of iterations
local_rv : dict[var->tuple]
mapping {model_variable -> approx params}
Local Vars are used for Autoencoding Variational Bayes
See (AEVB; Kingma and Welling, 2014) for details
method : str or :class:`Inference`
string name is case insensitive in:
- 'advi' for ADVI
- 'fullrank_advi' for FullRankADVI
- 'svgd' for Stein Variational Gradient Descent
- 'asvgd' for Amortized Stein Variational Gradient Descent
- 'nfvi' for Normalizing Flow with default `scale-loc` flow
- 'nfvi=<formula>' for Normalizing Flow using formula
model : :class:`Model`
PyMC3 model for inference
random_seed : None or int
leave None to use package global RandomStream or other
valid value to create instance specific one
inf_kwargs : dict
additional kwargs passed to :class:`Inference`
start : `Point`
starting point for inference
Other Parameters
----------------
score : bool
evaluate loss on each iteration or not
callbacks : list[function : (Approximation, losses, i) -> None]
calls provided functions after each iteration step
progressbar: bool
whether to show progressbar or not
obj_n_mc : `int`
Number of monte carlo samples used for approximation of objective gradients
tf_n_mc : `int`
Number of monte carlo samples used for approximation of test function gradients
obj_optimizer : function (grads, params) -> updates
Optimizer that is used for objective params
test_optimizer : function (grads, params) -> updates
Optimizer that is used for test function params
more_obj_params : `list`
Add custom params for objective optimizer
more_tf_params : `list`
Add custom params for test function optimizer
more_updates : `dict`
Add custom updates to resulting updates
total_grad_norm_constraint : `float`
Bounds gradient norm, prevents exploding gradient problem
fn_kwargs : `dict`
Add kwargs to theano.function (e.g. `{'profile': True}`)
more_replacements : `dict`
Apply custom replacements before calculating gradients
Returns
-------
:class:`Approximation`
"""
if inf_kwargs is None:
inf_kwargs = dict()
else:
inf_kwargs = inf_kwargs.copy()
if local_rv is not None:
inf_kwargs["local_rv"] = local_rv
if random_seed is not None:
inf_kwargs["random_seed"] = random_seed
if start is not None:
inf_kwargs["start"] = start
if model is None:
model = pm.modelcontext(model)
_select = dict(
advi=ADVI, fullrank_advi=FullRankADVI, svgd=SVGD, asvgd=ASVGD, nfvi=NFVI
)
if isinstance(method, str):
method = method.lower()
if method.startswith("nfvi="):
formula = method[5:]
inference = NFVI(formula, **inf_kwargs)
elif method in _select:
inference = _select[method](model=model, **inf_kwargs)
else:
raise KeyError(
f"method should be one of {set(_select.keys())} or Inference instance"
)
elif isinstance(method, Inference):
inference = method
else:
raise TypeError(
f"method should be one of {set(_select.keys())} or Inference instance"
)
return inference.fit(n, **kwargs)
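# Example usage (added sketch; `model` is a hypothetical PyMC3 model context):
#     with model:
#         approx = fit(n=10000, method="advi")
#         trace = approx.sample(1000)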
| 37.460701
| 120
| 0.595997
|
6a4befedd903104c8718d7218a09d36bcf076cd7
| 3,465
|
py
|
Python
|
terrascript/data/rancher/rancher2.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/rancher/rancher2.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/rancher/rancher2.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/rancher/rancher2.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:25:37 UTC)
import terrascript
class rancher2_app(terrascript.Data):
pass
class rancher2_catalog(terrascript.Data):
pass
class rancher2_catalog_v2(terrascript.Data):
pass
class rancher2_certificate(terrascript.Data):
pass
class rancher2_cloud_credential(terrascript.Data):
pass
class rancher2_cluster(terrascript.Data):
pass
class rancher2_cluster_alert_group(terrascript.Data):
pass
class rancher2_cluster_alert_rule(terrascript.Data):
pass
class rancher2_cluster_driver(terrascript.Data):
pass
class rancher2_cluster_logging(terrascript.Data):
pass
class rancher2_cluster_role_template_binding(terrascript.Data):
pass
class rancher2_cluster_scan(terrascript.Data):
pass
class rancher2_cluster_template(terrascript.Data):
pass
class rancher2_cluster_v2(terrascript.Data):
pass
class rancher2_etcd_backup(terrascript.Data):
pass
class rancher2_global_dns_provider(terrascript.Data):
pass
class rancher2_global_role(terrascript.Data):
pass
class rancher2_global_role_binding(terrascript.Data):
pass
class rancher2_multi_cluster_app(terrascript.Data):
pass
class rancher2_namespace(terrascript.Data):
pass
class rancher2_node_driver(terrascript.Data):
pass
class rancher2_node_pool(terrascript.Data):
pass
class rancher2_node_template(terrascript.Data):
pass
class rancher2_notifier(terrascript.Data):
pass
class rancher2_pod_security_policy_template(terrascript.Data):
pass
class rancher2_project(terrascript.Data):
pass
class rancher2_project_alert_group(terrascript.Data):
pass
class rancher2_project_alert_rule(terrascript.Data):
pass
class rancher2_project_logging(terrascript.Data):
pass
class rancher2_project_role_template_binding(terrascript.Data):
pass
class rancher2_registry(terrascript.Data):
pass
class rancher2_role_template(terrascript.Data):
pass
class rancher2_secret(terrascript.Data):
pass
class rancher2_secret_v2(terrascript.Data):
pass
class rancher2_setting(terrascript.Data):
pass
class rancher2_storage_class_v2(terrascript.Data):
pass
class rancher2_user(terrascript.Data):
pass
__all__ = [
"rancher2_app",
"rancher2_catalog",
"rancher2_catalog_v2",
"rancher2_certificate",
"rancher2_cloud_credential",
"rancher2_cluster",
"rancher2_cluster_alert_group",
"rancher2_cluster_alert_rule",
"rancher2_cluster_driver",
"rancher2_cluster_logging",
"rancher2_cluster_role_template_binding",
"rancher2_cluster_scan",
"rancher2_cluster_template",
"rancher2_cluster_v2",
"rancher2_etcd_backup",
"rancher2_global_dns_provider",
"rancher2_global_role",
"rancher2_global_role_binding",
"rancher2_multi_cluster_app",
"rancher2_namespace",
"rancher2_node_driver",
"rancher2_node_pool",
"rancher2_node_template",
"rancher2_notifier",
"rancher2_pod_security_policy_template",
"rancher2_project",
"rancher2_project_alert_group",
"rancher2_project_alert_rule",
"rancher2_project_logging",
"rancher2_project_role_template_binding",
"rancher2_registry",
"rancher2_role_template",
"rancher2_secret",
"rancher2_secret_v2",
"rancher2_setting",
"rancher2_storage_class_v2",
"rancher2_user",
]
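# Usage sketch (hedged): the usual python-terrascript pattern is to add these
# generated data-source classes to a terrascript.Terrascript() config. The
# label "downstream" and the name attribute below are placeholders, and the
# exact serialization helper (str(config) vs. config.dump()) varies by release.
#
#   import terrascript
#   from terrascript.data.rancher.rancher2 import rancher2_cluster
#
#   config = terrascript.Terrascript()
#   config += rancher2_cluster("downstream", name="my-cluster")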
| 17.953368 | 73 | 0.765657 |
1fa313065b367fbacede0465502d8e1fd14d89d1 | 21,000 | py | Python |
baselines/cifar/rank1_bnn.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | ["Apache-2.0"] | null | null | null |
baselines/cifar/rank1_bnn.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | ["Apache-2.0"] | null | null | null |
baselines/cifar/rank1_bnn.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rank-1 BNN Wide ResNet 28-10 on CIFAR-10 and CIFAR-100.
A Rank-1 Bayesian neural net (Rank-1 BNN) [1] is an efficient and scalable
approach to variational BNNs that posits prior distributions on rank-1 factors
of the weights and optimizes global mixture variational posterior distributions.
References:
[1]: Michael W. Dusenberry*, Ghassen Jerfel*, Yeming Wen, Yian Ma, Jasper
Snoek, Katherine Heller, Balaji Lakshminarayanan, Dustin Tran. Efficient
and Scalable Bayesian Neural Nets with Rank-1 Factors. In Proc. of
International Conference on Machine Learning (ICML) 2020.
https://arxiv.org/abs/2005.07186
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils # local file import from baselines.cifar
from tensorboard.plugins.hparams import api as hp
flags.DEFINE_integer('kl_annealing_epochs', 200,
'Number of epoch over which to anneal the KL term to 1.')
flags.DEFINE_string('alpha_initializer', 'trainable_normal',
'Initializer name for the alpha parameters.')
flags.DEFINE_string('gamma_initializer', 'trainable_normal',
'Initializer name for the gamma parameters.')
flags.DEFINE_string('alpha_regularizer', 'normal_kl_divergence',
'Regularizer name for the alpha parameters.')
flags.DEFINE_string('gamma_regularizer', 'normal_kl_divergence',
'Regularizer name for the gamma parameters.')
flags.DEFINE_boolean('use_additive_perturbation', False,
'Use additive perturbations instead of multiplicative.')
flags.DEFINE_float('dropout_rate', 1e-3,
'Dropout rate. Only used if alpha/gamma initializers are, '
'e.g., trainable normal.')
flags.DEFINE_float('prior_mean', 1., 'Prior mean.')
flags.DEFINE_float('prior_stddev', 0.1,
'Prior stddev. Sort of like a prior on dropout rate, where '
'it encourages defaulting/shrinking to this value.')
flags.DEFINE_integer('ensemble_size', 4, 'Size of ensemble.')
flags.DEFINE_float('random_sign_init', 0.5,
'Use random sign init for fast weights.')
flags.DEFINE_float('fast_weight_lr_multiplier', 1.0,
'fast weights lr multiplier.')
flags.DEFINE_integer('num_eval_samples', 1,
'Number of model predictions to sample per example at '
'eval time.')
# Redefining default values
flags.FLAGS.set_default('corruptions_interval', 250)
flags.FLAGS.set_default('train_epochs', 250)
flags.FLAGS.set_default('l2', 1e-4)
flags.FLAGS.set_default('lr_decay_epochs', ['80', '160', '180'])
FLAGS = flags.FLAGS
def main(argv):
del argv # unused arg
tf.io.gfile.makedirs(FLAGS.output_dir)
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
tf.random.set_seed(FLAGS.seed)
data_dir = FLAGS.data_dir
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
ds_info = tfds.builder(FLAGS.dataset).info
batch_size = ((FLAGS.per_core_batch_size // FLAGS.ensemble_size) *
FLAGS.num_cores)
train_dataset_size = ds_info.splits['train'].num_examples
steps_per_epoch = train_dataset_size // batch_size
test_dataset_size = ds_info.splits['test'].num_examples
steps_per_eval = test_dataset_size // batch_size
num_classes = ds_info.features['label'].num_classes
train_builder = ub.datasets.get(
FLAGS.dataset,
data_dir=data_dir,
download_data=FLAGS.download_data,
split=tfds.Split.TRAIN,
validation_percent=1. - FLAGS.train_proportion)
train_dataset = train_builder.load(batch_size=batch_size)
validation_dataset = None
steps_per_validation = 0
if FLAGS.train_proportion < 1.0:
validation_builder = ub.datasets.get(
FLAGS.dataset,
data_dir=data_dir,
download_data=FLAGS.download_data,
split=tfds.Split.VALIDATION,
validation_percent=1. - FLAGS.train_proportion)
validation_dataset = validation_builder.load(batch_size=batch_size)
validation_dataset = strategy.experimental_distribute_dataset(
validation_dataset)
steps_per_validation = validation_builder.num_examples // batch_size
clean_test_builder = ub.datasets.get(
FLAGS.dataset,
data_dir=data_dir,
download_data=FLAGS.download_data,
split=tfds.Split.TEST)
clean_test_dataset = clean_test_builder.load(batch_size=batch_size)
train_dataset = strategy.experimental_distribute_dataset(train_dataset)
test_datasets = {
'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
}
steps_per_epoch = train_builder.num_examples // batch_size
steps_per_eval = clean_test_builder.num_examples // batch_size
num_classes = 100 if FLAGS.dataset == 'cifar100' else 10
if FLAGS.corruptions_interval > 0:
if FLAGS.dataset == 'cifar100':
data_dir = FLAGS.cifar100_c_path
corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
for corruption_type in corruption_types:
for severity in range(1, 6):
dataset = ub.datasets.get(
f'{FLAGS.dataset}_corrupted',
corruption_type=corruption_type,
data_dir=data_dir,
severity=severity,
split=tfds.Split.TEST).load(batch_size=batch_size)
test_datasets[f'{corruption_type}_{severity}'] = (
strategy.experimental_distribute_dataset(dataset))
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
with strategy.scope():
logging.info('Building Keras model')
model = ub.models.wide_resnet_rank1(
input_shape=(32, 32, 3),
depth=28,
width_multiplier=10,
num_classes=num_classes,
alpha_initializer=FLAGS.alpha_initializer,
gamma_initializer=FLAGS.gamma_initializer,
alpha_regularizer=FLAGS.alpha_regularizer,
gamma_regularizer=FLAGS.gamma_regularizer,
use_additive_perturbation=FLAGS.use_additive_perturbation,
ensemble_size=FLAGS.ensemble_size,
random_sign_init=FLAGS.random_sign_init,
dropout_rate=FLAGS.dropout_rate,
prior_mean=FLAGS.prior_mean,
prior_stddev=FLAGS.prior_stddev)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Linearly scale learning rate and the decay epochs by vanilla settings.
base_lr = FLAGS.base_learning_rate * batch_size / 128
lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
for start_epoch_str in FLAGS.lr_decay_epochs]
lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
steps_per_epoch,
base_lr,
decay_ratio=FLAGS.lr_decay_ratio,
decay_epochs=lr_decay_epochs,
warmup_epochs=FLAGS.lr_warmup_epochs)
optimizer = tf.keras.optimizers.SGD(lr_schedule,
momentum=1.0 - FLAGS.one_minus_momentum,
nesterov=True)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/kl': tf.keras.metrics.Mean(),
'train/kl_scale': tf.keras.metrics.Mean(),
'train/elbo': tf.keras.metrics.Mean(),
'train/loss': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'train/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/kl': tf.keras.metrics.Mean(),
'test/elbo': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
}
eval_dataset_splits = ['test']
if validation_dataset:
metrics.update({
'validation/negative_log_likelihood': tf.keras.metrics.Mean(),
'validation/kl': tf.keras.metrics.Mean(),
'validation/elbo': tf.keras.metrics.Mean(),
'validation/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'validation/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
})
eval_dataset_splits += ['validation']
for i in range(FLAGS.ensemble_size):
for dataset_split in eval_dataset_splits:
metrics[f'{dataset_split}/nll_member_{i}'] = tf.keras.metrics.Mean()
metrics[f'{dataset_split}/accuracy_member_{i}'] = (
tf.keras.metrics.SparseCategoricalAccuracy())
if FLAGS.corruptions_interval > 0:
corrupt_metrics = {}
for intensity in range(1, 6):
for corruption in corruption_types:
dataset_name = '{0}_{1}'.format(corruption, intensity)
corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/kl_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/elbo_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
def compute_l2_loss(model):
filtered_variables = []
for var in model.trainable_variables:
# Apply l2 on the BN parameters and bias terms. This
# excludes only fast weight approximate posterior/prior parameters,
# but pay caution to their naming scheme.
if ('kernel' in var.name or
'batch_norm' in var.name or
'bias' in var.name):
filtered_variables.append(tf.reshape(var, (-1,)))
l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
return l2_loss
@tf.function
def train_step(iterator):
"""Training StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
labels = tf.tile(labels, [FLAGS.ensemble_size])
with tf.GradientTape() as tape:
logits = model(images, training=True)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(labels,
logits,
from_logits=True))
l2_loss = compute_l2_loss(model)
kl = sum(model.losses) / train_dataset_size
kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
kl_scale = tf.minimum(1., kl_scale)
kl_loss = kl_scale * kl
# Scale the loss given the TPUStrategy will reduce sum all gradients.
loss = negative_log_likelihood + l2_loss + kl_loss
scaled_loss = loss / strategy.num_replicas_in_sync
elbo = -(negative_log_likelihood + l2_loss + kl)
grads = tape.gradient(scaled_loss, model.trainable_variables)
# Separate learning rate implementation.
if FLAGS.fast_weight_lr_multiplier != 1.0:
grads_and_vars = []
for grad, var in zip(grads, model.trainable_variables):
          # Apply a different learning rate to the fast-weight approximate
          # posterior/prior parameters. This excludes BN and slow weights,
          # but be careful with the naming scheme.
if ('kernel' not in var.name and
'batch_norm' not in var.name and
'bias' not in var.name):
grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier, var))
else:
grads_and_vars.append((grad, var))
optimizer.apply_gradients(grads_and_vars)
else:
optimizer.apply_gradients(zip(grads, model.trainable_variables))
probs = tf.nn.softmax(logits)
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/kl'].update_state(kl)
metrics['train/kl_scale'].update_state(kl_scale)
metrics['train/elbo'].update_state(elbo)
metrics['train/loss'].update_state(loss)
metrics['train/accuracy'].update_state(labels, probs)
metrics['train/ece'].add_batch(probs, label=labels)
for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
strategy.run(step_fn, args=(next(iterator),))
@tf.function
def test_step(iterator, dataset_split, dataset_name, num_steps):
"""Evaluation StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
logits = tf.reshape(
[model(images, training=False)
for _ in range(FLAGS.num_eval_samples)],
[FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, num_classes])
probs = tf.nn.softmax(logits)
if FLAGS.ensemble_size > 1:
per_probs = tf.reduce_mean(probs, axis=0) # marginalize samples
for i in range(FLAGS.ensemble_size):
member_probs = per_probs[i]
member_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, member_probs)
metrics[f'{dataset_split}/nll_member_{i}'].update_state(member_loss)
metrics[f'{dataset_split}/accuracy_member_{i}'].update_state(
labels, member_probs)
# Negative log marginal likelihood computed in a numerically-stable way.
labels_broadcasted = tf.broadcast_to(
labels,
[FLAGS.num_eval_samples, FLAGS.ensemble_size, tf.shape(labels)[0]])
log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
labels_broadcasted, logits, from_logits=True)
negative_log_likelihood = tf.reduce_mean(
-tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) +
tf.math.log(float(FLAGS.num_eval_samples * FLAGS.ensemble_size)))
probs = tf.math.reduce_mean(probs, axis=[0, 1]) # marginalize
l2_loss = compute_l2_loss(model)
kl = sum(model.losses) / test_dataset_size
elbo = -(negative_log_likelihood + l2_loss + kl)
if dataset_name == 'clean':
metrics[f'{dataset_split}/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics[f'{dataset_split}/kl'].update_state(kl)
metrics[f'{dataset_split}/elbo'].update_state(elbo)
metrics[f'{dataset_split}/accuracy'].update_state(labels, probs)
metrics[f'{dataset_split}/ece'].add_batch(probs, label=labels)
else:
corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
negative_log_likelihood)
corrupt_metrics['test/kl_{}'.format(dataset_name)].update_state(kl)
corrupt_metrics['test/elbo_{}'.format(dataset_name)].update_state(elbo)
corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
labels, probs)
corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
probs, label=labels)
for _ in tf.range(tf.cast(num_steps, tf.int32)):
strategy.run(step_fn, args=(next(iterator),))
train_iterator = iter(train_dataset)
start_time = time.time()
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
train_step(train_iterator)
current_step = (epoch + 1) * steps_per_epoch
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
logging.info(message)
if validation_dataset:
validation_iterator = iter(validation_dataset)
test_step(
validation_iterator, 'validation', 'clean', steps_per_validation)
datasets_to_evaluate = {'clean': test_datasets['clean']}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
datasets_to_evaluate = test_datasets
for dataset_name, test_dataset in datasets_to_evaluate.items():
test_iterator = iter(test_dataset)
logging.info('Testing on dataset %s', dataset_name)
logging.info('Starting to run eval at epoch: %s', epoch)
test_step(test_iterator, 'test', dataset_name, steps_per_eval)
logging.info('Done with testing on %s', dataset_name)
corrupt_results = {}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
corruption_types)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
if FLAGS.ensemble_size > 1:
for i in range(FLAGS.ensemble_size):
logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
i, metrics['test/nll_member_{}'.format(i)].result(),
metrics['test/accuracy_member_{}'.format(i)].result()*100)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
# Metrics from Robustness Metrics (like ECE) will return a dict with a
# single key/value, instead of a scalar.
total_results = {
k: (list(v.values())[0] if isinstance(v, dict) else v)
for k, v in total_results.items()
}
with summary_writer.as_default():
for name, result in total_results.items():
tf.summary.scalar(name, result, step=epoch + 1)
for metric in metrics.values():
metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(
os.path.join(FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
final_checkpoint_name = checkpoint.save(
os.path.join(FLAGS.output_dir, 'checkpoint'))
logging.info('Saved last checkpoint to %s', final_checkpoint_name)
with summary_writer.as_default():
hp.hparams({
'base_learning_rate': FLAGS.base_learning_rate,
'one_minus_momentum': FLAGS.one_minus_momentum,
'l2': FLAGS.l2,
'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
'num_eval_samples': FLAGS.num_eval_samples,
})
if __name__ == '__main__':
app.run(main)
| 44.491525 | 80 | 0.672619 |
7496bc0a5db11df897273bbc4d797dbfbcb6d33d | 2,134 | py | Python |
airbyte-integrations/connectors/source-greenhouse/source_greenhouse/source.py | vagrantism/airbyte | b072a6d7bd6ea0b843ad7e98ff8f98dd3b8aa824 | ["MIT"] | 6,215 | 2020-09-21T13:45:56.000Z | 2022-03-31T21:21:45.000Z |
airbyte-integrations/connectors/source-greenhouse/source_greenhouse/source.py | vitoravancini/airbyte | a7ddd167f6c42905af16b6dac744bcf18354de19 | ["MIT"] | 8,448 | 2020-09-21T00:43:50.000Z | 2022-03-31T23:56:06.000Z |
airbyte-integrations/connectors/source-greenhouse/source_greenhouse/source.py | vitoravancini/airbyte | a7ddd167f6c42905af16b6dac744bcf18354de19 | ["MIT"] | 1,251 | 2020-09-20T05:48:47.000Z | 2022-03-31T10:41:29.000Z |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Any, List, Mapping, Tuple
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from requests.auth import HTTPBasicAuth
from source_greenhouse.streams import (
Applications,
ApplicationsInterviews,
Candidates,
CloseReasons,
CustomFields,
Degrees,
Departments,
Interviews,
JobPosts,
Jobs,
JobsOpenings,
JobsStages,
JobStages,
Offers,
RejectionReasons,
Scorecards,
Sources,
Users,
)
class SourceGreenhouse(AbstractSource):
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
try:
auth = HTTPBasicAuth(config["api_key"], "")
users_gen = Users(authenticator=auth).read_records(sync_mode=SyncMode.full_refresh)
next(users_gen)
return True, None
except Exception as error:
return False, f"Unable to connect to Greenhouse API with the provided credentials - {repr(error)}"
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
auth = HTTPBasicAuth(config["api_key"], "")
streams = [
Applications(authenticator=auth),
ApplicationsInterviews(authenticator=auth),
Candidates(authenticator=auth),
CloseReasons(authenticator=auth),
CustomFields(authenticator=auth),
Degrees(authenticator=auth),
Departments(authenticator=auth),
Interviews(authenticator=auth),
JobPosts(authenticator=auth),
JobStages(authenticator=auth),
Jobs(authenticator=auth),
JobsOpenings(authenticator=auth),
JobsStages(authenticator=auth),
Offers(authenticator=auth),
RejectionReasons(authenticator=auth),
Scorecards(authenticator=auth),
Sources(authenticator=auth),
Users(authenticator=auth),
]
return streams
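# Connection-check sketch (the api_key value below is a placeholder, not a
# real credential):
#
#   source = SourceGreenhouse()
#   ok, error = source.check_connection(AirbyteLogger(), {"api_key": "<harvest-api-key>"})
#   assert ok, error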
| 31.382353 | 110 | 0.656514 |
1c0aa1604f519962a11a563803754ae30395d547 | 3,452 | py | Python |
src/oscar/profiling/middleware.py | akiyoko/oscar_sandbox | b384f1c0b5f297fd4b84509a575f6766a48630a5 | ["BSD-3-Clause"] | 68 | 2016-11-06T05:07:57.000Z | 2021-12-17T09:17:38.000Z |
src/oscar/profiling/middleware.py | akiyoko/oscar_sandbox | b384f1c0b5f297fd4b84509a575f6766a48630a5 | ["BSD-3-Clause"] | null | null | null |
src/oscar/profiling/middleware.py | akiyoko/oscar_sandbox | b384f1c0b5f297fd4b84509a575f6766a48630a5 | ["BSD-3-Clause"] | 28 | 2016-12-04T07:12:50.000Z | 2021-02-06T21:13:15.000Z |
import cProfile
import hotshot
import hotshot.stats
import pstats
import sys
import tempfile
from cStringIO import StringIO
def profile_this(fn):
def profiled_fn(*args, **kwargs):
filepath = "/tmp/%s.profile" % fn.__name__
prof = cProfile.Profile()
ret = prof.runcall(fn, *args, **kwargs)
print("Writing to %s" % filepath)
prof.dump_stats(filepath)
print("Printing stats")
stats = pstats.Stats(filepath)
stats.sort_stats('cumulative')
stats.print_stats()
return ret
return profiled_fn
class BaseMiddleware(object):
query_param = None
def show_profile(self, request):
return self.query_param in request.GET
def process_request(self, request):
if self.show_profile(request):
if 'prof_file' in request.GET:
# It's sometimes useful to generate a file of output that can
# converted for use with kcachegrind. To convert this file,
# use:
#
# pyprof2calltree -o /tmp/callgrind.stats -i /tmp/out.stats
#
# then open the file in kcachegrind.
self.tmpfile = open('/tmp/out.stats', 'w')
else:
self.tmpfile = tempfile.NamedTemporaryFile()
self.profile = self.profiler()
def profiler(self):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
# We profile the view call - note that this misses the rest of Django's
# request processing (eg middleware etc)
if self.show_profile(request):
return self.profile.runcall(
callback, request, *callback_args, **callback_kwargs)
def process_response(self, request, response):
if self.show_profile(request):
stats = self.stats()
if 'prof_strip' in request.GET:
stats.strip_dirs()
if 'prof_sort' in request.GET:
# See
# http://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats # noqa
# for the fields you can sort on.
stats.sort_stats(*request.GET['prof_sort'].split(','))
else:
stats.sort_stats('time', 'calls')
# Capture STDOUT temporarily
old_stdout = sys.stdout
out = StringIO()
sys.stdout = out
stats.print_stats()
stats_str = out.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# Print status within PRE block
if response and response.content and stats_str:
response.content = "<pre>" + stats_str + "</pre>"
return response
class ProfileMiddleware(BaseMiddleware):
query_param = 'cprofile'
def profiler(self):
return cProfile.Profile()
def stats(self):
self.profile.dump_stats(self.tmpfile.name)
return pstats.Stats(self.tmpfile.name)
class HotshotMiddleware(BaseMiddleware):
"""
Displays hotshot profiling for any view.
http://yoursite.com/yourview/?prof
Based on http://djangosnippets.org/snippets/186/
"""
query_param = 'hotshot'
def profiler(self):
return hotshot.Profile(self.tmpfile.name)
def stats(self):
self.profile.close()
return hotshot.stats.load(self.tmpfile.name)
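# Usage sketch (assumes an old-style Django setup with object-based middleware,
# as implied by the process_* hooks above; the dotted path follows this file's
# location under src/oscar/):
#
#   MIDDLEWARE_CLASSES += ('oscar.profiling.middleware.ProfileMiddleware',)
#
# then request any view with ?cprofile appended (add prof_strip, prof_sort or
# prof_file=1 to strip paths, change sorting, or dump /tmp/out.stats for
# pyprof2calltree/kcachegrind), or use ?hotshot for the hotshot variant.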
| 30.017391 | 95 | 0.595886 |
4b24e0aad366f70b2ec047e627ab24b6616a571d | 165 | py | Python |
recipes/Python/473788_processing_options_program_that_runs_another/recipe-473788.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | ["MIT"] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z |
recipes/Python/473788_processing_options_program_that_runs_another/recipe-473788.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | ["MIT"] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z |
recipes/Python/473788_processing_options_program_that_runs_another/recipe-473788.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | ["MIT"] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z |
optparser = OptionParser()
...
optparser.disable_interspersed_args()
(opts, argv) = optparser.parse_args()
## argv now has the options to pass to the second program
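## A fuller sketch of the same idea (the -v option and the subprocess call are
## hypothetical additions; the original snippet leaves its own options elided):
##
##   from optparse import OptionParser
##   import subprocess
##
##   optparser = OptionParser(usage="%prog [options] command [args...]")
##   optparser.add_option("-v", "--verbose", action="store_true")
##   optparser.disable_interspersed_args()
##   (opts, argv) = optparser.parse_args()
##   # everything from the first positional argument on is left untouched,
##   # so the wrapped program receives its own options intact
##   subprocess.call(argv)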
| 27.5 | 57 | 0.763636 |
33d9906d1c5d703e926954736995e5ab6bd0c980 | 2,193 | py | Python |
hc/api/management/commands/sendalerts.py | aMugabi/healthchecks | df137bfdad57cb53bcf31bdc92056cb4f316f921 | ["BSD-3-Clause"] | 2 | 2018-06-21T18:11:04.000Z | 2018-06-22T14:52:42.000Z |
hc/api/management/commands/sendalerts.py | aMugabi/healthchecks | df137bfdad57cb53bcf31bdc92056cb4f316f921 | ["BSD-3-Clause"] | 34 | 2019-09-05T06:41:12.000Z | 2021-06-25T15:25:28.000Z |
hc/api/management/commands/sendalerts.py | aMugabi/healthchecks | df137bfdad57cb53bcf31bdc92056cb4f316f921 | ["BSD-3-Clause"] | 30 | 2017-04-22T07:09:56.000Z | 2019-06-30T08:24:01.000Z |
import logging
import time
from concurrent.futures import ThreadPoolExecutor
from django.core.management.base import BaseCommand
from django.db import connection
from django.utils import timezone
from hc.api.models import Check
executor = ThreadPoolExecutor(max_workers=10)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Sends UP/DOWN email alerts'
def handle_many(self):
""" Send alerts for many checks simultaneously. """
query = Check.objects.filter(user__isnull=False).select_related("user")
now = timezone.now()
going_down = query.filter(alert_after__lt=now, status="up")
going_up = query.filter(alert_after__gt=now, status="down")
# Don't combine this in one query so Postgres can query using index:
checks = list(going_down.iterator()) + list(going_up.iterator())
if not checks:
return False
futures = [executor.submit(self.handle_one, check) for check in checks]
for future in futures:
future.result()
return True
def handle_one(self, check):
""" Send an alert for a single check.
Return True if an appropriate check was selected and processed.
Return False if no checks need to be processed.
"""
# Save the new status. If sendalerts crashes,
# it won't process this check again.
check.status = check.get_status()
check.save()
tmpl = "\nSending alert, status=%s, code=%s\n"
self.stdout.write(tmpl % (check.status, check.code))
errors = check.send_alert()
for ch, error in errors:
self.stdout.write("ERROR: %s %s %s\n" % (ch.kind, ch.value, error))
connection.close()
return True
def handle(self, *args, **options):
self.stdout.write("sendalerts is now running")
ticks = 0
while True:
if self.handle_many():
ticks = 1
else:
ticks += 1
time.sleep(1)
if ticks % 60 == 0:
formatted = timezone.now().isoformat()
self.stdout.write("-- MARK %s --" % formatted)
| 30.887324 | 79 | 0.616507 |
d686ec9a38c3ca481092bfdfa076187e420af7e6 | 2,359 | py | Python |
src/rembg/cmd/server.py | prostakov/rembg | bc2680e7b7f3065b6a56d5b1f444c5a326a7fc4a | ["MIT"] | 1 | 2021-03-18T18:41:18.000Z | 2021-03-18T18:41:18.000Z |
src/rembg/cmd/server.py | SSardorf/rembg | 125adc690abfce2eb7094f92e6bcca79e8b097c6 | ["MIT"] | null | null | null |
src/rembg/cmd/server.py | SSardorf/rembg | 125adc690abfce2eb7094f92e6bcca79e8b097c6 | ["MIT"] | 1 | 2021-05-14T09:37:40.000Z | 2021-05-14T09:37:40.000Z |
import argparse
from io import BytesIO
from urllib.parse import unquote_plus
from urllib.request import urlopen
from flask import Flask, request, send_file
from waitress import serve
from ..bg import remove
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
file_content = ""
if request.method == "POST":
if "file" not in request.files:
return {"error": "missing post form param 'file'"}, 400
file_content = request.files["file"].read()
if request.method == "GET":
url = request.args.get("url", type=str)
if url is None:
return {"error": "missing query param 'url'"}, 400
file_content = urlopen(unquote_plus(url)).read()
    if not file_content:
return {"error": "File content is empty"}, 400
alpha_matting = "a" in request.values
af = request.values.get("af", type=int, default=240)
ab = request.values.get("ab", type=int, default=10)
ae = request.values.get("ae", type=int, default=10)
az = request.values.get("az", type=int, default=1000)
model = request.args.get("model", type=str, default="u2net")
if model not in ("u2net", "u2net_human_seg", "u2netp"):
return {"error": "invalid query param 'model'"}, 400
try:
return send_file(
BytesIO(
remove(
file_content,
model_name=model,
alpha_matting=alpha_matting,
alpha_matting_foreground_threshold=af,
alpha_matting_background_threshold=ab,
alpha_matting_erode_structure_size=ae,
alpha_matting_base_size=az,
)
),
mimetype="image/png",
)
except Exception as e:
app.logger.exception(e, exc_info=True)
return {"error": "oops, something went wrong!"}, 500
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"-a",
"--addr",
default="0.0.0.0",
type=str,
help="The IP address to bind to.",
)
ap.add_argument(
"-p",
"--port",
default=5000,
type=int,
help="The port to bind to.",
)
args = ap.parse_args()
serve(app, host=args.addr, port=args.port)
if __name__ == "__main__":
main()
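# Client-side sketch (assumes the server above is running locally on the
# default port; `requests` is an extra dependency, not used by this module):
#
#   import requests
#   with open("input.jpg", "rb") as f:
#       r = requests.post("http://localhost:5000/", files={"file": f})
#   with open("output.png", "wb") as out:
#       out.write(r.content)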
| 26.505618 | 67 | 0.570581 |
2e459beeaed959342dfedcab5d8a6271c8091eef | 443 | py | Python |
tests/functests.py | epigos/data-explorer | d90be65fe046b49025dc6fa422baa5c127dcb734 | ["MIT"] | 1 | 2017-01-18T08:40:40.000Z | 2017-01-18T08:40:40.000Z |
tests/functests.py | epigos/data-explorer | d90be65fe046b49025dc6fa422baa5c127dcb734 | ["MIT"] | null | null | null |
tests/functests.py | epigos/data-explorer | d90be65fe046b49025dc6fa422baa5c127dcb734 | ["MIT"] | null | null | null |
import unittest
from tornado.testing import AsyncHTTPTestCase
from dexplorer import DataExplorer
class TestDataServer(AsyncHTTPTestCase):
def get_app(self):
dte = DataExplorer()
return dte.make_app()
def test_homepage(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
self.assertIn("Data Explorer", str(response.body))
if __name__ == "__main__":
unittest.main()
| 20.136364 | 58 | 0.68623 |
f6a5212bab65f8494f2a60b7d03eedaca35bc11e | 36 | py | Python |
session/example.py | Navya-PampariRaghu/cis-demo | 298da7918689e3716ad558be2dcb197381654657 | ["MIT"] | null | null | null |
session/example.py | Navya-PampariRaghu/cis-demo | 298da7918689e3716ad558be2dcb197381654657 | ["MIT"] | null | null | null |
session/example.py | Navya-PampariRaghu/cis-demo | 298da7918689e3716ad558be2dcb197381654657 | ["MIT"] | 1 | 2021-12-05T02:57:02.000Z | 2021-12-05T02:57:02.000Z |
print("Hey, I am practicing this!!")
| 36 | 36 | 0.694444 |
746c0db4693ab751a24d7ab97e88601db714cf88 | 3,312 | py | Python |
gui/merge.py | mokojm/townshell | 3cb209892c63742ed4e20066e29a178a04c4f7ee | ["MIT"] | 34 | 2021-02-02T09:16:32.000Z | 2022-02-28T22:45:40.000Z |
gui/merge.py | mokojm/townshell | 3cb209892c63742ed4e20066e29a178a04c4f7ee | ["MIT"] | 1 | 2021-04-26T21:12:36.000Z | 2021-05-09T15:38:42.000Z |
gui/merge.py | mokojm/townshell | 3cb209892c63742ed4e20066e29a178a04c4f7ee | ["MIT"] | 1 | 2021-02-21T19:58:33.000Z | 2021-02-21T19:58:33.000Z |
from kivy.app import App
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.screenmanager import Screen
Builder.load_file(r"gui\merge.kv")
class BoxClip(FloatLayout):
def __init__(self, **kwargs):
self.util = App.get_running_app().util
super(BoxClip, self).__init__(**kwargs)
self.clip = ""
self.clipIsValid = False
def checkClip(self, *args):
if self.util.isclip(self.boxclip.text) is False:
self.boxclip.color = 1, 0, 0, 1
self.clipIsValid = False
else:
self.boxclip.color = 0, 0, 0, 1
self.clip = self.boxclip.text
self.clipIsValid = True
class MergeScreen(Screen):
def __init__(self, **kwargs):
self.util = App.get_running_app().util
super(MergeScreen, self).__init__(**kwargs)
self.amountBoxClip = 2
self.maxAmountBoxClip = 5
def add_boxclip(self):
if self.amountBoxClip < self.maxAmountBoxClip:
self.add_widget(BoxClip(pos_hint=self.add.pos_hint, size_hint=(0.5, 0.1)))
self.add.pos_hint = {
"x": self.add.pos_hint["x"],
"y": self.add.pos_hint["y"] - 0.1,
}
self.amountBoxClip += 1
def del_boxclip(self):
for child in self.children[:]:
if isinstance(child, BoxClip):
self.remove_widget(child)
self.add.pos_hint = {
"x": self.add.pos_hint["x"],
"y": self.add.pos_hint["y"] + 0.1,
}
self.amountBoxClip -= 1
break
def save_to_clipboard(self):
myPopUp = Factory.NotifPopUp()
myPopUp.title = "Merge"
if any(
[
isinstance(child, BoxClip) and child.clipIsValid is False
for child in self.walk(loopback=True)
]
):
myPopUp.level = "ERROR"
myPopUp.mytext = "One of the clip is wrong or not filled"
myPopUp.open()
else:
settings = {
"input" + str(i): child.clip
for i, child in enumerate(self.walk(loopback=True))
if isinstance(child, BoxClip)
}
settings['op'] = self.box_ope.ope.text[-2]
#print(settings)
if self.util.merge(settings):
myPopUp.level = "INFO"
myPopUp.mytext = "Click 'Load from Clipboard'"
else:
myPopUp.level = "ERROR"
myPopUp.mytext = "See 'town.log' for more information"
myPopUp.open()
def reset(self):
#Deleting widget
count = 0
for child in self.walk(loopback=True):
if isinstance(child, BoxClip):
count += 1
child.boxclip.text = ""
if count > 2:
self.remove_widget(child)
self.add.pos_hint = {
"x": self.add.pos_hint["x"],
"y": self.add.pos_hint["y"] + 0.1,
}
self.amountBoxClip -= 1
#reset operator
self.box_ope.ope.text = self.box_ope.ope.values[0]
| 30.666667 | 86 | 0.519626 |
e3d41d21a38beb9011fa8deb8f36620ac027210e | 9,341 | py | Python |
angola_erp/angola_erpnext/report/user_item_wise_sales_register/user_item_wise_sales_register.py | smehata/angola_erp | 51614992709476e353aef1c03099d78f2a7cedb2 | ["MIT"] | 4 | 2019-06-12T06:54:10.000Z | 2021-08-28T06:07:42.000Z |
angola_erp/angola_erpnext/report/user_item_wise_sales_register/user_item_wise_sales_register.py | smehata/angola_erp | 51614992709476e353aef1c03099d78f2a7cedb2 | ["MIT"] | 4 | 2017-08-24T17:33:45.000Z | 2017-09-24T16:54:01.000Z |
angola_erp/angola_erpnext/report/user_item_wise_sales_register/user_item_wise_sales_register.py | smehata/angola_erp | 51614992709476e353aef1c03099d78f2a7cedb2 | ["MIT"] | 4 | 2018-02-10T21:08:10.000Z | 2021-08-28T06:08:11.000Z |
# Copyright (c) 2013, Helio de Jesus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import flt
from frappe.model.meta import get_field_precision
from frappe.utils.xlsxutils import handle_html
from erpnext.accounts.report.sales_register.sales_register import get_mode_of_payments
def execute(filters=None):
return _execute(filters)
def _execute(filters=None, additional_table_columns=None, additional_query_columns=None):
if not filters: filters = {}
columns = get_columns(additional_table_columns)
company_currency = erpnext.get_company_currency(filters.company)
item_list = get_items(filters, additional_query_columns)
if item_list:
itemised_tax, tax_columns = get_tax_accounts(item_list, columns, company_currency)
columns.append({
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Data",
"width": 80
})
mode_of_payments = get_mode_of_payments(set([d.parent for d in item_list]))
so_dn_map = get_delivery_notes_against_sales_order(item_list)
data = []
for d in item_list:
delivery_note = None
if d.delivery_note:
delivery_note = d.delivery_note
elif d.so_detail:
delivery_note = ", ".join(so_dn_map.get(d.so_detail, []))
if not delivery_note and d.update_stock:
delivery_note = d.parent
row = [d.item_code, d.item_name, d.item_group, d.parent, d.posting_date, d.customer, d.customer_name, d.owner]
if additional_query_columns:
for col in additional_query_columns:
row.append(d.get(col))
row += [
d.customer_group, d.debit_to, ", ".join(mode_of_payments.get(d.parent, [])),
d.territory, d.project, d.company, d.sales_order,
delivery_note, d.income_account, d.cost_center, d.stock_qty, d.stock_uom
]
row += [(d.base_net_rate * d.qty)/d.stock_qty, d.base_net_amount] \
if d.stock_uom != d.uom else [d.base_net_rate, d.base_net_amount]
total_tax = 0
for tax in tax_columns:
item_tax = itemised_tax.get(d.name, {}).get(tax, {})
row += [item_tax.get("tax_rate", 0), item_tax.get("tax_amount", 0)]
total_tax += flt(item_tax.get("tax_amount"))
row += [total_tax, d.base_net_amount + total_tax, company_currency]
data.append(row)
return columns, data
def get_columns(additional_table_columns):
columns = [
_("Item Code") + ":Link/Item:120", _("Item Name") + "::120",
_("Item Group") + ":Link/Item Group:100", _("Invoice") + ":Link/Sales Invoice:120",
_("Posting Date") + ":Date:80", _("Customer") + ":Link/Customer:120",
_("Customer Name") + "::120",
_("User") + "::120"]
if additional_table_columns:
columns += additional_table_columns
columns += [
_("Customer Group") + ":Link/Customer Group:120",
_("Receivable Account") + ":Link/Account:120",
_("Mode of Payment") + "::120", _("Territory") + ":Link/Territory:80",
_("Project") + ":Link/Project:80", _("Company") + ":Link/Company:100",
_("Sales Order") + ":Link/Sales Order:100", _("Delivery Note") + ":Link/Delivery Note:100",
_("Income Account") + ":Link/Account:140", _("Cost Center") + ":Link/Cost Center:140",
_("Stock Qty") + ":Float:120", _("Stock UOM") + "::100",
_("Rate") + ":Currency/currency:120",
_("Amount") + ":Currency/currency:120"
]
return columns
def get_conditions(filters):
conditions = ""
for opts in (("company", " and company=%(company)s"),
("cost_center", " and `tabSales Invoice Item`.cost_center = %(cost_center)s"),
("customer", " and `tabSales Invoice`.customer = %(customer)s"),
("item_code", " and `tabSales Invoice Item`.item_code = %(item_code)s"),
("from_date", " and `tabSales Invoice`.posting_date>=%(from_date)s"),
("to_date", " and `tabSales Invoice`.posting_date<=%(to_date)s"),
("owner", " and `tabSales Invoice`.owner = %(owner)s")):
if filters.get(opts[0]):
conditions += opts[1]
if filters.get("mode_of_payment"):
conditions += """ and exists(select name from `tabSales Invoice Payment`
where parent=`tabSales Invoice`.name
and ifnull(`tabSales Invoice Payment`.mode_of_payment, '') = %(mode_of_payment)s)"""
return conditions
def get_items(filters, additional_query_columns):
conditions = get_conditions(filters)
match_conditions = frappe.build_match_conditions("Sales Invoice")
if match_conditions:
match_conditions = " and {0} ".format(match_conditions)
if additional_query_columns:
additional_query_columns = ', ' + ', '.join(additional_query_columns)
return frappe.db.sql("""
select
`tabSales Invoice Item`.name, `tabSales Invoice Item`.parent,
`tabSales Invoice`.posting_date, `tabSales Invoice`.debit_to,
`tabSales Invoice`.project, `tabSales Invoice`.customer, `tabSales Invoice`.remarks,
`tabSales Invoice`.territory, `tabSales Invoice`.company, `tabSales Invoice`.base_net_total,
`tabSales Invoice Item`.item_code, `tabSales Invoice Item`.item_name,
`tabSales Invoice Item`.item_group, `tabSales Invoice Item`.sales_order,
`tabSales Invoice Item`.delivery_note, `tabSales Invoice Item`.income_account,
`tabSales Invoice Item`.cost_center, `tabSales Invoice Item`.stock_qty,
`tabSales Invoice Item`.stock_uom, `tabSales Invoice Item`.base_net_rate,
`tabSales Invoice Item`.base_net_amount, `tabSales Invoice`.customer_name,
`tabSales Invoice`.customer_group, `tabSales Invoice Item`.so_detail,
`tabSales Invoice`.update_stock, `tabSales Invoice Item`.uom, `tabSales Invoice Item`.qty {0},
`tabSales Invoice`.owner
from `tabSales Invoice`, `tabSales Invoice Item`
where `tabSales Invoice`.name = `tabSales Invoice Item`.parent
and `tabSales Invoice`.docstatus = 1 %s %s
order by `tabSales Invoice`.posting_date desc, `tabSales Invoice Item`.cost_center desc, `tabSales Invoice Item`.item_code desc
""".format(additional_query_columns or '') % (conditions, match_conditions), filters, as_dict=1)
def get_delivery_notes_against_sales_order(item_list):
so_dn_map = frappe._dict()
so_item_rows = list(set([d.so_detail for d in item_list]))
if so_item_rows:
delivery_notes = frappe.db.sql("""
select parent, so_detail
from `tabDelivery Note Item`
where docstatus=1 and so_detail in (%s)
group by so_detail, parent
""" % (', '.join(['%s']*len(so_item_rows))), tuple(so_item_rows), as_dict=1)
for dn in delivery_notes:
so_dn_map.setdefault(dn.so_detail, []).append(dn.parent)
return so_dn_map
def get_tax_accounts(item_list, columns, company_currency,
doctype="Sales Invoice", tax_doctype="Sales Taxes and Charges"):
import json
item_row_map = {}
tax_columns = []
invoice_item_row = {}
itemised_tax = {}
tax_amount_precision = get_field_precision(frappe.get_meta(tax_doctype).get_field("tax_amount"),
currency=company_currency) or 2
for d in item_list:
invoice_item_row.setdefault(d.parent, []).append(d)
item_row_map.setdefault(d.parent, {}).setdefault(d.item_code or d.item_name, []).append(d)
conditions = ""
if doctype == "Purchase Invoice":
conditions = " and category in ('Total', 'Valuation and Total') and base_tax_amount_after_discount_amount != 0"
tax_details = frappe.db.sql("""
select
parent, description, item_wise_tax_detail,
charge_type, base_tax_amount_after_discount_amount
from `tab%s`
where
parenttype = %s and docstatus = 1
and (description is not null and description != '')
and parent in (%s)
%s
order by description
""" % (tax_doctype, '%s', ', '.join(['%s']*len(invoice_item_row)), conditions),
tuple([doctype] + invoice_item_row.keys()))
for parent, description, item_wise_tax_detail, charge_type, tax_amount in tax_details:
description = handle_html(description)
if description not in tax_columns and tax_amount:
# as description is text editor earlier and markup can break the column convention in reports
tax_columns.append(description)
if item_wise_tax_detail:
try:
item_wise_tax_detail = json.loads(item_wise_tax_detail)
for item_code, tax_data in item_wise_tax_detail.items():
itemised_tax.setdefault(item_code, frappe._dict())
if isinstance(tax_data, list):
tax_rate, tax_amount = tax_data
else:
tax_rate = tax_data
tax_amount = 0
if charge_type == "Actual" and not tax_rate:
tax_rate = "NA"
item_net_amount = sum([flt(d.base_net_amount)
for d in item_row_map.get(parent, {}).get(item_code, [])])
for d in item_row_map.get(parent, {}).get(item_code, []):
item_tax_amount = flt((tax_amount * d.base_net_amount) / item_net_amount) \
if item_net_amount else 0
if item_tax_amount:
itemised_tax.setdefault(d.name, {})[description] = frappe._dict({
"tax_rate": tax_rate,
"tax_amount": flt(item_tax_amount, tax_amount_precision)
})
except ValueError:
continue
elif charge_type == "Actual" and tax_amount:
for d in invoice_item_row.get(parent, []):
itemised_tax.setdefault(d.name, {})[description] = frappe._dict({
"tax_rate": "NA",
"tax_amount": flt((tax_amount * d.base_net_amount) / d.base_net_total,
tax_amount_precision)
})
tax_columns.sort()
for desc in tax_columns:
columns.append(desc + " Rate:Data:80")
columns.append(desc + " Amount:Currency/currency:100")
columns += ["Total Tax:Currency/currency:80", "Total:Currency/currency:100"]
return itemised_tax, tax_columns
| 37.514056 | 129 | 0.716626 |
14c112ca41ff83d6b0a74de3d4799bd0ccc44b07 | 563 | py | Python |
tests/conf_zephyr_tests.py | sriiora/tcf | e607ce04f97dbb4910d94428c0600a6a7145a825 | ["Apache-2.0"] | 24 | 2018-08-21T18:04:48.000Z | 2022-02-07T22:50:06.000Z |
tests/conf_zephyr_tests.py | sriiora/tcf | e607ce04f97dbb4910d94428c0600a6a7145a825 | ["Apache-2.0"] | 16 | 2018-08-21T18:03:52.000Z | 2022-03-01T17:15:42.000Z |
tests/conf_zephyr_tests.py | sriiora/tcf | e607ce04f97dbb4910d94428c0600a6a7145a825 | ["Apache-2.0"] | 29 | 2018-08-22T19:40:59.000Z | 2021-12-21T11:13:23.000Z |
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# We only use single BSP BSP models here, because with QEMU targets
# otherwise we have to muck around which is the right console
ttbl.config.target_add(tt_qemu_zephyr("za-01", [ "x86" ]),
target_type = "qemu-x86")
ttbl.config.target_add(tt_qemu_zephyr("zb-01", [ "arm" ]),
target_type = "qemu-arm")
ttbl.config.target_add(tt_qemu_zephyr("zc-01", [ "nios2" ]),
target_type = "qemu-nios2")
| 33.117647 | 67 | 0.630551 |
ae810d130874396842e2e9b0430580f1fcc2e7d9 | 1,846 | py | Python |
datapack/data/scripts/ai/individual/nurka.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | ["Unlicense"] | null | null | null |
datapack/data/scripts/ai/individual/nurka.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | ["Unlicense"] | null | null | null |
datapack/data/scripts/ai/individual/nurka.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | ["Unlicense"] | null | null | null |
import sys
from com.l2jfrozen.gameserver.ai import CtrlIntention
from com.l2jfrozen.gameserver.model.entity.siege.clanhalls import FortressOfResistance
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
from com.l2jfrozen.gameserver.managers import ClanHallManager
from com.l2jfrozen.util.random import Rnd
from java.lang import System
NURKA = 35368
MESSENGER = 35382
CLANLEADERS = []
class Nurka(JQuest):
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onTalk (self,npc,player):
global CLANLEADERS
npcId = npc.getNpcId()
if npcId == MESSENGER :
for clname in CLANLEADERS:
if player.getName() == clname :
return "<html><body>You already registered!</body></html>"
if FortressOfResistance.getInstance().Conditions(player) :
CLANLEADERS.append(player.getName())
return "<html><body>You have successful registered on a battle</body></html>"
else:
return "<html><body>Condition are not allow to do that!</body></html>"
return
def onAttack (self,npc,player,damage,isPet):
CLAN = player.getClan()
if CLAN == None :
return
CLANLEADER = CLAN.getLeader()
if CLANLEADER == None :
return
global CLANLEADERS
for clname in CLANLEADERS:
if clname <> None :
if CLANLEADER.getName() == clname :
FortressOfResistance.getInstance().addSiegeDamage(CLAN,damage)
return
def onKill(self,npc,player,isPet):
FortressOfResistance.getInstance().CaptureFinish()
return
QUEST = Nurka(-1, "nurka", "ai")
CREATED = State('Start', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addTalkId(MESSENGER)
QUEST.addStartNpc(MESSENGER)
QUEST.addAttackId(NURKA)
QUEST.addKillId(NURKA)
| 30.766667 | 86 | 0.733478 |